modeling_mega.py

# coding=utf-8
# Copyright 2023 The Mega Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch MEGA model."""

import math
from typing import List, Optional, Tuple, Union

import torch
import torch.nn.functional as F
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ....activations import ACT2FN
from ....modeling_outputs import (
    BaseModelOutputWithPoolingAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ....modeling_utils import PreTrainedModel
from ....pytorch_utils import ALL_LAYERNORM_LAYERS
from ....utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_mega import MegaConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "mnaylor/mega-base-wikitext"
_CONFIG_FOR_DOC = "MegaConfig"


class MegaEmbeddings(nn.Module):
    """
    Mega's basic implementation does not incorporate token type embeddings, so this is a stripped-down version of
    RoBERTa's embeddings which optionally includes token types
    """

    def __init__(self, config: MegaConfig):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.hidden_size, padding_idx=config.pad_token_id)
        self.use_token_types = config.add_token_type_embeddings
        if self.use_token_types:
            self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)
            # registering a buffer here allows model tracing when not passing optional token type IDs
            # more info at transformers issue #5664
            self.register_buffer(
                "token_type_ids", torch.zeros(config.max_positions, dtype=torch.long).expand((1, -1)), persistent=False
            )

        self.padding_idx = config.pad_token_id

    def forward(self, input_ids=None, token_type_ids=None, inputs_embeds=None):
        if (input_ids is None) and (inputs_embeds is None):
            raise ValueError("Must provide one of input_ids or inputs_embeds")
        elif input_ids is not None:
            input_shape = input_ids.size()
            device = input_ids.device

            # get the word embeddings if only IDs are provided
            inputs_embeds = self.word_embeddings(input_ids)
        else:
            input_shape = inputs_embeds.size()[:-1]
            device = inputs_embeds.device

        # the original Mega implementation did not include token type embeddings, so we add
        # an option to use them if desired; if embeddings are present and token type IDs are
        # not provided, we will use a registered buffer (which helps with tracing)
        if self.use_token_types:
            if token_type_ids is None:
                if hasattr(self, "token_type_ids"):
                    buffered_token_type_ids = self.token_type_ids[:, : input_shape[1]]
                    buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], input_shape[1])
                    token_type_ids = buffered_token_type_ids_expanded
                else:
                    token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

            # access token type embeddings
            token_type_embeddings = self.token_type_embeddings(token_type_ids)
            # add the token type embeddings to the word embeddings
            embeddings = inputs_embeds + token_type_embeddings
        else:
            embeddings = inputs_embeds
        return embeddings


class MegaSimpleRelativePositionalBias(nn.Module):
    """
    Simple relative positional embeddings copied from the Mega repo; renamed variables for better readability
    """

    def __init__(self, config: MegaConfig):
        super().__init__()
        self.config = config
        self.max_positions = self.config.max_positions if self.config.chunk_size < 0 else self.config.chunk_size
        self.rel_pos_bias = nn.Parameter(torch.Tensor(2 * config.max_positions - 1))

    def forward(self, seq_len):
        if seq_len > self.max_positions:
            raise ValueError("Sequence length {} going beyond max length {}".format(seq_len, self.max_positions))

        # seq_len * 2 - 1
        bias = self.rel_pos_bias[(self.max_positions - seq_len) : (self.max_positions + seq_len - 1)]
        # seq_len * 3 - 1
        tile = F.pad(bias, (0, seq_len))
        # (seq_len * 3 - 1) * seq_len
        tile = torch.tile(tile, (seq_len,))
        tile = tile[:-seq_len]
        # seq_len x (3 * seq_len - 2)
        tile = tile.view(seq_len, 3 * seq_len - 2)
        start = (2 * seq_len - 1) // 2
        end = tile.size(1) - start
        tile = tile[:, start:end]
        return tile


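# Illustrative sketch (not part of the original module): the pad/tile/view slicing above appears to be an
# efficient way of building the (seq_len x seq_len) matrix whose (i, j) entry is the learned bias for the
# relative offset (j - i). The naive construction below is only meant to clarify that indexing; the helper
# name `_naive_simple_relative_bias` is hypothetical, and `max_positions` mirrors `self.max_positions` as
# used in the forward pass above.
def _naive_simple_relative_bias(rel_pos_bias: torch.Tensor, max_positions: int, seq_len: int) -> torch.Tensor:
    # rel_pos_bias has length 2 * max_positions - 1 and is centered at index max_positions - 1 (offset 0)
    out = torch.empty(seq_len, seq_len, dtype=rel_pos_bias.dtype)
    for i in range(seq_len):
        for j in range(seq_len):
            out[i, j] = rel_pos_bias[max_positions - 1 + (j - i)]
    return out

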
class MegaRotaryRelativePositionalBias(nn.Module):
    """
    Rotary relative bias for positional information; similar in concept to RoPE (i.e. RoFormer) but taken from the
    Mega repo due to differences in implementation.

    When initialized, produces a positional bias which ranges from position 0 to config.max_positions, but can
    extrapolate to longer sequences. Can be indexed according to input position IDs
    """

    def __init__(self, config: MegaConfig):
        super().__init__()
        if config.hidden_size % 2 != 0:
            raise RuntimeError("Rotary positional bias requires `hidden_size` to be a multiple of 2")
        self.config = config
        self.embed_dim = config.shared_representation_size
        self.max_positions = self.config.max_positions if self.config.chunk_size < 0 else self.config.chunk_size
        self.sine, self.cosine = MegaRotaryRelativePositionalBias.get_sinusoid_embeddings(
            config.max_positions, self.embed_dim
        )
        # alpha and beta parameters for the rotary bias; beta renamed to b_param to avoid clashes with tf/flax weight
        # handling in loading pretrained weights
        self.alpha = nn.Parameter(torch.Tensor(1, self.embed_dim))
        self.b_param = nn.Parameter(torch.Tensor(1, self.embed_dim))
        self.register_buffer("_float_tensor", torch.FloatTensor([0.0]))

    @staticmethod
    def get_sinusoid_embeddings(max_positions: int, embedding_dim: int):
        half_dim = embedding_dim // 2
        emb = math.log(10000) / half_dim
        emb = torch.exp(torch.arange(half_dim, dtype=torch.int64).float() * -emb)
        emb = torch.arange(max_positions, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
        return torch.sin(emb), torch.cos(emb)

    def rotary(self, input):
        seq_len, embed_dim = input.size()
        chunk_1, chunk_2 = torch.chunk(input, 2, dim=-1)
        if self.sine is None or seq_len > self.sine.size(0):
            self.sine, self.cosine = MegaRotaryRelativePositionalBias.get_sinusoid_embeddings(seq_len, embed_dim)
            self.max_positions = seq_len
        self.sine = self.sine.to(self._float_tensor)
        self.cosine = self.cosine.to(self._float_tensor)

        sin = self.sine[:seq_len]
        cos = self.cosine[:seq_len]
        return torch.cat([chunk_1 * cos - chunk_2 * sin, chunk_2 * cos + chunk_1 * sin], dim=1)

    def forward(self, seq_len):
        rotary_alpha = self.rotary(self.alpha.expand(seq_len, self.embed_dim))
        rotary_beta = self.rotary(self.b_param.expand(seq_len, self.embed_dim))
        bias = torch.einsum("mk,nk->mn", rotary_alpha, rotary_beta)
        return bias


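# Illustrative sketch (not part of the original module): the rotation applied in
# `MegaRotaryRelativePositionalBias.rotary` mixes the two halves of the feature dimension with a
# position-dependent sine/cosine table, so the dot product of two rotated vectors depends on their relative
# position. The helper name `_rotate_by_position` is hypothetical and simply restates that rotation for a
# standalone (seq_len, embed_dim) tensor with an even embed_dim.
def _rotate_by_position(x: torch.Tensor) -> torch.Tensor:
    seq_len, embed_dim = x.size()
    sine, cosine = MegaRotaryRelativePositionalBias.get_sinusoid_embeddings(seq_len, embed_dim)
    first, second = torch.chunk(x, 2, dim=-1)
    # rotate the (first, second) pairs by an angle that grows with the position index
    return torch.cat([first * cosine - second * sine, second * cosine + first * sine], dim=-1)

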
class MegaDropout(nn.Module):
    """
    A unified class for standard dropout functionality and featurewise dropout.

    The original fairseq Mega repo used 2 classes for these, which included some unnecessary handling of training
    logic and an unused `inplace` option. The original implementation used torch.nn.functional instead of submodules,
    which is retained here as well.
    """

    def __init__(self, dropout_probability, is_featurewise=False):
        super().__init__()
        self.dropout_probability = dropout_probability
        self.is_featurewise = is_featurewise

    def forward(self, input, batch_first: bool = False):
        if self.is_featurewise:
            if batch_first:
                # (batch_size X sequence_length X feature_dimension)
                # -> (batch_size X feature_dimension X sequence_length)
                # -> (batch_size X sequence_length X feature_dimension)
                return F.dropout2d(
                    input.transpose(-1, -2), p=self.dropout_probability, training=self.training
                ).transpose(-1, -2)
            else:
                if input.dim() != 3:
                    raise ValueError(
                        "Feature dropout inputs must be exactly 3-dimensional if inputs are ordered [sequence length, batch size, hidden dimension]"
                    )
                # (sequence_length X batch_size X feature_dimension)
                # -> (batch_size X feature_dimension X sequence_length)
                # -> (sequence_length X batch_size X feature_dimension)
                return F.dropout2d(input.permute(1, 2, 0), p=self.dropout_probability, training=self.training).permute(
                    2, 0, 1
                )
        else:
            return F.dropout(input, p=self.dropout_probability, training=self.training)


class MegaRMSNorm(nn.Module):
    """
    RMSNorm used in Mega implementation. Differs from T5's RMSNorm by applying the weight prior to taking the square
    root (as opposed to after in T5)
    """

    def __init__(self, number_features, eps=1e-6, affine=True):
        super().__init__()
        self.num_features = number_features
        self.eps = eps
        self.affine = affine
        if affine:
            self.weight = nn.Parameter(torch.Tensor(self.num_features))
        else:
            self.register_parameter("weight", None)

    def forward(self, input):
        mean_square = torch.mean(torch.square(input), dim=-1, keepdim=True)
        if self.weight is not None:
            input = input * self.weight

        # assign the normalized result (the original expression discarded it)
        input = input * torch.rsqrt(mean_square + self.eps)
        return input

    def extra_repr(self):
        return f"{self.num_features}, eps={self.eps}, affine={self.affine}"


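# Illustrative sketch (not part of the original module): `MegaRMSNorm.forward` rescales each vector by the
# reciprocal of its root mean square after applying the affine weight. The helper name `_rms_norm_reference`
# is hypothetical; it is a plain restatement of the forward pass above for clarity.
def _rms_norm_reference(x: torch.Tensor, weight: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
    # root mean square over the last (feature) dimension
    rms = torch.sqrt(torch.mean(torch.square(x), dim=-1, keepdim=True) + eps)
    return (x * weight) / rms

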
class MegaScaleNorm(nn.Module):
    """
    Scale normalization introduced in MEGA which is similar to RMSNorm, but uses a single parameter for scalar
    multiplication instead of a vector, and applies over a specified dimension
    """

    def __init__(self, dim, eps=1e-6, affine=True):
        super().__init__()
        self.dim = dim
        self.eps = eps
        self.affine = affine
        if affine:
            self.scalar = nn.Parameter(torch.Tensor(1))
        else:
            self.register_parameter("scalar", None)

    def forward(self, input):
        mean_square = torch.mean(torch.square(input), dim=self.dim, keepdim=True)
        if self.scalar is not None:
            input = self.scalar * input

        output = input * torch.rsqrt(mean_square + self.eps)
        return output


class MegaSequenceNorm(nn.Module):
    """
    A wrapper class for various layer normalization options used in Mega. Used to handle differences in expectations
    on input axis locations for different normalization methods.
    """

    def __init__(self, norm_type, embedding_dim, eps=1e-5, affine=True, export=False):
        super().__init__()
        if norm_type == "layernorm":
            self.norm = nn.LayerNorm(embedding_dim, eps, elementwise_affine=affine)
        elif norm_type == "scalenorm":
            self.norm = MegaScaleNorm(dim=-1, eps=eps, affine=affine)
        elif norm_type == "rmsnorm":
            self.norm = MegaRMSNorm(embedding_dim, eps=eps, affine=affine)
        elif norm_type == "batchnorm":
            self.norm = nn.BatchNorm1d(embedding_dim, eps=eps, affine=affine)
        elif norm_type == "syncbatchnorm":
            self.norm = nn.SyncBatchNorm(embedding_dim, eps=eps, affine=affine)
        else:
            raise ValueError("Unknown norm type: {}".format(norm_type))

    def forward(self, input):
        if isinstance(self.norm, nn.modules.batchnorm._BatchNorm):
            if input.dim() != 3:
                raise ValueError("BatchNorm inputs must be exactly 3-dimensional")
            input = input.permute(1, 2, 0)
            input = self.norm(input)
            return input.permute(2, 0, 1)
        else:
            return self.norm(input)


# add this layernorm class to ALL_LAYERNORM_LAYERS
ALL_LAYERNORM_LAYERS.append(MegaSequenceNorm)


class MegaMultiDimensionDampedEma(nn.Module):
    """
    Mega's Exponential Moving Average layer, largely left unmodified from the original repo with the exception of
    variable names and moving away from the stateful representation of incremental decoding state. See
    "https://arxiv.org/abs/2209.10655" for more details.
    """

    def __init__(self, config: MegaConfig):
        super().__init__()

        self.config = config

        self.embed_dim = config.hidden_size
        self.ndim = config.ema_projection_size
        self.bidirectional = config.bidirectional
        self.truncation = config.truncation
        self.scale = math.sqrt(1.0 / self.ndim)

        kernel_dim = 2 * config.hidden_size if self.bidirectional else config.hidden_size
        # renamed delta (damping_factor) and alpha (decay_factor) to be more descriptive of what the parameters are doing
        self.damping_factor = nn.Parameter(torch.Tensor(kernel_dim, self.ndim, 1))
        self.decay_factor = nn.Parameter(torch.Tensor(kernel_dim, self.ndim, 1))
        # renamed gamma (kernel_projection_matrix) and beta (ema_expansion_matrix) respectively to avoid HF renaming
        # things and align with the paper's description of these params' behavior
        self.ema_expansion_matrix = nn.Parameter(torch.Tensor(kernel_dim, self.ndim, 1))
        self.kernel_projection_matrix = nn.Parameter(torch.Tensor(kernel_dim, self.ndim))
        # renamed omega to residual_weight to describe what it's doing
        self.residual_weight = nn.Parameter(torch.Tensor(config.hidden_size))
        self._kernel = None
        self._coeffs = None

    def _compute_ema_coefficients(self):
        self._coeffs = None
        # convert the alpha and delta parameters (kernel_dim x EMA projection size x 1) to [0, 1] with sigmoid
        damping_factor = torch.sigmoid(self.damping_factor)
        decay_factor = torch.sigmoid(self.decay_factor)
        previous_timestep_weight = 1.0 - damping_factor * decay_factor
        return damping_factor, previous_timestep_weight

    def _compute_efficient_ema_kernel(self, length: int):
        # computes the kernel used for efficient damped EMA applied via FFT convolution
        self._kernel = None
        # p and q have shape (kernel_dim x ema_projection_size x 1)
        damping_factor, previous_timestep_weight = self._compute_ema_coefficients()
        # extend the kernel to (kernel_dim X ema_projection_size X sequence_length) and
        # multiply q by sequential ints up to the sequence length
        vander = torch.arange(length).to(damping_factor).view(1, 1, length) * torch.log(previous_timestep_weight)
        kernel = (damping_factor * self.ema_expansion_matrix) * torch.exp(vander)
        # (kernel_dim X ema_projection_size X sequence_length) -> (kernel_dim, sequence_length)
        return torch.einsum("dnl,dn->dl", kernel, self.kernel_projection_matrix * self.scale)

    def get_ema_coefficients(self):
        if self.training:
            return self._compute_ema_coefficients()
        else:
            if self._coeffs is None:
                self._coeffs = self._compute_ema_coefficients()
            return self._coeffs

    def get_ema_kernel(self, length: int):
        kernel_size = length if self.truncation is None else min(self.truncation, length)
        if self.training:
            return self._compute_efficient_ema_kernel(kernel_size)
        else:
            if self._kernel is None or self._kernel.size(-1) < kernel_size:
                self._kernel = self._compute_efficient_ema_kernel(kernel_size)
            return self._kernel[..., :kernel_size]

    def fft_convolution(self, inputs, kernel, length):
        # this is a wrapper for repeated use of EMA calculation via FFT (fast Fourier transform) convolution
        inputs_fft = torch.fft.rfft(inputs.float(), n=2 * length)
        kernel_fft = torch.fft.rfft(kernel.float(), n=2 * length)
        convolved_sequence = torch.fft.irfft(inputs_fft * kernel_fft, n=2 * length)
        return convolved_sequence

    def ema_step(self, inputs, length, past_state=None):
        if length == 1:
            return self.one_ema_step(inputs, past_state=past_state)

        # (kernel_dim X ema_projection_size X 1)
        damping_factor, previous_timestep_weight = self.get_ema_coefficients()
        # (kernel_dim X ema_projection_size X 1+sequence_length)
        vander = torch.arange(length + 1).to(damping_factor).view(1, 1, length + 1) * torch.log(
            previous_timestep_weight
        )
        vander = torch.exp(vander)
        if past_state is not None:
            # (kernel_dim X ema_projection_size X sequence_length) * (kernel_dim X ema_projection_size X 1)
            # -> (kernel_dim X ema_projection_size X sequence_length)
            past_ema_proj = vander[:, :, 1:] * (self.kernel_projection_matrix * self.scale).unsqueeze(-1)
            # past_state will be (batch_size, kernel_dim, ema_projection_size)
            past_ema_state = torch.einsum("bdn,dnl->bdl", past_state, past_ema_proj)
            # (kernel_dim X ema_projection_size) * (batch_size X kernel_dim X ema_projection_size)
            # -> (batch_size X kernel_dim X ema_projection_size)
            past_vandermonde = vander[:, :, -1] * past_state
        else:
            past_ema_state = None
            past_vandermonde = None

        # (kernel_dim X ema_projection_size X sequence_length)
        vander = vander[:, :, :-1]
        kernel = (damping_factor * self.ema_expansion_matrix) * vander
        kernel_proj = torch.einsum("dnl,dn->dl", kernel, self.kernel_projection_matrix * self.scale)

        ema_output = self.fft_convolution(inputs, kernel_proj, length=length)[..., 0:length]
        ema_output = ema_output.type_as(inputs)
        if past_ema_state is not None:
            ema_output = ema_output + past_ema_state

        updated_hidden_state = torch.einsum("bdl,dnl->bdn", inputs, torch.flip(kernel, dims=[2]))
        if past_vandermonde is not None:
            updated_hidden_state = updated_hidden_state + past_vandermonde
        # return a tuple:
        # (sequence_length, batch_size, kernel_dim)
        # (batch_size, kernel_dim, ema_projection_size)
        return ema_output.permute(2, 0, 1), updated_hidden_state

    def one_ema_step(self, inputs, past_state=None):
        damping_factor, previous_timestep_weight = self.get_ema_coefficients()
        # (kernel_dim X ema_projection_size) x (batch_size X kernel_dim X 1)
        # -> (batch_size X kernel_dim X ema_projection_size)
        updated_state = (damping_factor * self.ema_expansion_matrix).squeeze(-1) * inputs
        if past_state is not None:
            updated_state = updated_state + previous_timestep_weight.squeeze(-1) * past_state
        # (batch_size X kernel_dim)
        out = torch.einsum("bdn,dn->bd", updated_state, self.kernel_projection_matrix * self.scale)
        # (1 X batch_size X kernel_dim), (batch_size X kernel_dim X ema_projection_size)
        return out.unsqueeze(0), updated_state

    def forward(
        self,
        inputs,
        attention_mask: Optional[torch.Tensor] = None,
        prev_state: Optional[torch.Tensor] = None,
        use_cache: bool = False,
    ) -> torch.Tensor:
        """
        Mega's exponential moving average (EMA) sub-layer applied prior to single-headed (traditional) self-attention

        Args:
            inputs (`torch.Tensor` of shape `(sequence_length, batch_size, hidden_size)`):
                Hidden state / embedding input to update via EMA based on FFT convolution
            attention_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Indicates which inputs are to be ignored (mostly due to padding), where elements are either 1 for *not
                masked* or 0 for *masked*
            prev_state (`torch.Tensor` of shape `(batch_size, config.ndim)`, *optional*):
                The hidden state returned from the previous timestep during incremental decoding.
            use_cache (`bool`, default `False`):
                Whether to perform incremental decoding; uses `prev_state` as the prior timestep, and returns the
                updated EMA hidden state for use in the next step

        Returns:
            `tuple(torch.FloatTensor)` containing various elements depending on configuration ([`MegaConfig`]) and
            inputs:

            - **hidden_states** (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`) -- Hidden
              states updated by EMA, with same shapes as inputs
            - **updated_state** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape
              `(batch_size, config.ndim)` -- The incremental EMA state for use in the next step of incremental decoding
        """

        seq_len, bsz, embed_dim = inputs.size()
        if embed_dim != self.embed_dim:
            raise ValueError(
                f"Unexpected embedding dimension received: input is {embed_dim}, model expects {self.embed_dim}"
            )

        # sequence_length X batch_size X hidden_size
        residual = inputs * self.residual_weight

        # (sequence_length x batch_size x hidden_size) -> (batch_size x hidden_size x sequence_length)
        inputs = inputs.permute(1, 2, 0)
        # mask the input: output is a tensor with 0 in the masked positions
        if attention_mask is not None:
            inputs = inputs * (attention_mask.unsqueeze(1).type_as(inputs))

        if self.bidirectional and use_cache:
            raise RuntimeError("Bidirectional EMA does not support incremental state")

        if use_cache:
            out, updated_state = self.ema_step(inputs, seq_len, past_state=prev_state)

            # (batch_size X hidden_size) -> (1 x batch_size x hidden_size)
            out = F.silu(out + residual)

            # if incremental decoding, return the new state along with the output
            return out, updated_state
        else:
            # (hidden_size x sequence_length)
            kernel = self.get_ema_kernel(seq_len)
            fft_len = seq_len
            s_index = 0
            kernel_size = kernel.size(1)
            if self.bidirectional:
                # split the kernel for each direction of EMA
                k1, k2 = torch.split(kernel, [self.embed_dim, self.embed_dim], dim=0)
                # (hidden_size X 2*sequence_length - 1)
                kernel = F.pad(k1, (kernel_size - 1, 0)) + F.pad(k2.flip(-1), (0, kernel_size - 1))
                inputs = F.pad(inputs, (kernel_size - 1, 0))
                fft_len = fft_len + kernel_size - 1
                s_index = 2 * kernel_size - 2

            ema_output = self.fft_convolution(inputs, kernel, length=fft_len)[..., s_index : s_index + seq_len]
            ema_output = ema_output.type_as(inputs)
            # (batch_size X hidden_size X sequence_length) -> (sequence_length X batch_size X hidden_size)
            gated_ema_output = F.silu(ema_output.permute(2, 0, 1) + residual)

            return gated_ema_output, None


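# Illustrative sketch (not part of the original module): for a single 1-D channel, the damped-EMA recurrence
# and the convolution kernel materialized in `_compute_efficient_ema_kernel` compute the same sequence. The
# helper names `_ema_recurrence` and `_ema_as_convolution` are hypothetical and use plain Python scalars in
# place of the learned per-dimension parameters (damping and previous-timestep weight).
def _ema_recurrence(x: torch.Tensor, damping: float, prev_weight: float) -> torch.Tensor:
    # h[t] = prev_weight * h[t-1] + damping * x[t], starting from h[-1] = 0
    hidden = 0.0
    outputs = []
    for value in x.tolist():
        hidden = prev_weight * hidden + damping * value
        outputs.append(hidden)
    return torch.tensor(outputs)


def _ema_as_convolution(x: torch.Tensor, damping: float, prev_weight: float) -> torch.Tensor:
    # kernel[l] = damping * prev_weight**l, applied as a causal convolution (the FFT path computes the same sums)
    length = x.size(0)
    kernel = damping * prev_weight ** torch.arange(length, dtype=x.dtype)
    return torch.stack([torch.sum(kernel[: t + 1].flip(0) * x[: t + 1]) for t in range(length)])

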
class MegaGatedCrossAttention(nn.Module):
    """
    Gated Structured State Attention for use in encoder-decoder model. See Mega paper for more details. Only
    modifications from original implementation are variable names, removing the unnecessary `before_attn_fn` and
    `static_kv` arguments, and the stateful representation of incremental decoder state.
    """

    def __init__(self, config: MegaConfig):
        super().__init__()

        self.config = config
        self.activation = ACT2FN[self.config.activation]
        self.attention_activation = self.config.attention_activation
        self.scaling = (
            self.config.shared_representation_size**-0.5 if self.attention_activation == "softmax" else None
        )

        self.dropout = MegaDropout(self.config.dropout_prob, is_featurewise=self.config.use_feature_dropout)
        self.hidden_dropout = MegaDropout(
            self.config.hidden_dropout_prob, is_featurewise=self.config.use_feature_dropout
        )
        # Attention dropout is standard dropout
        self.attention_dropout = MegaDropout(self.config.attention_probs_dropout_prob, is_featurewise=False)

        self.prenorm = self.config.normalize_before_mega
        self.norm = MegaSequenceNorm(
            self.config.normalization_type, self.config.hidden_size, affine=self.config.norm_affine
        )

        self.k_proj = nn.Linear(self.config.hidden_size, self.config.shared_representation_size)
        self.v_proj = nn.Linear(self.config.hidden_size, self.config.hidden_size)
        self.q_proj = nn.Linear(
            self.config.hidden_size, 2 * self.config.hidden_size + self.config.shared_representation_size
        )
        self.h_proj = nn.Linear(self.config.hidden_size, self.config.hidden_size)

        if self.config.relative_positional_bias == "simple":
            self.rel_pos_bias = MegaSimpleRelativePositionalBias(config)
        elif self.config.relative_positional_bias == "rotary":
            self.rel_pos_bias = MegaRotaryRelativePositionalBias(config)
        else:
            raise ValueError("unknown relative position bias: {}".format(self.config.relative_positional_bias))

        self.softmax = nn.Softmax(dim=-1)

    def element_attention(self, query, key, key_padding_mask, pidx):
        bsz, src_len, _ = key.size()
        tgt_len = query.size(1) if pidx is None else pidx + 1
        if key_padding_mask is not None:
            # (batch_size X source_sequence_length) --> (batch_size X 1 X 1)
            lengths = key_padding_mask.sum(dim=-1).view(bsz, 1, 1)
        else:
            lengths = src_len

        # (target_sequence_length X source_sequence_length)
        bias = self.rel_pos_bias(max(tgt_len, src_len))[:, :src_len]
        if pidx is not None:
            if query.size(1) != 1:
                raise ValueError("Position offset provided with queries longer than 1 token")
            # source_sequence_length
            bias = bias[pidx]
        else:
            # (target_sequence_length X source_sequence_length)
            bias = bias[:tgt_len]

        # (batch_size X target_sequence_length X source_sequence_length)
        qk = torch.bmm(query, key.transpose(1, 2)) / lengths + bias

        attn_weights = ACT2FN[self.attention_activation](qk).type_as(qk)

        if key_padding_mask is not None:
            attn_weights = attn_weights * key_padding_mask.unsqueeze(1)

        return attn_weights

    def softmax_attention(self, query, key, key_padding_mask, pidx):
        bsz, src_len, _ = key.size()
        tgt_len = query.size(1) if pidx is None else pidx + 1

        # (target_sequence_length X source_sequence_length)
        bias = self.rel_pos_bias(max(tgt_len, src_len))[:, :src_len]
        if pidx is not None:
            if query.size(1) != 1:
                raise ValueError("Position offset provided with queries longer than 1 token")
            # source_sequence_length
            bias = bias[pidx]
        else:
            # (target_sequence_length X source_sequence_length)
            bias = bias[:tgt_len]

        # scaled attention
        query = query * self.scaling
        # (batch_size X target_sequence_length X source_sequence_length)
        qk = torch.bmm(query, key.transpose(1, 2)) + bias

        if key_padding_mask is not None:
            qk = qk.masked_fill((1 - key_padding_mask).unsqueeze(1).to(torch.bool), float("-inf"))

        attn_weights = self.softmax(qk).type_as(qk)
        return attn_weights

    def forward(
        self,
        query,
        key: Optional[torch.Tensor],
        value: Optional[torch.Tensor],
        key_padding_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[torch.Tensor]] = None,
        output_attentions: bool = False,
        use_cache: bool = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        """
        Gated cross-attention used in Mega

        Args:
            query (`torch.Tensor` of shape `(target_sequence_length, batch_size, hidden_size)`):
                The self (or target) sequence input used as query inputs for cross-attention
            key (`torch.Tensor` of shape `(source_sequence_length, batch_size, hidden_size)`):
                The cross (or source) sequence input used as keys in cross-attention
            value (`torch.Tensor` of shape `(source_sequence_length, batch_size, hidden_size)`):
                The cross (or source) sequence input used as values in cross-attention
            key_padding_mask (`torch.LongTensor` of shape `(batch_size, source_sequence_length)`, *optional*):
                Padding mask corresponding to the source sequence, where entries are 1 for *not masked* and 0 for
                *masked* tokens
            past_key_values (`tuple(torch.FloatTensor)`, *optional*):
                If provided, the hidden state returned from the previous timestep during incremental decoding; expects
                that prior cross-attention keys and values will be the last two items in the tuple
            output_attentions (`bool`, defaults to `False`):
                Whether or not to return the cross-attention weights.
            use_cache (`bool`, defaults to `False`):
                Whether to perform incremental decoding; uses `prev_state` as the prior timestep, and returns the
                updated EMA hidden state for use in the next step

        Returns:
            `tuple(torch.FloatTensor)` containing various elements depending on configuration ([`MegaConfig`]) and
            inputs:

            - **hidden_states** (`torch.FloatTensor` of shape `(target_sequence_length, batch_size, hidden_size)`) --
              Hidden states from target sequence updated by gated cross-attention
            - **attn_weights** (*optional*, returned when `output_attentions=True`) `torch.FloatTensor` of shape
              `(batch_size, source_sequence_length, target_sequence_length)` -- The pairwise cross-attention weights
              corresponding to each token in the source and target sequences
            - **cross_key** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size,
              source_sequence_length, config.shared_representation_size)` -- The cross-attention key state for use in
              the next step of incremental decoding
            - **cross_value** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size,
              source_sequence_length, config.hidden_size)` -- The cross-attention value state for use in the next step
              of incremental decoding
        """

        seq_len, bsz, embed_dim = query.size()
        if embed_dim != self.config.hidden_size:
            raise ValueError(
                f"Unexpected embedding dimension received: input is {embed_dim} but expected {self.config.hidden_size}"
            )

        if past_key_values is not None:
            # make sure the inputs only have a sequence length of 1 if we're doing incremental decoding
            if seq_len != 1:
                raise ValueError(f"Incremental decoding requested with self-sequence length > 1: {seq_len}")
            # expect past_key_values to have (self_key, self_value, self_ema, cross_key, cross_value)
            prev_cross_key, prev_cross_value = past_key_values[-2:]
            key = value = None

            # use the self-attention cache to get the position id of the current step
            prev_self_key = past_key_values[0]
            num_incremental_steps = prev_self_key.size(1) + 1
        else:
            prev_cross_key = prev_cross_value = None
            # we still need the position id if we're doing incremental decoding (past_key_values will be None for the first step)
            num_incremental_steps = 0 if use_cache and (seq_len == 1) else None

        full_query = query
        if self.prenorm:
            full_query = self.norm(full_query)

        # (target_sequence_length X batch_size X 2*hidden_size + shared_representation_size)
        query_projected = self.q_proj(full_query)
        # split the query projections into separate components
        # - residual_weight is passed through sigmoid and sent through elementwise multiplication to the gated/weighted
        #   targets prior to being added to the query directly
        # - target_gate is a silu-gated tensor that is multiplied by the attention-weighted target below prior to the
        #   residual connection
        # - attention_query is the part that is passed to the attention function
        residual_weight, target_gate, attention_query = torch.split(
            query_projected,
            [self.config.hidden_size, self.config.hidden_size, self.config.shared_representation_size],
            dim=-1,
        )

        # (target_sequence_length X batch_size X hidden_size)
        residual_weight = torch.sigmoid(residual_weight)
        target_gate = F.silu(target_gate)

        if key is None:
            if value is not None:
                raise ValueError("Key and value must be `None` simultaneously")
            projected_key = projected_value = None
        else:
            # (source_sequence_length X batch_size X shared_representation_size)
            projected_key = self.k_proj(key)
            # (source_sequence_length X batch_size X hidden_size)
            projected_value = self.activation(self.v_proj(key))

        # (target_sequence_length X batch_size X shared_representation_size)
        # -> (batch_size X target_sequence_length X shared_representation_size)
        attention_query = attention_query.transpose(0, 1)
        if projected_key is not None:
            projected_key = projected_key.transpose(0, 1)
        if projected_value is not None:
            projected_value = projected_value.transpose(0, 1)

        # if we're doing incremental decoding, k and v are None and need to be overwritten with past values
        if past_key_values is not None:
            projected_key = prev_cross_key
            projected_value = prev_cross_value

        # if we're returning the cache for later use, store these now for later return (can be done without having
        # past_key_values provided)
        if use_cache:
            updated_cross_key = projected_key
            updated_cross_value = projected_value

        ctx_len = projected_key.size(1)
        # This is part of a workaround to get around fork/join parallelism
        # not supporting Optional types.
        if key_padding_mask is not None and key_padding_mask.dim() == 0:
            key_padding_mask = None

        if key_padding_mask is not None:
            if key_padding_mask.size(0) != bsz:
                raise ValueError("Key padding mask does not align on the batch dimension")

            if key_padding_mask.size(1) != ctx_len:
                raise ValueError("Key padding mask does not align on the sequence length dimension")

        if self.attention_activation == "softmax":
            attn_weights = self.softmax_attention(
                attention_query, projected_key, key_padding_mask, num_incremental_steps
            )
        else:
            attn_weights = self.element_attention(
                attention_query, projected_key, key_padding_mask, num_incremental_steps
            )

        projected_value = self.hidden_dropout(projected_value, batch_first=True)
        kernel = self.attention_dropout(attn_weights)

        # (batch_size X target_sequence_length X hidden_size)
        # -> (target_sequence_length X batch_size X hidden_size)
        weighted_targets = torch.bmm(kernel, projected_value).transpose(0, 1)

        # (target_sequence_length X batch_size X hidden_size)
        weighted_targets = self.activation(self.h_proj(weighted_targets * target_gate))
        weighted_targets = self.dropout(weighted_targets)
        out = torch.addcmul(query, residual_weight, weighted_targets - query)

        if not self.prenorm:
            out = self.norm(out)

        outputs = (out, attn_weights) if output_attentions else (out,)
        if use_cache:
            outputs = outputs + (updated_cross_key, updated_cross_value)

        return outputs


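# Illustrative sketch (not part of the original module): the `torch.addcmul` call above implements a
# sigmoid-gated residual connection, i.e. a per-element convex combination of the original query and the
# attention-weighted output. `_gated_residual_reference` is a hypothetical name for this restatement.
def _gated_residual_reference(query: torch.Tensor, gate: torch.Tensor, attended: torch.Tensor) -> torch.Tensor:
    # equivalent to torch.addcmul(query, gate, attended - query), where gate is a sigmoid-activated projection
    return (1 - gate) * query + gate * attended

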
class MegaMovingAverageGatedAttention(nn.Module):
    """
    Pure PyTorch implementation of Mega block; see https://arxiv.org/abs/2209.10655 and original fairseq
    implementation at https://github.com/facebookresearch/mega (copyright Meta Research, licensed under MIT License)

    Differences from original implementation include hidden state refactor and fixed inconsistency with additive /
    multiplicative attention masks
    """

    def __init__(self, config: MegaConfig):
        super().__init__()
        self.config = config
        self.activation = ACT2FN[self.config.activation]
        self.scaling = (
            self.config.shared_representation_size**-0.5 if self.config.attention_activation == "softmax" else None
        )
        self.dropout = MegaDropout(self.config.dropout_prob, is_featurewise=self.config.use_feature_dropout)
        self.hidden_dropout = MegaDropout(
            self.config.hidden_dropout_prob, is_featurewise=self.config.use_feature_dropout
        )
        # attention dropout is standard dropout
        self.attention_dropout = MegaDropout(self.config.attention_probs_dropout_prob, is_featurewise=False)

        self.norm = MegaSequenceNorm(
            self.config.normalization_type, self.config.hidden_size, affine=self.config.norm_affine
        )
        self.ema_gate = MegaMultiDimensionDampedEma(config)

        self.v_proj = nn.Linear(self.config.hidden_size, self.config.intermediate_size)
        self.mx_proj = nn.Linear(
            self.config.hidden_size,
            self.config.shared_representation_size + self.config.intermediate_size + 2 * self.config.hidden_size,
        )
        self.h_proj = nn.Linear(self.config.intermediate_size, self.config.hidden_size)

        self.qk_weight = nn.Parameter(torch.Tensor(2, self.config.shared_representation_size))
        self.qk_bias = nn.Parameter(torch.Tensor(2, self.config.shared_representation_size))

        if self.config.relative_positional_bias == "simple":
            self.rel_pos_bias = MegaSimpleRelativePositionalBias(config)
        elif self.config.relative_positional_bias == "rotary":
            self.rel_pos_bias = MegaRotaryRelativePositionalBias(config)
        else:
            raise ValueError(f"Unknown relative positional bias: {self.config.relative_positional_bias}")

        self.softmax = nn.Softmax(dim=-1)
        self.attention_function = (
            self.softmax_attention if self.config.attention_activation == "softmax" else self.element_attention
        )

    def element_attention(self, query, key, padding_mask, causal_mask):
        """
        Apply element-wise attention via relu^2 or laplace. Same as original implementation but with standardized
        causal attention mask. Expects the Hugging Face standard attention mask paradigm: 1 for not masked, and 0 for
        masked.
        """
        seq_len = key.size(2)
        if padding_mask is not None:
            # (batch_size X number of chunks X 1)
            lengths = padding_mask.sum(-1, keepdim=True)
            # (batch_size X number of chunks X 1 X 1)
            lengths = lengths.clamp(min=1.0).unsqueeze(-1)
        else:
            lengths = seq_len

        if causal_mask is not None:
            lengths = causal_mask.sum(dim=-1, keepdim=True)

        # (sequence_length X sequence_length)
        bias = self.rel_pos_bias(seq_len)
        if seq_len != query.size(2):
            if query.size(2) != 1:
                raise ValueError("Size mismatch between Q and K in element attention")
            # (1 X sequence_length)
            bias = bias[-1:]

        # (batch_size X number of chunks X sequence_length X sequence_length)
        qk = torch.matmul(query, key.transpose(2, 3)) / lengths + bias

        attn_weights = ACT2FN[self.config.attention_activation](qk).type_as(qk)

        if padding_mask is not None:
            attn_weights = attn_weights * padding_mask.unsqueeze(2)

        if causal_mask is not None:
            attn_weights = attn_weights * causal_mask

        return attn_weights

    def softmax_attention(self, query, key, padding_mask, causal_mask):
        "Standard softmax self-attention, as in the original Transformer paper"
        seq_len = key.size(2)
        # (sequence_length X sequence_length)
        bias = self.rel_pos_bias(seq_len)
        if seq_len != query.size(2):
            if query.size(2) != 1:
                raise ValueError("Size mismatch between Q and K in softmax attention")
            # (1 X sequence_length)
            bias = bias[-1:]

        # scaled attention
        query = query * self.scaling

        # (batch_size x number of chunks x chunk_size x chunk_size) if chunking
        # (batch_size x 1 x sequence_length x sequence_length) otherwise
        qk = torch.matmul(query, key.transpose(2, 3)) + bias

        # apply causal mask (presumed to be 1/0 for not masked / masked)
        # additive, but convert to 0/-inf (which is not explicitly in the Mega source code)
        if causal_mask is not None:
            additive_causal_mask = torch.zeros_like(causal_mask, dtype=qk.dtype)
            additive_causal_mask = additive_causal_mask.masked_fill((1 - causal_mask).bool(), float("-inf"))
            qk = qk + additive_causal_mask

        if padding_mask is not None:
            # 1 for tokens which are *not masked*
            # 0 for tokens which are *masked*
            # replace masked tokens with -inf to make softmax ignore them
            # need to invert the padding mask to match what mega original did
            padding_mask = 1 - padding_mask
            padding_mask_all = padding_mask.all(dim=-1, keepdim=True)
            padding_mask = torch.logical_and(padding_mask, ~padding_mask_all)
            qk = qk.masked_fill(padding_mask.unsqueeze(2).to(torch.bool), float("-inf"))

        attn_weights = self.softmax(qk).type_as(qk)
        return attn_weights

    def forward(
        self,
        input,
        padding_mask: Optional[torch.Tensor] = None,
        causal_mask: Optional[torch.Tensor] = None,
        past_key_values: Optional[Tuple[torch.Tensor]] = None,
        output_attentions=False,
        use_cache=False,
    ):
        """
        Mega's self-attention block, which combines multi-headed EMA with traditional self-attention

        Args:
            input (`torch.Tensor` of shape `(sequence_length, batch_size, hidden_size)`):
                Hidden states to be updated by Mega's self-attention
            padding_mask (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
                Indicates which inputs are to be ignored due to padding, where elements are either 1 for *not masked*
                or 0 for *masked*
            causal_mask (`torch.LongTensor` of shape `(sequence_length, sequence_length)`, *optional*):
                Indicates which inputs are to be ignored due to causal attention, where elements are either 1 for *not
                masked* or 0 for *masked*
            past_key_values (`tuple(torch.Tensor)`, *optional*):
                The hidden states returned from the previous timestep during incremental decoding; expects that
                self-attention key, value, and EMA states are the first 3 entries in the tuple
            output_attentions (`bool`, default `False`):
                Whether to return self-attention weights
            use_cache (`bool`, default `False`):
                Whether to perform incremental decoding; uses `past_key_values` as prior state, and returns the
                updated states for use in the next step

        Returns:
            `tuple(torch.FloatTensor)` containing various elements depending on configuration ([`MegaConfig`]) and
            inputs:

            - **hidden_states** (`torch.FloatTensor` of shape `(sequence_length, batch_size, hidden_size)`) -- Hidden
              states from target sequence updated by Mega's self-attention
            - **attn_weights** (*optional*, returned when `output_attentions=True`) `torch.FloatTensor` of shape
              `(batch_size, 1, sequence_length, sequence_length)` -- The self-attention weights corresponding to how
              each token in the input sequence attends to every other token
            - **self_key** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size,
              sequence_length, config.shared_representation_size)` -- The self-attention key state for use in the next
              step of incremental decoding
            - **self_value** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size,
              sequence_length, config.hidden_size)` -- The self-attention value state for use in the next step of
              incremental decoding
            - **self_ema_state** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape
              `(batch_size, config.ndim)` -- The incremental EMA state for use in the next step of incremental
              decoding.
        """

        seq_len, bsz, embed_dim = input.size()
        if embed_dim != self.config.hidden_size:
            raise ValueError(f"Input embedding dimension should be {self.config.hidden_size}; received {embed_dim}")

        # store inputs for residual connection and handle pre-norm if requested
        residual = input
        if self.config.normalize_before_mega:
            input = self.norm(input)

        # (sequence_length X batch_size X hidden_size) -> (sequence_length X batch_size X intermediate_size)
        value = self.activation(self.v_proj(input))

        # unpack the incremental state if provided
        # assumed to be (self K, self V, self EMA state, cross K, cross V)
        # also assumes that incremental decoding is working one token at a time, so input sequence length must be 1
        if self.config.is_decoder and (past_key_values is not None):
            if seq_len > 1:
                raise ValueError(f"Incremental decoding only supports self sequence length of 1; received {seq_len}")
            # the first 3 items in the saved states will be these regardless of whether cross-attention is present
            prev_self_key, prev_self_value, prev_ema_state = past_key_values[0:3]
        else:
            prev_self_key = prev_self_value = prev_ema_state = None

        # ema output is (sequence_length x batch_size x hidden_size)
        # updated_ema_state will be None if use_cache=False; otherwise (batch_size, config.ndim)
        ema_out, updated_ema_state = self.ema_gate(
            input, attention_mask=padding_mask, prev_state=prev_ema_state, use_cache=use_cache
        )
        ema_out = self.dropout(ema_out)

        # (sequence_length X batch_size X hidden_size)
        # -> (sequence_length X batch_size X 2*hidden_size + config.shared_representation_size + config.intermediate_size)
        # - residual_weight -> sigmoid -> applied to residual connection in torch.addcmul
        # - query_key_gates -> split into two components: query_key becomes query and key for attention input, gates
        #   becomes gating for self-attention output
        # - intermediate_state -> added to weighted attention output, sent through activation, and has inputs
        #   subtracted during torch.addcmul to create the final layer output
        base = self.mx_proj(ema_out)
        residual_weight, query_key_gates, intermediate_state = torch.split(
            base,
            [
                self.config.hidden_size,
                self.config.shared_representation_size + self.config.intermediate_size,
                self.config.hidden_size,
            ],
            dim=-1,
        )

        # (sequence_length X batch_size X hidden_size)
        residual_weight = torch.sigmoid(residual_weight)

        # (sequence_length X batch_size X shared_representation_size + intermediate_size)
        query_key_gates = F.silu(query_key_gates)

        # split into two different tensors: one for Q/K usage and the other for gating self-attention
        query_key, attention_gate = torch.split(
            query_key_gates, [self.config.shared_representation_size, self.config.intermediate_size], dim=-1
        )

        # (sequence_length X batch_size X shared_representation_size)
        # -> (sequence_length X batch_size X 1 X shared_representation_size)
        # -> (sequence_length X batch_size X 2 X shared_representation_size)
        query_key = query_key.unsqueeze(2) * self.qk_weight + self.qk_bias

        # (sequence_length X batch_size X 2 X shared_representation_size)
        # -> 2 tensors of (sequence_length X batch_size X shared_representation_size)
        query, key = torch.unbind(query_key, dim=2)

        # (sequence_length X batch_size X dimension)
        # -> (batch_size X sequence_length X dimension)
        # where `dimension` is either shared_representation_size (queries and keys) or intermediate_size (values)
        query = query.transpose(0, 1)
        key = key.transpose(0, 1)
        value = value.transpose(0, 1)

        if self.config.is_decoder:
            # combine history and current to save updated state (if history is provided)
            # when chunking is applied, the past states will be None at the end of the chunk, in
            # which case, proceed as if no K/V history had been provided
            # saved states are stored with shape (batch_size X sequence_length X dimension)
            if prev_self_key is not None:
                key = torch.cat([prev_self_key, key], dim=1)
            if prev_self_value is not None:
                value = torch.cat([prev_self_value, value], dim=1)

            # if not chunking, store as-is
            if not self.config.use_chunking:
                updated_self_key = key
                updated_self_value = value
            else:
                curr_len = key.size(1) % self.config.chunk_size
                if curr_len == 0:
                    # if we're chunking and have reached the end of a chunk, wipe out the saved state
                    updated_self_key = None
                    updated_self_value = None
                else:
                    updated_self_key = key
                    updated_self_value = value

        ctx_len = key.size(1)  # potentially differs from seq_len because of incremental decoding
        if not self.config.use_chunking:
            # if we're not chunking, treat the entire sequence as one long chunk
            # (batch_size X sequence_length X dimension) -> (batch_size X 1 X sequence_length X dimension)
            query = query.unsqueeze(1)
            key = key.unsqueeze(1)
            value = value.unsqueeze(1)
            if padding_mask is not None:
                # (batch_size X sequence_length) -> (batch_size X 1 X sequence_length)
                padding_mask = padding_mask.unsqueeze(1)
        else:
            # otherwise, split the sequences in the batch into `n_chunks` chunks of size `chunk_size`
            if seq_len < self.config.chunk_size:
                query = query.unsqueeze(1)
            else:
                # (batch_size X sequence_length X dimension) -> (batch_size X n_chunks X chunk_size X dimension)
                n_chunks = seq_len // self.config.chunk_size
                query = query.reshape(bsz, n_chunks, self.config.chunk_size, self.config.shared_representation_size)

            if ctx_len < self.config.chunk_size:
                key = key.unsqueeze(1)
                value = value.unsqueeze(1)
                if padding_mask is not None:
                    padding_mask = padding_mask.unsqueeze(1)
            else:
                # (batch_size X sequence_length X dimension) -> (batch_size X n_chunks X chunk_size X dimension)
                n_chunks = ctx_len // self.config.chunk_size
                key = key.reshape(bsz, n_chunks, self.config.chunk_size, self.config.shared_representation_size)
                value = value.reshape(bsz, n_chunks, self.config.chunk_size, self.config.intermediate_size)
                if padding_mask is not None:
                    padding_mask = padding_mask.view(bsz, n_chunks, self.config.chunk_size)

        # this is in the original Mega implementation to work around fork/join parallelism not supporting optional types
        if padding_mask is not None and padding_mask.dim() == 0:
            padding_mask = None

        attn_weights = self.attention_function(query, key, padding_mask=padding_mask, causal_mask=causal_mask)
        value = self.hidden_dropout(value, batch_first=True)
        kernel = self.attention_dropout(attn_weights)

        # (batch_size x n_chunks x chunk_size x intermediate_size) -> (sequence_length X batch_size X intermediate_size)
        weighted_self_output = (
            torch.matmul(kernel, value).view(bsz, seq_len, self.config.intermediate_size).transpose(0, 1)
        )

        # (sequence_length X batch_size X intermediate_size) -> (sequence_length X batch_size X hidden_size)
        weighted_self_output = self.activation(intermediate_state + self.h_proj(weighted_self_output * attention_gate))
        weighted_self_output = self.dropout(weighted_self_output)
        # (sequence_length X batch_size X hidden_size)
        out = torch.addcmul(residual, residual_weight, weighted_self_output - residual)

        if not self.config.normalize_before_mega:
            out = self.norm(out)

        return_values = (out, attn_weights) if output_attentions else (out,)
  947. if self.config.is_decoder:
  948. return_values = return_values + (updated_self_key, updated_self_value, updated_ema_state)
  949. return return_values
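# Editor's illustrative sketch (not part of the modeling code): a minimal, self-contained look at the
# two tensor manipulations used in the forward pass above -- folding a long sequence into fixed-size
# chunks before attention, and blending the attention output with the residual via torch.addcmul.
# All sizes below are made-up examples; only standard torch semantics are assumed.
def _sketch_chunking_and_gated_residual():
    import torch

    batch_size, seq_len, chunk_size, dim = 2, 8, 4, 16
    x = torch.randn(batch_size, seq_len, dim)
    # (batch, seq_len, dim) -> (batch, n_chunks, chunk_size, dim); requires seq_len % chunk_size == 0
    n_chunks = seq_len // chunk_size
    chunked = x.reshape(batch_size, n_chunks, chunk_size, dim)
    assert chunked.shape == (batch_size, n_chunks, chunk_size, dim)

    # gated residual update: out = residual + gate * (update - residual), a per-element interpolation
    # between the residual stream and the attention output
    residual, update = torch.randn(seq_len, batch_size, dim), torch.randn(seq_len, batch_size, dim)
    gate = torch.sigmoid(torch.randn(seq_len, batch_size, dim))
    out = torch.addcmul(residual, gate, update - residual)
    assert torch.allclose(out, residual + gate * (update - residual))
    return out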
  950. class MegaNormalizedFeedForwardNetwork(nn.Module):
  951. """
  952. Normalized feed-forward network used in Mega blocks. Left as-is from original Mega repo aside from retrieving args
  953. from Hugging Face config
  954. """
  955. def __init__(self, config: MegaConfig):
  956. super().__init__()
  957. self.config = config
  958. self.hidden_dim = config.nffn_hidden_size
  959. self.act_fn = config.activation
  960. self.activation = ACT2FN[config.activation]
  961. self.dropout = MegaDropout(self.config.dropout_prob, is_featurewise=self.config.use_feature_dropout)
  962. self.hidden_dropout = MegaDropout(
  963. self.config.nffn_activation_dropout_prob, is_featurewise=self.config.use_feature_dropout
  964. )
  965. self.prenorm = self.config.normalize_before_ffn
  966. self.norm = MegaSequenceNorm(
  967. self.config.normalization_type, self.config.hidden_size, affine=self.config.norm_affine
  968. )
  969. self.fc1 = nn.Linear(self.config.hidden_size, self.config.nffn_hidden_size)
  970. self.fc2 = nn.Linear(self.config.nffn_hidden_size, self.config.hidden_size)
  971. def forward(self, inputs):
  972. residual = inputs
  973. if self.prenorm:
  974. inputs = self.norm(inputs)
  975. hidden = self.activation(self.fc1(inputs))
  976. hidden = self.hidden_dropout(hidden)
  977. output = self.fc2(hidden)
  978. output = self.dropout(output)
  979. output = output + residual
  980. if not self.prenorm:
  981. output = self.norm(output)
  982. return output
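# Editor's illustrative sketch (not part of the modeling code): the pre-norm vs. post-norm ordering
# implemented by MegaNormalizedFeedForwardNetwork above, written with plain nn.LayerNorm/nn.Linear so
# it stands alone. It relies on the module-level torch / nn imports used throughout this file; the
# MegaSequenceNorm and MegaDropout specifics are intentionally not reproduced.
class _SketchPreOrPostNormFFN(nn.Module):
    def __init__(self, hidden_size=16, ffn_size=32, prenorm=True):
        super().__init__()
        self.prenorm = prenorm
        self.norm = nn.LayerNorm(hidden_size)
        self.fc1 = nn.Linear(hidden_size, ffn_size)
        self.fc2 = nn.Linear(ffn_size, hidden_size)

    def forward(self, inputs):
        residual = inputs
        if self.prenorm:  # normalize before the feed-forward block ("pre-norm")
            inputs = self.norm(inputs)
        output = self.fc2(torch.relu(self.fc1(inputs))) + residual
        if not self.prenorm:  # otherwise normalize after the residual addition ("post-norm")
            output = self.norm(output)
        return output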
  983. class MegaBlock(nn.Module):
  984. def __init__(self, config: MegaConfig):
  985. super().__init__()
  986. self.seq_len_dim = 1
  987. self.mega_layer = MegaMovingAverageGatedAttention(config)
  988. self.nffn = MegaNormalizedFeedForwardNetwork(config) if config.use_normalized_ffn else None
  989. self.is_decoder = config.is_decoder
  990. self.add_cross_attention = config.add_cross_attention
  991. if self.add_cross_attention:
  992. if not self.is_decoder:
  993. raise ValueError(f"{self} should be used as a decoder model if cross attention is added")
  994. self.cross_attn = MegaGatedCrossAttention(config)
  995. else:
  996. self.cross_attn = None
  997. def forward(
  998. self,
  999. hidden_states: torch.Tensor,
  1000. attention_mask: Optional[torch.LongTensor] = None,
  1001. causal_mask: Optional[torch.LongTensor] = None,
  1002. encoder_hidden_states: Optional[torch.FloatTensor] = None,
  1003. encoder_attention_mask: Optional[torch.FloatTensor] = None,
  1004. past_key_value: Optional[Tuple[torch.FloatTensor]] = None,
  1005. output_attentions: Optional[bool] = False,
  1006. use_cache: bool = False,
  1007. ) -> Tuple[torch.Tensor]:
  1008. """
  1009. A single Mega layer: either encoder or decoder, with optional cross-attention and optional normalized
  1010. feed-forward layer
  1011. Args:
  1012. hidden_states (`torch.Tensor` of shape `(target_sequence_length, batch_size, hidden_size)`):
  1013. Hidden states to be updated by the Mega block
  1014. attention_mask (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
  1015. Indicates which entries in the self/target sequence are to be ignored (mostly due to padding), where
  1016. elements are either 1 for *not masked* or 0 for *masked*. Causal attention is enforced internally.
  1017. causal_mask (`torch.LongTensor` of shape `(sequence_length, sequence_length)`, *optional*):
  1018. Indicates which inputs are to be ignored due to causal attention, where elements are either 1 for *not
  1019. masked* or 0 for *masked*
  1020. encoder_hidden_states (`torch.Tensor`, of shape `(source_sequence_length, batch_size, hidden_size)`, *optional*):
  1021. Encoder hidden states to be used for cross-attention (and required for encoder-decoder model setup)
  1022. encoder_attention_mask (`torch.LongTensor` of shape `(batch_size, source_sequence_length)`, *optional*):
  1023. Indicates which entries in the cross/source sequence are to be ignored (mostly due to padding), where
  1024. elements are either 1 for *not masked* or 0 for *masked*.
  1025. past_key_value (`tuple(torch.Tensor)`, *optional*):
  1026. The hidden states returned from the previous timestep during incremental decoding; expects that
  1027. self-attention key, value, and EMA states are the first 3 entries in the tuple, and (if doing
  1028. cross-attention) cross-attention key and value are the last 2 entries in the tuple
  1029. output_attentions (`bool`, default `False`):
  1030. Whether to return self-attention weights
  1031. use_cache (`bool`, default `False`):
1032. Whether to perform incremental decoding; uses `past_key_value` as prior state, and returns the updated
  1033. states for use in the next step
  1034. Returns:
  1035. `tuple(torch.FloatTensor)` containing various elements depending on configuration ([`MegaConfig`]) and
  1036. inputs:
  1037. - **hidden_states** (`torch.FloatTensor` of shape `(target_sequence_length, batch_size, hidden_size)`) --
  1038. Hidden states from target sequence updated by Mega
  1039. - **self_attn_weights** (*optional*, returned when `output_attentions=True`) `torch.FloatTensor` of shape
  1040. `(batch_size, 1, target_sequence_length, target_sequence_length)` -- The self-attention weights
  1041. corresponding to how each token in the input sequence attends to every other token
  1042. - **cross_attn_weights** (*optional*, returned when `output_attentions=True` and
  1043. `config.add_cross_attention=True`) `torch.FloatTensor` of shape `(batch_size, source_sequence_length,
  1044. target_sequence_length)` -- Pairwise cross-attention weights between every entry in the source sequence
  1045. and target sequence
  1046. - **self_key** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size,
  1047. sequence_length, config.shared_representation_size)` -- The self-attention key state for use in the next
  1048. step of incremental decoding
  1049. - **self_value** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape `(batch_size,
  1050. sequence_length, config.hidden_size)` -- The self-attention value state for use in the next step of
  1051. incremental decoding
  1052. - **self_ema_state** (*optional*, returned when `use_cache=True`) `torch.FloatTensor` of shape
  1053. `(batch_size, config.ndim)` The incremental EMA state for use in the next step of incremental decoding.
  1054. - **cross_key** (*optional*, returned when `use_cache=True` and `config.is_decoder=True`)
  1055. `torch.FloatTensor` of shape `(batch_size, source_sequence_length, config.shared_representation_size)` --
  1056. The cross-attention key state for use in the next step of incremental decoding
  1057. - **cross_value** (*optional*, returned when `use_cache=True` and `config.is_decoder=True`)
  1058. `torch.FloatTensor` of shape `(batch_size, source_sequence_length, config.hidden_size)` -- The
  1059. cross-attention value state for use in the next step of incremental decoding
  1060. """
  1061. # incremental decoding in the MegaMultiDimensionDampedEma module requires that the attention mask has the same
  1062. # sequence length as the input tensor; if we're caching incremental states, we assume the input
  1063. # sequence length is 1 (Mega will break otherwise), so we take the padding mask for the final
  1064. # token in the input (mask is received as [batch X sequence length])
  1065. if use_cache and (past_key_value is not None) and (attention_mask is not None):
  1066. mega_padding_mask = attention_mask[:, -1].unsqueeze(-1)
  1067. else:
  1068. mega_padding_mask = attention_mask
  1069. mega_outputs = self.mega_layer(
  1070. input=hidden_states,
  1071. padding_mask=mega_padding_mask,
  1072. causal_mask=causal_mask,
  1073. past_key_values=past_key_value,
  1074. output_attentions=output_attentions,
  1075. use_cache=use_cache,
  1076. )
  1077. new_hidden_states = mega_outputs[0]
  1078. self_key, self_value, self_ema_state = mega_outputs[-3:] if use_cache else (None, None, None)
  1079. self_attention_weights = mega_outputs[1] if output_attentions else None
  1080. # optional cross attention
  1081. if self.cross_attn is not None:
  1082. if encoder_hidden_states is None:
  1083. raise ValueError("Requested cross-attention without providing encoder hidden states")
  1084. cross_attn_outputs = self.cross_attn(
  1085. query=new_hidden_states,
  1086. key=encoder_hidden_states,
  1087. value=encoder_hidden_states,
  1088. key_padding_mask=encoder_attention_mask,
  1089. past_key_values=past_key_value,
  1090. output_attentions=output_attentions,
  1091. use_cache=use_cache,
  1092. )
  1093. # update the hidden state from cross attention
  1094. new_hidden_states = cross_attn_outputs[0]
  1095. # store cross-attention k/v if caching
  1096. cross_key, cross_value = cross_attn_outputs[-2:] if use_cache else (None, None)
  1097. cross_attention_weights = cross_attn_outputs[1] if output_attentions else None
  1098. # optional NFFN follows cross attention
  1099. if self.nffn is not None:
  1100. new_hidden_states = self.nffn(new_hidden_states)
  1101. outs = (new_hidden_states,)
  1102. if output_attentions:
  1103. outs = outs + (self_attention_weights,)
  1104. if self.cross_attn is not None:
  1105. outs = outs + (cross_attention_weights,)
  1106. if use_cache:
  1107. new_key_values = (
  1108. self_key,
  1109. self_value,
  1110. self_ema_state,
  1111. )
  1112. if self.cross_attn is not None:
  1113. new_key_values = new_key_values + (cross_key, cross_value)
  1114. outs = outs + (new_key_values,)
  1115. return outs
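# Editor's illustrative sketch (not part of the modeling code): the layout of the cache tuple that
# MegaBlock.forward returns when use_cache=True, and how a caller could unpack it. Per the docstring
# above, the first three entries are always the self-attention key/value and EMA state; the
# cross-attention key/value are appended only when cross-attention is configured.
def _sketch_unpack_mega_layer_cache(layer_cache):
    self_key, self_value, self_ema_state = layer_cache[:3]
    cross_key, cross_value = layer_cache[3:5] if len(layer_cache) == 5 else (None, None)
    return self_key, self_value, self_ema_state, cross_key, cross_value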
  1116. # copied from transformers.models.roberta.modeling_roberta.RobertaPooler with Roberta->Mega
  1117. class MegaPooler(nn.Module):
  1118. def __init__(self, config):
  1119. super().__init__()
  1120. self.dense = nn.Linear(config.hidden_size, config.hidden_size)
  1121. self.activation = nn.Tanh()
  1122. def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
  1123. # We "pool" the model by simply taking the hidden state corresponding
  1124. # to the first token.
  1125. first_token_tensor = hidden_states[:, 0]
  1126. pooled_output = self.dense(first_token_tensor)
  1127. pooled_output = self.activation(pooled_output)
  1128. return pooled_output
  1129. class MegaPreTrainedModel(PreTrainedModel):
  1130. """
  1131. An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
  1132. models.
  1133. """
  1134. config_class = MegaConfig
  1135. base_model_prefix = "mega"
  1136. supports_gradient_checkpointing = False
  1137. _no_split_modules = ["MegaMovingAverageGatedAttention"]
  1138. def _init_weights(self, module):
  1139. """Initialize the weights"""
  1140. if isinstance(module, MegaMultiDimensionDampedEma):
  1141. with torch.no_grad():
  1142. # delta & alpha
  1143. nn.init.normal_(module.damping_factor, mean=0.0, std=self.config.ema_delta_alpha_range)
  1144. nn.init.normal_(module.decay_factor, mean=0.0, std=self.config.ema_delta_alpha_range)
  1145. # beta [1, -1, 1, -1, ...] seems more stable.
  1146. val = torch.ones(self.config.ema_projection_size, 1)
  1147. if self.config.ema_projection_size > 1:
  1148. idx = torch.tensor(list(range(1, self.config.ema_projection_size, 2)))
  1149. val.index_fill_(0, idx, -1.0)
  1150. module.ema_expansion_matrix.normal_(mean=0.0, std=self.config.ema_beta_range).add_(val)
  1151. # gamma & omega
  1152. nn.init.normal_(module.kernel_projection_matrix, mean=0.0, std=self.config.ema_gamma_omega_range)
  1153. nn.init.normal_(module.residual_weight, mean=0.0, std=self.config.ema_gamma_omega_range)
  1154. elif isinstance(module, MegaSimpleRelativePositionalBias):
  1155. nn.init.normal_(module.rel_pos_bias, mean=0.0, std=self.config.initializer_range)
  1156. elif isinstance(module, MegaRotaryRelativePositionalBias):
  1157. nn.init.normal_(module.alpha, mean=0.0, std=self.config.initializer_range)
  1158. nn.init.normal_(module.b_param, mean=0.0, std=self.config.initializer_range)
  1159. elif isinstance(module, MegaScaleNorm):
  1160. if self.config.norm_affine:
  1161. nn.init.constant_(module.scalar, 1.0)
  1162. elif isinstance(module, MegaRMSNorm):
  1163. if self.config.norm_affine:
  1164. nn.init.constant_(module.weight, 1.0)
  1165. elif isinstance(module, MegaMovingAverageGatedAttention):
  1166. # linear layers covered separately by the generic nn.Linear init below
  1167. nn.init.normal_(module.qk_weight, mean=0.0, std=self.config.initializer_range)
  1168. nn.init.constant_(module.qk_bias, 0.0)
  1169. elif isinstance(module, nn.Linear):
  1170. # initializes all linear layers in the entire network
  1171. module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
  1172. if module.bias is not None:
  1173. module.bias.data.zero_()
  1174. elif isinstance(module, nn.Embedding):
  1175. module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
  1176. if module.padding_idx is not None:
  1177. module.weight.data[module.padding_idx].zero_()
  1178. elif isinstance(module, nn.LayerNorm):
  1179. module.bias.data.zero_()
  1180. module.weight.data.fill_(1.0)
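# Editor's illustrative sketch (not part of the modeling code): the alternating-sign "beta"
# initialization applied to module.ema_expansion_matrix in _init_weights above, reproduced on a small
# standalone tensor so the index_fill_ pattern is easy to inspect. The sizes below are arbitrary.
def _sketch_alternating_beta_init(ema_projection_size=4, std=0.02):
    import torch

    val = torch.ones(ema_projection_size, 1)
    if ema_projection_size > 1:
        # flip every other row to -1, producing the [1, -1, 1, -1, ...] pattern
        idx = torch.arange(1, ema_projection_size, 2)
        val.index_fill_(0, idx, -1.0)
    # Gaussian noise centered on the alternating pattern, mirroring the normal_(...).add_(val) call
    return torch.empty(ema_projection_size, 1).normal_(mean=0.0, std=std).add_(val)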
  1181. MEGA_START_DOCSTRING = r"""
  1182. This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
1183. library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
  1184. etc.)
  1185. This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
1186. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
  1187. and behavior.
  1188. Parameters:
  1189. config ([`MegaConfig`]): Model configuration class with all the parameters of the
  1190. model. Initializing with a config file does not load the weights associated with the model, only the
  1191. configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
  1192. """
  1193. MEGA_INPUTS_DOCSTRING = r"""
  1194. Args:
  1195. input_ids (`torch.LongTensor` of shape `({0})`):
  1196. Indices of input sequence tokens in the vocabulary.
  1197. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
  1198. [`PreTrainedTokenizer.__call__`] for details.
  1199. [What are input IDs?](../glossary#input-ids)
  1200. attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
  1201. Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
  1202. - 1 for tokens that are **not masked**,
  1203. - 0 for tokens that are **masked**.
  1204. [What are attention masks?](../glossary#attention-mask)
  1205. token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
  1206. Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,1]`:
  1207. - 0 corresponds to a *sentence A* token,
  1208. - 1 corresponds to a *sentence B* token.
1209. This parameter can only be used when the model is initialized with the `add_token_type_embeddings` parameter
1210. set to `True`. All values in this tensor should be smaller than `config.type_vocab_size`.
  1211. [What are token type IDs?](../glossary#token-type-ids)
  1212. inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
  1213. Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
  1214. is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
  1215. model's internal embedding lookup matrix.
  1216. output_attentions (`bool`, *optional*):
  1217. Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
  1218. tensors for more detail.
  1219. output_hidden_states (`bool`, *optional*):
  1220. Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
  1221. more detail.
  1222. return_dict (`bool`, *optional*):
  1223. Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
  1224. """
  1225. @add_start_docstrings(
  1226. "The bare MEGA Model transformer outputting raw hidden-states without any specific head on top.",
  1227. MEGA_START_DOCSTRING,
  1228. )
  1229. class MegaModel(MegaPreTrainedModel):
  1230. """
  1231. The model can behave as an encoder (with only self-attention) as well as a decoder, in which case a layer of
  1232. cross-attention is added after self-attention, following the architecture described in *Mega: Moving Average
  1233. Equipped Gated Attention*_ by Xuezhe Ma, Chunting Zhou, Xiang Kong, Junxian He, Liangke Gui, Graham Neubig,
  1234. Jonathan May, and Luke Zettlemoyer
  1235. To behave as a decoder the model needs to be initialized with the `is_decoder` argument of the configuration set to
1236. `True` and `bidirectional` set to `False`. To be used in a Seq2Seq model, the model needs to be initialized with both
1237. `is_decoder=True` and `bidirectional=False`, as well as with `add_cross_attention` set to `True`;
1238. `encoder_hidden_states` is then expected as an input to the forward pass.
  1239. .. _*Mega: Moving Average Equipped Gated Attention*: https://arxiv.org/abs/2209.10655
  1240. """
  1241. def __init__(self, config: MegaConfig, add_pooling_layer=True):
  1242. super().__init__(config)
  1243. self.config = config
  1244. self.embedding_layer = MegaEmbeddings(config)
  1245. self.layers = nn.ModuleList([MegaBlock(config) for _ in range(config.num_hidden_layers)])
  1246. self.pooler = MegaPooler(config) if add_pooling_layer else None
  1247. # Initialize weights and apply final processing (retained from RoBERTa code)
  1248. self.post_init()
  1249. def get_input_embeddings(self):
  1250. return self.embedding_layer.word_embeddings
  1251. def set_input_embeddings(self, value):
  1252. self.embedding_layer.word_embeddings = value
  1253. @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
  1254. @add_code_sample_docstrings(
  1255. checkpoint=_CHECKPOINT_FOR_DOC,
  1256. output_type=BaseModelOutputWithPoolingAndCrossAttentions,
  1257. config_class=_CONFIG_FOR_DOC,
  1258. )
  1259. def forward(
  1260. self,
  1261. input_ids: Optional[torch.Tensor] = None,
  1262. attention_mask: Optional[torch.Tensor] = None,
  1263. token_type_ids: Optional[torch.Tensor] = None,
  1264. inputs_embeds: Optional[torch.Tensor] = None,
  1265. encoder_hidden_states: Optional[torch.Tensor] = None,
  1266. encoder_attention_mask: Optional[torch.Tensor] = None,
  1267. past_key_values: Optional[List[torch.FloatTensor]] = None,
  1268. use_cache: Optional[bool] = None,
  1269. output_attentions: Optional[bool] = None,
  1270. output_hidden_states: Optional[bool] = None,
  1271. return_dict: Optional[bool] = None,
  1272. ) -> Union[Tuple[torch.Tensor], BaseModelOutputWithPoolingAndCrossAttentions]:
  1273. r"""
  1274. encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
  1275. Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
  1276. the model is configured as a decoder.
  1277. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
  1278. Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
  1279. the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
  1280. - 1 for tokens that are **not masked**,
  1281. - 0 for tokens that are **masked**.
  1282. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
  1283. Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
  1284. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
  1285. don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
  1286. `decoder_input_ids` of shape `(batch_size, sequence_length)`.
  1287. use_cache (`bool`, *optional*):
  1288. If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
  1289. `past_key_values`).
  1290. """
  1291. output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
  1292. output_hidden_states = (
  1293. output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
  1294. )
  1295. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1296. if input_ids is not None and inputs_embeds is not None:
  1297. raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
  1298. elif input_ids is not None:
  1299. self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
  1300. input_shape = input_ids.size()
  1301. device = input_ids.device
  1302. elif inputs_embeds is not None:
  1303. input_shape = inputs_embeds.size()[:-1]
  1304. device = inputs_embeds.device
  1305. else:
  1306. raise ValueError("You have to specify either input_ids or inputs_embeds")
  1307. if self.config.use_chunking:
  1308. input_shape = torch.tensor([input_shape[0], self.config.chunk_size])
  1309. batch_size, sequence_length = input_shape
  1310. if self.config.use_chunking and (sequence_length > self.config.chunk_size):
  1311. if sequence_length % self.config.chunk_size != 0:
  1312. raise ValueError(
  1313. f"config.use_chunking is activated; input sequence length must be shorter than or a multiple of config.chunk_size\nreceived sequence length of {sequence_length} with chunk size {self.config.chunk_size}"
  1314. )
  1315. if self.config.is_decoder:
  1316. use_cache = use_cache if use_cache is not None else self.config.use_cache
  1317. # Mega expects the causal mask to be a 2D square matrix of (from) x (to) over the input sequence length
  1318. # the HF utility function generates a 3D causal mask which includes batch size, so we'll create a dummy
  1319. # mask with the correct device and all ones
  1320. temp_mask_for_extension = torch.ones((1, sequence_length), dtype=torch.long, device=device)
  1321. causal_mask = self.create_extended_attention_mask_for_decoder(input_shape, temp_mask_for_extension)
  1322. # get rid of batch dimension in the generated mask; result is (sequence_length X sequence_length)
  1323. causal_mask = causal_mask.squeeze(0)
  1324. else:
  1325. use_cache = False
  1326. causal_mask = None
  1327. # if using cache, make sure we have a tuple of tuples which matches the length of our hidden layers
  1328. if (past_key_values is not None) and (len(past_key_values) != self.config.num_hidden_layers):
  1329. raise ValueError(
  1330. f"Received past key/value cache with size mismatch; expected {self.config.num_hidden_layers}, received {len(past_key_values)}"
  1331. )
  1332. # get embeddings (batch X sequence length X embed dim)
  1333. embedding_output = self.embedding_layer(
  1334. input_ids=input_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
  1335. )
  1336. # transpose for Mega --> (seq len X batch X embed dim)
  1337. hidden_states = embedding_output.transpose(0, 1)
  1338. # we expect encoder hidden states to also have batch first in line
  1339. # with typical Hugging Face behavior (which is also how we return them)
  1340. # Mega expects sequence length first, so do the same transpose here
  1341. if encoder_hidden_states is not None:
  1342. encoder_hidden_states = encoder_hidden_states.transpose(0, 1)
  1343. # pass through mega layers
  1344. all_hidden_states = (embedding_output,) if output_hidden_states else None
  1345. all_self_attentions = () if output_attentions else None
  1346. all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
  1347. next_decoder_cache = () if use_cache else None
  1348. for i, mega_layer in enumerate(self.layers):
  1349. current_decoder_cache = past_key_values[i] if past_key_values is not None else None
  1350. mega_outputs = mega_layer(
  1351. hidden_states=hidden_states,
  1352. attention_mask=attention_mask,
  1353. causal_mask=causal_mask,
  1354. encoder_hidden_states=encoder_hidden_states,
  1355. encoder_attention_mask=encoder_attention_mask,
  1356. past_key_value=current_decoder_cache,
  1357. output_attentions=output_attentions,
  1358. use_cache=use_cache,
  1359. )
  1360. hidden_states = mega_outputs[0]
  1361. if output_hidden_states:
  1362. # store layer-wise hidden states in the way that the user expects
  1363. # (seq len X batch X embed dim) --> (batch X seq len X embed dim)
  1364. all_hidden_states += (hidden_states.transpose(0, 1),)
  1365. if output_attentions:
  1366. self_attn_weights = mega_outputs[1]
  1367. all_self_attentions += (self_attn_weights,)
  1368. if self.config.add_cross_attention:
  1369. cross_attn_weights = mega_outputs[2]
  1370. all_cross_attentions += (cross_attn_weights,)
  1371. if use_cache:
  1372. updated_cache = mega_outputs[-1]
  1373. next_decoder_cache += (updated_cache,)
  1374. # transpose final hidden states
  1375. hidden_states = hidden_states.transpose(0, 1)
  1376. # optional pooling layer
  1377. pooled_output = self.pooler(hidden_states) if self.pooler is not None else None
  1378. if not return_dict:
  1379. return (hidden_states, pooled_output) + (
  1380. all_hidden_states,
  1381. next_decoder_cache,
  1382. all_self_attentions,
  1383. all_cross_attentions,
  1384. )
  1385. return BaseModelOutputWithPoolingAndCrossAttentions(
  1386. last_hidden_state=hidden_states,
  1387. pooler_output=pooled_output,
  1388. past_key_values=next_decoder_cache,
  1389. hidden_states=all_hidden_states,
  1390. attentions=all_self_attentions,
  1391. cross_attentions=all_cross_attentions,
  1392. )
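# Editor's illustrative sketch (not part of the modeling code): the sequence-length constraint that
# MegaModel.forward enforces when config.use_chunking is set. This hypothetical helper right-pads a
# batch so its length is a multiple of chunk_size; the pad token id and padding strategy are
# assumptions, not part of the library.
def _sketch_pad_to_chunk_multiple(input_ids, attention_mask, chunk_size, pad_token_id=0):
    import torch

    seq_len = input_ids.size(1)
    remainder = seq_len % chunk_size
    if seq_len <= chunk_size or remainder == 0:
        return input_ids, attention_mask  # already valid for chunked attention
    pad_len = chunk_size - remainder
    input_ids = torch.nn.functional.pad(input_ids, (0, pad_len), value=pad_token_id)
    attention_mask = torch.nn.functional.pad(attention_mask, (0, pad_len), value=0)
    return input_ids, attention_mask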
  1393. @add_start_docstrings(
  1394. """MEGA Model with a `language modeling` head on top for CLM fine-tuning.""", MEGA_START_DOCSTRING
  1395. )
  1396. class MegaForCausalLM(MegaPreTrainedModel):
  1397. _tied_weights_keys = ["lm_head.weight"]
  1398. def __init__(self, config: MegaConfig):
  1399. super().__init__(config)
  1400. if not config.is_decoder:
1401. logger.warning("If you want to use `MegaForCausalLM` as a standalone, add `is_decoder=True`.")
  1402. self.mega = MegaModel(config, add_pooling_layer=False)
  1403. if config.add_lm_hidden_dense_layer:
  1404. self.dense = nn.Linear(config.hidden_size, config.hidden_size)
  1405. self.hidden_activation = nn.Tanh()
  1406. else:
  1407. self.dense = None
  1408. self.hidden_activation = None
  1409. self.lm_head = nn.Linear(config.hidden_size, config.vocab_size)
  1410. # Initialize weights and apply final processing
  1411. self.post_init()
  1412. def get_output_embeddings(self):
  1413. return self.lm_head
  1414. def set_output_embeddings(self, new_embeddings):
  1415. self.lm_head = new_embeddings
  1416. @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
  1417. @replace_return_docstrings(output_type=CausalLMOutputWithCrossAttentions, config_class=_CONFIG_FOR_DOC)
  1418. def forward(
  1419. self,
  1420. input_ids: Optional[torch.LongTensor] = None,
  1421. attention_mask: Optional[torch.FloatTensor] = None,
  1422. token_type_ids: Optional[torch.LongTensor] = None,
  1423. inputs_embeds: Optional[torch.FloatTensor] = None,
  1424. encoder_hidden_states: Optional[torch.FloatTensor] = None,
  1425. encoder_attention_mask: Optional[torch.FloatTensor] = None,
  1426. labels: Optional[torch.LongTensor] = None,
  1427. past_key_values: Tuple[Tuple[torch.FloatTensor]] = None,
  1428. use_cache: Optional[bool] = None,
  1429. output_attentions: Optional[bool] = None,
  1430. output_hidden_states: Optional[bool] = None,
  1431. return_dict: Optional[bool] = None,
  1432. ) -> Union[Tuple[torch.Tensor], CausalLMOutputWithCrossAttentions]:
  1433. r"""
  1434. encoder_hidden_states (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
  1435. Sequence of hidden-states at the output of the last layer of the encoder. Used in the cross-attention if
  1436. the model is configured as a decoder.
  1437. encoder_attention_mask (`torch.FloatTensor` of shape `(batch_size, sequence_length)`, *optional*):
  1438. Mask to avoid performing attention on the padding token indices of the encoder input. This mask is used in
  1439. the cross-attention if the model is configured as a decoder. Mask values selected in `[0, 1]`:
  1440. - 1 for tokens that are **not masked**,
  1441. - 0 for tokens that are **masked**.
  1442. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
  1443. Labels for computing the left-to-right language modeling loss (next word prediction). Indices should be in
  1444. `[-100, 0, ..., config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are
  1445. ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
  1446. past_key_values (`tuple(tuple(torch.FloatTensor))` of length `config.n_layers` with each tuple having 4 tensors of shape `(batch_size, num_heads, sequence_length - 1, embed_size_per_head)`):
  1447. Contains precomputed key and value hidden states of the attention blocks. Can be used to speed up decoding.
  1448. If `past_key_values` are used, the user can optionally input only the last `decoder_input_ids` (those that
  1449. don't have their past key value states given to this model) of shape `(batch_size, 1)` instead of all
  1450. `decoder_input_ids` of shape `(batch_size, sequence_length)`.
  1451. use_cache (`bool`, *optional*):
  1452. If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding (see
  1453. `past_key_values`).
  1454. Returns:
  1455. Example:
  1456. ```python
  1457. >>> from transformers import AutoTokenizer, MegaForCausalLM, AutoConfig
  1458. >>> import torch
  1459. >>> tokenizer = AutoTokenizer.from_pretrained("mnaylor/mega-base-wikitext")
  1460. >>> config = AutoConfig.from_pretrained("mnaylor/mega-base-wikitext")
  1461. >>> config.is_decoder = True
  1462. >>> config.bidirectional = False
  1463. >>> model = MegaForCausalLM.from_pretrained(
  1464. ... "mnaylor/mega-base-wikitext", config=config, ignore_mismatched_sizes=True
  1465. ... )
  1466. >>> inputs = tokenizer("Hello, my dog is cute", return_tensors="pt")
  1467. >>> outputs = model(**inputs)
  1468. >>> prediction_logits = outputs.logits
  1469. ```"""
  1470. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1471. if labels is not None:
  1472. use_cache = False
  1473. outputs = self.mega(
  1474. input_ids,
  1475. attention_mask=attention_mask,
  1476. token_type_ids=token_type_ids,
  1477. inputs_embeds=inputs_embeds,
  1478. encoder_hidden_states=encoder_hidden_states,
  1479. encoder_attention_mask=encoder_attention_mask,
  1480. past_key_values=past_key_values,
  1481. use_cache=use_cache,
  1482. output_attentions=output_attentions,
  1483. output_hidden_states=output_hidden_states,
  1484. return_dict=return_dict,
  1485. )
  1486. sequence_output = outputs[0]
  1487. if self.dense is not None:
  1488. sequence_output = self.dense(sequence_output)
  1489. sequence_output = self.hidden_activation(sequence_output)
  1490. prediction_scores = self.lm_head(sequence_output)
  1491. lm_loss = None
  1492. if labels is not None:
  1493. # we are doing next-token prediction; shift prediction scores and input ids by one
  1494. shifted_prediction_scores = prediction_scores[:, :-1, :].contiguous()
  1495. labels = labels[:, 1:].contiguous()
  1496. loss_fct = CrossEntropyLoss()
  1497. lm_loss = loss_fct(shifted_prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
  1498. if not return_dict:
  1499. output = (prediction_scores,) + outputs[2:]
  1500. return ((lm_loss,) + output) if lm_loss is not None else output
  1501. return CausalLMOutputWithCrossAttentions(
  1502. loss=lm_loss,
  1503. logits=prediction_scores,
  1504. past_key_values=outputs.past_key_values,
  1505. hidden_states=outputs.hidden_states,
  1506. attentions=outputs.attentions,
  1507. cross_attentions=outputs.cross_attentions,
  1508. )
  1509. def prepare_inputs_for_generation(self, input_ids, past_key_values=None, attention_mask=None, **model_kwargs):
  1510. input_shape = input_ids.shape
1511. # if the model is used as a decoder in an encoder-decoder model, the decoder attention mask is created on the fly
  1512. if attention_mask is None:
  1513. attention_mask = input_ids.new_ones(input_shape)
  1514. # cut decoder_input_ids if past is used
  1515. if past_key_values is not None:
  1516. input_ids = input_ids[:, -1:]
  1517. return {"input_ids": input_ids, "attention_mask": attention_mask, "past_key_values": past_key_values}
  1518. def _reorder_cache(self, past_key_values, beam_idx):
  1519. reordered_past = ()
  1520. for layer_past in past_key_values:
  1521. reordered_past += (
  1522. tuple(past_state.index_select(0, beam_idx.to(past_state.device)) for past_state in layer_past),
  1523. )
  1524. return reordered_past
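# Editor's illustrative sketch (not part of the modeling code): a minimal greedy decoding loop that
# exercises the caching path above -- after the first step only the most recent token is fed, with
# past_key_values carrying the history, mirroring prepare_inputs_for_generation. The checkpoint,
# prompt, and config overrides follow the docstring example; this is an assumption-laden sketch, not
# the library's generate() implementation.
def _sketch_greedy_decode(prompt="Hello, my dog is", max_new_tokens=5):
    import torch
    from transformers import AutoConfig, AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("mnaylor/mega-base-wikitext")
    config = AutoConfig.from_pretrained("mnaylor/mega-base-wikitext", is_decoder=True, bidirectional=False)
    model = MegaForCausalLM.from_pretrained(
        "mnaylor/mega-base-wikitext", config=config, ignore_mismatched_sizes=True
    )
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids
    past_key_values = None
    for _ in range(max_new_tokens):
        # once a cache exists, feed only the newest token (sequence length 1)
        step_ids = input_ids if past_key_values is None else input_ids[:, -1:]
        out = model(input_ids=step_ids, past_key_values=past_key_values, use_cache=True)
        past_key_values = out.past_key_values
        next_token = out.logits[:, -1, :].argmax(dim=-1, keepdim=True)
        input_ids = torch.cat([input_ids, next_token], dim=-1)
    return tokenizer.decode(input_ids[0])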
  1525. @add_start_docstrings("""MEGA Model with a `language modeling` head on top.""", MEGA_START_DOCSTRING)
  1526. class MegaForMaskedLM(MegaPreTrainedModel):
  1527. _tied_weights_keys = ["mlm_head.weight"]
  1528. def __init__(self, config: MegaConfig):
  1529. super().__init__(config)
  1530. if config.is_decoder:
  1531. logger.warning(
  1532. "If you want to use `MegaForMaskedLM`, set `config.is_decoder=False` for "
  1533. "bi-directional self-attention."
  1534. )
  1535. self.mega = MegaModel(config, add_pooling_layer=False)
  1536. if config.add_lm_hidden_dense_layer:
  1537. self.dense = nn.Linear(config.hidden_size, config.hidden_size)
  1538. self.hidden_activation = nn.Tanh()
  1539. else:
  1540. self.dense = None
  1541. self.hidden_activation = None
  1542. self.mlm_head = nn.Linear(config.hidden_size, config.vocab_size)
  1543. self.dropout = nn.Dropout(config.dropout_prob)
  1544. # Initialize weights and apply final processing
  1545. self.post_init()
  1546. def get_output_embeddings(self):
  1547. return self.mlm_head
  1548. def set_output_embeddings(self, new_embeddings):
  1549. self.mlm_head = new_embeddings
  1550. @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
  1551. @add_code_sample_docstrings(
  1552. checkpoint=_CHECKPOINT_FOR_DOC,
  1553. output_type=MaskedLMOutput,
  1554. config_class=_CONFIG_FOR_DOC,
  1555. mask="<mask>",
  1556. expected_output="' Paris'",
  1557. expected_loss=0.1,
  1558. )
  1559. def forward(
  1560. self,
  1561. input_ids: Optional[torch.LongTensor] = None,
  1562. attention_mask: Optional[torch.FloatTensor] = None,
  1563. token_type_ids: Optional[torch.LongTensor] = None,
  1564. inputs_embeds: Optional[torch.FloatTensor] = None,
  1565. encoder_hidden_states: Optional[torch.FloatTensor] = None,
  1566. encoder_attention_mask: Optional[torch.FloatTensor] = None,
  1567. labels: Optional[torch.LongTensor] = None,
  1568. output_attentions: Optional[bool] = None,
  1569. output_hidden_states: Optional[bool] = None,
  1570. return_dict: Optional[bool] = None,
  1571. ) -> Union[Tuple[torch.Tensor], MaskedLMOutput]:
  1572. r"""
  1573. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
  1574. Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
  1575. config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
  1576. loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
  1577. kwargs (`Dict[str, any]`, optional, defaults to *{}*):
  1578. Used to hide legacy arguments that have been deprecated.
  1579. """
  1580. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1581. outputs = self.mega(
  1582. input_ids,
  1583. attention_mask=attention_mask,
  1584. token_type_ids=token_type_ids,
  1585. inputs_embeds=inputs_embeds,
  1586. encoder_hidden_states=encoder_hidden_states,
  1587. encoder_attention_mask=encoder_attention_mask,
  1588. output_attentions=output_attentions,
  1589. output_hidden_states=output_hidden_states,
  1590. return_dict=return_dict,
  1591. )
  1592. sequence_output = outputs[0]
  1593. if self.dense is not None:
  1594. sequence_output = self.dense(sequence_output)
  1595. sequence_output = self.hidden_activation(sequence_output)
  1596. prediction_scores = self.mlm_head(sequence_output)
  1597. masked_lm_loss = None
  1598. if labels is not None:
  1599. loss_fct = CrossEntropyLoss()
  1600. masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
  1601. if not return_dict:
  1602. output = (prediction_scores,) + outputs[2:]
  1603. return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
  1604. return MaskedLMOutput(
  1605. loss=masked_lm_loss,
  1606. logits=prediction_scores,
  1607. hidden_states=outputs.hidden_states,
  1608. attentions=outputs.attentions,
  1609. )
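# Editor's illustrative sketch (not part of the modeling code): recovering the top prediction for a
# masked position, in the fill-mask style implied by the docstring example above ("<mask>" token,
# expected output " Paris"). The checkpoint and prompt are examples, not guarantees of model output.
def _sketch_fill_mask(text="The capital of France is <mask>."):
    import torch
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("mnaylor/mega-base-wikitext")
    model = MegaForMaskedLM.from_pretrained("mnaylor/mega-base-wikitext")
    inputs = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits
    # locate the masked position(s) and decode the highest-scoring vocabulary entry for each
    mask_positions = (inputs.input_ids == tokenizer.mask_token_id).nonzero(as_tuple=True)[1]
    return tokenizer.decode(logits[0, mask_positions].argmax(dim=-1))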
  1610. @add_start_docstrings(
  1611. """
  1612. MEGA Model transformer with a sequence classification/regression head on top (a linear layer on top of the pooled
  1613. output) e.g. for GLUE tasks.
  1614. """,
  1615. MEGA_START_DOCSTRING,
  1616. )
  1617. class MegaForSequenceClassification(MegaPreTrainedModel):
  1618. def __init__(self, config):
  1619. super().__init__(config)
  1620. self.num_labels = config.num_labels
  1621. self.config = config
  1622. self.mega = MegaModel(config, add_pooling_layer=False)
  1623. self.classifier = MegaClassificationHead(config)
  1624. # Initialize weights and apply final processing
  1625. self.post_init()
  1626. @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
  1627. @add_code_sample_docstrings(
  1628. checkpoint=_CHECKPOINT_FOR_DOC,
  1629. output_type=SequenceClassifierOutput,
  1630. config_class=_CONFIG_FOR_DOC,
  1631. )
  1632. def forward(
  1633. self,
  1634. input_ids: Optional[torch.LongTensor] = None,
  1635. attention_mask: Optional[torch.FloatTensor] = None,
  1636. token_type_ids: Optional[torch.LongTensor] = None,
  1637. inputs_embeds: Optional[torch.FloatTensor] = None,
  1638. labels: Optional[torch.LongTensor] = None,
  1639. output_attentions: Optional[bool] = None,
  1640. output_hidden_states: Optional[bool] = None,
  1641. return_dict: Optional[bool] = None,
  1642. ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
  1643. r"""
  1644. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  1645. Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
  1646. config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
  1647. `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
  1648. """
  1649. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1650. outputs = self.mega(
  1651. input_ids,
  1652. attention_mask=attention_mask,
  1653. token_type_ids=token_type_ids,
  1654. inputs_embeds=inputs_embeds,
  1655. output_attentions=output_attentions,
  1656. output_hidden_states=output_hidden_states,
  1657. return_dict=return_dict,
  1658. )
  1659. sequence_output = outputs[0]
  1660. logits = self.classifier(sequence_output)
  1661. loss = None
  1662. if labels is not None:
  1663. if self.config.problem_type is None:
  1664. if self.num_labels == 1:
  1665. self.config.problem_type = "regression"
  1666. elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
  1667. self.config.problem_type = "single_label_classification"
  1668. else:
  1669. self.config.problem_type = "multi_label_classification"
  1670. if self.config.problem_type == "regression":
  1671. loss_fct = MSELoss()
  1672. if self.num_labels == 1:
  1673. loss = loss_fct(logits.squeeze(), labels.squeeze())
  1674. else:
  1675. loss = loss_fct(logits, labels)
  1676. elif self.config.problem_type == "single_label_classification":
  1677. loss_fct = CrossEntropyLoss()
  1678. loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
  1679. elif self.config.problem_type == "multi_label_classification":
  1680. loss_fct = BCEWithLogitsLoss()
  1681. loss = loss_fct(logits, labels)
  1682. if not return_dict:
  1683. output = (logits,) + outputs[2:]
  1684. return ((loss,) + output) if loss is not None else output
  1685. return SequenceClassifierOutput(
  1686. loss=loss,
  1687. logits=logits,
  1688. hidden_states=outputs.hidden_states,
  1689. attentions=outputs.attentions,
  1690. )
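# Editor's illustrative sketch (not part of the modeling code): the problem_type / loss selection
# logic used in MegaForSequenceClassification.forward above, isolated so the three branches are easy
# to see. The tensors and num_labels are arbitrary inputs supplied by the caller.
def _sketch_sequence_classification_loss(logits, labels, num_labels, problem_type=None):
    import torch
    from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

    if problem_type is None:
        if num_labels == 1:
            problem_type = "regression"
        elif labels.dtype in (torch.long, torch.int):
            problem_type = "single_label_classification"
        else:
            problem_type = "multi_label_classification"
    if problem_type == "regression":
        loss_fct = MSELoss()
        return loss_fct(logits.squeeze(), labels.squeeze()) if num_labels == 1 else loss_fct(logits, labels)
    if problem_type == "single_label_classification":
        return CrossEntropyLoss()(logits.view(-1, num_labels), labels.view(-1))
    return BCEWithLogitsLoss()(logits, labels.float())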
  1691. @add_start_docstrings(
  1692. """
  1693. MEGA Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
  1694. softmax) e.g. for RocStories/SWAG tasks.
  1695. """,
  1696. MEGA_START_DOCSTRING,
  1697. )
  1698. class MegaForMultipleChoice(MegaPreTrainedModel):
  1699. def __init__(self, config):
  1700. super().__init__(config)
  1701. self.mega = MegaModel(config)
  1702. self.dropout = nn.Dropout(config.hidden_dropout_prob)
  1703. self.classifier = nn.Linear(config.hidden_size, 1)
  1704. # Initialize weights and apply final processing
  1705. self.post_init()
  1706. @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length"))
  1707. @add_code_sample_docstrings(
  1708. checkpoint=_CHECKPOINT_FOR_DOC,
  1709. output_type=MultipleChoiceModelOutput,
  1710. config_class=_CONFIG_FOR_DOC,
  1711. )
  1712. def forward(
  1713. self,
  1714. input_ids: Optional[torch.LongTensor] = None,
  1715. token_type_ids: Optional[torch.LongTensor] = None,
  1716. attention_mask: Optional[torch.FloatTensor] = None,
  1717. labels: Optional[torch.LongTensor] = None,
  1718. inputs_embeds: Optional[torch.FloatTensor] = None,
  1719. output_attentions: Optional[bool] = None,
  1720. output_hidden_states: Optional[bool] = None,
  1721. return_dict: Optional[bool] = None,
  1722. ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
  1723. r"""
  1724. labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  1725. Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
  1726. num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
  1727. `input_ids` above)
  1728. """
  1729. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1730. num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]
  1731. flat_input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
  1732. flat_token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
  1733. flat_attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
  1734. flat_inputs_embeds = (
  1735. inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
  1736. if inputs_embeds is not None
  1737. else None
  1738. )
  1739. outputs = self.mega(
  1740. flat_input_ids,
  1741. token_type_ids=flat_token_type_ids,
  1742. attention_mask=flat_attention_mask,
  1743. inputs_embeds=flat_inputs_embeds,
  1744. output_attentions=output_attentions,
  1745. output_hidden_states=output_hidden_states,
  1746. return_dict=return_dict,
  1747. )
  1748. pooled_output = outputs[1]
  1749. pooled_output = self.dropout(pooled_output)
  1750. logits = self.classifier(pooled_output)
  1751. reshaped_logits = logits.view(-1, num_choices)
  1752. loss = None
  1753. if labels is not None:
  1754. loss_fct = CrossEntropyLoss()
  1755. loss = loss_fct(reshaped_logits, labels)
  1756. if not return_dict:
  1757. output = (reshaped_logits,) + outputs[2:]
  1758. return ((loss,) + output) if loss is not None else output
  1759. return MultipleChoiceModelOutput(
  1760. loss=loss,
  1761. logits=reshaped_logits,
  1762. hidden_states=outputs.hidden_states,
  1763. attentions=outputs.attentions,
  1764. )
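# Editor's illustrative sketch (not part of the modeling code): the flatten/reshape pattern used by
# MegaForMultipleChoice above -- choices are folded into the batch dimension for the encoder, and the
# per-sequence scores are folded back out into (batch, num_choices) before the loss. Sizes are
# arbitrary examples.
def _sketch_multiple_choice_reshape():
    import torch

    batch_size, num_choices, seq_len = 2, 4, 7
    input_ids = torch.randint(0, 100, (batch_size, num_choices, seq_len))
    flat_input_ids = input_ids.view(-1, seq_len)  # (batch_size * num_choices, seq_len)
    per_sequence_score = torch.randn(flat_input_ids.size(0), 1)  # one score per flattened sequence
    reshaped_logits = per_sequence_score.view(-1, num_choices)  # (batch_size, num_choices)
    assert reshaped_logits.shape == (batch_size, num_choices)
    return reshaped_logits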
  1765. @add_start_docstrings(
  1766. """
  1767. MEGA Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
  1768. Named-Entity-Recognition (NER) tasks.
  1769. """,
  1770. MEGA_START_DOCSTRING,
  1771. )
  1772. class MegaForTokenClassification(MegaPreTrainedModel):
  1773. def __init__(self, config):
  1774. super().__init__(config)
  1775. self.num_labels = config.num_labels
  1776. self.mega = MegaModel(config, add_pooling_layer=False)
  1777. classifier_dropout = (
  1778. config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
  1779. )
  1780. self.dropout = nn.Dropout(classifier_dropout)
  1781. self.classifier = nn.Linear(config.hidden_size, config.num_labels)
  1782. # Initialize weights and apply final processing
  1783. self.post_init()
  1784. @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
  1785. @add_code_sample_docstrings(
  1786. checkpoint=_CHECKPOINT_FOR_DOC,
  1787. output_type=TokenClassifierOutput,
  1788. config_class=_CONFIG_FOR_DOC,
  1789. )
  1790. def forward(
  1791. self,
  1792. input_ids: Optional[torch.LongTensor] = None,
  1793. attention_mask: Optional[torch.FloatTensor] = None,
  1794. token_type_ids: Optional[torch.LongTensor] = None,
  1795. inputs_embeds: Optional[torch.FloatTensor] = None,
  1796. labels: Optional[torch.LongTensor] = None,
  1797. output_attentions: Optional[bool] = None,
  1798. output_hidden_states: Optional[bool] = None,
  1799. return_dict: Optional[bool] = None,
  1800. ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
  1801. r"""
  1802. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
  1803. Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
  1804. """
  1805. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1806. outputs = self.mega(
  1807. input_ids,
  1808. attention_mask=attention_mask,
  1809. token_type_ids=token_type_ids,
  1810. inputs_embeds=inputs_embeds,
  1811. output_attentions=output_attentions,
  1812. output_hidden_states=output_hidden_states,
  1813. return_dict=return_dict,
  1814. )
  1815. sequence_output = outputs[0]
  1816. sequence_output = self.dropout(sequence_output)
  1817. logits = self.classifier(sequence_output)
  1818. loss = None
  1819. if labels is not None:
  1820. loss_fct = CrossEntropyLoss()
  1821. loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
  1822. if not return_dict:
  1823. output = (logits,) + outputs[2:]
  1824. return ((loss,) + output) if loss is not None else output
  1825. return TokenClassifierOutput(
  1826. loss=loss,
  1827. logits=logits,
  1828. hidden_states=outputs.hidden_states,
  1829. attentions=outputs.attentions,
  1830. )
  1831. # copied from transformers.models.roberta.modeling_roberta.RobertaClassificationHead with Roberta->Mega
  1832. class MegaClassificationHead(nn.Module):
  1833. """Head for sentence-level classification tasks."""
  1834. def __init__(self, config):
  1835. super().__init__()
  1836. self.dense = nn.Linear(config.hidden_size, config.hidden_size)
  1837. classifier_dropout = (
  1838. config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
  1839. )
  1840. self.dropout = nn.Dropout(classifier_dropout)
  1841. self.out_proj = nn.Linear(config.hidden_size, config.num_labels)
  1842. def forward(self, features, **kwargs):
  1843. x = features[:, 0, :] # take <s> token (equiv. to [CLS])
  1844. x = self.dropout(x)
  1845. x = self.dense(x)
  1846. x = torch.tanh(x)
  1847. x = self.dropout(x)
  1848. x = self.out_proj(x)
  1849. return x
  1850. @add_start_docstrings(
  1851. """
  1852. MEGA Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
1853. layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
  1854. """,
  1855. MEGA_START_DOCSTRING,
  1856. )
  1857. class MegaForQuestionAnswering(MegaPreTrainedModel):
  1858. def __init__(self, config):
  1859. super().__init__(config)
  1860. self.num_labels = config.num_labels
  1861. self.mega = MegaModel(config, add_pooling_layer=False)
  1862. self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)
  1863. # Initialize weights and apply final processing
  1864. self.post_init()
  1865. @add_start_docstrings_to_model_forward(MEGA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
  1866. @add_code_sample_docstrings(
  1867. checkpoint=_CHECKPOINT_FOR_DOC,
  1868. output_type=QuestionAnsweringModelOutput,
  1869. config_class=_CONFIG_FOR_DOC,
  1870. )
  1871. def forward(
  1872. self,
  1873. input_ids: Optional[torch.LongTensor] = None,
  1874. attention_mask: Optional[torch.FloatTensor] = None,
  1875. token_type_ids: Optional[torch.LongTensor] = None,
  1876. inputs_embeds: Optional[torch.FloatTensor] = None,
  1877. start_positions: Optional[torch.LongTensor] = None,
  1878. end_positions: Optional[torch.LongTensor] = None,
  1879. output_attentions: Optional[bool] = None,
  1880. output_hidden_states: Optional[bool] = None,
  1881. return_dict: Optional[bool] = None,
  1882. ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
  1883. r"""
  1884. start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  1885. Labels for position (index) of the start of the labelled span for computing the token classification loss.
1886. Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
  1887. are not taken into account for computing the loss.
  1888. end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  1889. Labels for position (index) of the end of the labelled span for computing the token classification loss.
1890. Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
  1891. are not taken into account for computing the loss.
  1892. """
  1893. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  1894. outputs = self.mega(
  1895. input_ids,
  1896. attention_mask=attention_mask,
  1897. token_type_ids=token_type_ids,
  1898. inputs_embeds=inputs_embeds,
  1899. output_attentions=output_attentions,
  1900. output_hidden_states=output_hidden_states,
  1901. return_dict=return_dict,
  1902. )
  1903. sequence_output = outputs[0]
  1904. logits = self.qa_outputs(sequence_output)
  1905. start_logits, end_logits = logits.split(1, dim=-1)
  1906. start_logits = start_logits.squeeze(-1).contiguous()
  1907. end_logits = end_logits.squeeze(-1).contiguous()
  1908. total_loss = None
  1909. if start_positions is not None and end_positions is not None:
1910. # If we are on multi-GPU, the split adds an extra dimension; squeeze it away
  1911. if len(start_positions.size()) > 1:
  1912. start_positions = start_positions.squeeze(-1)
  1913. if len(end_positions.size()) > 1:
  1914. end_positions = end_positions.squeeze(-1)
1915. # sometimes the start/end positions fall outside the model inputs; ignore these positions when computing the loss
  1916. ignored_index = start_logits.size(1)
  1917. start_positions = start_positions.clamp(0, ignored_index)
  1918. end_positions = end_positions.clamp(0, ignored_index)
  1919. loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
  1920. start_loss = loss_fct(start_logits, start_positions)
  1921. end_loss = loss_fct(end_logits, end_positions)
  1922. total_loss = (start_loss + end_loss) / 2
  1923. if not return_dict:
  1924. output = (start_logits, end_logits) + outputs[2:]
  1925. return ((total_loss,) + output) if total_loss is not None else output
  1926. return QuestionAnsweringModelOutput(
  1927. loss=total_loss,
  1928. start_logits=start_logits,
  1929. end_logits=end_logits,
  1930. hidden_states=outputs.hidden_states,
  1931. attentions=outputs.attentions,
  1932. )
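# Editor's illustrative sketch (not part of the modeling code): decoding an answer span from the
# start/end logits produced by MegaForQuestionAnswering above. The simple per-example argmax strategy
# below is an assumption for illustration, not the library's official post-processing.
def _sketch_extract_answer_span(start_logits, end_logits, input_ids, tokenizer):
    start_index = int(start_logits.argmax(dim=-1)[0])
    end_index = int(end_logits.argmax(dim=-1)[0])
    if end_index < start_index:  # fall back to a single-token span if the argmaxes are inconsistent
        end_index = start_index
    return tokenizer.decode(input_ids[0, start_index : end_index + 1])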