# modeling_mobilebert.py

# MIT License
#
# Copyright (c) 2020 The Google AI Language Team Authors, The HuggingFace Inc. team and github/lonePatient
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

import math
import os
import warnings
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...modeling_outputs import (
    BaseModelOutput,
    BaseModelOutputWithPooling,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    NextSentencePredictorOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import (
    ModelOutput,
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
    replace_return_docstrings,
)
from .configuration_mobilebert import MobileBertConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "google/mobilebert-uncased"
_CONFIG_FOR_DOC = "MobileBertConfig"

# TokenClassification docstring
_CHECKPOINT_FOR_TOKEN_CLASSIFICATION = "mrm8488/mobilebert-finetuned-ner"
_TOKEN_CLASS_EXPECTED_OUTPUT = "['I-ORG', 'I-ORG', 'O', 'O', 'O', 'O', 'O', 'I-LOC', 'O', 'I-LOC', 'I-LOC']"
_TOKEN_CLASS_EXPECTED_LOSS = 0.03

# QuestionAnswering docstring
_CHECKPOINT_FOR_QA = "csarron/mobilebert-uncased-squad-v2"
_QA_EXPECTED_OUTPUT = "'a nice puppet'"
_QA_EXPECTED_LOSS = 3.98
_QA_TARGET_START_INDEX = 12
_QA_TARGET_END_INDEX = 13

# SequenceClassification docstring
_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION = "lordtt13/emo-mobilebert"
_SEQ_CLASS_EXPECTED_OUTPUT = "'others'"
_SEQ_CLASS_EXPECTED_LOSS = "4.72"


def load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path):
    """Load tf checkpoints in a pytorch model."""
    try:
        import re

        import numpy as np
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    names = []
    arrays = []
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        names.append(name)
        arrays.append(array)
    for name, array in zip(names, arrays):
        name = name.replace("ffn_layer", "ffn")
        name = name.replace("FakeLayerNorm", "LayerNorm")
        name = name.replace("extra_output_weights", "dense/kernel")
        name = name.replace("bert", "mobilebert")
        name = name.split("/")
        # adam_v and adam_m are variables used in AdamWeightDecayOptimizer to calculate m and v,
        # which are not required for using the pretrained model
        if any(
            n in ["adam_v", "adam_m", "AdamWeightDecayOptimizer", "AdamWeightDecayOptimizer_1", "global_step"]
            for n in name
        ):
            logger.info(f"Skipping {'/'.join(name)}")
            continue
        pointer = model
        for m_name in name:
            if re.fullmatch(r"[A-Za-z]+_\d+", m_name):
                scope_names = re.split(r"_(\d+)", m_name)
            else:
                scope_names = [m_name]
            if scope_names[0] == "kernel" or scope_names[0] == "gamma":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "output_bias" or scope_names[0] == "beta":
                pointer = getattr(pointer, "bias")
            elif scope_names[0] == "output_weights":
                pointer = getattr(pointer, "weight")
            elif scope_names[0] == "squad":
                pointer = getattr(pointer, "classifier")
            else:
                try:
                    pointer = getattr(pointer, scope_names[0])
                except AttributeError:
                    logger.info(f"Skipping {'/'.join(name)}")
                    continue
            if len(scope_names) >= 2:
                num = int(scope_names[1])
                pointer = pointer[num]
        if m_name[-11:] == "_embeddings":
            pointer = getattr(pointer, "weight")
        elif m_name == "kernel":
            array = np.transpose(array)
        try:
            assert (
                pointer.shape == array.shape
            ), f"Pointer shape {pointer.shape} and array shape {array.shape} mismatched"
        except AssertionError as e:
            e.args += (pointer.shape, array.shape)
            raise
        logger.info(f"Initialize PyTorch weight {name}")
        pointer.data = torch.from_numpy(array)
    return model


class NoNorm(nn.Module):
    def __init__(self, feat_size, eps=None):
        super().__init__()
        self.bias = nn.Parameter(torch.zeros(feat_size))
        self.weight = nn.Parameter(torch.ones(feat_size))

    def forward(self, input_tensor: torch.Tensor) -> torch.Tensor:
        return input_tensor * self.weight + self.bias


NORM2FN = {"layer_norm": nn.LayerNorm, "no_norm": NoNorm}
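# Note: MobileBERT checkpoints are trained with `normalization_type="no_norm"`: NoNorm is a per-channel
# affine transform (weight * x + bias) with no mean/variance statistics, which the MobileBERT paper uses
# in place of LayerNorm to reduce latency on mobile hardware. "layer_norm" is kept as an option for
# configs that want the standard nn.LayerNorm behaviour.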


class MobileBertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.trigram_input = config.trigram_input
        self.embedding_size = config.embedding_size
        self.hidden_size = config.hidden_size
        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        embed_dim_multiplier = 3 if self.trigram_input else 1
        embedded_input_size = self.embedding_size * embed_dim_multiplier
        self.embedding_transformation = nn.Linear(embedded_input_size, config.hidden_size)

        self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.Tensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]
        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)
        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        if self.trigram_input:
            # From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited
            # Devices (https://arxiv.org/abs/2004.02984)
            #
            # The embedding table in BERT models accounts for a substantial proportion of model size. To compress
            # the embedding layer, we reduce the embedding dimension to 128 in MobileBERT.
            # Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512
            # dimensional output.
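            #
            # Concretely (shapes assume the default google/mobilebert-uncased config, embedding_size=128,
            # hidden_size=512): the three shifted copies of the (batch, seq_len, 128) token embeddings are
            # concatenated on the feature axis into (batch, seq_len, 384), and `embedding_transformation`
            # below projects that to (batch, seq_len, 512). Zero-padding at the sequence edges stands in
            # for the "same" padding of the kernel-size-3 convolution described in the paper.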
            inputs_embeds = torch.cat(
                [
                    nn.functional.pad(inputs_embeds[:, 1:], [0, 0, 0, 1, 0, 0], value=0.0),
                    inputs_embeds,
                    nn.functional.pad(inputs_embeds[:, :-1], [0, 0, 1, 0, 0, 0], value=0.0),
                ],
                dim=2,
            )
        if self.trigram_input or self.embedding_size != self.hidden_size:
            inputs_embeds = self.embedding_transformation(inputs_embeds)

        # Add positional embeddings and token type embeddings, then layer
        # normalize and perform dropout.
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)
        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class MobileBertSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = nn.Linear(config.true_hidden_size, self.all_head_size)
        self.key = nn.Linear(config.true_hidden_size, self.all_head_size)
        self.value = nn.Linear(
            config.true_hidden_size if config.use_bottleneck_attention else config.hidden_size, self.all_head_size
        )
        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
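        # Reshape (batch, seq_len, all_head_size) into (batch, num_heads, seq_len, head_size) so that
        # attention scores can be computed per head. With the default config (true_hidden_size=128,
        # num_attention_heads=4) this is (batch, seq_len, 128) -> (batch, 4, seq_len, 32).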
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        query_tensor: torch.Tensor,
        key_tensor: torch.Tensor,
        value_tensor: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
    ) -> Tuple[torch.Tensor]:
        mixed_query_layer = self.query(query_tensor)
        mixed_key_layer = self.key(key_tensor)
        mixed_value_layer = self.value(value_tensor)
        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the MobileBertModel forward() function)
            attention_scores = attention_scores + attention_mask
        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)
        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)
        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask
        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(new_context_layer_shape)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs


class MobileBertSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.use_bottleneck = config.use_bottleneck
        self.dense = nn.Linear(config.true_hidden_size, config.true_hidden_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)
        if not self.use_bottleneck:
            self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
        layer_outputs = self.dense(hidden_states)
        if not self.use_bottleneck:
            layer_outputs = self.dropout(layer_outputs)
        layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
        return layer_outputs


class MobileBertAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.self = MobileBertSelfAttention(config)
        self.output = MobileBertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )
        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        query_tensor: torch.Tensor,
        key_tensor: torch.Tensor,
        value_tensor: torch.Tensor,
        layer_input: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
    ) -> Tuple[torch.Tensor]:
        self_outputs = self.self(
            query_tensor,
            key_tensor,
            value_tensor,
            attention_mask,
            head_mask,
            output_attentions,
        )
        # Run a linear projection of `hidden_size` then add a residual
        # with `layer_input`.
        attention_output = self.output(self_outputs[0], layer_input)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class MobileBertIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.true_hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class OutputBottleneck(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.true_hidden_size, config.hidden_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
        layer_outputs = self.dense(hidden_states)
        layer_outputs = self.dropout(layer_outputs)
        layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
        return layer_outputs


class MobileBertOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.use_bottleneck = config.use_bottleneck
        self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size)
        if not self.use_bottleneck:
            self.dropout = nn.Dropout(config.hidden_dropout_prob)
        else:
            self.bottleneck = OutputBottleneck(config)

    def forward(
        self, intermediate_states: torch.Tensor, residual_tensor_1: torch.Tensor, residual_tensor_2: torch.Tensor
    ) -> torch.Tensor:
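        # When `use_bottleneck` is set (the MobileBERT default), the FFN output (true_hidden_size wide,
        # 128 by default) is layer-normed against the attention-path residual (`residual_tensor_1`), and
        # `OutputBottleneck` then projects it back up to hidden_size (512 by default), applying dropout and
        # a final residual against the block input (`residual_tensor_2`).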
        layer_output = self.dense(intermediate_states)
        if not self.use_bottleneck:
            layer_output = self.dropout(layer_output)
            layer_output = self.LayerNorm(layer_output + residual_tensor_1)
        else:
            layer_output = self.LayerNorm(layer_output + residual_tensor_1)
            layer_output = self.bottleneck(layer_output, residual_tensor_2)
        return layer_output


class BottleneckLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intra_bottleneck_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.intra_bottleneck_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        layer_input = self.dense(hidden_states)
        layer_input = self.LayerNorm(layer_input)
        return layer_input


class Bottleneck(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.key_query_shared_bottleneck = config.key_query_shared_bottleneck
        self.use_bottleneck_attention = config.use_bottleneck_attention
        self.input = BottleneckLayer(config)
        if self.key_query_shared_bottleneck:
            self.attention = BottleneckLayer(config)

    def forward(self, hidden_states: torch.Tensor) -> Tuple[torch.Tensor]:
        # This method can return three different tuples of values. These different values make use of bottlenecks,
        # which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory
        # usage. These linear layers have weights that are learned during training.
        #
        # If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the
        # key, query, value, and "layer input" to be used by the attention layer.
        # This bottleneck is used to project the hidden states. This last layer input will be used as a residual
        # tensor in the attention self output, after the attention scores have been computed.
        #
        # If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return
        # four values, three of which have been passed through a bottleneck: the query and key, passed through the
        # same bottleneck, and the residual layer to be applied in the attention self output, through another
        # bottleneck.
        #
        # Finally, in the last case, the values for the query, key and values are the hidden states without
        # bottleneck, and the residual layer will be this value passed through a bottleneck.
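        #
        # Summary of the returned (query, key, value, layer_input) tuple (dims assume the default
        # mobilebert-uncased config: hidden_size=512, intra_bottleneck_size=128):
        #   use_bottleneck_attention=True : (bottleneck, bottleneck, bottleneck, bottleneck)  - all 128-dim
        #   key_query_shared_bottleneck   : (shared_bottleneck, shared_bottleneck, hidden_states, bottleneck)
        #   otherwise                     : (hidden_states, hidden_states, hidden_states, bottleneck)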
        bottlenecked_hidden_states = self.input(hidden_states)
        if self.use_bottleneck_attention:
            return (bottlenecked_hidden_states,) * 4
        elif self.key_query_shared_bottleneck:
            shared_attention_input = self.attention(hidden_states)
            return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states)
        else:
            return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states)


class FFNOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.true_hidden_size)
        self.LayerNorm = NORM2FN[config.normalization_type](config.true_hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor, residual_tensor: torch.Tensor) -> torch.Tensor:
        layer_outputs = self.dense(hidden_states)
        layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
        return layer_outputs


class FFNLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.intermediate = MobileBertIntermediate(config)
        self.output = FFNOutput(config)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        intermediate_output = self.intermediate(hidden_states)
        layer_outputs = self.output(intermediate_output, hidden_states)
        return layer_outputs


class MobileBertLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.use_bottleneck = config.use_bottleneck
        self.num_feedforward_networks = config.num_feedforward_networks
        self.attention = MobileBertAttention(config)
        self.intermediate = MobileBertIntermediate(config)
        self.output = MobileBertOutput(config)
        if self.use_bottleneck:
            self.bottleneck = Bottleneck(config)
        if config.num_feedforward_networks > 1:
            self.ffn = nn.ModuleList([FFNLayer(config) for _ in range(config.num_feedforward_networks - 1)])

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
    ) -> Tuple[torch.Tensor]:
        if self.use_bottleneck:
            query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states)
        else:
            query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4
        self_attention_outputs = self.attention(
            query_tensor,
            key_tensor,
            value_tensor,
            layer_input,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        s = (attention_output,)
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights
        if self.num_feedforward_networks != 1:
            for i, ffn_module in enumerate(self.ffn):
                attention_output = ffn_module(attention_output)
                s += (attention_output,)
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output, hidden_states)
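        # Besides the standard layer output (and optional attention probabilities), the tuple below also
        # carries the layer's intermediate tensors, prefixed with a constant `torch.tensor(1000)` marker.
        # MobileBertEncoder only consumes layer_outputs[0] (and [1] when attentions are requested), so
        # these extra entries are effectively inspection/debugging outputs.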
        outputs = (
            (layer_output,)
            + outputs
            + (
                torch.tensor(1000),
                query_tensor,
                key_tensor,
                value_tensor,
                layer_input,
                attention_output,
                intermediate_output,
            )
            + s
        )
        return outputs


class MobileBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.layer = nn.ModuleList([MobileBertLayer(config) for _ in range(config.num_hidden_layers)])

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, BaseModelOutput]:
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states,
                attention_mask,
                head_mask[i],
                output_attentions,
            )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
        return BaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )


class MobileBertPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.do_activate = config.classifier_activation
        if self.do_activate:
            self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        # We "pool" the model by simply taking the hidden state corresponding
        # to the first token.
        first_token_tensor = hidden_states[:, 0]
        if not self.do_activate:
            return first_token_tensor
        else:
            pooled_output = self.dense(first_token_tensor)
            pooled_output = torch.tanh(pooled_output)
            return pooled_output


class MobileBertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


class MobileBertLMPredictionHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.transform = MobileBertPredictionHeadTransform(config)
        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
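        # Because the tied word embeddings are only `embedding_size` (128) wide while the transformed hidden
        # states are `hidden_size` (512) wide, the vocabulary projection is split in two pieces:
        # `decoder.weight` ([vocab_size, embedding_size], tied to the input embeddings) and `dense.weight`
        # ([hidden_size - embedding_size, vocab_size]). `forward` concatenates them into a single
        # [hidden_size, vocab_size] matrix before the matmul.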
        self.dense = nn.Linear(config.vocab_size, config.hidden_size - config.embedding_size, bias=False)
        self.decoder = nn.Linear(config.embedding_size, config.vocab_size, bias=False)
        self.bias = nn.Parameter(torch.zeros(config.vocab_size))
        # Need a link between the two variables so that the bias is correctly resized with `resize_token_embeddings`
        self.decoder.bias = self.bias

    def _tie_weights(self) -> None:
        self.decoder.bias = self.bias

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.transform(hidden_states)
        hidden_states = hidden_states.matmul(torch.cat([self.decoder.weight.t(), self.dense.weight], dim=0))
        hidden_states += self.decoder.bias
        return hidden_states


class MobileBertOnlyMLMHead(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = MobileBertLMPredictionHead(config)

    def forward(self, sequence_output: torch.Tensor) -> torch.Tensor:
        prediction_scores = self.predictions(sequence_output)
        return prediction_scores


class MobileBertPreTrainingHeads(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.predictions = MobileBertLMPredictionHead(config)
        self.seq_relationship = nn.Linear(config.hidden_size, 2)

    def forward(self, sequence_output: torch.Tensor, pooled_output: torch.Tensor) -> Tuple[torch.Tensor]:
        prediction_scores = self.predictions(sequence_output)
        seq_relationship_score = self.seq_relationship(pooled_output)
        return prediction_scores, seq_relationship_score


class MobileBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = MobileBertConfig
    load_tf_weights = load_tf_weights_in_mobilebert
    base_model_prefix = "mobilebert"

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, (nn.LayerNorm, NoNorm)):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


@dataclass
class MobileBertForPreTrainingOutput(ModelOutput):
    """
    Output type of [`MobileBertForPreTraining`].

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Total loss as the sum of the masked language modeling loss and the next sequence prediction
            (classification) loss.
        prediction_logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
        seq_relationship_logits (`torch.FloatTensor` of shape `(batch_size, 2)`):
            Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
            before SoftMax).
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings + one for the output of each layer) of
            shape `(batch_size, sequence_length, hidden_size)`.

            Hidden-states of the model at the output of each layer plus the initial embedding outputs.
        attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights after the attention softmax, used to compute the weighted average in the self-attention
            heads.
    """

    loss: Optional[torch.FloatTensor] = None
    prediction_logits: torch.FloatTensor = None
    seq_relationship_logits: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None


MOBILEBERT_START_DOCSTRING = r"""
    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage
    and behavior.

    Parameters:
        config ([`MobileBertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

MOBILEBERT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.",
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertModel(MobileBertPreTrainedModel):
    """
    https://arxiv.org/pdf/2004.02984.pdf
    """

    def __init__(self, config, add_pooling_layer=True):
        super().__init__(config)
        self.config = config
        self.embeddings = MobileBertEmbeddings(config)
        self.encoder = MobileBertEncoder(config)
        self.pooler = MobileBertPooler(config) if add_pooling_layer else None
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPooling,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithPooling]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)
        if token_type_ids is None:
            token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        # We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
        # ourselves in which case we just need to make it broadcastable to all heads.
        extended_attention_mask: torch.Tensor = self.get_extended_attention_mask(attention_mask, input_shape)
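        # `get_extended_attention_mask` (inherited from PreTrainedModel) turns a [batch_size, seq_length]
        # mask of 1s/0s into a [batch_size, 1, 1, seq_length] additive mask: 0.0 where attention is allowed
        # and the dtype's most negative value where it is masked, so it can simply be added to the raw
        # attention scores before the softmax.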
        # Prepare head mask if needed
        # 1.0 in head_mask indicates we keep the head
        # attention_probs has shape bsz x n_heads x N x N
        # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
        # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        embedding_output = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )
        encoder_outputs = self.encoder(
            embedding_output,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output) if self.pooler is not None else None

        if not return_dict:
            return (sequence_output, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPooling(
            last_hidden_state=sequence_output,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )


@add_start_docstrings(
    """
    MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
    `next sentence prediction (classification)` head.
    """,
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForPreTraining(MobileBertPreTrainedModel):
    _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]

    def __init__(self, config):
        super().__init__(config)
        self.mobilebert = MobileBertModel(config)
        self.cls = MobileBertPreTrainingHeads(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.cls.predictions.decoder

    def set_output_embeddings(self, new_embeddings):
        self.cls.predictions.decoder = new_embeddings
        self.cls.predictions.bias = new_embeddings.bias

    def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
        # resize dense output embeddings first
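        # The extra `dense` projection of the LM head has `vocab_size` input features (its weight is
        # [hidden_size - embedding_size, vocab_size]), so it must be resized alongside the tied input/output
        # embeddings whenever the vocabulary grows or shrinks; `transposed=True` tells the helper that the
        # vocabulary dimension is the second one.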
  799. self.cls.predictions.dense = self._get_resized_lm_head(
  800. self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True
  801. )
  802. return super().resize_token_embeddings(new_num_tokens=new_num_tokens)
  803. @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
  804. @replace_return_docstrings(output_type=MobileBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
  805. def forward(
  806. self,
  807. input_ids: Optional[torch.LongTensor] = None,
  808. attention_mask: Optional[torch.FloatTensor] = None,
  809. token_type_ids: Optional[torch.LongTensor] = None,
  810. position_ids: Optional[torch.LongTensor] = None,
  811. head_mask: Optional[torch.FloatTensor] = None,
  812. inputs_embeds: Optional[torch.FloatTensor] = None,
  813. labels: Optional[torch.LongTensor] = None,
  814. next_sentence_label: Optional[torch.LongTensor] = None,
  815. output_attentions: Optional[torch.FloatTensor] = None,
  816. output_hidden_states: Optional[torch.FloatTensor] = None,
  817. return_dict: Optional[torch.FloatTensor] = None,
  818. ) -> Union[Tuple, MobileBertForPreTrainingOutput]:
  819. r"""
  820. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
  821. Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
  822. config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
  823. loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
  824. next_sentence_label (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
  825. Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
  826. (see `input_ids` docstring) Indices should be in `[0, 1]`:
  827. - 0 indicates sequence B is a continuation of sequence A,
  828. - 1 indicates sequence B is a random sequence.
  829. Returns:
  830. Examples:
  831. ```python
  832. >>> from transformers import AutoTokenizer, MobileBertForPreTraining
  833. >>> import torch
  834. >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
  835. >>> model = MobileBertForPreTraining.from_pretrained("google/mobilebert-uncased")
  836. >>> input_ids = torch.tensor(tokenizer.encode("Hello, my dog is cute", add_special_tokens=True)).unsqueeze(0)
  837. >>> # Batch size 1
  838. >>> outputs = model(input_ids)
  839. >>> prediction_logits = outputs.prediction_logits
  840. >>> seq_relationship_logits = outputs.seq_relationship_logits
  841. ```"""
  842. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  843. outputs = self.mobilebert(
  844. input_ids,
  845. attention_mask=attention_mask,
  846. token_type_ids=token_type_ids,
  847. position_ids=position_ids,
  848. head_mask=head_mask,
  849. inputs_embeds=inputs_embeds,
  850. output_attentions=output_attentions,
  851. output_hidden_states=output_hidden_states,
  852. return_dict=return_dict,
  853. )
  854. sequence_output, pooled_output = outputs[:2]
  855. prediction_scores, seq_relationship_score = self.cls(sequence_output, pooled_output)
  856. total_loss = None
  857. if labels is not None and next_sentence_label is not None:
  858. loss_fct = CrossEntropyLoss()
  859. masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
  860. next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), next_sentence_label.view(-1))
  861. total_loss = masked_lm_loss + next_sentence_loss
  862. if not return_dict:
  863. output = (prediction_scores, seq_relationship_score) + outputs[2:]
  864. return ((total_loss,) + output) if total_loss is not None else output
  865. return MobileBertForPreTrainingOutput(
  866. loss=total_loss,
  867. prediction_logits=prediction_scores,
  868. seq_relationship_logits=seq_relationship_score,
  869. hidden_states=outputs.hidden_states,
  870. attentions=outputs.attentions,
  871. )
  872. @add_start_docstrings("""MobileBert Model with a `language modeling` head on top.""", MOBILEBERT_START_DOCSTRING)
  873. class MobileBertForMaskedLM(MobileBertPreTrainedModel):
  874. _tied_weights_keys = ["cls.predictions.decoder.weight", "cls.predictions.decoder.bias"]
  875. def __init__(self, config):
  876. super().__init__(config)
  877. self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
  878. self.cls = MobileBertOnlyMLMHead(config)
  879. self.config = config
  880. # Initialize weights and apply final processing
  881. self.post_init()
  882. def get_output_embeddings(self):
  883. return self.cls.predictions.decoder
  884. def set_output_embeddings(self, new_embeddings):
  885. self.cls.predictions.decoder = new_embeddings
  886. self.cls.predictions.bias = new_embeddings.bias
  887. def resize_token_embeddings(self, new_num_tokens: Optional[int] = None) -> nn.Embedding:
  888. # resize dense output embedings at first
  889. self.cls.predictions.dense = self._get_resized_lm_head(
  890. self.cls.predictions.dense, new_num_tokens=new_num_tokens, transposed=True
  891. )
  892. return super().resize_token_embeddings(new_num_tokens=new_num_tokens)
  893. @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
  894. @add_code_sample_docstrings(
  895. checkpoint=_CHECKPOINT_FOR_DOC,
  896. output_type=MaskedLMOutput,
  897. config_class=_CONFIG_FOR_DOC,
  898. expected_output="'paris'",
  899. expected_loss=0.57,
  900. )
  901. def forward(
  902. self,
  903. input_ids: Optional[torch.LongTensor] = None,
  904. attention_mask: Optional[torch.FloatTensor] = None,
  905. token_type_ids: Optional[torch.LongTensor] = None,
  906. position_ids: Optional[torch.LongTensor] = None,
  907. head_mask: Optional[torch.FloatTensor] = None,
  908. inputs_embeds: Optional[torch.FloatTensor] = None,
  909. labels: Optional[torch.LongTensor] = None,
  910. output_attentions: Optional[bool] = None,
  911. output_hidden_states: Optional[bool] = None,
  912. return_dict: Optional[bool] = None,
  913. ) -> Union[Tuple, MaskedLMOutput]:
  914. r"""
  915. labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
  916. Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
  917. config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
  918. loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
  919. """
  920. return_dict = return_dict if return_dict is not None else self.config.use_return_dict
  921. outputs = self.mobilebert(
  922. input_ids,
  923. attention_mask=attention_mask,
  924. token_type_ids=token_type_ids,
  925. position_ids=position_ids,
  926. head_mask=head_mask,
  927. inputs_embeds=inputs_embeds,
  928. output_attentions=output_attentions,
  929. output_hidden_states=output_hidden_states,
  930. return_dict=return_dict,
  931. )
  932. sequence_output = outputs[0]
  933. prediction_scores = self.cls(sequence_output)
  934. masked_lm_loss = None
  935. if labels is not None:
  936. loss_fct = CrossEntropyLoss() # -100 index = padding token
  937. masked_lm_loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))
  938. if not return_dict:
  939. output = (prediction_scores,) + outputs[2:]
  940. return ((masked_lm_loss,) + output) if masked_lm_loss is not None else output
  941. return MaskedLMOutput(
  942. loss=masked_lm_loss,
  943. logits=prediction_scores,
  944. hidden_states=outputs.hidden_states,
  945. attentions=outputs.attentions,
  946. )
  947. class MobileBertOnlyNSPHead(nn.Module):
  948. def __init__(self, config):
  949. super().__init__()
  950. self.seq_relationship = nn.Linear(config.hidden_size, 2)
  951. def forward(self, pooled_output: torch.Tensor) -> torch.Tensor:
  952. seq_relationship_score = self.seq_relationship(pooled_output)
  953. return seq_relationship_score


@add_start_docstrings(
    """MobileBert Model with a `next sentence prediction (classification)` head on top.""",
    MOBILEBERT_START_DOCSTRING,
)
class MobileBertForNextSentencePrediction(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.mobilebert = MobileBertModel(config)
        self.cls = MobileBertOnlyNSPHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @replace_return_docstrings(output_type=NextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        **kwargs,
    ) -> Union[Tuple, NextSentencePredictorOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the next sequence prediction (classification) loss. Input should be a sequence pair
            (see `input_ids` docstring). Indices should be in `[0, 1]`:

            - 0 indicates sequence B is a continuation of sequence A,
            - 1 indicates sequence B is a random sequence.

        Returns:

        Examples:

        ```python
        >>> from transformers import AutoTokenizer, MobileBertForNextSentencePrediction
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
        >>> model = MobileBertForNextSentencePrediction.from_pretrained("google/mobilebert-uncased")

        >>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
        >>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
        >>> encoding = tokenizer(prompt, next_sentence, return_tensors="pt")

        >>> outputs = model(**encoding, labels=torch.LongTensor([1]))
        >>> loss = outputs.loss
        >>> logits = outputs.logits
        ```"""
        if "next_sentence_label" in kwargs:
            warnings.warn(
                "The `next_sentence_label` argument is deprecated and will be removed in a future version, use"
                " `labels` instead.",
                FutureWarning,
            )
            labels = kwargs.pop("next_sentence_label")

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]
        seq_relationship_score = self.cls(pooled_output)

        next_sentence_loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            next_sentence_loss = loss_fct(seq_relationship_score.view(-1, 2), labels.view(-1))

        if not return_dict:
            output = (seq_relationship_score,) + outputs[2:]
            return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output

        return NextSentencePredictorOutput(
            loss=next_sentence_loss,
            logits=seq_relationship_score,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    MOBILEBERT_START_DOCSTRING,
)
# Copied from transformers.models.bert.modeling_bert.BertForSequenceClassification with Bert->MobileBert all-casing
class MobileBertForSequenceClassification(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config

        self.mobilebert = MobileBertModel(config)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_SEQUENCE_CLASSIFICATION,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
        expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
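

# Illustrative usage sketch (not part of the original modeling file): shows how the
# `problem_type` branching above picks the loss. Integer labels with `num_labels > 1`
# select single-label classification (cross-entropy); float labels with `num_labels == 1`
# would select regression (MSE). The checkpoint name and toy inputs are assumptions used
# only for demonstration.
def _example_sequence_classification():  # pragma: no cover - documentation-only sketch
    import torch
    from transformers import AutoTokenizer, MobileBertForSequenceClassification

    tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
    model = MobileBertForSequenceClassification.from_pretrained("google/mobilebert-uncased", num_labels=2)
    inputs = tokenizer("MobileBERT keeps the hidden size small.", return_tensors="pt")
    outputs = model(**inputs, labels=torch.tensor([1]))  # long labels -> cross-entropy branch
    return outputs.loss, outputs.logits  # logits: (batch_size, num_labels)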


@add_start_docstrings(
    """
    MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
    linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    MOBILEBERT_START_DOCSTRING,
)
# Copied from transformers.models.bert.modeling_bert.BertForQuestionAnswering with Bert->MobileBert all-casing
class MobileBertForQuestionAnswering(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_QA,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
        qa_target_start_index=_QA_TARGET_START_INDEX,
        qa_target_end_index=_QA_TARGET_END_INDEX,
        expected_output=_QA_EXPECTED_OUTPUT,
        expected_loss=_QA_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        start_positions: Optional[torch.Tensor] = None,
        end_positions: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, split adds an extra dimension
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs, we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[2:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
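

# Illustrative usage sketch (not part of the original modeling file): decodes an answer
# span from the start/end logits produced above. The checkpoint name
# ("csarron/mobilebert-uncased-squad-v2"), the question, and the context are assumptions
# used only for demonstration; any SQuAD-style fine-tuned MobileBERT checkpoint works.
def _example_question_answering():  # pragma: no cover - documentation-only sketch
    import torch
    from transformers import AutoTokenizer, MobileBertForQuestionAnswering

    tokenizer = AutoTokenizer.from_pretrained("csarron/mobilebert-uncased-squad-v2")
    model = MobileBertForQuestionAnswering.from_pretrained("csarron/mobilebert-uncased-squad-v2")
    question = "Where do pandas live?"
    context = "Giant pandas live in the mountain ranges of central China."
    inputs = tokenizer(question, context, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    # Greedy span decoding: take the argmax of the start and end logits independently.
    start = outputs.start_logits.argmax(dim=-1).item()
    end = outputs.end_logits.argmax(dim=-1).item()
    return tokenizer.decode(inputs.input_ids[0, start : end + 1])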


@add_start_docstrings(
    """
    MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
    a softmax) e.g. for RocStories/SWAG tasks.
    """,
    MOBILEBERT_START_DOCSTRING,
)
# Copied from transformers.models.bert.modeling_bert.BertForMultipleChoice with Bert->MobileBert all-casing
class MobileBertForMultipleChoice(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.mobilebert = MobileBertModel(config)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(
        MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        pooled_output = outputs[1]

        pooled_output = self.dropout(pooled_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
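

# Illustrative usage sketch (not part of the original modeling file): shows how inputs of
# shape (batch_size, num_choices, sequence_length) are built and then flattened by the
# head above. The checkpoint name and the toy prompt/choices are assumptions used only
# for demonstration.
def _example_multiple_choice():  # pragma: no cover - documentation-only sketch
    import torch
    from transformers import AutoTokenizer, MobileBertForMultipleChoice

    tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
    model = MobileBertForMultipleChoice.from_pretrained("google/mobilebert-uncased")
    prompt = "The cat sat on the"
    choices = ["mat.", "cloud."]
    # Pair the prompt with each choice, then add the num_choices dimension expected above.
    encoding = tokenizer([prompt, prompt], choices, return_tensors="pt", padding=True)
    inputs = {k: v.unsqueeze(0) for k, v in encoding.items()}  # (1, num_choices, seq_len)
    outputs = model(**inputs, labels=torch.tensor([0]))
    return outputs.loss, outputs.logits  # logits: (batch_size, num_choices)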


@add_start_docstrings(
    """
    MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
    for Named-Entity-Recognition (NER) tasks.
    """,
    MOBILEBERT_START_DOCSTRING,
)
# Copied from transformers.models.bert.modeling_bert.BertForTokenClassification with Bert->MobileBert all-casing
class MobileBertForTokenClassification(MobileBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.mobilebert = MobileBertModel(config, add_pooling_layer=False)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_TOKEN_CLASSIFICATION,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_TOKEN_CLASS_EXPECTED_OUTPUT,
        expected_loss=_TOKEN_CLASS_EXPECTED_LOSS,
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        token_type_ids: Optional[torch.Tensor] = None,
        position_ids: Optional[torch.Tensor] = None,
        head_mask: Optional[torch.Tensor] = None,
        inputs_embeds: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple[torch.Tensor], TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.mobilebert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
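

# Illustrative usage sketch (not part of the original modeling file): maps the per-token
# logits produced above to predicted label ids for a NER-style task. The checkpoint name
# and `num_labels=5` are assumptions; a fine-tuned NER checkpoint would also provide an
# `id2label` mapping in its config for turning ids into tag strings.
def _example_token_classification():  # pragma: no cover - documentation-only sketch
    import torch
    from transformers import AutoTokenizer, MobileBertForTokenClassification

    tokenizer = AutoTokenizer.from_pretrained("google/mobilebert-uncased")
    model = MobileBertForTokenClassification.from_pretrained("google/mobilebert-uncased", num_labels=5)
    inputs = tokenizer("Alice lives in Paris", return_tensors="pt")
    with torch.no_grad():
        logits = model(**inputs).logits  # (batch_size, sequence_length, num_labels)
    predicted_ids = logits.argmax(dim=-1)
    return predicted_ids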