# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch ConvBERT model."""

import math
import os
from operator import attrgetter
from typing import Optional, Tuple, Union

import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN, get_activation
from ...modeling_outputs import (
    BaseModelOutputWithCrossAttentions,
    MaskedLMOutput,
    MultipleChoiceModelOutput,
    QuestionAnsweringModelOutput,
    SequenceClassifierOutput,
    TokenClassifierOutput,
)
from ...modeling_utils import PreTrainedModel, SequenceSummary
from ...pytorch_utils import apply_chunking_to_forward, find_pruneable_heads_and_indices, prune_linear_layer
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_convbert import ConvBertConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "YituTech/conv-bert-base"
_CONFIG_FOR_DOC = "ConvBertConfig"


def load_tf_weights_in_convbert(model, config, tf_checkpoint_path):
    """Load TF checkpoints in a PyTorch model."""
    try:
        import tensorflow as tf
    except ImportError:
        logger.error(
            "Loading a TensorFlow model in PyTorch requires TensorFlow to be installed. Please see "
            "https://www.tensorflow.org/install/ for installation instructions."
        )
        raise
    tf_path = os.path.abspath(tf_checkpoint_path)
    logger.info(f"Converting TensorFlow checkpoint from {tf_path}")
    # Load weights from TF model
    init_vars = tf.train.list_variables(tf_path)
    tf_data = {}
    for name, shape in init_vars:
        logger.info(f"Loading TF weight {name} with shape {shape}")
        array = tf.train.load_variable(tf_path, name)
        tf_data[name] = array

    param_mapping = {
        "embeddings.word_embeddings.weight": "electra/embeddings/word_embeddings",
        "embeddings.position_embeddings.weight": "electra/embeddings/position_embeddings",
        "embeddings.token_type_embeddings.weight": "electra/embeddings/token_type_embeddings",
        "embeddings.LayerNorm.weight": "electra/embeddings/LayerNorm/gamma",
        "embeddings.LayerNorm.bias": "electra/embeddings/LayerNorm/beta",
        "embeddings_project.weight": "electra/embeddings_project/kernel",
        "embeddings_project.bias": "electra/embeddings_project/bias",
    }
    if config.num_groups > 1:
        group_dense_name = "g_dense"
    else:
        group_dense_name = "dense"

    for j in range(config.num_hidden_layers):
        param_mapping[f"encoder.layer.{j}.attention.self.query.weight"] = (
            f"electra/encoder/layer_{j}/attention/self/query/kernel"
        )
        param_mapping[f"encoder.layer.{j}.attention.self.query.bias"] = (
            f"electra/encoder/layer_{j}/attention/self/query/bias"
        )
        param_mapping[f"encoder.layer.{j}.attention.self.key.weight"] = (
            f"electra/encoder/layer_{j}/attention/self/key/kernel"
        )
        param_mapping[f"encoder.layer.{j}.attention.self.key.bias"] = (
            f"electra/encoder/layer_{j}/attention/self/key/bias"
        )
        param_mapping[f"encoder.layer.{j}.attention.self.value.weight"] = (
            f"electra/encoder/layer_{j}/attention/self/value/kernel"
        )
        param_mapping[f"encoder.layer.{j}.attention.self.value.bias"] = (
            f"electra/encoder/layer_{j}/attention/self/value/bias"
        )
        param_mapping[f"encoder.layer.{j}.attention.self.key_conv_attn_layer.depthwise.weight"] = (
            f"electra/encoder/layer_{j}/attention/self/conv_attn_key/depthwise_kernel"
        )
        param_mapping[f"encoder.layer.{j}.attention.self.key_conv_attn_layer.pointwise.weight"] = (
            f"electra/encoder/layer_{j}/attention/self/conv_attn_key/pointwise_kernel"
        )
        param_mapping[f"encoder.layer.{j}.attention.self.key_conv_attn_layer.bias"] = (
            f"electra/encoder/layer_{j}/attention/self/conv_attn_key/bias"
        )
        param_mapping[f"encoder.layer.{j}.attention.self.conv_kernel_layer.weight"] = (
            f"electra/encoder/layer_{j}/attention/self/conv_attn_kernel/kernel"
        )
        param_mapping[f"encoder.layer.{j}.attention.self.conv_kernel_layer.bias"] = (
            f"electra/encoder/layer_{j}/attention/self/conv_attn_kernel/bias"
        )
        param_mapping[f"encoder.layer.{j}.attention.self.conv_out_layer.weight"] = (
            f"electra/encoder/layer_{j}/attention/self/conv_attn_point/kernel"
        )
        param_mapping[f"encoder.layer.{j}.attention.self.conv_out_layer.bias"] = (
            f"electra/encoder/layer_{j}/attention/self/conv_attn_point/bias"
        )
        param_mapping[f"encoder.layer.{j}.attention.output.dense.weight"] = (
            f"electra/encoder/layer_{j}/attention/output/dense/kernel"
        )
        param_mapping[f"encoder.layer.{j}.attention.output.LayerNorm.weight"] = (
            f"electra/encoder/layer_{j}/attention/output/LayerNorm/gamma"
        )
        param_mapping[f"encoder.layer.{j}.attention.output.dense.bias"] = (
            f"electra/encoder/layer_{j}/attention/output/dense/bias"
        )
        param_mapping[f"encoder.layer.{j}.attention.output.LayerNorm.bias"] = (
            f"electra/encoder/layer_{j}/attention/output/LayerNorm/beta"
        )
        param_mapping[f"encoder.layer.{j}.intermediate.dense.weight"] = (
            f"electra/encoder/layer_{j}/intermediate/{group_dense_name}/kernel"
        )
        param_mapping[f"encoder.layer.{j}.intermediate.dense.bias"] = (
            f"electra/encoder/layer_{j}/intermediate/{group_dense_name}/bias"
        )
        param_mapping[f"encoder.layer.{j}.output.dense.weight"] = (
            f"electra/encoder/layer_{j}/output/{group_dense_name}/kernel"
        )
        param_mapping[f"encoder.layer.{j}.output.dense.bias"] = (
            f"electra/encoder/layer_{j}/output/{group_dense_name}/bias"
        )
        param_mapping[f"encoder.layer.{j}.output.LayerNorm.weight"] = (
            f"electra/encoder/layer_{j}/output/LayerNorm/gamma"
        )
        param_mapping[f"encoder.layer.{j}.output.LayerNorm.bias"] = f"electra/encoder/layer_{j}/output/LayerNorm/beta"

    for param in model.named_parameters():
        param_name = param[0]
        retriever = attrgetter(param_name)
        result = retriever(model)
        tf_name = param_mapping[param_name]
        value = torch.from_numpy(tf_data[tf_name])
        logger.info(f"TF: {tf_name}, PT: {param_name} ")
        if tf_name.endswith("/kernel"):
            if not tf_name.endswith("/intermediate/g_dense/kernel"):
                if not tf_name.endswith("/output/g_dense/kernel"):
                    value = value.T
        if tf_name.endswith("/depthwise_kernel"):
            value = value.permute(1, 2, 0)  # 2, 0, 1
        if tf_name.endswith("/pointwise_kernel"):
            value = value.permute(2, 1, 0)  # 2, 1, 0
        if tf_name.endswith("/conv_attn_key/bias"):
            value = value.unsqueeze(-1)
        result.data = value
    return model
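

# A minimal usage sketch for the loader above (an editorial reading aid: the
# checkpoint path is a placeholder and the bare `ConvBertConfig()` defaults are
# assumptions, not values taken from this file):
#
#     config = ConvBertConfig()
#     model = ConvBertModel(config)
#     load_tf_weights_in_convbert(model, config, "/path/to/tf_checkpoint")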


class ConvBertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(config.vocab_size, config.embedding_size, padding_idx=config.pad_token_id)
        self.position_embeddings = nn.Embedding(config.max_position_embeddings, config.embedding_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.embedding_size)

        # self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
        # any TensorFlow checkpoint file
        self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)
        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )
        self.register_buffer(
            "token_type_ids", torch.zeros(self.position_ids.size(), dtype=torch.long), persistent=False
        )

    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
    ) -> torch.Tensor:
        if input_ids is not None:
            input_shape = input_ids.size()
        else:
            input_shape = inputs_embeds.size()[:-1]

        seq_length = input_shape[1]

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        # When token_type_ids is not passed (usually because it is auto-generated), fall back to the
        # all-zeros buffer registered in the constructor. This lets users trace the model without
        # passing token_type_ids and solves issue #5664.
        if token_type_ids is None:
            if hasattr(self, "token_type_ids"):
                buffered_token_type_ids = self.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(input_shape[0], seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=self.position_ids.device)

        if inputs_embeds is None:
            inputs_embeds = self.word_embeddings(input_ids)

        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = inputs_embeds + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class ConvBertPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ConvBertConfig
    load_tf_weights = load_tf_weights_in_convbert
    base_model_prefix = "convbert"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, nn.Linear):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.Embedding):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.padding_idx is not None:
                module.weight.data[module.padding_idx].zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)


class SeparableConv1D(nn.Module):
    """This class implements separable convolution, i.e. a depthwise and a pointwise layer"""

    def __init__(self, config, input_filters, output_filters, kernel_size, **kwargs):
        super().__init__()
        self.depthwise = nn.Conv1d(
            input_filters,
            input_filters,
            kernel_size=kernel_size,
            groups=input_filters,
            padding=kernel_size // 2,
            bias=False,
        )
        self.pointwise = nn.Conv1d(input_filters, output_filters, kernel_size=1, bias=False)
        self.bias = nn.Parameter(torch.zeros(output_filters, 1))

        self.depthwise.weight.data.normal_(mean=0.0, std=config.initializer_range)
        self.pointwise.weight.data.normal_(mean=0.0, std=config.initializer_range)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        x = self.depthwise(hidden_states)
        x = self.pointwise(x)
        x += self.bias
        return x
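

# Shape sketch for SeparableConv1D (an editorial reading aid, not part of the
# original file), assuming batch B, input channels C_in, output channels C_out,
# sequence length L:
#     input:     (B, C_in, L)
#     depthwise: (B, C_in, L)   # per-channel conv, groups=C_in
#     pointwise: (B, C_out, L)  # 1x1 conv mixing channels; the (C_out, 1) bias then broadcasts over L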


class ConvBertSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        new_num_attention_heads = config.num_attention_heads // config.head_ratio
        if new_num_attention_heads < 1:
            self.head_ratio = config.num_attention_heads
            self.num_attention_heads = 1
        else:
            self.num_attention_heads = new_num_attention_heads
            self.head_ratio = config.head_ratio

        self.conv_kernel_size = config.conv_kernel_size
        if config.hidden_size % self.num_attention_heads != 0:
            raise ValueError("hidden_size should be divisible by num_attention_heads")

        self.attention_head_size = (config.hidden_size // self.num_attention_heads) // 2
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.key_conv_attn_layer = SeparableConv1D(
            config, config.hidden_size, self.all_head_size, self.conv_kernel_size
        )
        self.conv_kernel_layer = nn.Linear(self.all_head_size, self.num_attention_heads * self.conv_kernel_size)
        self.conv_out_layer = nn.Linear(config.hidden_size, self.all_head_size)

        self.unfold = nn.Unfold(
            kernel_size=[self.conv_kernel_size, 1], padding=[int((self.conv_kernel_size - 1) / 2), 0]
        )

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
        mixed_query_layer = self.query(hidden_states)
        batch_size = hidden_states.size(0)
        # If this is instantiated as a cross-attention module, the keys
        # and values come from an encoder; the attention mask needs to be
        # such that the encoder's padding tokens are not attended to.
        if encoder_hidden_states is not None:
            mixed_key_layer = self.key(encoder_hidden_states)
            mixed_value_layer = self.value(encoder_hidden_states)
        else:
            mixed_key_layer = self.key(hidden_states)
            mixed_value_layer = self.value(hidden_states)

        mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states.transpose(1, 2))
        mixed_key_conv_attn_layer = mixed_key_conv_attn_layer.transpose(1, 2)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)
        conv_attn_layer = torch.multiply(mixed_key_conv_attn_layer, mixed_query_layer)

        conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
        conv_kernel_layer = torch.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
        conv_kernel_layer = torch.softmax(conv_kernel_layer, dim=1)

        conv_out_layer = self.conv_out_layer(hidden_states)
        conv_out_layer = torch.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
        conv_out_layer = conv_out_layer.transpose(1, 2).contiguous().unsqueeze(-1)
        conv_out_layer = nn.functional.unfold(
            conv_out_layer,
            kernel_size=[self.conv_kernel_size, 1],
            dilation=1,
            padding=[(self.conv_kernel_size - 1) // 2, 0],
            stride=1,
        )
        conv_out_layer = conv_out_layer.transpose(1, 2).reshape(
            batch_size, -1, self.all_head_size, self.conv_kernel_size
        )
        conv_out_layer = torch.reshape(conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
        conv_out_layer = torch.matmul(conv_out_layer, conv_kernel_layer)
        conv_out_layer = torch.reshape(conv_out_layer, [-1, self.all_head_size])

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in ConvBertModel forward() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.functional.softmax(attention_scores, dim=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()

        conv_out = torch.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
        context_layer = torch.cat([context_layer, conv_out], 2)

        # conv and context
        new_context_layer_shape = context_layer.size()[:-2] + (
            self.num_attention_heads * self.attention_head_size * 2,
        )
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
        return outputs
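

# A reading aid for the mixed attention above (editorial, not part of the original
# file). With batch B, sequence length L, heads H, head size D, and kernel size K,
# the two token-mixing paths are:
#     self-attention: softmax(Q K^T / sqrt(D)) V                       -> (B, H, L, D)
#     dynamic conv:   per-position kernels, softmaxed over K, applied
#                     to unfolded windows of conv_out_layer            -> (B*L*H, D, 1)
# The two halves are concatenated on the head axis, so the output width is
# 2 * H * D == hidden_size (when hidden_size is evenly divisible).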


class ConvBertSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class ConvBertAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.self = ConvBertSelfAttention(config)
        self.output = ConvBertSelfOutput(config)
        self.pruned_heads = set()

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        heads, index = find_pruneable_heads_and_indices(
            heads, self.self.num_attention_heads, self.self.attention_head_size, self.pruned_heads
        )

        # Prune linear layers
        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        # Update hyper params and store pruned heads
        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads
        self.pruned_heads = self.pruned_heads.union(heads)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.FloatTensor]]:
        self_outputs = self.self(
            hidden_states,
            attention_mask,
            head_mask,
            encoder_hidden_states,
            output_attentions,
        )
        attention_output = self.output(self_outputs[0], hidden_states)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them
        return outputs


class GroupedLinearLayer(nn.Module):
    def __init__(self, input_size, output_size, num_groups):
        super().__init__()
        self.input_size = input_size
        self.output_size = output_size
        self.num_groups = num_groups
        self.group_in_dim = self.input_size // self.num_groups
        self.group_out_dim = self.output_size // self.num_groups
        self.weight = nn.Parameter(torch.empty(self.num_groups, self.group_in_dim, self.group_out_dim))
        self.bias = nn.Parameter(torch.empty(output_size))

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        batch_size = list(hidden_states.size())[0]
        x = torch.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim])
        x = x.permute(1, 0, 2)
        x = torch.matmul(x, self.weight)
        x = x.permute(1, 0, 2)
        x = torch.reshape(x, [batch_size, -1, self.output_size])
        x = x + self.bias
        return x
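

# Shape sketch for GroupedLinearLayer (an editorial reading aid, not part of the
# original file), assuming batch B, length L, groups G, I = input_size // G,
# O = output_size // G:
#     (B, L, input_size) -> reshape (B*L, G, I) -> permute (G, B*L, I)
#     -> matmul weight (G, I, O) -> (G, B*L, O) -> back to (B, L, output_size)
# Each group mixes only its own slice of features, cutting parameters by roughly a factor of G.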


class ConvBertIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        if config.num_groups == 1:
            self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        else:
            self.dense = GroupedLinearLayer(
                input_size=config.hidden_size, output_size=config.intermediate_size, num_groups=config.num_groups
            )
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = ACT2FN[config.hidden_act]
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class ConvBertOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        if config.num_groups == 1:
            self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        else:
            self.dense = GroupedLinearLayer(
                input_size=config.intermediate_size, output_size=config.hidden_size, num_groups=config.num_groups
            )
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class ConvBertLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.chunk_size_feed_forward = config.chunk_size_feed_forward
        self.seq_len_dim = 1
        self.attention = ConvBertAttention(config)
        self.is_decoder = config.is_decoder
        self.add_cross_attention = config.add_cross_attention
        if self.add_cross_attention:
            if not self.is_decoder:
                raise TypeError(f"{self} should be used as a decoder model if cross attention is added")
            self.crossattention = ConvBertAttention(config)
        self.intermediate = ConvBertIntermediate(config)
        self.output = ConvBertOutput(config)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
    ) -> Tuple[torch.Tensor, Optional[torch.FloatTensor]]:
        self_attention_outputs = self.attention(
            hidden_states,
            attention_mask,
            head_mask,
            output_attentions=output_attentions,
        )
        attention_output = self_attention_outputs[0]
        outputs = self_attention_outputs[1:]  # add self attentions if we output attention weights

        if self.is_decoder and encoder_hidden_states is not None:
            if not hasattr(self, "crossattention"):
                raise AttributeError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with cross-attention layers"
                    " by setting `config.add_cross_attention=True`"
                )
            cross_attention_outputs = self.crossattention(
                attention_output,
                encoder_attention_mask,
                head_mask,
                encoder_hidden_states,
                output_attentions,
            )
            attention_output = cross_attention_outputs[0]
            outputs = outputs + cross_attention_outputs[1:]  # add cross attentions if we output attention weights

        layer_output = apply_chunking_to_forward(
            self.feed_forward_chunk, self.chunk_size_feed_forward, self.seq_len_dim, attention_output
        )
        outputs = (layer_output,) + outputs
        return outputs

    def feed_forward_chunk(self, attention_output):
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        return layer_output


class ConvBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.layer = nn.ModuleList([ConvBertLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.Tensor] = None,
        encoder_attention_mask: Optional[torch.Tensor] = None,
        output_attentions: Optional[bool] = False,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]:
        all_hidden_states = () if output_hidden_states else None
        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_head_mask = head_mask[i] if head_mask is not None else None

            if self.gradient_checkpointing and self.training:
                layer_outputs = self._gradient_checkpointing_func(
                    layer_module.__call__,
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    output_attentions,
                )
            else:
                layer_outputs = layer_module(
                    hidden_states,
                    attention_mask,
                    layer_head_mask,
                    encoder_hidden_states,
                    encoder_attention_mask,
                    output_attentions,
                )
            hidden_states = layer_outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (layer_outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (layer_outputs[2],)

        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, all_hidden_states, all_self_attentions, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithCrossAttentions(
            last_hidden_state=hidden_states,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


class ConvBertPredictionHeadTransform(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        if isinstance(config.hidden_act, str):
            self.transform_act_fn = ACT2FN[config.hidden_act]
        else:
            self.transform_act_fn = config.hidden_act
        self.LayerNorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


CONVBERT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

CONVBERT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
            [`PreTrainedTokenizer.__call__`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.FloatTensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`torch.LongTensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.
        inputs_embeds (`torch.FloatTensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""


@add_start_docstrings(
    "The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.",
    CONVBERT_START_DOCSTRING,
)
class ConvBertModel(ConvBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.embeddings = ConvBertEmbeddings(config)

        if config.embedding_size != config.hidden_size:
            self.embeddings_project = nn.Linear(config.embedding_size, config.hidden_size)

        self.encoder = ConvBertEncoder(config)
        self.config = config
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.word_embeddings

    def set_input_embeddings(self, value):
        self.embeddings.word_embeddings = value

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithCrossAttentions,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithCrossAttentions]:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        batch_size, seq_length = input_shape
        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if attention_mask is None:
            attention_mask = torch.ones(input_shape, device=device)

        if token_type_ids is None:
            if hasattr(self.embeddings, "token_type_ids"):
                buffered_token_type_ids = self.embeddings.token_type_ids[:, :seq_length]
                buffered_token_type_ids_expanded = buffered_token_type_ids.expand(batch_size, seq_length)
                token_type_ids = buffered_token_type_ids_expanded
            else:
                token_type_ids = torch.zeros(input_shape, dtype=torch.long, device=device)

        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape)
        head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)

        hidden_states = self.embeddings(
            input_ids=input_ids, position_ids=position_ids, token_type_ids=token_type_ids, inputs_embeds=inputs_embeds
        )

        if hasattr(self, "embeddings_project"):
            hidden_states = self.embeddings_project(hidden_states)

        hidden_states = self.encoder(
            hidden_states,
            attention_mask=extended_attention_mask,
            head_mask=head_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        return hidden_states
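

# Minimal usage sketch for the bare model (editorial; the checkpoint name comes from
# _CHECKPOINT_FOR_DOC above, and the tokenizer call is the standard transformers API):
#
#     from transformers import AutoTokenizer, ConvBertModel
#
#     tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
#     model = ConvBertModel.from_pretrained("YituTech/conv-bert-base")
#     inputs = tokenizer("Hello, ConvBERT!", return_tensors="pt")
#     outputs = model(**inputs)
#     last_hidden_state = outputs.last_hidden_state  # (batch_size, seq_len, hidden_size)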


class ConvBertGeneratorPredictions(nn.Module):
    """Prediction module for the generator, made up of two dense layers."""

    def __init__(self, config):
        super().__init__()
        self.activation = get_activation("gelu")
        self.LayerNorm = nn.LayerNorm(config.embedding_size, eps=config.layer_norm_eps)
        self.dense = nn.Linear(config.hidden_size, config.embedding_size)

    def forward(self, generator_hidden_states: torch.FloatTensor) -> torch.FloatTensor:
        hidden_states = self.dense(generator_hidden_states)
        hidden_states = self.activation(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)
        return hidden_states


@add_start_docstrings("""ConvBERT Model with a `language modeling` head on top.""", CONVBERT_START_DOCSTRING)
class ConvBertForMaskedLM(ConvBertPreTrainedModel):
    _tied_weights_keys = ["generator.lm_head.weight"]

    def __init__(self, config):
        super().__init__(config)

        self.convbert = ConvBertModel(config)
        self.generator_predictions = ConvBertGeneratorPredictions(config)

        self.generator_lm_head = nn.Linear(config.embedding_size, config.vocab_size)
        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.generator_lm_head

    def set_output_embeddings(self, word_embeddings):
        self.generator_lm_head = word_embeddings

    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MaskedLMOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        generator_hidden_states = self.convbert(
            input_ids,
            attention_mask,
            token_type_ids,
            position_ids,
            head_mask,
            inputs_embeds,
            output_attentions,
            output_hidden_states,
            return_dict,
        )
        generator_sequence_output = generator_hidden_states[0]

        prediction_scores = self.generator_predictions(generator_sequence_output)
        prediction_scores = self.generator_lm_head(prediction_scores)

        loss = None
        # Masked language modeling softmax layer
        if labels is not None:
            loss_fct = nn.CrossEntropyLoss()  # -100 index = padding token
            loss = loss_fct(prediction_scores.view(-1, self.config.vocab_size), labels.view(-1))

        if not return_dict:
            output = (prediction_scores,) + generator_hidden_states[1:]
            return ((loss,) + output) if loss is not None else output

        return MaskedLMOutput(
            loss=loss,
            logits=prediction_scores,
            hidden_states=generator_hidden_states.hidden_states,
            attentions=generator_hidden_states.attentions,
        )
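

# Minimal masked-LM usage sketch (editorial; the checkpoint and tokenizer calls are
# the standard transformers API, and the mask position is illustrative):
#
#     from transformers import AutoTokenizer, ConvBertForMaskedLM
#
#     tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
#     model = ConvBertForMaskedLM.from_pretrained("YituTech/conv-bert-base")
#     inputs = tokenizer("The capital of France is [MASK].", return_tensors="pt")
#     logits = model(**inputs).logits  # (batch_size, seq_len, vocab_size)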


class ConvBertClassificationHead(nn.Module):
    """Head for sentence-level classification tasks."""

    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.out_proj = nn.Linear(config.hidden_size, config.num_labels)

        self.config = config

    def forward(self, hidden_states: torch.Tensor, **kwargs) -> torch.Tensor:
        x = hidden_states[:, 0, :]  # take <s> token (equiv. to [CLS])
        x = self.dropout(x)
        x = self.dense(x)
        x = ACT2FN[self.config.hidden_act](x)
        x = self.dropout(x)
        x = self.out_proj(x)
        return x


@add_start_docstrings(
    """
    ConvBERT Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    CONVBERT_START_DOCSTRING,
)
class ConvBertForSequenceClassification(ConvBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.config = config
        self.convbert = ConvBertModel(config)
        self.classifier = ConvBertClassificationHead(config)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=SequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SequenceClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss); if
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.convbert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
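

# How the loss above is chosen (an editorial reading aid, derived from the code):
#     num_labels == 1                   -> "regression"                  -> MSELoss
#     num_labels > 1, integer labels    -> "single_label_classification" -> CrossEntropyLoss
#     num_labels > 1, float labels      -> "multi_label_classification"  -> BCEWithLogitsLoss
# Setting config.problem_type explicitly short-circuits this inference.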


@add_start_docstrings(
    """
    ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    CONVBERT_START_DOCSTRING,
)
class ConvBertForMultipleChoice(ConvBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.convbert = ConvBertModel(config)
        self.sequence_summary = SequenceSummary(config)
        self.classifier = nn.Linear(config.hidden_size, 1)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(
        CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=MultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, MultipleChoiceModelOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ...,
            num_choices-1]` where `num_choices` is the size of the second dimension of the input tensors. (See
            `input_ids` above)
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        num_choices = input_ids.shape[1] if input_ids is not None else inputs_embeds.shape[1]

        input_ids = input_ids.view(-1, input_ids.size(-1)) if input_ids is not None else None
        attention_mask = attention_mask.view(-1, attention_mask.size(-1)) if attention_mask is not None else None
        token_type_ids = token_type_ids.view(-1, token_type_ids.size(-1)) if token_type_ids is not None else None
        position_ids = position_ids.view(-1, position_ids.size(-1)) if position_ids is not None else None
        inputs_embeds = (
            inputs_embeds.view(-1, inputs_embeds.size(-2), inputs_embeds.size(-1))
            if inputs_embeds is not None
            else None
        )

        outputs = self.convbert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        pooled_output = self.sequence_summary(sequence_output)
        logits = self.classifier(pooled_output)
        reshaped_logits = logits.view(-1, num_choices)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(reshaped_logits, labels)

        if not return_dict:
            output = (reshaped_logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return MultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
    for Named-Entity-Recognition (NER) tasks.
    """,
    CONVBERT_START_DOCSTRING,
)
class ConvBertForTokenClassification(ConvBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.convbert = ConvBertModel(config)
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, TokenClassifierOutput]:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.convbert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        sequence_output = self.dropout(sequence_output)
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )


@add_start_docstrings(
    """
    ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (linear
    layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    CONVBERT_START_DOCSTRING,
)
class ConvBertForQuestionAnswering(ConvBertPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)

        self.num_labels = config.num_labels
        self.convbert = ConvBertModel(config)
        self.qa_outputs = nn.Linear(config.hidden_size, config.num_labels)

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=QuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        token_type_ids: Optional[torch.LongTensor] = None,
        position_ids: Optional[torch.LongTensor] = None,
        head_mask: Optional[torch.FloatTensor] = None,
        inputs_embeds: Optional[torch.FloatTensor] = None,
        start_positions: Optional[torch.LongTensor] = None,
        end_positions: Optional[torch.LongTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, QuestionAnsweringModelOutput]:
        r"""
        start_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.convbert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        sequence_output = outputs[0]

        logits = self.qa_outputs(sequence_output)
        start_logits, end_logits = logits.split(1, dim=-1)
        start_logits = start_logits.squeeze(-1).contiguous()
        end_logits = end_logits.squeeze(-1).contiguous()

        total_loss = None
        if start_positions is not None and end_positions is not None:
            # If we are on multi-GPU, gathered labels can carry an extra dimension; squeeze it away
            if len(start_positions.size()) > 1:
                start_positions = start_positions.squeeze(-1)
            if len(end_positions.size()) > 1:
                end_positions = end_positions.squeeze(-1)
            # sometimes the start/end positions are outside our model inputs; we ignore these terms
            ignored_index = start_logits.size(1)
            start_positions = start_positions.clamp(0, ignored_index)
            end_positions = end_positions.clamp(0, ignored_index)

            loss_fct = CrossEntropyLoss(ignore_index=ignored_index)
            start_loss = loss_fct(start_logits, start_positions)
            end_loss = loss_fct(end_logits, end_positions)
            total_loss = (start_loss + end_loss) / 2

        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return ((total_loss,) + output) if total_loss is not None else output

        return QuestionAnsweringModelOutput(
            loss=total_loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )
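

# Minimal extractive-QA usage sketch (editorial; standard transformers API assumed,
# example texts are illustrative):
#
#     from transformers import AutoTokenizer, ConvBertForQuestionAnswering
#
#     tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
#     model = ConvBertForQuestionAnswering.from_pretrained("YituTech/conv-bert-base")
#     inputs = tokenizer(
#         "What does ConvBERT mix?",
#         "ConvBERT mixes self-attention with span-based dynamic convolutions.",
#         return_tensors="pt",
#     )
#     outputs = model(**inputs)
#     start = outputs.start_logits.argmax(-1)  # most likely span start index
#     end = outputs.end_logits.argmax(-1)      # most likely span end index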