# coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF 2.0 ConvBERT model."""


from __future__ import annotations

from typing import Optional, Tuple, Union

import numpy as np
import tensorflow as tf

from ...activations_tf import get_tf_activation
from ...modeling_tf_outputs import (
    TFBaseModelOutput,
    TFMaskedLMOutput,
    TFMultipleChoiceModelOutput,
    TFQuestionAnsweringModelOutput,
    TFSequenceClassifierOutput,
    TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
    TFMaskedLanguageModelingLoss,
    TFModelInputType,
    TFMultipleChoiceLoss,
    TFPreTrainedModel,
    TFQuestionAnsweringLoss,
    TFSequenceClassificationLoss,
    TFSequenceSummary,
    TFTokenClassificationLoss,
    get_initializer,
    keras,
    keras_serializable,
    unpack_inputs,
)
from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
from ...utils import (
    add_code_sample_docstrings,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
)
from .configuration_convbert import ConvBertConfig


logger = logging.get_logger(__name__)

_CHECKPOINT_FOR_DOC = "YituTech/conv-bert-base"
_CONFIG_FOR_DOC = "ConvBertConfig"


# Copied from transformers.models.albert.modeling_tf_albert.TFAlbertEmbeddings with Albert->ConvBert
class TFConvBertEmbeddings(keras.layers.Layer):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config: ConvBertConfig, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.embedding_size = config.embedding_size
        self.max_position_embeddings = config.max_position_embeddings
        self.initializer_range = config.initializer_range
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)

    def build(self, input_shape=None):
        with tf.name_scope("word_embeddings"):
            self.weight = self.add_weight(
                name="weight",
                shape=[self.config.vocab_size, self.embedding_size],
                initializer=get_initializer(self.initializer_range),
            )

        with tf.name_scope("token_type_embeddings"):
            self.token_type_embeddings = self.add_weight(
                name="embeddings",
                shape=[self.config.type_vocab_size, self.embedding_size],
                initializer=get_initializer(self.initializer_range),
            )

        with tf.name_scope("position_embeddings"):
            self.position_embeddings = self.add_weight(
                name="embeddings",
                shape=[self.max_position_embeddings, self.embedding_size],
                initializer=get_initializer(self.initializer_range),
            )

        if self.built:
            return
        self.built = True
        if getattr(self, "LayerNorm", None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.embedding_size])

    # Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call
    def call(
        self,
        input_ids: tf.Tensor = None,
        position_ids: tf.Tensor = None,
        token_type_ids: tf.Tensor = None,
        inputs_embeds: tf.Tensor = None,
        past_key_values_length=0,
        training: bool = False,
    ) -> tf.Tensor:
        """
        Applies embedding based on inputs tensor.

        Returns:
            final_embeddings (`tf.Tensor`): output embedding tensor.
        """
        if input_ids is None and inputs_embeds is None:
            raise ValueError("Need to provide either `input_ids` or `inputs_embeds`.")

        if input_ids is not None:
            check_embeddings_within_bounds(input_ids, self.config.vocab_size)
            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)

        input_shape = shape_list(inputs_embeds)[:-1]

        if token_type_ids is None:
            token_type_ids = tf.fill(dims=input_shape, value=0)

        if position_ids is None:
            position_ids = tf.expand_dims(
                tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
            )

        position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
        token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
        final_embeddings = inputs_embeds + position_embeds + token_type_embeds
        final_embeddings = self.LayerNorm(inputs=final_embeddings)
        final_embeddings = self.dropout(inputs=final_embeddings, training=training)

        return final_embeddings


class TFConvBertSelfAttention(keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )

        new_num_attention_heads = int(config.num_attention_heads / config.head_ratio)
        if new_num_attention_heads < 1:
            self.head_ratio = config.num_attention_heads
            num_attention_heads = 1
        else:
            num_attention_heads = new_num_attention_heads
            self.head_ratio = config.head_ratio

        self.num_attention_heads = num_attention_heads
        self.conv_kernel_size = config.conv_kernel_size

        if config.hidden_size % self.num_attention_heads != 0:
            raise ValueError("hidden_size should be divisible by num_attention_heads")

        self.attention_head_size = config.hidden_size // config.num_attention_heads
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        self.query = keras.layers.Dense(
            self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
        )
        self.key = keras.layers.Dense(
            self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
        )
        self.value = keras.layers.Dense(
            self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
        )
        self.key_conv_attn_layer = keras.layers.SeparableConv1D(
            self.all_head_size,
            self.conv_kernel_size,
            padding="same",
            activation=None,
            depthwise_initializer=get_initializer(1 / self.conv_kernel_size),
            pointwise_initializer=get_initializer(config.initializer_range),
            name="key_conv_attn_layer",
        )
        self.conv_kernel_layer = keras.layers.Dense(
            self.num_attention_heads * self.conv_kernel_size,
            activation=None,
            name="conv_kernel_layer",
            kernel_initializer=get_initializer(config.initializer_range),
        )
        self.conv_out_layer = keras.layers.Dense(
            self.all_head_size,
            activation=None,
            name="conv_out_layer",
            kernel_initializer=get_initializer(config.initializer_range),
        )
        self.dropout = keras.layers.Dropout(config.attention_probs_dropout_prob)
        self.config = config

    def transpose_for_scores(self, x, batch_size):
        # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads,
        # attention_head_size], then transpose to [batch_size, num_attention_heads, seq_length, attention_head_size]
        x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
        batch_size = shape_list(hidden_states)[0]
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
        key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
        conv_attn_layer = tf.multiply(mixed_key_conv_attn_layer, mixed_query_layer)

        conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
        conv_kernel_layer = tf.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
        conv_kernel_layer = stable_softmax(conv_kernel_layer, axis=1)

        paddings = tf.constant(
            [
                [0, 0],
                [int((self.conv_kernel_size - 1) / 2), int((self.conv_kernel_size - 1) / 2)],
                [0, 0],
            ]
        )

        conv_out_layer = self.conv_out_layer(hidden_states)
        conv_out_layer = tf.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
        conv_out_layer = tf.pad(conv_out_layer, paddings, "CONSTANT")

        # (A standalone sketch of this unfold trick follows the class definition.)
        unfold_conv_out_layer = tf.stack(
            [
                tf.slice(conv_out_layer, [0, i, 0], [batch_size, shape_list(mixed_query_layer)[1], self.all_head_size])
                for i in range(self.conv_kernel_size)
            ],
            axis=-1,
        )

        conv_out_layer = tf.reshape(unfold_conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
        conv_out_layer = tf.matmul(conv_out_layer, conv_kernel_layer)
        conv_out_layer = tf.reshape(conv_out_layer, [-1, self.all_head_size])

        # Take the dot product between "query" and "key" to get the raw attention scores.
        attention_scores = tf.matmul(
            query_layer, key_layer, transpose_b=True
        )  # (batch_size, num_heads, seq_len_q, seq_len_k)
        dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype)  # scale attention_scores
        attention_scores = attention_scores / tf.math.sqrt(dk)

        if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the TFConvBertModel call() function)
            attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = stable_softmax(attention_scores, axis=-1)

        # This is actually dropping out entire tokens to attend to, which might
        # seem a bit unusual, but is taken from the original Transformer paper.
        attention_probs = self.dropout(attention_probs, training=training)

        # Mask heads if we want to
        if head_mask is not None:
            attention_probs = attention_probs * head_mask

        value_layer = tf.reshape(
            mixed_value_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size]
        )
        value_layer = tf.transpose(value_layer, [0, 2, 1, 3])

        context_layer = tf.matmul(attention_probs, value_layer)
        context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])

        conv_out = tf.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
        context_layer = tf.concat([context_layer, conv_out], 2)
        context_layer = tf.reshape(
            context_layer, (batch_size, -1, self.head_ratio * self.all_head_size)
        )  # (batch_size, seq_len_q, all_head_size)
        outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "query", None) is not None:
            with tf.name_scope(self.query.name):
                self.query.build([None, None, self.config.hidden_size])
        if getattr(self, "key", None) is not None:
            with tf.name_scope(self.key.name):
                self.key.build([None, None, self.config.hidden_size])
        if getattr(self, "value", None) is not None:
            with tf.name_scope(self.value.name):
                self.value.build([None, None, self.config.hidden_size])
        if getattr(self, "key_conv_attn_layer", None) is not None:
            with tf.name_scope(self.key_conv_attn_layer.name):
                self.key_conv_attn_layer.build([None, None, self.config.hidden_size])
        if getattr(self, "conv_kernel_layer", None) is not None:
            with tf.name_scope(self.conv_kernel_layer.name):
                self.conv_kernel_layer.build([None, None, self.all_head_size])
        if getattr(self, "conv_out_layer", None) is not None:
            with tf.name_scope(self.conv_out_layer.name):
                self.conv_out_layer.build([None, None, self.config.hidden_size])
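

# A minimal standalone sketch (not part of the original file) of the
# "pad, unfold, then matmul" trick used by TFConvBertSelfAttention.call above:
# each output position is a softmax-weighted sum over a kernel_size-wide window
# of the input, with a *different* kernel per position (span-based dynamic
# convolution). The helper name and shapes are illustrative only.
def _demo_span_dynamic_conv(inputs: tf.Tensor, kernels: tf.Tensor, kernel_size: int) -> tf.Tensor:
    """inputs: (batch, seq_len, dim); kernels: (batch * seq_len, kernel_size, 1), softmaxed over axis 1."""
    batch_size, seq_len, dim = shape_list(inputs)
    pad = (kernel_size - 1) // 2
    padded = tf.pad(inputs, [[0, 0], [pad, pad], [0, 0]], "CONSTANT")
    # Stack kernel_size shifted views of the sequence: (batch, seq_len, dim, kernel_size).
    unfolded = tf.stack(
        [tf.slice(padded, [0, i, 0], [batch_size, seq_len, dim]) for i in range(kernel_size)], axis=-1
    )
    windows = tf.reshape(unfolded, [-1, dim, kernel_size])  # one window per position
    mixed = tf.matmul(windows, kernels)  # weight each window by its per-position kernel
    return tf.reshape(mixed, [batch_size, seq_len, dim])


# Eager usage example: uniform kernels of width 3 reduce to a moving average.
#   x = tf.random.normal((2, 8, 4))
#   k = tf.fill((2 * 8, 3, 1), 1.0 / 3.0)
#   y = _demo_span_dynamic_conv(x, k, kernel_size=3)  # shape (2, 8, 4)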


class TFConvBertSelfOutput(keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.dense = keras.layers.Dense(
            config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
        self.config = config

    def call(self, hidden_states, input_tensor, training=False):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)

        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])
        if getattr(self, "LayerNorm", None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])


class TFConvBertAttention(keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.self_attention = TFConvBertSelfAttention(config, name="self")
        self.dense_output = TFConvBertSelfOutput(config, name="output")

    def prune_heads(self, heads):
        raise NotImplementedError

    def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False):
        self_outputs = self.self_attention(
            input_tensor, attention_mask, head_mask, output_attentions, training=training
        )
        attention_output = self.dense_output(self_outputs[0], input_tensor, training=training)
        outputs = (attention_output,) + self_outputs[1:]  # add attentions if we output them

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "self_attention", None) is not None:
            with tf.name_scope(self.self_attention.name):
                self.self_attention.build(None)
        if getattr(self, "dense_output", None) is not None:
            with tf.name_scope(self.dense_output.name):
                self.dense_output.build(None)


class GroupedLinearLayer(keras.layers.Layer):
    def __init__(self, input_size, output_size, num_groups, kernel_initializer, **kwargs):
        super().__init__(**kwargs)
        self.input_size = input_size
        self.output_size = output_size
        self.num_groups = num_groups
        self.kernel_initializer = kernel_initializer
        self.group_in_dim = self.input_size // self.num_groups
        self.group_out_dim = self.output_size // self.num_groups

    def build(self, input_shape=None):
        self.kernel = self.add_weight(
            "kernel",
            shape=[self.group_out_dim, self.group_in_dim, self.num_groups],
            initializer=self.kernel_initializer,
            trainable=True,
        )
        self.bias = self.add_weight(
            "bias", shape=[self.output_size], initializer=self.kernel_initializer, dtype=self.dtype, trainable=True
        )
        super().build(input_shape)

    def call(self, hidden_states):
        batch_size = shape_list(hidden_states)[0]
        x = tf.transpose(tf.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim]), [1, 0, 2])
        x = tf.matmul(x, tf.transpose(self.kernel, [2, 1, 0]))
        x = tf.transpose(x, [1, 0, 2])
        x = tf.reshape(x, [batch_size, -1, self.output_size])
        x = tf.nn.bias_add(value=x, bias=self.bias)
        return x
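

# A minimal standalone sketch (not part of the original file): GroupedLinearLayer
# above is a block-diagonal dense layer. The input features are split into
# num_groups chunks and each chunk gets its own projection, so the kernel holds
# num_groups * (input_size / g) * (output_size / g) weights instead of
# input_size * output_size. The helper name is hypothetical.
def _demo_grouped_linear_shapes():
    layer = GroupedLinearLayer(
        input_size=8, output_size=8, num_groups=4, kernel_initializer=get_initializer(0.02)
    )
    x = tf.random.normal((2, 5, 8))  # (batch, seq_len, input_size)
    y = layer(x)  # (batch, seq_len, output_size)
    assert shape_list(y) == [2, 5, 8]
    # Kernel: 4 groups * 2 * 2 = 16 weights, vs. 64 for an ungrouped Dense(8).
    return y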


class TFConvBertIntermediate(keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        if config.num_groups == 1:
            self.dense = keras.layers.Dense(
                config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
            )
        else:
            self.dense = GroupedLinearLayer(
                config.hidden_size,
                config.intermediate_size,
                num_groups=config.num_groups,
                kernel_initializer=get_initializer(config.initializer_range),
                name="dense",
            )

        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.intermediate_act_fn = config.hidden_act
        self.config = config

    def call(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)

        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])


class TFConvBertOutput(keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        if config.num_groups == 1:
            self.dense = keras.layers.Dense(
                config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
            )
        else:
            self.dense = GroupedLinearLayer(
                config.intermediate_size,
                config.hidden_size,
                num_groups=config.num_groups,
                kernel_initializer=get_initializer(config.initializer_range),
                name="dense",
            )
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = keras.layers.Dropout(config.hidden_dropout_prob)
        self.config = config

    def call(self, hidden_states, input_tensor, training=False):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states, training=training)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)

        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "LayerNorm", None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.intermediate_size])


class TFConvBertLayer(keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.attention = TFConvBertAttention(config, name="attention")
        self.intermediate = TFConvBertIntermediate(config, name="intermediate")
        self.bert_output = TFConvBertOutput(config, name="output")

    def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
        attention_outputs = self.attention(
            hidden_states, attention_mask, head_mask, output_attentions, training=training
        )
        attention_output = attention_outputs[0]
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.bert_output(intermediate_output, attention_output, training=training)
        outputs = (layer_output,) + attention_outputs[1:]  # add attentions if we output them

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "attention", None) is not None:
            with tf.name_scope(self.attention.name):
                self.attention.build(None)
        if getattr(self, "intermediate", None) is not None:
            with tf.name_scope(self.intermediate.name):
                self.intermediate.build(None)
        if getattr(self, "bert_output", None) is not None:
            with tf.name_scope(self.bert_output.name):
                self.bert_output.build(None)


class TFConvBertEncoder(keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.layer = [TFConvBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]

    def call(
        self,
        hidden_states,
        attention_mask,
        head_mask,
        output_attentions,
        output_hidden_states,
        return_dict,
        training=False,
    ):
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None

        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            layer_outputs = layer_module(
                hidden_states, attention_mask, head_mask[i], output_attentions, training=training
            )
            hidden_states = layer_outputs[0]

            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)

        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)

        return TFBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "layer", None) is not None:
            for layer in self.layer:
                with tf.name_scope(layer.name):
                    layer.build(None)


class TFConvBertPredictionHeadTransform(keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.dense = keras.layers.Dense(
            config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )

        if isinstance(config.hidden_act, str):
            self.transform_act_fn = get_tf_activation(config.hidden_act)
        else:
            self.transform_act_fn = config.hidden_act

        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.config = config

    def call(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.transform_act_fn(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)

        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])
        if getattr(self, "LayerNorm", None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])


@keras_serializable
class TFConvBertMainLayer(keras.layers.Layer):
    config_class = ConvBertConfig

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.embeddings = TFConvBertEmbeddings(config, name="embeddings")

        if config.embedding_size != config.hidden_size:
            self.embeddings_project = keras.layers.Dense(config.hidden_size, name="embeddings_project")

        self.encoder = TFConvBertEncoder(config, name="encoder")
        self.config = config

    def get_input_embeddings(self):
        return self.embeddings

    def set_input_embeddings(self, value):
        self.embeddings.weight = value
        self.embeddings.vocab_size = value.shape[0]

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See base
        class `PreTrainedModel`.
        """
        raise NotImplementedError

    def get_extended_attention_mask(self, attention_mask, input_shape, dtype):
        if attention_mask is None:
            attention_mask = tf.fill(input_shape, 1)

        # We create a 3D attention mask from a 2D tensor mask.
        # Sizes are [batch_size, 1, 1, to_seq_length]
        # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # This attention mask is simpler than the triangular masking of causal attention
        # used in OpenAI GPT; we just need to prepare the broadcast dimension here.
        extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))

        # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this operation will create a tensor which is 0.0 for
        # positions we want to attend and -10000.0 for masked positions.
        # Since we are adding it to the raw scores before the softmax, this is
        # effectively the same as removing these entirely.
        # (A standalone sketch of this construction follows the class definition.)
        extended_attention_mask = tf.cast(extended_attention_mask, dtype)
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        return extended_attention_mask

    def get_head_mask(self, head_mask):
        if head_mask is not None:
            raise NotImplementedError
        else:
            head_mask = [None] * self.config.num_hidden_layers

        return head_mask

    @unpack_inputs
    def call(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        output_attentions=None,
        output_hidden_states=None,
        return_dict=None,
        training=False,
    ):
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if attention_mask is None:
            attention_mask = tf.fill(input_shape, 1)

        if token_type_ids is None:
            token_type_ids = tf.fill(input_shape, 0)

        hidden_states = self.embeddings(input_ids, position_ids, token_type_ids, inputs_embeds, training=training)
        extended_attention_mask = self.get_extended_attention_mask(attention_mask, input_shape, hidden_states.dtype)
        head_mask = self.get_head_mask(head_mask)

        if hasattr(self, "embeddings_project"):
            hidden_states = self.embeddings_project(hidden_states, training=training)

        hidden_states = self.encoder(
            hidden_states,
            extended_attention_mask,
            head_mask,
            output_attentions,
            output_hidden_states,
            return_dict,
            training=training,
        )

        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "embeddings", None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
        if getattr(self, "embeddings_project", None) is not None:
            with tf.name_scope(self.embeddings_project.name):
                self.embeddings_project.build([None, None, self.config.embedding_size])
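

# A minimal standalone sketch (not part of the original file) of the additive
# attention-mask construction in TFConvBertMainLayer.get_extended_attention_mask
# above; the helper name is hypothetical.
def _demo_extended_attention_mask():
    mask = tf.constant([[1, 1, 1, 0]])  # (batch=1, seq_len=4); 0 marks padding
    extended = tf.reshape(mask, (1, 1, 1, 4))  # broadcastable over (batch, heads, from_seq, to_seq)
    extended = tf.cast(extended, tf.float32)
    extended = (1.0 - extended) * -10000.0  # 0.0 where attended, -10000.0 where masked
    # Added to the raw attention scores before the softmax, the -10000.0 entries
    # drive the masked positions' probabilities to ~0.
    return extended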


class TFConvBertPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ConvBertConfig
    base_model_prefix = "convbert"


CONVBERT_START_DOCSTRING = r"""
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
    etc.)

    This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
    as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage and
    behavior.

    <Tip>

    TensorFlow models and layers in `transformers` accept two formats as input:

    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional argument.

    The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
    and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
    pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
    format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
    the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
    positional argument:

    - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
    - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
    `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
    - a dictionary with one or several input Tensors associated to the input names given in the docstring:
    `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`

    Note that when creating models and layers with
    [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
    about any of this, as you can just pass inputs like you would to any other Python function!

    </Tip>

    Args:
        config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

CONVBERT_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
            Indices of input sequence tokens in the vocabulary.

            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
            [`PreTrainedTokenizer.encode`] for details.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
            Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
            1]`:

            - 0 corresponds to a *sentence A* token,
            - 1 corresponds to a *sentence B* token.

            [What are token type IDs?](../glossary#token-type-ids)
        position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
            Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
            config.max_position_embeddings - 1]`.

            [What are position IDs?](../glossary#position-ids)
        head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
            Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:

            - 1 indicates the head is **not masked**,
            - 0 indicates the head is **masked**.

        inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
            Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
            is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
            model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
            eager mode, in graph mode the value will always be set to True.
        training (`bool`, *optional*, defaults to `False`):
            Whether or not to use the model in training mode (some modules like dropout modules have different
            behaviors between training and evaluation).
"""


@add_start_docstrings(
    "The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.",
    CONVBERT_START_DOCSTRING,
)
class TFConvBertModel(TFConvBertPreTrainedModel):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.convbert = TFConvBertMainLayer(config, name="convbert")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        outputs = self.convbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "convbert", None) is not None:
            with tf.name_scope(self.convbert.name):
                self.convbert.build(None)
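

# A minimal usage sketch (not part of the original file). It assumes network
# access to the `YituTech/conv-bert-base` checkpoint referenced in
# _CHECKPOINT_FOR_DOC; the function name is hypothetical.
def _demo_tf_convbert_model():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
    inputs = tokenizer("ConvBERT mixes self-attention with span-based convolution.", return_tensors="tf")
    outputs = model(**inputs)
    return outputs.last_hidden_state  # (batch_size, sequence_length, hidden_size)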


class TFConvBertMaskedLMHead(keras.layers.Layer):
    def __init__(self, config, input_embeddings, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.embedding_size = config.embedding_size
        self.input_embeddings = input_embeddings

    def build(self, input_shape):
        self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")

        super().build(input_shape)

    def get_output_embeddings(self):
        return self.input_embeddings

    def set_output_embeddings(self, value):
        self.input_embeddings.weight = value
        self.input_embeddings.vocab_size = shape_list(value)[0]

    def get_bias(self):
        return {"bias": self.bias}

    def set_bias(self, value):
        self.bias = value["bias"]
        self.config.vocab_size = shape_list(value["bias"])[0]

    def call(self, hidden_states):
        seq_length = shape_list(tensor=hidden_states)[1]
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
        hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
        hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)

        return hidden_states
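

# A minimal standalone sketch (not part of the original file) of the weight
# tying performed by TFConvBertMaskedLMHead.call above: vocabulary logits come
# from multiplying hidden states against the *input* embedding matrix
# (transposed), so no separate output projection is learned. Names are
# illustrative.
def _demo_tied_lm_logits(hidden: tf.Tensor, embedding_matrix: tf.Tensor, bias: tf.Tensor) -> tf.Tensor:
    """hidden: (batch, seq, emb); embedding_matrix: (vocab, emb); bias: (vocab,)."""
    batch, seq, emb = shape_list(hidden)
    flat = tf.reshape(hidden, [-1, emb])  # (batch * seq, emb)
    logits = tf.matmul(flat, embedding_matrix, transpose_b=True)  # (batch * seq, vocab)
    logits = tf.nn.bias_add(logits, bias)
    return tf.reshape(logits, [batch, seq, -1])  # (batch, seq, vocab)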


class TFConvBertGeneratorPredictions(keras.layers.Layer):
    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dense = keras.layers.Dense(config.embedding_size, name="dense")
        self.config = config

    def call(self, generator_hidden_states, training=False):
        hidden_states = self.dense(generator_hidden_states)
        hidden_states = get_tf_activation("gelu")(hidden_states)
        hidden_states = self.LayerNorm(hidden_states)

        return hidden_states

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "LayerNorm", None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.embedding_size])
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])


@add_start_docstrings("""ConvBERT Model with a `language modeling` head on top.""", CONVBERT_START_DOCSTRING)
class TFConvBertForMaskedLM(TFConvBertPreTrainedModel, TFMaskedLanguageModelingLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.config = config
        self.convbert = TFConvBertMainLayer(config, name="convbert")
        self.generator_predictions = TFConvBertGeneratorPredictions(config, name="generator_predictions")

        if isinstance(config.hidden_act, str):
            self.activation = get_tf_activation(config.hidden_act)
        else:
            self.activation = config.hidden_act

        self.generator_lm_head = TFConvBertMaskedLMHead(config, self.convbert.embeddings, name="generator_lm_head")

    def get_lm_head(self):
        return self.generator_lm_head

    def get_prefix_bias_name(self):
        return self.name + "/" + self.generator_lm_head.name

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFMaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[Tuple, TFMaskedLMOutput]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked);
            the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        generator_hidden_states = self.convbert(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        generator_sequence_output = generator_hidden_states[0]
        prediction_scores = self.generator_predictions(generator_sequence_output, training=training)
        prediction_scores = self.generator_lm_head(prediction_scores, training=training)
        loss = None if labels is None else self.hf_compute_loss(labels, prediction_scores)

        if not return_dict:
            output = (prediction_scores,) + generator_hidden_states[1:]

            return ((loss,) + output) if loss is not None else output

        return TFMaskedLMOutput(
            loss=loss,
            logits=prediction_scores,
            hidden_states=generator_hidden_states.hidden_states,
            attentions=generator_hidden_states.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "convbert", None) is not None:
            with tf.name_scope(self.convbert.name):
                self.convbert.build(None)
        if getattr(self, "generator_predictions", None) is not None:
            with tf.name_scope(self.generator_predictions.name):
                self.generator_predictions.build(None)
        if getattr(self, "generator_lm_head", None) is not None:
            with tf.name_scope(self.generator_lm_head.name):
                self.generator_lm_head.build(None)
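

# A minimal usage sketch (not part of the original file) for the masked-LM
# head; it assumes the `YituTech/conv-bert-base` checkpoint and a tokenizer
# exposing `mask_token`/`mask_token_id`. The function name is hypothetical.
def _demo_masked_lm():
    from transformers import AutoTokenizer

    tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = TFConvBertForMaskedLM.from_pretrained("YituTech/conv-bert-base")
    inputs = tokenizer(f"The capital of France is {tokenizer.mask_token}.", return_tensors="tf")
    logits = model(**inputs).logits  # (batch, seq_len, vocab_size)
    mask_index = tf.where(inputs["input_ids"][0] == tokenizer.mask_token_id)[0, 0]
    predicted_id = tf.argmax(logits[0, mask_index])
    return tokenizer.decode([int(predicted_id)])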


class TFConvBertClassificationHead(keras.layers.Layer):
    """Head for sentence-level classification tasks."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.dense = keras.layers.Dense(
            config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
        )
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = keras.layers.Dropout(classifier_dropout)
        self.out_proj = keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
        )

        self.config = config

    def call(self, hidden_states, **kwargs):
        x = hidden_states[:, 0, :]  # take <s> token (equiv. to [CLS])
        x = self.dropout(x)
        x = self.dense(x)
        x = get_tf_activation(self.config.hidden_act)(x)
        x = self.dropout(x)
        x = self.out_proj(x)

        return x

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "dense", None) is not None:
            with tf.name_scope(self.dense.name):
                self.dense.build([None, None, self.config.hidden_size])
        if getattr(self, "out_proj", None) is not None:
            with tf.name_scope(self.out_proj.name):
                self.out_proj.build([None, None, self.config.hidden_size])


@add_start_docstrings(
    """
    ConvBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.
    """,
    CONVBERT_START_DOCSTRING,
)
class TFConvBertForSequenceClassification(TFConvBertPreTrainedModel, TFSequenceClassificationLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.num_labels = config.num_labels
        self.convbert = TFConvBertMainLayer(config, name="convbert")
        self.classifier = TFConvBertClassificationHead(config, name="classifier")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[Tuple, TFSequenceClassifierOutput]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs = self.convbert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        logits = self.classifier(outputs[0], training=training)
        loss = None if labels is None else self.hf_compute_loss(labels, logits)

        if not return_dict:
            output = (logits,) + outputs[1:]

            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "convbert", None) is not None:
            with tf.name_scope(self.convbert.name):
                self.convbert.build(None)
        if getattr(self, "classifier", None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build(None)


@add_start_docstrings(
    """
    ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
    softmax) e.g. for RocStories/SWAG tasks.
    """,
    CONVBERT_START_DOCSTRING,
)
class TFConvBertForMultipleChoice(TFConvBertPreTrainedModel, TFMultipleChoiceLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.convbert = TFConvBertMainLayer(config, name="convbert")
        self.sequence_summary = TFSequenceSummary(
            config, initializer_range=config.initializer_range, name="sequence_summary"
        )
        self.classifier = keras.layers.Dense(
            1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
        )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(
        CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
    )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFMultipleChoiceModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[Tuple, TFMultipleChoiceModelOutput]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
            where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
        """
        if input_ids is not None:
            num_choices = shape_list(input_ids)[1]
            seq_length = shape_list(input_ids)[2]
        else:
            num_choices = shape_list(inputs_embeds)[1]
            seq_length = shape_list(inputs_embeds)[2]

        flat_input_ids = tf.reshape(input_ids, (-1, seq_length)) if input_ids is not None else None
        flat_attention_mask = tf.reshape(attention_mask, (-1, seq_length)) if attention_mask is not None else None
        flat_token_type_ids = tf.reshape(token_type_ids, (-1, seq_length)) if token_type_ids is not None else None
        flat_position_ids = tf.reshape(position_ids, (-1, seq_length)) if position_ids is not None else None
        flat_inputs_embeds = (
            tf.reshape(inputs_embeds, (-1, seq_length, shape_list(inputs_embeds)[3]))
            if inputs_embeds is not None
            else None
        )
        outputs = self.convbert(
            flat_input_ids,
            flat_attention_mask,
            flat_token_type_ids,
            flat_position_ids,
            head_mask,
            flat_inputs_embeds,
            output_attentions,
            output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        logits = self.sequence_summary(outputs[0], training=training)
        logits = self.classifier(logits)
        reshaped_logits = tf.reshape(logits, (-1, num_choices))
        loss = None if labels is None else self.hf_compute_loss(labels, reshaped_logits)

        if not return_dict:
            output = (reshaped_logits,) + outputs[1:]

            return ((loss,) + output) if loss is not None else output

        return TFMultipleChoiceModelOutput(
            loss=loss,
            logits=reshaped_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "convbert", None) is not None:
            with tf.name_scope(self.convbert.name):
                self.convbert.build(None)
        if getattr(self, "sequence_summary", None) is not None:
            with tf.name_scope(self.sequence_summary.name):
                self.sequence_summary.build(None)
        if getattr(self, "classifier", None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.config.hidden_size])
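

# A minimal standalone sketch (not part of the original file) of the input
# layout TFConvBertForMultipleChoice.call above expects: every tensor carries a
# num_choices axis that the forward pass flattens into the batch dimension
# before encoding. Values here are toy token ids; the helper name is
# hypothetical.
def _demo_multiple_choice_shapes():
    batch_size, num_choices, seq_len = 2, 3, 5
    input_ids = tf.random.uniform((batch_size, num_choices, seq_len), maxval=100, dtype=tf.int32)
    flat = tf.reshape(input_ids, (-1, seq_len))  # (batch_size * num_choices, seq_len)
    # After encoding, a 1-unit classifier scores each flattened choice; logits
    # are then reshaped back to (batch_size, num_choices) and compared against
    # integer labels in [0, num_choices).
    return shape_list(flat)  # [6, 5]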


@add_start_docstrings(
    """
    ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
    Named-Entity-Recognition (NER) tasks.
    """,
    CONVBERT_START_DOCSTRING,
)
class TFConvBertForTokenClassification(TFConvBertPreTrainedModel, TFTokenClassificationLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.num_labels = config.num_labels
        self.convbert = TFConvBertMainLayer(config, name="convbert")
        classifier_dropout = (
            config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
        )
        self.dropout = keras.layers.Dropout(classifier_dropout)
        self.classifier = keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
        )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFTokenClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[Tuple, TFTokenClassifierOutput]:
        r"""
        labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
        """
        outputs = self.convbert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = outputs[0]
        sequence_output = self.dropout(sequence_output, training=training)
        # One score per label for every token position: (batch_size, sequence_length, num_labels).
        logits = self.classifier(sequence_output)
        loss = None if labels is None else self.hf_compute_loss(labels, logits)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFTokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "convbert", None) is not None:
            with tf.name_scope(self.convbert.name):
                self.convbert.build(None)
        if getattr(self, "classifier", None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.config.hidden_size])
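
# Usage sketch (illustrative, not part of the original modeling code): the token
# classification head returns one logit vector per input token.
# `_example_token_classification` is a hypothetical helper; in practice a checkpoint
# fine-tuned for NER would be used here, since the base checkpoint's head is untrained.
def _example_token_classification():
    from transformers import AutoTokenizer, TFConvBertForTokenClassification

    tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = TFConvBertForTokenClassification.from_pretrained("YituTech/conv-bert-base")

    inputs = tokenizer("HuggingFace is based in New York City", return_tensors="tf")
    logits = model(inputs).logits  # (batch_size, sequence_length, num_labels)
    # One predicted label id per token position.
    predicted_ids = tf.argmax(logits, axis=-1)
    return [model.config.id2label[int(i)] for i in predicted_ids[0]]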


@add_start_docstrings(
    """
    ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
    layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
    """,
    CONVBERT_START_DOCSTRING,
)
class TFConvBertForQuestionAnswering(TFConvBertPreTrainedModel, TFQuestionAnsweringLoss):
    def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.num_labels = config.num_labels
        self.convbert = TFConvBertMainLayer(config, name="convbert")
        self.qa_outputs = keras.layers.Dense(
            config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
        )
        self.config = config

    @unpack_inputs
    @add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFQuestionAnsweringModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        head_mask: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        start_positions: tf.Tensor | None = None,
        end_positions: tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[Tuple, TFQuestionAnsweringModelOutput]:
        r"""
        start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
            are not taken into account for computing the loss.
        """
        outputs = self.convbert(
            input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = outputs[0]
        logits = self.qa_outputs(sequence_output)
        # The dense head emits two scores per token; split them into start and end logits,
        # each of shape (batch_size, sequence_length).
        start_logits, end_logits = tf.split(logits, 2, axis=-1)
        start_logits = tf.squeeze(start_logits, axis=-1)
        end_logits = tf.squeeze(end_logits, axis=-1)
        loss = None

        if start_positions is not None and end_positions is not None:
            labels = {"start_position": start_positions}
            labels["end_position"] = end_positions
            loss = self.hf_compute_loss(labels, (start_logits, end_logits))

        if not return_dict:
            output = (start_logits, end_logits) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return TFQuestionAnsweringModelOutput(
            loss=loss,
            start_logits=start_logits,
            end_logits=end_logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "convbert", None) is not None:
            with tf.name_scope(self.convbert.name):
                self.convbert.build(None)
        if getattr(self, "qa_outputs", None) is not None:
            with tf.name_scope(self.qa_outputs.name):
                self.qa_outputs.build([None, None, self.config.hidden_size])
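
# Usage sketch (illustrative, not part of the original modeling code): greedy span
# decoding from the start/end logits produced by the head above.
# `_example_question_answering` is a hypothetical helper; a SQuAD-fine-tuned checkpoint
# would normally be used (the base checkpoint's QA head is untrained), and production
# decoding would also mask out special tokens and enforce start <= end.
def _example_question_answering():
    from transformers import AutoTokenizer, TFConvBertForQuestionAnswering

    tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
    model = TFConvBertForQuestionAnswering.from_pretrained("YituTech/conv-bert-base")

    question = "Where is HuggingFace based?"
    context = "HuggingFace is based in New York City."
    inputs = tokenizer(question, context, return_tensors="tf")

    outputs = model(inputs)
    # Pick the highest-scoring start and end positions independently.
    start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
    end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
    answer_ids = inputs["input_ids"][0, start : end + 1]
    return tokenizer.decode(answer_ids)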