modeling_tf_deberta.py 67 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
712781279128012811282128312841285128612871288128912901291129212931294129512961297129812991300130113021303130413051306130713081309131013111312131313141315131613171318131913201321132213231324132513261327132813291330133113321333133413351336133713381339134013411342134313441345134613471348134913501351135213531354135513561357135813591360136113621363136413651366136713681369137013711372137313741375137613771378137913801381138213831384138513861387138813891390139113921393139413951396139713981399140014011402140314041405140614071408140914101411141214131414141514161417141814191420142114221423142414251426142714281429143014311432143314341435143614371438143914401441144214431444144514461447144814491450145114521453145414551456145714581459146014611462146314641465146614671468146914701471147214731474147514761477147814791480148114821483148414851486148714881489149014911492149314941495149614971498149915001501150215031504150515061507150815091510151115121513151415151516151715181519152015211522152315241525152615271528152915301531153215331534153515361537153815391540154115421543154415451546154715481549155015511552155315541555155615571558155915601561156215631564156515661567156815691570157115721573157415751576157715781579158015811582158315841585158615871588158915901591159215931594159515961597159815991600160116021603160416051606160716081609161016111612161316141615161616171618161916201621162216231624162516261627162816291630163116321633163416351636163716381639164016411642
  1. # coding=utf-8
  2. # Copyright 2021 Microsoft and The HuggingFace Inc. team. All rights reserved.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. """TF 2.0 DeBERTa model."""
  16. from __future__ import annotations
  17. import math
  18. from typing import Dict, Optional, Sequence, Tuple, Union
  19. import numpy as np
  20. import tensorflow as tf
  21. from ...activations_tf import get_tf_activation
  22. from ...modeling_tf_outputs import (
  23. TFBaseModelOutput,
  24. TFMaskedLMOutput,
  25. TFQuestionAnsweringModelOutput,
  26. TFSequenceClassifierOutput,
  27. TFTokenClassifierOutput,
  28. )
  29. from ...modeling_tf_utils import (
  30. TFMaskedLanguageModelingLoss,
  31. TFModelInputType,
  32. TFPreTrainedModel,
  33. TFQuestionAnsweringLoss,
  34. TFSequenceClassificationLoss,
  35. TFTokenClassificationLoss,
  36. get_initializer,
  37. keras,
  38. unpack_inputs,
  39. )
  40. from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
  41. from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
  42. from .configuration_deberta import DebertaConfig
# Module-level logger, following the library-wide logging convention.
logger = logging.get_logger(__name__)

# Identifiers consumed by the docstring decorators on the model classes below.
_CONFIG_FOR_DOC = "DebertaConfig"
_CHECKPOINT_FOR_DOC = "kamalkraj/deberta-base"
  46. class TFDebertaContextPooler(keras.layers.Layer):
  47. def __init__(self, config: DebertaConfig, **kwargs):
  48. super().__init__(**kwargs)
  49. self.dense = keras.layers.Dense(config.pooler_hidden_size, name="dense")
  50. self.dropout = TFDebertaStableDropout(config.pooler_dropout, name="dropout")
  51. self.config = config
  52. def call(self, hidden_states, training: bool = False):
  53. # We "pool" the model by simply taking the hidden state corresponding
  54. # to the first token.
  55. context_token = hidden_states[:, 0]
  56. context_token = self.dropout(context_token, training=training)
  57. pooled_output = self.dense(context_token)
  58. pooled_output = get_tf_activation(self.config.pooler_hidden_act)(pooled_output)
  59. return pooled_output
  60. @property
  61. def output_dim(self) -> int:
  62. return self.config.hidden_size
  63. def build(self, input_shape=None):
  64. if self.built:
  65. return
  66. self.built = True
  67. if getattr(self, "dense", None) is not None:
  68. with tf.name_scope(self.dense.name):
  69. self.dense.build([None, None, self.config.pooler_hidden_size])
  70. if getattr(self, "dropout", None) is not None:
  71. with tf.name_scope(self.dropout.name):
  72. self.dropout.build(None)
  73. class TFDebertaXSoftmax(keras.layers.Layer):
  74. """
  75. Masked Softmax which is optimized for saving memory
  76. Args:
  77. input (`tf.Tensor`): The input tensor that will apply softmax.
  78. mask (`tf.Tensor`): The mask matrix where 0 indicate that element will be ignored in the softmax calculation.
  79. dim (int): The dimension that will apply softmax
  80. """
  81. def __init__(self, axis=-1, **kwargs):
  82. super().__init__(**kwargs)
  83. self.axis = axis
  84. def call(self, inputs: tf.Tensor, mask: tf.Tensor):
  85. rmask = tf.logical_not(tf.cast(mask, tf.bool))
  86. output = tf.where(rmask, tf.cast(float("-inf"), dtype=self.compute_dtype), inputs)
  87. output = stable_softmax(tf.cast(output, dtype=tf.float32), self.axis)
  88. output = tf.where(rmask, 0.0, output)
  89. return output
class TFDebertaStableDropout(keras.layers.Layer):
    """
    Optimized dropout module for stabilizing the training

    Args:
        drop_prob (float): the dropout probabilities
    """

    def __init__(self, drop_prob, **kwargs):
        super().__init__(**kwargs)
        # Probability of zeroing an element; 0 disables dropout entirely.
        self.drop_prob = drop_prob

    @tf.custom_gradient
    def xdropout(self, inputs):
        """
        Applies dropout to the inputs, as vanilla dropout, but also scales the remaining elements up by 1/drop_prob.
        """
        # Sample a Bernoulli keep/drop mask with the same shape as `inputs`;
        # `mask` is True where the element is dropped.
        mask = tf.cast(
            1
            - tf.compat.v1.distributions.Bernoulli(probs=1.0 - self.drop_prob).sample(sample_shape=shape_list(inputs)),
            tf.bool,
        )
        # Inverted-dropout scaling keeps the expected activation unchanged.
        scale = tf.convert_to_tensor(1.0 / (1 - self.drop_prob), dtype=self.compute_dtype)
        if self.drop_prob > 0:
            inputs = tf.where(mask, tf.cast(0.0, dtype=self.compute_dtype), inputs) * scale

        def grad(upstream):
            # The custom gradient reuses the SAME mask in the backward pass, so
            # forward and backward drop exactly the same elements.
            if self.drop_prob > 0:
                return tf.where(mask, tf.cast(0.0, dtype=self.compute_dtype), upstream) * scale
            else:
                return upstream

        return inputs, grad

    def call(self, inputs: tf.Tensor, training: tf.Tensor = False):
        # Dropout is applied only in training mode; inference is the identity.
        if training:
            return self.xdropout(inputs)
        return inputs
  122. class TFDebertaLayerNorm(keras.layers.Layer):
  123. """LayerNorm module in the TF style (epsilon inside the square root)."""
  124. def __init__(self, size, eps=1e-12, **kwargs):
  125. super().__init__(**kwargs)
  126. self.size = size
  127. self.eps = eps
  128. def build(self, input_shape):
  129. self.gamma = self.add_weight(shape=[self.size], initializer=tf.ones_initializer(), name="weight")
  130. self.beta = self.add_weight(shape=[self.size], initializer=tf.zeros_initializer(), name="bias")
  131. return super().build(input_shape)
  132. def call(self, x: tf.Tensor) -> tf.Tensor:
  133. mean = tf.reduce_mean(x, axis=[-1], keepdims=True)
  134. variance = tf.reduce_mean(tf.square(x - mean), axis=[-1], keepdims=True)
  135. std = tf.math.sqrt(variance + self.eps)
  136. return self.gamma * (x - mean) / std + self.beta
  137. class TFDebertaSelfOutput(keras.layers.Layer):
  138. def __init__(self, config: DebertaConfig, **kwargs):
  139. super().__init__(**kwargs)
  140. self.dense = keras.layers.Dense(config.hidden_size, name="dense")
  141. self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
  142. self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout")
  143. self.config = config
  144. def call(self, hidden_states, input_tensor, training: bool = False):
  145. hidden_states = self.dense(hidden_states)
  146. hidden_states = self.dropout(hidden_states, training=training)
  147. hidden_states = self.LayerNorm(hidden_states + input_tensor)
  148. return hidden_states
  149. def build(self, input_shape=None):
  150. if self.built:
  151. return
  152. self.built = True
  153. if getattr(self, "dense", None) is not None:
  154. with tf.name_scope(self.dense.name):
  155. self.dense.build([None, None, self.config.hidden_size])
  156. if getattr(self, "LayerNorm", None) is not None:
  157. with tf.name_scope(self.LayerNorm.name):
  158. self.LayerNorm.build([None, None, self.config.hidden_size])
  159. if getattr(self, "dropout", None) is not None:
  160. with tf.name_scope(self.dropout.name):
  161. self.dropout.build(None)
  162. class TFDebertaAttention(keras.layers.Layer):
  163. def __init__(self, config: DebertaConfig, **kwargs):
  164. super().__init__(**kwargs)
  165. self.self = TFDebertaDisentangledSelfAttention(config, name="self")
  166. self.dense_output = TFDebertaSelfOutput(config, name="output")
  167. self.config = config
  168. def call(
  169. self,
  170. input_tensor: tf.Tensor,
  171. attention_mask: tf.Tensor,
  172. query_states: tf.Tensor = None,
  173. relative_pos: tf.Tensor = None,
  174. rel_embeddings: tf.Tensor = None,
  175. output_attentions: bool = False,
  176. training: bool = False,
  177. ) -> Tuple[tf.Tensor]:
  178. self_outputs = self.self(
  179. hidden_states=input_tensor,
  180. attention_mask=attention_mask,
  181. query_states=query_states,
  182. relative_pos=relative_pos,
  183. rel_embeddings=rel_embeddings,
  184. output_attentions=output_attentions,
  185. training=training,
  186. )
  187. if query_states is None:
  188. query_states = input_tensor
  189. attention_output = self.dense_output(
  190. hidden_states=self_outputs[0], input_tensor=query_states, training=training
  191. )
  192. output = (attention_output,) + self_outputs[1:]
  193. return output
  194. def build(self, input_shape=None):
  195. if self.built:
  196. return
  197. self.built = True
  198. if getattr(self, "self", None) is not None:
  199. with tf.name_scope(self.self.name):
  200. self.self.build(None)
  201. if getattr(self, "dense_output", None) is not None:
  202. with tf.name_scope(self.dense_output.name):
  203. self.dense_output.build(None)
  204. class TFDebertaIntermediate(keras.layers.Layer):
  205. def __init__(self, config: DebertaConfig, **kwargs):
  206. super().__init__(**kwargs)
  207. self.dense = keras.layers.Dense(
  208. units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
  209. )
  210. if isinstance(config.hidden_act, str):
  211. self.intermediate_act_fn = get_tf_activation(config.hidden_act)
  212. else:
  213. self.intermediate_act_fn = config.hidden_act
  214. self.config = config
  215. def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
  216. hidden_states = self.dense(inputs=hidden_states)
  217. hidden_states = self.intermediate_act_fn(hidden_states)
  218. return hidden_states
  219. def build(self, input_shape=None):
  220. if self.built:
  221. return
  222. self.built = True
  223. if getattr(self, "dense", None) is not None:
  224. with tf.name_scope(self.dense.name):
  225. self.dense.build([None, None, self.config.hidden_size])
  226. class TFDebertaOutput(keras.layers.Layer):
  227. def __init__(self, config: DebertaConfig, **kwargs):
  228. super().__init__(**kwargs)
  229. self.dense = keras.layers.Dense(
  230. units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
  231. )
  232. self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
  233. self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout")
  234. self.config = config
  235. def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
  236. hidden_states = self.dense(inputs=hidden_states)
  237. hidden_states = self.dropout(hidden_states, training=training)
  238. hidden_states = self.LayerNorm(hidden_states + input_tensor)
  239. return hidden_states
  240. def build(self, input_shape=None):
  241. if self.built:
  242. return
  243. self.built = True
  244. if getattr(self, "dense", None) is not None:
  245. with tf.name_scope(self.dense.name):
  246. self.dense.build([None, None, self.config.intermediate_size])
  247. if getattr(self, "LayerNorm", None) is not None:
  248. with tf.name_scope(self.LayerNorm.name):
  249. self.LayerNorm.build([None, None, self.config.hidden_size])
  250. if getattr(self, "dropout", None) is not None:
  251. with tf.name_scope(self.dropout.name):
  252. self.dropout.build(None)
  253. class TFDebertaLayer(keras.layers.Layer):
  254. def __init__(self, config: DebertaConfig, **kwargs):
  255. super().__init__(**kwargs)
  256. self.attention = TFDebertaAttention(config, name="attention")
  257. self.intermediate = TFDebertaIntermediate(config, name="intermediate")
  258. self.bert_output = TFDebertaOutput(config, name="output")
  259. def call(
  260. self,
  261. hidden_states: tf.Tensor,
  262. attention_mask: tf.Tensor,
  263. query_states: tf.Tensor = None,
  264. relative_pos: tf.Tensor = None,
  265. rel_embeddings: tf.Tensor = None,
  266. output_attentions: bool = False,
  267. training: bool = False,
  268. ) -> Tuple[tf.Tensor]:
  269. attention_outputs = self.attention(
  270. input_tensor=hidden_states,
  271. attention_mask=attention_mask,
  272. query_states=query_states,
  273. relative_pos=relative_pos,
  274. rel_embeddings=rel_embeddings,
  275. output_attentions=output_attentions,
  276. training=training,
  277. )
  278. attention_output = attention_outputs[0]
  279. intermediate_output = self.intermediate(hidden_states=attention_output)
  280. layer_output = self.bert_output(
  281. hidden_states=intermediate_output, input_tensor=attention_output, training=training
  282. )
  283. outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
  284. return outputs
  285. def build(self, input_shape=None):
  286. if self.built:
  287. return
  288. self.built = True
  289. if getattr(self, "attention", None) is not None:
  290. with tf.name_scope(self.attention.name):
  291. self.attention.build(None)
  292. if getattr(self, "intermediate", None) is not None:
  293. with tf.name_scope(self.intermediate.name):
  294. self.intermediate.build(None)
  295. if getattr(self, "bert_output", None) is not None:
  296. with tf.name_scope(self.bert_output.name):
  297. self.bert_output.build(None)
class TFDebertaEncoder(keras.layers.Layer):
    """Stack of DeBERTa transformer layers sharing one relative-position embedding table."""

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        self.layer = [TFDebertaLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
        self.relative_attention = getattr(config, "relative_attention", False)
        self.config = config
        if self.relative_attention:
            # Fall back to the absolute position budget when max_relative_positions
            # is unset or non-positive.
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if self.relative_attention:
            # One relative-position embedding table shared by every layer,
            # covering distances in (-max_relative_positions, max_relative_positions].
            self.rel_embeddings = self.add_weight(
                name="rel_embeddings.weight",
                shape=[self.max_relative_positions * 2, self.config.hidden_size],
                initializer=get_initializer(self.config.initializer_range),
            )
        if getattr(self, "layer", None) is not None:
            for layer in self.layer:
                with tf.name_scope(layer.name):
                    layer.build(None)

    def get_rel_embedding(self):
        # None when relative attention is disabled.
        rel_embeddings = self.rel_embeddings if self.relative_attention else None
        return rel_embeddings

    def get_attention_mask(self, attention_mask):
        # Expand a [batch, seq] mask to a pairwise mask where (i, j) is valid
        # only when both token i and token j are valid.
        if len(shape_list(attention_mask)) <= 2:
            extended_attention_mask = tf.expand_dims(tf.expand_dims(attention_mask, 1), 2)
            attention_mask = extended_attention_mask * tf.expand_dims(tf.squeeze(extended_attention_mask, -2), -1)
            attention_mask = tf.cast(attention_mask, tf.uint8)
        elif len(shape_list(attention_mask)) == 3:
            # A [batch, seq, seq] mask only needs a broadcastable head axis.
            attention_mask = tf.expand_dims(attention_mask, 1)
        return attention_mask

    def get_rel_pos(self, hidden_states, query_states=None, relative_pos=None):
        # Lazily build relative positions when the caller did not supply them;
        # the query length comes from query_states when present.
        if self.relative_attention and relative_pos is None:
            q = shape_list(query_states)[-2] if query_states is not None else shape_list(hidden_states)[-2]
            relative_pos = build_relative_position(q, shape_list(hidden_states)[-2])
        return relative_pos

    def call(
        self,
        hidden_states: tf.Tensor,
        attention_mask: tf.Tensor,
        query_states: tf.Tensor = None,
        relative_pos: tf.Tensor = None,
        output_attentions: bool = False,
        output_hidden_states: bool = False,
        return_dict: bool = True,
        training: bool = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        """
        Run the layer stack and collect hidden states / attentions as requested.

        Returns a `TFBaseModelOutput` when `return_dict` is True, otherwise a
        tuple of the non-None entries among (last_hidden_state, all_hidden_states,
        all_attentions).
        """
        all_hidden_states = () if output_hidden_states else None
        all_attentions = () if output_attentions else None
        attention_mask = self.get_attention_mask(attention_mask)
        relative_pos = self.get_rel_pos(hidden_states, query_states, relative_pos)
        # hidden_states may be a sequence of per-layer inputs; the first element
        # then seeds the key/value stream.
        if isinstance(hidden_states, Sequence):
            next_kv = hidden_states[0]
        else:
            next_kv = hidden_states
        rel_embeddings = self.get_rel_embedding()
        for i, layer_module in enumerate(self.layer):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            layer_outputs = layer_module(
                hidden_states=next_kv,
                attention_mask=attention_mask,
                query_states=query_states,
                relative_pos=relative_pos,
                rel_embeddings=rel_embeddings,
                output_attentions=output_attentions,
                training=training,
            )
            hidden_states = layer_outputs[0]
            # When a separate query stream is in use it tracks the layer output,
            # and the key/value stream advances through the supplied sequence.
            if query_states is not None:
                query_states = hidden_states
                if isinstance(hidden_states, Sequence):
                    next_kv = hidden_states[i + 1] if i + 1 < len(self.layer) else None
            else:
                next_kv = hidden_states
            if output_attentions:
                all_attentions = all_attentions + (layer_outputs[1],)
        # Add last layer
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)
        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
        return TFBaseModelOutput(
            last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
        )
  387. def build_relative_position(query_size, key_size):
  388. """
  389. Build relative position according to the query and key
  390. We assume the absolute position of query \\(P_q\\) is range from (0, query_size) and the absolute position of key
  391. \\(P_k\\) is range from (0, key_size), The relative positions from query to key is \\(R_{q \\rightarrow k} = P_q -
  392. P_k\\)
  393. Args:
  394. query_size (int): the length of query
  395. key_size (int): the length of key
  396. Return:
  397. `tf.Tensor`: A tensor with shape [1, query_size, key_size]
  398. """
  399. q_ids = tf.range(query_size, dtype=tf.int32)
  400. k_ids = tf.range(key_size, dtype=tf.int32)
  401. rel_pos_ids = q_ids[:, None] - tf.tile(tf.reshape(k_ids, [1, -1]), [query_size, 1])
  402. rel_pos_ids = rel_pos_ids[:query_size, :]
  403. rel_pos_ids = tf.expand_dims(rel_pos_ids, axis=0)
  404. return tf.cast(rel_pos_ids, tf.int64)
  405. def c2p_dynamic_expand(c2p_pos, query_layer, relative_pos):
  406. shapes = [
  407. shape_list(query_layer)[0],
  408. shape_list(query_layer)[1],
  409. shape_list(query_layer)[2],
  410. shape_list(relative_pos)[-1],
  411. ]
  412. return tf.broadcast_to(c2p_pos, shapes)
  413. def p2c_dynamic_expand(c2p_pos, query_layer, key_layer):
  414. shapes = [
  415. shape_list(query_layer)[0],
  416. shape_list(query_layer)[1],
  417. shape_list(key_layer)[-2],
  418. shape_list(key_layer)[-2],
  419. ]
  420. return tf.broadcast_to(c2p_pos, shapes)
  421. def pos_dynamic_expand(pos_index, p2c_att, key_layer):
  422. shapes = shape_list(p2c_att)[:2] + [shape_list(pos_index)[-2], shape_list(key_layer)[-2]]
  423. return tf.broadcast_to(pos_index, shapes)
def torch_gather(x, indices, gather_axis):
    """
    TF re-implementation of `torch.gather`: select elements of `x` along
    `gather_axis` using `indices` of the same rank.
    """
    # Normalize a negative axis to its positive equivalent.
    if gather_axis < 0:
        gather_axis = tf.rank(x) + gather_axis

    # tf.gather with batch_dims=1 (below) gathers along the LAST axis, so when
    # the requested axis is not last, rotate the axes to move it into last place.
    if gather_axis != tf.rank(x) - 1:
        pre_roll = tf.rank(x) - 1 - gather_axis
        permutation = tf.roll(tf.range(tf.rank(x)), pre_roll, axis=0)
        x = tf.transpose(x, perm=permutation)
        indices = tf.transpose(indices, perm=permutation)
    else:
        pre_roll = 0

    # Collapse all leading axes into one batch axis and gather row-wise.
    flat_x = tf.reshape(x, (-1, tf.shape(x)[-1]))
    flat_indices = tf.reshape(indices, (-1, tf.shape(indices)[-1]))
    gathered = tf.gather(flat_x, flat_indices, batch_dims=1)
    gathered = tf.reshape(gathered, tf.shape(indices))

    # Undo the axis rotation, if any was applied.
    if pre_roll != 0:
        permutation = tf.roll(tf.range(tf.rank(x)), -pre_roll, axis=0)
        gathered = tf.transpose(gathered, perm=permutation)

    return gathered
  442. class TFDebertaDisentangledSelfAttention(keras.layers.Layer):
  443. """
  444. Disentangled self-attention module
  445. Parameters:
  446. config (`str`):
  447. A model config class instance with the configuration to build a new model. The schema is similar to
  448. *BertConfig*, for more details, please refer [`DebertaConfig`]
  449. """
    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
                f"heads ({config.num_attention_heads})"
            )
        self.num_attention_heads = config.num_attention_heads
        # Per-head width; all_head_size equals hidden_size given the check above.
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size
        # Single bias-free projection producing Q, K and V stacked on the last axis;
        # the Q/V biases are added separately (see build()).
        self.in_proj = keras.layers.Dense(
            self.all_head_size * 3,
            kernel_initializer=get_initializer(config.initializer_range),
            name="in_proj",
            use_bias=False,
        )
        # Which disentangled attention terms are enabled ("c2p", "p2c").
        self.pos_att_type = config.pos_att_type if config.pos_att_type is not None else []
        self.relative_attention = getattr(config, "relative_attention", False)
        self.talking_head = getattr(config, "talking_head", False)
        if self.talking_head:
            # Head-mixing projections applied to the scores before the softmax
            # and to the probabilities after it.
            self.head_logits_proj = keras.layers.Dense(
                self.num_attention_heads,
                kernel_initializer=get_initializer(config.initializer_range),
                name="head_logits_proj",
                use_bias=False,
            )
            self.head_weights_proj = keras.layers.Dense(
                self.num_attention_heads,
                kernel_initializer=get_initializer(config.initializer_range),
                name="head_weights_proj",
                use_bias=False,
            )
        self.softmax = TFDebertaXSoftmax(axis=-1)
        if self.relative_attention:
            # Fall back to the absolute position budget when unset or non-positive.
            self.max_relative_positions = getattr(config, "max_relative_positions", -1)
            if self.max_relative_positions < 1:
                self.max_relative_positions = config.max_position_embeddings
            self.pos_dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="pos_dropout")
            # Content-to-position projection (bias-free).
            if "c2p" in self.pos_att_type:
                self.pos_proj = keras.layers.Dense(
                    self.all_head_size,
                    kernel_initializer=get_initializer(config.initializer_range),
                    name="pos_proj",
                    use_bias=False,
                )
            # Position-to-content projection (keeps its bias).
            if "p2c" in self.pos_att_type:
                self.pos_q_proj = keras.layers.Dense(
                    self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="pos_q_proj"
                )
        self.dropout = TFDebertaStableDropout(config.attention_probs_dropout_prob, name="dropout")
        self.config = config
    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        # Q and V biases are separate weights because in_proj is bias-free.
        # NOTE(review): shape=(self.all_head_size) is an int, not a 1-tuple;
        # keras interprets it as the 1-D shape [all_head_size] — consider
        # (self.all_head_size,) for clarity.
        self.q_bias = self.add_weight(
            name="q_bias", shape=(self.all_head_size), initializer=keras.initializers.Zeros()
        )
        self.v_bias = self.add_weight(
            name="v_bias", shape=(self.all_head_size), initializer=keras.initializers.Zeros()
        )
        if getattr(self, "in_proj", None) is not None:
            with tf.name_scope(self.in_proj.name):
                self.in_proj.build([None, None, self.config.hidden_size])
        if getattr(self, "dropout", None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)
        # The remaining sublayers exist only under certain config flags, hence
        # the getattr guards.
        if getattr(self, "head_logits_proj", None) is not None:
            with tf.name_scope(self.head_logits_proj.name):
                self.head_logits_proj.build(None)
        if getattr(self, "head_weights_proj", None) is not None:
            with tf.name_scope(self.head_weights_proj.name):
                self.head_weights_proj.build(None)
        if getattr(self, "pos_dropout", None) is not None:
            with tf.name_scope(self.pos_dropout.name):
                self.pos_dropout.build(None)
        if getattr(self, "pos_proj", None) is not None:
            with tf.name_scope(self.pos_proj.name):
                self.pos_proj.build([self.config.hidden_size])
        if getattr(self, "pos_q_proj", None) is not None:
            with tf.name_scope(self.pos_q_proj.name):
                self.pos_q_proj.build([self.config.hidden_size])
  532. def transpose_for_scores(self, tensor: tf.Tensor) -> tf.Tensor:
  533. shape = shape_list(tensor)[:-1] + [self.num_attention_heads, -1]
  534. # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
  535. tensor = tf.reshape(tensor=tensor, shape=shape)
  536. # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
  537. return tf.transpose(tensor, perm=[0, 2, 1, 3])
def call(
    self,
    hidden_states: tf.Tensor,
    attention_mask: tf.Tensor,
    query_states: tf.Tensor = None,
    relative_pos: tf.Tensor = None,
    rel_embeddings: tf.Tensor = None,
    output_attentions: bool = False,
    training: bool = False,
) -> Tuple[tf.Tensor]:
    """
    Call the module

    Args:
        hidden_states (`tf.Tensor`):
            Input states to the module usually the output from previous layer, it will be the Q,K and V in
            *Attention(Q,K,V)*

        attention_mask (`tf.Tensor`):
            An attention mask matrix of shape [*B*, *N*, *N*] where *B* is the batch size, *N* is the maximum
            sequence length in which element [i,j] = *1* means the *i* th token in the input can attend to the *j*
            th token.

        query_states (`tf.Tensor`, *optional*):
            The *Q* state in *Attention(Q,K,V)*.

        relative_pos (`tf.Tensor`):
            The relative position encoding between the tokens in the sequence. It's of shape [*B*, *N*, *N*] with
            values ranging in [*-max_relative_positions*, *max_relative_positions*].

        rel_embeddings (`tf.Tensor`):
            The embedding of relative distances. It's a tensor of shape [\\(2 \\times
            \\text{max_relative_positions}\\), *hidden_size*].

        output_attentions (`bool`, *optional*):
            Whether to also return the attention probabilities alongside the context layer.

        training (`bool`, *optional*):
            Whether the layer runs in training mode (enables the dropout layers).
    """
    if query_states is None:
        # Self-attention path: one fused projection yields Q, K and V, which are
        # separated on the last axis after the per-head transpose.
        qp = self.in_proj(hidden_states)  # .split(self.all_head_size, dim=-1)
        query_layer, key_layer, value_layer = tf.split(
            self.transpose_for_scores(qp), num_or_size_splits=3, axis=-1
        )
    else:
        # Queries come from `query_states` while keys/values come from `hidden_states`.
        # The fused in_proj weight must be re-split and regrouped per head into
        # separate Q/K/V matrices before the projections can be applied.

        def linear(w, b, x):
            # x @ w^T (+ b): mirrors a PyTorch-style Linear with pre-transposed weight.
            out = tf.matmul(x, w, transpose_b=True)
            if b is not None:
                out += tf.transpose(b)
            return out

        # Split the fused weight into num_attention_heads * 3 row chunks; the chunks
        # are laid out head-major, i.e. [h0_q, h0_k, h0_v, h1_q, ...] (see the
        # `ws[i * 3 + k]` indexing below).
        ws = tf.split(
            tf.transpose(self.in_proj.weight[0]), num_or_size_splits=self.num_attention_heads * 3, axis=0
        )
        qkvw = tf.TensorArray(dtype=self.dtype, size=3)
        for k in tf.range(3):
            # Gather chunk k (Q, K or V) of every head and concatenate them back
            # into one full-size weight matrix.
            qkvw_inside = tf.TensorArray(dtype=self.dtype, size=self.num_attention_heads)
            for i in tf.range(self.num_attention_heads):
                qkvw_inside = qkvw_inside.write(i, ws[i * 3 + k])
            qkvw = qkvw.write(k, qkvw_inside.concat())
        # in_proj carries no bias here; the learned q/v biases are added further below.
        qkvb = [None] * 3

        q = linear(qkvw[0], qkvb[0], query_states)
        k = linear(qkvw[1], qkvb[1], hidden_states)
        v = linear(qkvw[2], qkvb[2], hidden_states)
        query_layer = self.transpose_for_scores(q)
        key_layer = self.transpose_for_scores(k)
        value_layer = self.transpose_for_scores(v)

    # Learned biases are applied to Q and V only (no bias for K in this layer).
    query_layer = query_layer + self.transpose_for_scores(self.q_bias[None, None, :])
    value_layer = value_layer + self.transpose_for_scores(self.v_bias[None, None, :])

    rel_att = None
    # Take the dot product between "query" and "key" to get the raw attention scores.
    # The scale accounts for the extra score components (1 content->content term plus
    # one term per entry in pos_att_type).
    scale_factor = 1 + len(self.pos_att_type)
    scale = math.sqrt(shape_list(query_layer)[-1] * scale_factor)
    query_layer = query_layer / scale

    attention_scores = tf.matmul(query_layer, tf.transpose(key_layer, [0, 1, 3, 2]))
    if self.relative_attention:
        rel_embeddings = self.pos_dropout(rel_embeddings, training=training)
        rel_att = self.disentangled_att_bias(query_layer, key_layer, relative_pos, rel_embeddings, scale_factor)

    if rel_att is not None:
        attention_scores = attention_scores + rel_att

    if self.talking_head:
        # Mix attention logits across heads: move the head axis last so the Dense
        # projection acts on it, then restore the original layout.
        attention_scores = tf.transpose(
            self.head_logits_proj(tf.transpose(attention_scores, [0, 2, 3, 1])), [0, 3, 1, 2]
        )

    attention_probs = self.softmax(attention_scores, attention_mask)
    attention_probs = self.dropout(attention_probs, training=training)
    if self.talking_head:
        # Second cross-head mixing, applied after the softmax.
        attention_probs = tf.transpose(
            self.head_weights_proj(tf.transpose(attention_probs, [0, 2, 3, 1])), [0, 3, 1, 2]
        )

    context_layer = tf.matmul(attention_probs, value_layer)
    # [B, H, N, d] -> [B, N, H, d] before flattening the head dimensions.
    context_layer = tf.transpose(context_layer, [0, 2, 1, 3])
    context_layer_shape = shape_list(context_layer)
    # Set the final dimension here explicitly.
    # Calling tf.reshape(context_layer, (*context_layer_shape[:-2], -1)) raises an error when executing
    # the model in graph mode as context_layer is reshaped to (None, 7, None) and Dense layer in TFDebertaV2SelfOutput
    # requires final input dimension to be defined
    new_context_layer_shape = context_layer_shape[:-2] + [context_layer_shape[-2] * context_layer_shape[-1]]
    context_layer = tf.reshape(context_layer, new_context_layer_shape)
    outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
    return outputs
def disentangled_att_bias(self, query_layer, key_layer, relative_pos, rel_embeddings, scale_factor):
    """
    Compute the disentangled relative-attention bias that is added to the plain
    content->content attention scores: a content->position ("c2p") component and/or a
    position->content ("p2c") component, depending on `self.pos_att_type`.

    Args:
        query_layer: query states, shaped [batch, heads, query_len, head_dim].
        key_layer: key states, shaped [batch, heads, key_len, head_dim].
        relative_pos: relative position ids (rank 2, 3 or 4), or `None` to build them.
        rel_embeddings: relative-distance embedding table.
        scale_factor: overall score scale factor (1 + number of positional score terms).

    Returns:
        The summed positional bias score tensor (0 if no positional component applies).
    """
    if relative_pos is None:
        q = shape_list(query_layer)[-2]
        relative_pos = build_relative_position(q, shape_list(key_layer)[-2])
    shape_list_pos = shape_list(relative_pos)
    # Normalize relative_pos to rank 4 ([batch, heads, query_len, key_len]).
    if len(shape_list_pos) == 2:
        relative_pos = tf.expand_dims(tf.expand_dims(relative_pos, 0), 0)
    elif len(shape_list_pos) == 3:
        relative_pos = tf.expand_dims(relative_pos, 1)
    # bxhxqxk
    elif len(shape_list_pos) != 4:
        raise ValueError(f"Relative position ids must be of dim 2 or 3 or 4. {len(shape_list_pos)}")

    # Effective attention span, clamped by max_relative_positions; used to slice the
    # centered window out of the relative embedding table.
    att_span = tf.cast(
        tf.minimum(
            tf.maximum(shape_list(query_layer)[-2], shape_list(key_layer)[-2]), self.max_relative_positions
        ),
        tf.int64,
    )
    rel_embeddings = tf.expand_dims(
        rel_embeddings[self.max_relative_positions - att_span : self.max_relative_positions + att_span, :], 0
    )

    score = 0

    # content->position
    if "c2p" in self.pos_att_type:
        pos_key_layer = self.pos_proj(rel_embeddings)
        pos_key_layer = self.transpose_for_scores(pos_key_layer)
        c2p_att = tf.matmul(query_layer, tf.transpose(pos_key_layer, [0, 1, 3, 2]))
        # Shift relative positions into [0, 2*att_span) so they index the embedding window.
        c2p_pos = tf.clip_by_value(relative_pos + att_span, 0, att_span * 2 - 1)
        c2p_att = torch_gather(c2p_att, c2p_dynamic_expand(c2p_pos, query_layer, relative_pos), -1)
        score += c2p_att

    # position->content
    if "p2c" in self.pos_att_type:
        pos_query_layer = self.pos_q_proj(rel_embeddings)
        pos_query_layer = self.transpose_for_scores(pos_query_layer)
        # p2c carries its own scaling (the shared scale was applied to query_layer only).
        pos_query_layer /= tf.math.sqrt(
            tf.cast(shape_list(pos_query_layer)[-1] * scale_factor, dtype=self.compute_dtype)
        )
        if shape_list(query_layer)[-2] != shape_list(key_layer)[-2]:
            # Query/key lengths differ: rebuild key->key relative positions.
            r_pos = build_relative_position(shape_list(key_layer)[-2], shape_list(key_layer)[-2])
        else:
            r_pos = relative_pos
        # Negate before shifting: p2c looks up distances from the key's point of view.
        p2c_pos = tf.clip_by_value(-r_pos + att_span, 0, att_span * 2 - 1)
        p2c_att = tf.matmul(key_layer, tf.transpose(pos_query_layer, [0, 1, 3, 2]))
        p2c_att = tf.transpose(
            torch_gather(p2c_att, p2c_dynamic_expand(p2c_pos, query_layer, key_layer), -1), [0, 1, 3, 2]
        )
        if shape_list(query_layer)[-2] != shape_list(key_layer)[-2]:
            # Re-align rows to the query positions when lengths differ.
            pos_index = tf.expand_dims(relative_pos[:, :, :, 0], -1)
            p2c_att = torch_gather(p2c_att, pos_dynamic_expand(pos_index, p2c_att, key_layer), -2)
        score += p2c_att

    return score
class TFDebertaEmbeddings(keras.layers.Layer):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        # embedding_size may differ from hidden_size; when it does, the summed
        # embeddings are projected to hidden_size via `embed_proj` (no bias).
        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
        self.hidden_size = config.hidden_size
        self.max_position_embeddings = config.max_position_embeddings
        # When False, no absolute position embeddings are added to the input.
        self.position_biased_input = getattr(config, "position_biased_input", True)
        self.initializer_range = config.initializer_range
        if self.embedding_size != config.hidden_size:
            self.embed_proj = keras.layers.Dense(
                config.hidden_size,
                kernel_initializer=get_initializer(config.initializer_range),
                name="embed_proj",
                use_bias=False,
            )
        self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
        self.dropout = TFDebertaStableDropout(config.hidden_dropout_prob, name="dropout")

    def build(self, input_shape=None):
        # Weights are created under explicit name scopes so checkpoint variable
        # names keep the expected word/token_type/position layout.
        with tf.name_scope("word_embeddings"):
            self.weight = self.add_weight(
                name="weight",
                shape=[self.config.vocab_size, self.embedding_size],
                initializer=get_initializer(self.initializer_range),
            )

        with tf.name_scope("token_type_embeddings"):
            if self.config.type_vocab_size > 0:
                self.token_type_embeddings = self.add_weight(
                    name="embeddings",
                    shape=[self.config.type_vocab_size, self.embedding_size],
                    initializer=get_initializer(self.initializer_range),
                )
            else:
                # No token-type vocabulary configured.
                self.token_type_embeddings = None

        with tf.name_scope("position_embeddings"):
            if self.position_biased_input:
                self.position_embeddings = self.add_weight(
                    name="embeddings",
                    shape=[self.max_position_embeddings, self.hidden_size],
                    initializer=get_initializer(self.initializer_range),
                )
            else:
                self.position_embeddings = None

        if self.built:
            return
        self.built = True
        if getattr(self, "LayerNorm", None) is not None:
            with tf.name_scope(self.LayerNorm.name):
                self.LayerNorm.build([None, None, self.config.hidden_size])
        if getattr(self, "dropout", None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)
        if getattr(self, "embed_proj", None) is not None:
            with tf.name_scope(self.embed_proj.name):
                self.embed_proj.build([None, None, self.embedding_size])

    def call(
        self,
        input_ids: tf.Tensor = None,
        position_ids: tf.Tensor = None,
        token_type_ids: tf.Tensor = None,
        inputs_embeds: tf.Tensor = None,
        mask: tf.Tensor = None,
        training: bool = False,
    ) -> tf.Tensor:
        """
        Applies embedding based on inputs tensor.

        Returns:
            final_embeddings (`tf.Tensor`): output embedding tensor.
        """
        if input_ids is None and inputs_embeds is None:
            raise ValueError("Need to provide either `input_ids` or `input_embeds`.")

        if input_ids is not None:
            check_embeddings_within_bounds(input_ids, self.config.vocab_size)
            inputs_embeds = tf.gather(params=self.weight, indices=input_ids)

        input_shape = shape_list(inputs_embeds)[:-1]

        if token_type_ids is None:
            token_type_ids = tf.fill(dims=input_shape, value=0)

        if position_ids is None:
            # Default to sequential positions 0..seq_len-1 shared across the batch.
            position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)

        final_embeddings = inputs_embeds
        if self.position_biased_input:
            position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
            final_embeddings += position_embeds
        if self.config.type_vocab_size > 0:
            token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
            final_embeddings += token_type_embeds

        if self.embedding_size != self.hidden_size:
            final_embeddings = self.embed_proj(final_embeddings)

        final_embeddings = self.LayerNorm(final_embeddings)

        if mask is not None:
            # Zero out embeddings at masked (padding) positions. A rank-4 attention
            # mask is squeezed back to [batch, seq] before broadcasting.
            if len(shape_list(mask)) != len(shape_list(final_embeddings)):
                if len(shape_list(mask)) == 4:
                    mask = tf.squeeze(tf.squeeze(mask, axis=1), axis=1)
                mask = tf.cast(tf.expand_dims(mask, axis=2), dtype=self.compute_dtype)

            final_embeddings = final_embeddings * mask

        final_embeddings = self.dropout(final_embeddings, training=training)

        return final_embeddings
  779. class TFDebertaPredictionHeadTransform(keras.layers.Layer):
  780. def __init__(self, config: DebertaConfig, **kwargs):
  781. super().__init__(**kwargs)
  782. self.embedding_size = getattr(config, "embedding_size", config.hidden_size)
  783. self.dense = keras.layers.Dense(
  784. units=self.embedding_size,
  785. kernel_initializer=get_initializer(config.initializer_range),
  786. name="dense",
  787. )
  788. if isinstance(config.hidden_act, str):
  789. self.transform_act_fn = get_tf_activation(config.hidden_act)
  790. else:
  791. self.transform_act_fn = config.hidden_act
  792. self.LayerNorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
  793. self.config = config
  794. def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
  795. hidden_states = self.dense(inputs=hidden_states)
  796. hidden_states = self.transform_act_fn(hidden_states)
  797. hidden_states = self.LayerNorm(hidden_states)
  798. return hidden_states
  799. def build(self, input_shape=None):
  800. if self.built:
  801. return
  802. self.built = True
  803. if getattr(self, "dense", None) is not None:
  804. with tf.name_scope(self.dense.name):
  805. self.dense.build([None, None, self.config.hidden_size])
  806. if getattr(self, "LayerNorm", None) is not None:
  807. with tf.name_scope(self.LayerNorm.name):
  808. self.LayerNorm.build([None, None, self.embedding_size])
class TFDebertaLMPredictionHead(keras.layers.Layer):
    """MLM prediction head: transform, then decode against the (tied) input embedding matrix."""

    def __init__(self, config: DebertaConfig, input_embeddings: keras.layers.Layer, **kwargs):
        super().__init__(**kwargs)

        self.config = config
        self.embedding_size = getattr(config, "embedding_size", config.hidden_size)

        self.transform = TFDebertaPredictionHeadTransform(config, name="transform")

        # The output weights are the same as the input embeddings, but there is
        # an output-only bias for each token.
        self.input_embeddings = input_embeddings

    def build(self, input_shape=None):
        # The bias is created unconditionally, before the early-exit guard below.
        self.bias = self.add_weight(shape=(self.config.vocab_size,), initializer="zeros", trainable=True, name="bias")

        if self.built:
            return
        self.built = True
        if getattr(self, "transform", None) is not None:
            with tf.name_scope(self.transform.name):
                self.transform.build(None)

    def get_output_embeddings(self) -> keras.layers.Layer:
        # Because of the weight tying, the output embeddings ARE the input embeddings.
        return self.input_embeddings

    def set_output_embeddings(self, value: tf.Variable):
        self.input_embeddings.weight = value
        self.input_embeddings.vocab_size = shape_list(value)[0]

    def get_bias(self) -> Dict[str, tf.Variable]:
        return {"bias": self.bias}

    def set_bias(self, value: tf.Variable):
        self.bias = value["bias"]
        # Keep the config in sync with the (possibly resized) bias.
        self.config.vocab_size = shape_list(value["bias"])[0]

    def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
        hidden_states = self.transform(hidden_states=hidden_states)
        seq_length = shape_list(hidden_states)[1]
        # Flatten to 2D for the matmul against the embedding matrix, then restore
        # the [batch, seq, vocab] shape before adding the output bias.
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
        hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
        hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.config.vocab_size])
        hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)

        return hidden_states
  844. class TFDebertaOnlyMLMHead(keras.layers.Layer):
  845. def __init__(self, config: DebertaConfig, input_embeddings: keras.layers.Layer, **kwargs):
  846. super().__init__(**kwargs)
  847. self.predictions = TFDebertaLMPredictionHead(config, input_embeddings, name="predictions")
  848. def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
  849. prediction_scores = self.predictions(hidden_states=sequence_output)
  850. return prediction_scores
  851. def build(self, input_shape=None):
  852. if self.built:
  853. return
  854. self.built = True
  855. if getattr(self, "predictions", None) is not None:
  856. with tf.name_scope(self.predictions.name):
  857. self.predictions.build(None)
# @keras_serializable
# NOTE(review): upstream Transformers applies @keras_serializable to this layer —
# confirm whether commenting it out here was intentional.
class TFDebertaMainLayer(keras.layers.Layer):
    # Config class consumed by the (normally applied) keras_serializable machinery.
    config_class = DebertaConfig

    def __init__(self, config: DebertaConfig, **kwargs):
        super().__init__(**kwargs)

        self.config = config

        self.embeddings = TFDebertaEmbeddings(config, name="embeddings")
        self.encoder = TFDebertaEncoder(config, name="encoder")

    def get_input_embeddings(self) -> keras.layers.Layer:
        return self.embeddings

    def set_input_embeddings(self, value: tf.Variable):
        self.embeddings.weight = value
        self.embeddings.vocab_size = shape_list(value)[0]

    def _prune_heads(self, heads_to_prune):
        """
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
        class PreTrainedModel
        """
        raise NotImplementedError

    @unpack_inputs
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        """Run embeddings then the encoder; returns last hidden state (+ optional extras)."""
        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            input_shape = shape_list(input_ids)
        elif inputs_embeds is not None:
            input_shape = shape_list(inputs_embeds)[:-1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        # Default: attend everywhere, single segment.
        if attention_mask is None:
            attention_mask = tf.fill(dims=input_shape, value=1)

        if token_type_ids is None:
            token_type_ids = tf.fill(dims=input_shape, value=0)

        embedding_output = self.embeddings(
            input_ids=input_ids,
            position_ids=position_ids,
            token_type_ids=token_type_ids,
            inputs_embeds=inputs_embeds,
            mask=attention_mask,
            training=training,
        )

        encoder_outputs = self.encoder(
            hidden_states=embedding_output,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output,) + encoder_outputs[1:]

        return TFBaseModelOutput(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
            attentions=encoder_outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "embeddings", None) is not None:
            with tf.name_scope(self.embeddings.name):
                self.embeddings.build(None)
        if getattr(self, "encoder", None) is not None:
            with tf.name_scope(self.encoder.name):
                self.encoder.build(None)
class TFDebertaPreTrainedModel(TFPreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    # Configuration class shared by all DeBERTa model heads below.
    config_class = DebertaConfig
    # Attribute name under which the main layer is stored on derived models.
    base_model_prefix = "deberta"
  943. DEBERTA_START_DOCSTRING = r"""
  944. The DeBERTa model was proposed in [DeBERTa: Decoding-enhanced BERT with Disentangled
  945. Attention](https://arxiv.org/abs/2006.03654) by Pengcheng He, Xiaodong Liu, Jianfeng Gao, Weizhu Chen. It's build
  946. on top of BERT/RoBERTa with two improvements, i.e. disentangled attention and enhanced mask decoder. With those two
  947. improvements, it out perform BERT/RoBERTa on a majority of tasks with 80GB pretraining data.
  948. This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
  949. as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
  950. behavior.
  951. <Tip>
  952. TensorFlow models and layers in `transformers` accept two formats as input:
  953. - having all inputs as keyword arguments (like PyTorch models), or
  954. - having all inputs as a list, tuple or dict in the first positional argument.
  955. The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
  956. and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
  957. pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
  958. format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
  959. the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
  960. positional argument:
  961. - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
  962. - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
  963. `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
  964. - a dictionary with one or several input Tensors associated to the input names given in the docstring:
  965. `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
  966. Note that when creating models and layers with
  967. [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
  968. about any of this, as you can just pass inputs like you would to any other Python function!
  969. </Tip>
  970. Parameters:
  971. config ([`DebertaConfig`]): Model configuration class with all the parameters of the model.
  972. Initializing with a config file does not load the weights associated with the model, only the
  973. configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
  974. """
  975. DEBERTA_INPUTS_DOCSTRING = r"""
  976. Args:
  977. input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]` ``Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]` and each example must have the shape `({0})`):
  978. Indices of input sequence tokens in the vocabulary.
  979. Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
  980. [`PreTrainedTokenizer.__call__`] for details.
  981. [What are input IDs?](../glossary#input-ids)
  982. attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
  983. Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
  984. - 1 for tokens that are **not masked**,
  985. - 0 for tokens that are **masked**.
  986. [What are attention masks?](../glossary#attention-mask)
  987. token_type_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
  988. Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
  989. 1]`:
  990. - 0 corresponds to a *sentence A* token,
  991. - 1 corresponds to a *sentence B* token.
  992. [What are token type IDs?](../glossary#token-type-ids)
  993. position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
  994. Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
  995. config.max_position_embeddings - 1]`.
  996. [What are position IDs?](../glossary#position-ids)
  997. inputs_embeds (`np.ndarray` or `tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
  998. Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
  999. is useful if you want more control over how to convert *input_ids* indices into associated vectors than the
  1000. model's internal embedding lookup matrix.
  1001. output_attentions (`bool`, *optional*):
  1002. Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
  1003. tensors for more detail.
  1004. output_hidden_states (`bool`, *optional*):
  1005. Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
  1006. more detail.
  1007. return_dict (`bool`, *optional*):
  1008. Whether or not to return a [`~utils.ModelOutput``] instead of a plain tuple.
  1009. """
@add_start_docstrings(
    "The bare DeBERTa Model transformer outputting raw hidden-states without any specific head on top.",
    DEBERTA_START_DOCSTRING,
)
class TFDebertaModel(TFDebertaPreTrainedModel):
    """Bare DeBERTa encoder; a thin wrapper around `TFDebertaMainLayer`."""

    def __init__(self, config: DebertaConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.deberta = TFDebertaMainLayer(config, name="deberta")

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFBaseModelOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: Optional[bool] = False,
    ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
        # Pure pass-through to the main layer.
        outputs = self.deberta(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )

        return outputs

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "deberta", None) is not None:
            with tf.name_scope(self.deberta.name):
                self.deberta.build(None)
@add_start_docstrings("""DeBERTa Model with a `language modeling` head on top.""", DEBERTA_START_DOCSTRING)
class TFDebertaForMaskedLM(TFDebertaPreTrainedModel, TFMaskedLanguageModelingLoss):
    """DeBERTa encoder plus an MLM head tied to the input embeddings."""

    def __init__(self, config: DebertaConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        if config.is_decoder:
            # MLM uses bi-directional attention; warn if a decoder config slips in.
            logger.warning(
                "If you want to use `TFDebertaForMaskedLM` make sure `config.is_decoder=False` for "
                "bi-directional self-attention."
            )

        self.deberta = TFDebertaMainLayer(config, name="deberta")
        # The MLM head shares its decoder weights with the input embeddings.
        self.mlm = TFDebertaOnlyMLMHead(config, input_embeddings=self.deberta.embeddings, name="cls")

    def get_lm_head(self) -> keras.layers.Layer:
        return self.mlm.predictions

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFMaskedLMOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
            config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
            loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
        """
        outputs = self.deberta(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = outputs[0]
        prediction_scores = self.mlm(sequence_output=sequence_output, training=training)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=prediction_scores)

        if not return_dict:
            # NOTE(review): the tuple path slices `outputs[2:]` while the sequence
            # classification head below uses `outputs[1:]` — confirm which extras
            # the main layer's tuple output actually carries here.
            output = (prediction_scores,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TFMaskedLMOutput(
            loss=loss,
            logits=prediction_scores,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "deberta", None) is not None:
            with tf.name_scope(self.deberta.name):
                self.deberta.build(None)
        if getattr(self, "mlm", None) is not None:
            with tf.name_scope(self.mlm.name):
                self.mlm.build(None)
@add_start_docstrings(
    """
    DeBERTa Model transformer with a sequence classification/regression head on top (a linear layer on top of the
    pooled output) e.g. for GLUE tasks.
    """,
    DEBERTA_START_DOCSTRING,
)
class TFDebertaForSequenceClassification(TFDebertaPreTrainedModel, TFSequenceClassificationLoss):
    """DeBERTa encoder plus context pooler, dropout and a linear classification head."""

    def __init__(self, config: DebertaConfig, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)

        self.num_labels = config.num_labels

        self.deberta = TFDebertaMainLayer(config, name="deberta")
        self.pooler = TFDebertaContextPooler(config, name="pooler")

        # Classifier dropout falls back to the generic hidden dropout when the
        # config does not define `cls_dropout`.
        drop_out = getattr(config, "cls_dropout", None)
        drop_out = self.config.hidden_dropout_prob if drop_out is None else drop_out
        self.dropout = TFDebertaStableDropout(drop_out, name="cls_dropout")
        self.classifier = keras.layers.Dense(
            units=config.num_labels,
            kernel_initializer=get_initializer(config.initializer_range),
            name="classifier",
        )
        self.output_dim = self.pooler.output_dim

    @unpack_inputs
    @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=TFSequenceClassifierOutput,
        config_class=_CONFIG_FOR_DOC,
    )
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        token_type_ids: np.ndarray | tf.Tensor | None = None,
        position_ids: np.ndarray | tf.Tensor | None = None,
        inputs_embeds: np.ndarray | tf.Tensor | None = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        training: Optional[bool] = False,
    ) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
        r"""
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        outputs = self.deberta(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            training=training,
        )
        sequence_output = outputs[0]
        # Pool the token states, regularize, then project to num_labels logits.
        pooled_output = self.pooler(sequence_output, training=training)
        pooled_output = self.dropout(pooled_output, training=training)
        logits = self.classifier(pooled_output)
        loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)

        if not return_dict:
            output = (logits,) + outputs[1:]

            return ((loss,) + output) if loss is not None else output

        return TFSequenceClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
        )

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "deberta", None) is not None:
            with tf.name_scope(self.deberta.name):
                self.deberta.build(None)
        if getattr(self, "pooler", None) is not None:
            with tf.name_scope(self.pooler.name):
                self.pooler.build(None)
        if getattr(self, "dropout", None) is not None:
            with tf.name_scope(self.dropout.name):
                self.dropout.build(None)
        if getattr(self, "classifier", None) is not None:
            with tf.name_scope(self.classifier.name):
                self.classifier.build([None, None, self.output_dim])
  1217. @add_start_docstrings(
  1218. """
  1219. DeBERTa Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
  1220. Named-Entity-Recognition (NER) tasks.
  1221. """,
  1222. DEBERTA_START_DOCSTRING,
  1223. )
  1224. class TFDebertaForTokenClassification(TFDebertaPreTrainedModel, TFTokenClassificationLoss):
  1225. def __init__(self, config: DebertaConfig, *inputs, **kwargs):
  1226. super().__init__(config, *inputs, **kwargs)
  1227. self.num_labels = config.num_labels
  1228. self.deberta = TFDebertaMainLayer(config, name="deberta")
  1229. self.dropout = keras.layers.Dropout(rate=config.hidden_dropout_prob)
  1230. self.classifier = keras.layers.Dense(
  1231. units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
  1232. )
  1233. self.config = config
  1234. @unpack_inputs
  1235. @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
  1236. @add_code_sample_docstrings(
  1237. checkpoint=_CHECKPOINT_FOR_DOC,
  1238. output_type=TFTokenClassifierOutput,
  1239. config_class=_CONFIG_FOR_DOC,
  1240. )
  1241. def call(
  1242. self,
  1243. input_ids: TFModelInputType | None = None,
  1244. attention_mask: np.ndarray | tf.Tensor | None = None,
  1245. token_type_ids: np.ndarray | tf.Tensor | None = None,
  1246. position_ids: np.ndarray | tf.Tensor | None = None,
  1247. inputs_embeds: np.ndarray | tf.Tensor | None = None,
  1248. output_attentions: Optional[bool] = None,
  1249. output_hidden_states: Optional[bool] = None,
  1250. return_dict: Optional[bool] = None,
  1251. labels: np.ndarray | tf.Tensor | None = None,
  1252. training: Optional[bool] = False,
  1253. ) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
  1254. r"""
  1255. labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
  1256. Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
  1257. """
  1258. outputs = self.deberta(
  1259. input_ids=input_ids,
  1260. attention_mask=attention_mask,
  1261. token_type_ids=token_type_ids,
  1262. position_ids=position_ids,
  1263. inputs_embeds=inputs_embeds,
  1264. output_attentions=output_attentions,
  1265. output_hidden_states=output_hidden_states,
  1266. return_dict=return_dict,
  1267. training=training,
  1268. )
  1269. sequence_output = outputs[0]
  1270. sequence_output = self.dropout(sequence_output, training=training)
  1271. logits = self.classifier(inputs=sequence_output)
  1272. loss = None if labels is None else self.hf_compute_loss(labels=labels, logits=logits)
  1273. if not return_dict:
  1274. output = (logits,) + outputs[1:]
  1275. return ((loss,) + output) if loss is not None else output
  1276. return TFTokenClassifierOutput(
  1277. loss=loss,
  1278. logits=logits,
  1279. hidden_states=outputs.hidden_states,
  1280. attentions=outputs.attentions,
  1281. )
  1282. def build(self, input_shape=None):
  1283. if self.built:
  1284. return
  1285. self.built = True
  1286. if getattr(self, "deberta", None) is not None:
  1287. with tf.name_scope(self.deberta.name):
  1288. self.deberta.build(None)
  1289. if getattr(self, "classifier", None) is not None:
  1290. with tf.name_scope(self.classifier.name):
  1291. self.classifier.build([None, None, self.config.hidden_size])
  1292. @add_start_docstrings(
  1293. """
  1294. DeBERTa Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
  1295. layers on top of the hidden-states output to compute `span start logits` and `span end logits`).
  1296. """,
  1297. DEBERTA_START_DOCSTRING,
  1298. )
  1299. class TFDebertaForQuestionAnswering(TFDebertaPreTrainedModel, TFQuestionAnsweringLoss):
  1300. def __init__(self, config: DebertaConfig, *inputs, **kwargs):
  1301. super().__init__(config, *inputs, **kwargs)
  1302. self.num_labels = config.num_labels
  1303. self.deberta = TFDebertaMainLayer(config, name="deberta")
  1304. self.qa_outputs = keras.layers.Dense(
  1305. units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
  1306. )
  1307. self.config = config
  1308. @unpack_inputs
  1309. @add_start_docstrings_to_model_forward(DEBERTA_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
  1310. @add_code_sample_docstrings(
  1311. checkpoint=_CHECKPOINT_FOR_DOC,
  1312. output_type=TFQuestionAnsweringModelOutput,
  1313. config_class=_CONFIG_FOR_DOC,
  1314. )
  1315. def call(
  1316. self,
  1317. input_ids: TFModelInputType | None = None,
  1318. attention_mask: np.ndarray | tf.Tensor | None = None,
  1319. token_type_ids: np.ndarray | tf.Tensor | None = None,
  1320. position_ids: np.ndarray | tf.Tensor | None = None,
  1321. inputs_embeds: np.ndarray | tf.Tensor | None = None,
  1322. output_attentions: Optional[bool] = None,
  1323. output_hidden_states: Optional[bool] = None,
  1324. return_dict: Optional[bool] = None,
  1325. start_positions: np.ndarray | tf.Tensor | None = None,
  1326. end_positions: np.ndarray | tf.Tensor | None = None,
  1327. training: Optional[bool] = False,
  1328. ) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
  1329. r"""
  1330. start_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
  1331. Labels for position (index) of the start of the labelled span for computing the token classification loss.
  1332. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
  1333. are not taken into account for computing the loss.
  1334. end_positions (`tf.Tensor` or `np.ndarray` of shape `(batch_size,)`, *optional*):
  1335. Labels for position (index) of the end of the labelled span for computing the token classification loss.
  1336. Positions are clamped to the length of the sequence (`sequence_length`). Position outside of the sequence
  1337. are not taken into account for computing the loss.
  1338. """
  1339. outputs = self.deberta(
  1340. input_ids=input_ids,
  1341. attention_mask=attention_mask,
  1342. token_type_ids=token_type_ids,
  1343. position_ids=position_ids,
  1344. inputs_embeds=inputs_embeds,
  1345. output_attentions=output_attentions,
  1346. output_hidden_states=output_hidden_states,
  1347. return_dict=return_dict,
  1348. training=training,
  1349. )
  1350. sequence_output = outputs[0]
  1351. logits = self.qa_outputs(inputs=sequence_output)
  1352. start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
  1353. start_logits = tf.squeeze(input=start_logits, axis=-1)
  1354. end_logits = tf.squeeze(input=end_logits, axis=-1)
  1355. loss = None
  1356. if start_positions is not None and end_positions is not None:
  1357. labels = {"start_position": start_positions}
  1358. labels["end_position"] = end_positions
  1359. loss = self.hf_compute_loss(labels=labels, logits=(start_logits, end_logits))
  1360. if not return_dict:
  1361. output = (start_logits, end_logits) + outputs[2:]
  1362. return ((loss,) + output) if loss is not None else output
  1363. return TFQuestionAnsweringModelOutput(
  1364. loss=loss,
  1365. start_logits=start_logits,
  1366. end_logits=end_logits,
  1367. hidden_states=outputs.hidden_states,
  1368. attentions=outputs.attentions,
  1369. )
  1370. def build(self, input_shape=None):
  1371. if self.built:
  1372. return
  1373. self.built = True
  1374. if getattr(self, "deberta", None) is not None:
  1375. with tf.name_scope(self.deberta.name):
  1376. self.deberta.build(None)
  1377. if getattr(self, "qa_outputs", None) is not None:
  1378. with tf.name_scope(self.qa_outputs.name):
  1379. self.qa_outputs.build([None, None, self.config.hidden_size])