# coding=utf-8
# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TFRAG model implementation."""
from __future__ import annotations

import copy
from dataclasses import dataclass
from typing import List, Optional, Tuple, Union

import numpy as np
import tensorflow as tf

from ...configuration_utils import PretrainedConfig
from ...generation import TFLogitsProcessorList
from ...modeling_tf_utils import (
    TFCausalLanguageModelingLoss,
    TFModelInputType,
    TFPreTrainedModel,
    keras,
    shape_list,
    unpack_inputs,
)
from ...utils import ModelOutput, add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "RagConfig"
@dataclass
class TFRetrievAugLMMarginOutput(ModelOutput):
    """
    Base class for retriever-augmented marginalized model outputs.

    Args:
        loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss.
        logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
            each vocabulary token.
        past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
            sequence_length, embed_size_per_head)`.

            Contains precomputed hidden states (keys and values in the attention blocks) of the decoder that can be
            used (see `past_key_values` input) to speed up sequential decoding.
        doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
            Score between each retrieved document embedding (see `retrieved_doc_embeds`) and
            `question_encoder_last_hidden_state`.
        retrieved_doc_embeds (`tf.Tensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
            Embedded documents retrieved by the retriever. Used with `question_encoder_last_hidden_state` to compute
            the `doc_scores`.
        retrieved_doc_ids (`tf.Tensor` (int32) of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
            The indices of the embedded documents retrieved by the retriever.
        context_input_ids (`tf.Tensor` (int32) of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
            Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
            retriever.
        context_attention_mask (`tf.Tensor` (int32) of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
            Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
            retriever.
        question_encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden states at the output of the last layer (the pooled output) of the question encoder of
            the model.
        question_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
        question_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the question encoder, after the attention softmax, used to compute the weighted
            average in the self-attention heads.
        generator_enc_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden states at the output of the last layer of the generator encoder of the model.
        generator_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
        generator_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the generator encoder, after the attention softmax, used to compute the weighted
            average in the self-attention heads.
        generator_dec_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
        generator_dec_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the generator decoder, after the attention softmax, used to compute the weighted
            average in the self-attention heads.
    """

    loss: tf.Tensor | None = None
    logits: tf.Tensor = None
    past_key_values: List[tf.Tensor] | None = None
    doc_scores: tf.Tensor | None = None
    retrieved_doc_embeds: tf.Tensor | None = None
    retrieved_doc_ids: tf.Tensor | None = None
    context_input_ids: tf.Tensor | None = None
    context_attention_mask: tf.Tensor | None = None
    question_encoder_last_hidden_state: tf.Tensor | None = None
    question_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None
    question_enc_attentions: Tuple[tf.Tensor, ...] | None = None
    generator_enc_last_hidden_state: tf.Tensor | None = None
    generator_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None
    generator_enc_attentions: Tuple[tf.Tensor, ...] | None = None
    generator_dec_hidden_states: Tuple[tf.Tensor, ...] | None = None
    generator_dec_attentions: Tuple[tf.Tensor, ...] | None = None
@dataclass
class TFRetrievAugLMOutput(ModelOutput):
    """
    Args:
        logits (`tf.Tensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
            each vocabulary token.
        past_key_values (`List[tf.Tensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            List of `tf.Tensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size, num_heads,
            sequence_length, embed_size_per_head)`.

            Contains precomputed hidden states (keys and values in the attention blocks) of the decoder that can be
            used (see `past_key_values` input) to speed up sequential decoding.
        doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
            Score between each retrieved document embedding (see `retrieved_doc_embeds`) and
            `question_encoder_last_hidden_state`.
        retrieved_doc_embeds (`tf.Tensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
            Embedded documents retrieved by the retriever. Used with `question_encoder_last_hidden_state` to compute
            the `doc_scores`.
        retrieved_doc_ids (`tf.Tensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
            The indices of the embedded documents retrieved by the retriever.
        context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
            Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
            retriever.
        context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
            Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
            retriever.
        question_encoder_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden states at the output of the last layer (the pooled output) of the question encoder of
            the model.
        question_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
        question_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the question encoder, after the attention softmax, used to compute the weighted
            average in the self-attention heads.
        generator_enc_last_hidden_state (`tf.Tensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden states at the output of the last layer of the generator encoder of the model.
        generator_enc_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
        generator_enc_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the generator encoder, after the attention softmax, used to compute the weighted
            average in the self-attention heads.
        generator_dec_hidden_states (`tuple(tf.Tensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `tf.Tensor` (one for the output of the embeddings and one for the output of each layer) of shape
            `(batch_size, sequence_length, hidden_size)`.

            Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
        generator_dec_attentions (`tuple(tf.Tensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `tf.Tensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the generator decoder, after the attention softmax, used to compute the weighted
            average in the self-attention heads.
    """

    logits: tf.Tensor = None
    past_key_values: List[tf.Tensor] | None = None
    doc_scores: tf.Tensor | None = None
    retrieved_doc_embeds: tf.Tensor | None = None
    retrieved_doc_ids: tf.Tensor | None = None
    context_input_ids: tf.Tensor | None = None
    context_attention_mask: tf.Tensor | None = None
    question_encoder_last_hidden_state: tf.Tensor | None = None
    question_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None
    question_enc_attentions: Tuple[tf.Tensor, ...] | None = None
    generator_enc_last_hidden_state: tf.Tensor | None = None
    generator_enc_hidden_states: Tuple[tf.Tensor, ...] | None = None
    generator_enc_attentions: Tuple[tf.Tensor, ...] | None = None
    generator_dec_hidden_states: Tuple[tf.Tensor, ...] | None = None
    generator_dec_attentions: Tuple[tf.Tensor, ...] | None = None
class TFRagPreTrainedModel(TFPreTrainedModel):
    r"""
    RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP
    Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al.

    RAG is a retrieval-augmented model that encapsulates three components: a question encoder, a dataset retriever
    and a generator. The encoder and generator are trainable, while the retriever is just an indexed dataset.
    """

    config_class = RagConfig
    base_model_prefix = "rag"
    _keys_to_ignore_on_load_missing = [r"position_ids"]
    @classmethod
    def from_pretrained_question_encoder_generator(
        cls,
        question_encoder_pretrained_model_name_or_path: str = None,
        generator_pretrained_model_name_or_path: str = None,
        retriever: RagRetriever = None,
        *model_args,
        **kwargs,
    ) -> TFPreTrainedModel:
        r"""
        Instantiates a question encoder and a generator from one or two base classes of the library from pretrained
        model checkpoints.

        Params:
            question_encoder_pretrained_model_name_or_path (`str`, *optional*):
                Information necessary to initiate the question encoder. Can be either:

                    - A string with the *shortcut name* of a pretrained model to load from cache or download, e.g.,
                      `google-bert/bert-base-uncased`.
                    - A string with the *identifier name* of a pretrained model that was user-uploaded to our S3, e.g.,
                      `dbmdz/bert-base-german-cased`.
                    - A path to a *directory* containing model weights saved using
                      [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or url to a *pytorch index checkpoint file* (e.g, `./pt_model/`). In this case,
                      `question_encoder_from_pt` should be set to `True`.

            generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
                Information necessary to initiate the generator. Can be either:

                    - A string with the *shortcut name* of a pretrained model to load from cache or download, e.g.,
                      `google-t5/t5-small`.
                    - A string with the *identifier name* of a pretrained model that was user-uploaded to our S3, e.g.,
                      `facebook/bart-base`.
                    - A path to a *directory* containing model weights saved using
                      [`~TFPreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or url to a *pytorch checkpoint file* (e.g, `./pt_model/`). In this case,
                      `generator_from_pt` should be set to `True`.

            model_args (remaining positional arguments, *optional*):
                All remaining positional arguments will be passed to the underlying model's `__init__` method.
            retriever ([`RagRetriever`], *optional*):
                The retriever to use.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to update the configuration object (after it has been loaded) and initiate the model
                (e.g., `output_attentions=True`).

                - To update the question_encoder configuration, use the prefix *question_encoder_* for each
                  configuration parameter.
                - To update the generator configuration, use the prefix *generator_* for each configuration parameter.
                - To update the parent model configuration, do not use a prefix for each configuration parameter.

                Behaves differently depending on whether a `config` is provided or automatically loaded.

        Example:

        ```python
        >>> from transformers import RagRetriever, TFRagModel

        >>> # initialize a RAG from two pretrained models.
        >>> model = TFRagModel.from_pretrained_question_encoder_generator(
        ...     "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small"
        ... )
        >>> # alternatively, initializing from pytorch pretrained models can also be done
        >>> model = TFRagModel.from_pretrained_question_encoder_generator(
        ...     "facebook/dpr-question_encoder-single-nq-base",
        ...     "facebook/bart-base",
        ...     generator_from_pt=True,
        ...     question_encoder_from_pt=True,
        ... )

        >>> # saving model after fine-tuning
        >>> model.save_pretrained("./rag")

        >>> # load retriever
        >>> retriever = RagRetriever.from_pretrained(
        ...     "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
        ... )
        >>> # load fine-tuned model with retriever
        >>> model = TFRagModel.from_pretrained("./rag", retriever=retriever)
        ```"""
        kwargs_question_encoder = {
            argument[len("question_encoder_") :]: value
            for argument, value in kwargs.items()
            if argument.startswith("question_encoder_")
        }

        kwargs_generator = {
            argument[len("generator_") :]: value
            for argument, value in kwargs.items()
            if argument.startswith("generator_")
        }

        # remove question_encoder, generator kwargs from kwargs
        for key in kwargs_question_encoder.keys():
            del kwargs["question_encoder_" + key]
        for key in kwargs_generator.keys():
            del kwargs["generator_" + key]

        # Load and initialize the question_encoder and generator
        # The distinction between question_encoder and generator at the model level is made
        # by the value of the flag `is_generator` that we need to set correctly.
        question_encoder = kwargs_question_encoder.pop("model", None)
        if question_encoder is None:
            assert question_encoder_pretrained_model_name_or_path is not None, (
                "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to"
                " be defined"
            )

            from ..auto.modeling_tf_auto import TFAutoModel

            if "config" not in kwargs_question_encoder:
                from ..auto.configuration_auto import AutoConfig

                question_encoder_config = AutoConfig.from_pretrained(question_encoder_pretrained_model_name_or_path)
                kwargs_question_encoder["config"] = question_encoder_config

            question_encoder = TFAutoModel.from_pretrained(
                question_encoder_pretrained_model_name_or_path,
                name="question_encoder",
                load_weight_prefix=cls.load_weight_prefix,
                *model_args,
                **kwargs_question_encoder,
            )

        generator = kwargs_generator.pop("generator", None)
        if generator is None:
            assert generator_pretrained_model_name_or_path is not None, (
                "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has"
                " to be defined"
            )

            from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM

            if "config" not in kwargs_generator:
                from ..auto.configuration_auto import AutoConfig

                generator_config = AutoConfig.from_pretrained(generator_pretrained_model_name_or_path)
                kwargs_generator["config"] = generator_config

            generator = TFAutoModelForSeq2SeqLM.from_pretrained(
                generator_pretrained_model_name_or_path,
                name="generator",
                load_weight_prefix=cls.load_weight_prefix,
                **kwargs_generator,
            )

        # instantiate config with corresponding kwargs
        config = kwargs.get("config", None)
        if config is None:
            config = RagConfig.from_question_encoder_generator_configs(
                question_encoder.config, generator.config, **kwargs
            )

        return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever)
RAG_START_DOCSTRING = r"""

    RAG is a sequence-to-sequence model which encapsulates two core components: a question encoder and a generator.
    During a forward pass, we encode the input with the question encoder and pass it to the retriever to extract
    relevant context documents. The documents are then prepended to the input. Such contextualized inputs are passed
    to the generator.

    The question encoder can be any *autoencoding* model, preferably [`TFDPRQuestionEncoder`], and the generator can
    be any *seq2seq* model, preferably [`TFBartForConditionalGeneration`].

    The model can be initialized with a [`RagRetriever`] for end-to-end generation or used in combination with the
    outputs of a retriever in multiple steps---see examples for more details. The model is compatible with any
    *autoencoding* model as the `question_encoder` and any *seq2seq* model with a language model head as the
    `generator`. It has been tested with [`TFDPRQuestionEncoder`] as the `question_encoder` and
    [`TFBartForConditionalGeneration`] as the `generator`.

    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
    heads etc.)

    This model is also a Tensorflow [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model)
    subclass. Use it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to
    general usage and behavior.

    The model is in a developing state: it is currently fully supported in eager mode only, and may not be exported
    in SavedModel format.

    Args:
        config ([`RagConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
        question_encoder ([`TFPreTrainedModel`]):
            An encoder model compatible with the faiss index encapsulated by the `retriever`.
        generator ([`TFPreTrainedModel`]):
            A seq2seq model used as the generator in the RAG architecture.
        retriever ([`RagRetriever`]):
            A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.
"""
RAG_FORWARD_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies
            which generator to use; it also specifies a compatible generator tokenizer. Use that tokenizer class to
            obtain the indices.
        attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        encoder_outputs (`tuple(tuple(tf.Tensor))`, *optional*):
            Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`,
            *optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs *
            sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the
            generator's encoder.

            Used by the ([`TFRagModel`]) model during decoding.
        decoder_input_ids (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Provide for generation tasks. `None` by default, construct as per instructions for the generator model
            you're using with your RAG instance.
        decoder_attention_mask (`tf.Tensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. Causal mask will also
            be used by default.
        past_key_values (`tuple(tuple(tf.Tensor))`):
            Tuple consists of two elements: `encoder_outputs` of the RAG model (see `encoder_outputs`) and
            `past_key_values` of the underlying generator. Can be used to speed up decoding. `past_key_values` are
            used in the ([`RagTokenForGeneration`]) model during decoding.
        doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
            Score between each retrieved document embedding (see `retrieved_doc_embeds`) and
            `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `doc_scores`
            has to be provided to the forward pass. `doc_scores` can be computed via
            `question_encoder_last_hidden_state` and `retrieved_doc_embeds`, see examples for more information.
        context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
            Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
            retriever.

            If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the
            forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
        context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
            Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
            retriever.

            If the model is not initialized with a `retriever`, `context_attention_mask` has to be provided to the
            forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`].
        use_cache (`bool`, *optional*, defaults to `True`):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        output_retrieved (`bool`, *optional*):
            Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask`. See returned tensors for more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`TFRetrievAugLMOutput`] instead of a plain tuple.
        n_docs (`int`, *optional*, defaults to `config.n_docs`):
            Number of documents to retrieve and/or number of documents for which to generate an answer.
"""
@add_start_docstrings_to_model_forward(RAG_START_DOCSTRING)
class TFRagModel(TFRagPreTrainedModel):
    load_weight_prefix = "tf_rag_model_1"

    def __init__(
        self,
        config: Optional[PretrainedConfig] = None,
        question_encoder: Optional[TFPreTrainedModel] = None,
        generator: Optional[TFPreTrainedModel] = None,
        retriever: Optional[RagRetriever] = None,
        load_weight_prefix: Optional[str] = None,
        **kwargs,
    ):
        assert config is not None or (
            question_encoder is not None and generator is not None
        ), "Either a configuration or a question_encoder and a generator has to be provided."

        if config is None:
            config = RagConfig.from_question_encoder_generator_configs(
                question_encoder.config, generator.config, **kwargs
            )
        else:
            assert isinstance(config, self.config_class), f"config: {config} has to be of type {self.config_class}"
        super().__init__(config, **kwargs)

        if question_encoder is None:
            from ..auto.modeling_tf_auto import TFAutoModel

            question_encoder = TFAutoModel.from_config(config.question_encoder, name="question_encoder")

        if generator is None:
            from ..auto.modeling_tf_auto import TFAutoModelForSeq2SeqLM

            load_weight_prefix = load_weight_prefix if load_weight_prefix is not None else self.load_weight_prefix
            generator = TFAutoModelForSeq2SeqLM.from_config(
                config.generator, name="generator", load_weight_prefix=load_weight_prefix + "/generator"
            )

        self.retriever = retriever
        if self.retriever is not None:
            assert isinstance(
                retriever, RagRetriever
            ), f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`"
            self.retriever = retriever

        self.question_encoder = question_encoder
        self.generator = generator

    def set_retriever(self, retriever: RagRetriever):
        self.retriever = retriever
    @unpack_inputs
    @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFRetrievAugLMOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        encoder_outputs: np.ndarray | tf.Tensor | None = None,
        decoder_input_ids: np.ndarray | tf.Tensor | None = None,
        decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
        past_key_values: Tuple[Tuple[Union[np.ndarray, tf.Tensor]]] | None = None,
        doc_scores: np.ndarray | tf.Tensor | None = None,
        context_input_ids: np.ndarray | tf.Tensor | None = None,
        context_attention_mask: np.ndarray | tf.Tensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        output_retrieved: bool | None = None,
        n_docs: int | None = None,
        return_dict: bool | None = None,
        training: bool = False,
        **kwargs,
    ) -> TFRetrievAugLMOutput:
        r"""
        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, RagRetriever, TFRagModel

        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-base")
        >>> retriever = RagRetriever.from_pretrained(
        ...     "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
        ... )
        >>> # initialize with RagRetriever to do everything in one forward call
        >>> model = TFRagModel.from_pretrained("facebook/rag-token-base", retriever=retriever, from_pt=True)

        >>> input_dict = tokenizer.prepare_seq2seq_batch(
        ...     "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf"
        ... )
        >>> input_ids = input_dict["input_ids"]
        >>> outputs = model(input_ids)
        ```"""
        assert (
            "decoder_cached_states" not in kwargs
        ), "Please use past_key_values to cache intermediate outputs"  # from modeling_tf_bart.py

        # aliasing to minimize code changing
        n_docs = n_docs if n_docs is not None else self.config.n_docs

        # whether retriever has to be used
        has_to_retrieve = (
            self.retriever is not None
            and (context_input_ids is None or context_attention_mask is None or doc_scores is None)
            and encoder_outputs is None
        )

        # encoder_outputs are pre-computed during RAG-token generation
        if encoder_outputs is None:
            if has_to_retrieve:
                question_enc_outputs = self.question_encoder(
                    input_ids, attention_mask=attention_mask, return_dict=True, training=training
                )
                # see https://github.com/huggingface/transformers/blob/main/src/transformers/models/dpr/modeling_tf_dpr.py#L91
                question_encoder_last_hidden_state = question_enc_outputs[
                    0
                ]  # hidden states of question encoder => pooler_output

                retriever_outputs = self.retriever(
                    input_ids,
                    question_encoder_last_hidden_state.numpy(),
                    prefix=self.generator.config.prefix,
                    n_docs=n_docs,
                    return_tensors="tf",
                )
                context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = (
                    retriever_outputs["context_input_ids"],
                    retriever_outputs["context_attention_mask"],
                    retriever_outputs["retrieved_doc_embeds"],
                    retriever_outputs["doc_ids"],
                )

                context_input_ids = tf.cast(context_input_ids, tf.int32)
                context_attention_mask = tf.cast(context_attention_mask, tf.int32)
                retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32)
                retrieved_doc_ids = tf.cast(retrieved_doc_ids, tf.int32)

                # compute doc_scores
                doc_scores = tf.squeeze(
                    tf.matmul(
                        tf.expand_dims(question_encoder_last_hidden_state, axis=1),
                        retrieved_doc_embeds,
                        transpose_b=True,
                    ),
                    axis=1,
                )
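                # Shape sketch for the score computation above (illustrative, with hidden size d):
                #   question_encoder_last_hidden_state: (batch_size, d) -> expand_dims -> (batch_size, 1, d)
                #   retrieved_doc_embeds:               (batch_size, n_docs, d)
                #   matmul(..., transpose_b=True):      (batch_size, 1, n_docs) -> squeeze -> (batch_size, n_docs)
                # so doc_scores[i, j] is the inner product between question i and its j-th retrieved document.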
            else:
                assert context_input_ids is not None, (
                    "Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can"
                    " set a retriever using the `set_retriever(...)` function."
                )
                assert context_attention_mask is not None, (
                    "Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you"
                    " can set a retriever using the `set_retriever(...)` function."
                )
                assert doc_scores is not None, (
                    "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a"
                    " retriever using the `set_retriever(...)` function."
                )

        assert (
            doc_scores is not None
        ), "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function."

        assert (doc_scores.shape[1] % n_docs) == 0, (
            f" The second dimension of `doc_scores` should be a multiple of `n_docs`={n_docs}, but is"
            f" {doc_scores.shape[1]}."
        )

        # Decoder input without context documents
        if decoder_input_ids is not None:
            decoder_input_ids = tf.repeat(decoder_input_ids, n_docs, axis=0)

        if decoder_attention_mask is not None:
            decoder_attention_mask = tf.repeat(decoder_attention_mask, n_docs, axis=0)
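        # Each target row is repeated n_docs times so the generator scores the same target against every
        # retrieved context: decoder_input_ids of shape (batch_size, tgt_len) becomes
        # (batch_size * n_docs, tgt_len), matching the layout of `context_input_ids`.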
        gen_outputs = self.generator(
            context_input_ids,
            attention_mask=context_attention_mask,
            encoder_outputs=encoder_outputs,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            return_dict=True,
            training=training,
        )

        if not has_to_retrieve:
            question_encoder_last_hidden_state = None
            question_enc_hidden_states = None
            question_enc_attentions = None
            retrieved_doc_embeds = None
            retrieved_doc_ids = None
        else:
            question_enc_hidden_states = question_enc_outputs.hidden_states
            question_enc_attentions = question_enc_outputs.attentions

        if not has_to_retrieve or not output_retrieved:
            # don't output retrieved docs
            context_input_ids = None
            context_attention_mask = None
            retrieved_doc_embeds = None
            retrieved_doc_ids = None

        return TFRetrievAugLMOutput(
            logits=gen_outputs.logits,
            doc_scores=doc_scores,
            past_key_values=gen_outputs.past_key_values,
            context_input_ids=context_input_ids,
            context_attention_mask=context_attention_mask,
            retrieved_doc_embeds=retrieved_doc_embeds,
            retrieved_doc_ids=retrieved_doc_ids,
            question_encoder_last_hidden_state=question_encoder_last_hidden_state,
            question_enc_hidden_states=question_enc_hidden_states,
            question_enc_attentions=question_enc_attentions,
            generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state,
            generator_enc_hidden_states=gen_outputs.encoder_hidden_states,
            generator_enc_attentions=gen_outputs.encoder_attentions,
            generator_dec_hidden_states=gen_outputs.decoder_hidden_states,
            generator_dec_attentions=gen_outputs.decoder_attentions,
        )
    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        with tf.name_scope(self.generator.name):
            self.generator.build(None)
        with tf.name_scope(self.question_encoder.name):
            self.question_encoder.build(None)


@add_start_docstrings_to_model_forward(
    """
    A TF RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass.
    """,
    RAG_START_DOCSTRING,
)
class TFRagTokenForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss):
    load_weight_prefix = "tf_rag_token_for_generation_1/rag"
    def __init__(
        self,
        config: Optional[PretrainedConfig] = None,
        question_encoder: Optional[TFPreTrainedModel] = None,
        generator: Optional[TFPreTrainedModel] = None,
        retriever: Optional[RagRetriever] = None,
        **kwargs,
    ):
        assert config is not None or (
            question_encoder is not None and generator is not None
        ), "Either a configuration or an encoder and a generator has to be provided."

        if config is None:
            config = RagConfig.from_question_encoder_generator_configs(
                question_encoder.config, generator.config, **kwargs
            )

        super().__init__(config)

        # instantiate model
        self.rag = TFRagModel(
            config=config,
            question_encoder=question_encoder,
            generator=generator,
            retriever=retriever,
            load_weight_prefix=self.load_weight_prefix,
            name="rag",
        )

    def set_retriever(self, retriever: RagRetriever):
        self.rag.retriever = retriever

    # Adapted from https://github.com/huggingface/transformers/blob/main/src/transformers/modeling_tf_bart.py
    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past_key_values=None,
        attention_mask=None,
        use_cache=None,
        encoder_outputs=None,
        doc_scores=None,
        n_docs=None,
        **kwargs,
    ):
        if past_key_values is not None:
            # if past is defined use only last decoder_input_ids
            decoder_input_ids = decoder_input_ids[:, -1:]

        return {
            "input_ids": None,
            "encoder_outputs": encoder_outputs,
            "doc_scores": doc_scores,
            "context_attention_mask": attention_mask,
            "decoder_input_ids": decoder_input_ids,
            "past_key_values": past_key_values,
            "use_cache": use_cache,
            "do_marginalize": True,
            "n_docs": n_docs,
        }
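    # During cached decoding, only the freshly generated token is fed back in: with past_key_values
    # set, decoder_input_ids is truncated to its last column, while the encoder outputs and
    # doc_scores computed on the first step are simply reused on every subsequent step.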
    @property
    def retriever(self):
        return self.rag.retriever

    @property
    def generator(self):
        return self.rag.generator

    @property
    def question_encoder(self):
        return self.rag.question_encoder
    @staticmethod
    def _gather_beams(nested, beam_indices, batch_axis=0):
        """
        RAG-specific `_gather_beams`: gathers the beam slices indexed by `beam_indices` into a new beam array. If the
        nested tensor has a shape mismatch with the beam indices, then it means it is the cache. In that case, it
        isolates and takes care of the extra dimension for `n_docs`.
        """

        def gather_fn(tensor):
            is_rag_cache = tensor.shape[0] != beam_indices.shape[0]
            if is_rag_cache:
                n_docs = tensor.shape[0] // beam_indices.shape[0]
                batch_size = beam_indices.shape[0]
                # reshapes into (batch size, num beams, n_docs, ...), the cache format expected by RAG
                tensor = tf.reshape(tensor, (batch_size, -1, n_docs, *tensor.shape[2:]))

            gathered_tensor = tf.gather(params=tensor, indices=beam_indices, axis=1, batch_dims=1)

            if is_rag_cache:
                # reshapes back into the shape expected by beam search
                gathered_tensor = tf.reshape(gathered_tensor, (batch_size * n_docs, -1, *gathered_tensor.shape[3:]))

            return gathered_tensor

        return tf.nest.map_structure(gather_fn, nested)
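    # Illustrative shapes for the cache branch above (matching the inline comments): with
    # batch_size=2, num_beams=3 and n_docs=5, a cache tensor has leading dimension 2 * 5 = 10
    # while `beam_indices` has leading dimension 2, so `gather_fn` reshapes the cache to
    # (2, 3, 5, ...), gathers the selected beams along axis 1, and flattens the result back to
    # a (10, 3, ...)-shaped tensor in the layout beam search expects.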
    def marginalize(self, seq_logits, doc_scores, n_docs=None):
        n_docs = n_docs if n_docs is not None else self.config.n_docs

        # RAG-token marginalization
        seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1)
        seq_logprobs = tf.reshape(seq_logprobs, [seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1]])
        doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1)
        doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1)
        doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1)  # twice

        log_prob_sum = seq_logprobs + doc_logprobs
        return tf.reduce_logsumexp(log_prob_sum, axis=1)
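    # A minimal numeric sketch of the marginalization above (illustrative only): with n_docs=2 and
    # uniform doc scores, a token with per-document log-probs l_0 and l_1 receives the marginal
    # log-prob logsumexp([l_0 + log 0.5, l_1 + log 0.5]) = log(0.5 * exp(l_0) + 0.5 * exp(l_1)),
    # i.e. p(y_t | x) = sum_i p(doc_i | x) * p(y_t | x, doc_i), computed per token in log space.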
    @unpack_inputs
    @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        decoder_input_ids: np.ndarray | tf.Tensor | None = None,
        decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
        encoder_outputs: np.ndarray | tf.Tensor | None = None,
        past_key_values: Tuple[Tuple[Union[np.ndarray, tf.Tensor]]] | None = None,
        doc_scores: np.ndarray | tf.Tensor | None = None,
        context_input_ids: np.ndarray | tf.Tensor | None = None,
        context_attention_mask: np.ndarray | tf.Tensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        output_retrieved: bool | None = None,
        n_docs: int | None = None,
        do_marginalize: bool | None = None,
        labels: np.ndarray | tf.Tensor | None = None,
        reduce_loss: bool | None = None,
        return_dict: bool | None = None,
        training: bool = False,
        **kwargs,  # needs kwargs for generation
    ) -> TFRetrievAugLMMarginOutput:
        r"""
        do_marginalize (`bool`, *optional*):
            If `True`, the logits are marginalized over all documents by making use of
            `tf.nn.log_softmax`.
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the cross entropy classification loss according to the Rag-Token model formulation.
            See https://arxiv.org/pdf/2005.11401.pdf Section 2.1 for details about the Rag-Token formulation. Indices
            should be in `[0, ..., config.vocab_size - 1]`.
        reduce_loss (`bool`, *optional*):
            Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `tf.Tensor.sum`
            operation.
        kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
            Legacy dictionary, which is required so that the model can use the *generate()* function.

        Returns:

        Example:

        ```python
        >>> import tensorflow as tf
        >>> from transformers import AutoTokenizer, RagRetriever, TFRagTokenForGeneration

        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq")
        >>> retriever = RagRetriever.from_pretrained(
        ...     "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
        ... )
        >>> # initialize with RagRetriever to do everything in one forward call
        >>> model = TFRagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever, from_pt=True)

        >>> input_dict = tokenizer.prepare_seq2seq_batch(
        ...     "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf"
        ... )
        >>> outputs = model(input_dict, output_retrieved=True)

        >>> # or use retriever separately
        >>> # 1. Encode
        >>> input_ids = input_dict["input_ids"]
        >>> question_hidden_states = model.question_encoder(input_ids)[0]
        >>> # 2. Retrieve
        >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf")
        >>> doc_scores = tf.squeeze(
        ...     tf.matmul(
        ...         tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True
        ...     ),
        ...     axis=1,
        ... )
        >>> # 3. Forward to generator
        >>> outputs = model(
        ...     inputs=None,
        ...     context_input_ids=docs_dict["context_input_ids"],
        ...     context_attention_mask=docs_dict["context_attention_mask"],
        ...     doc_scores=doc_scores,
        ...     decoder_input_ids=input_dict["labels"],
        ... )

        >>> # or directly generate
        >>> generated = model.generate(
        ...     context_input_ids=docs_dict["context_input_ids"],
        ...     context_attention_mask=docs_dict["context_attention_mask"],
        ...     doc_scores=doc_scores,
        ... )
        >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)
        ```"""
        assert (
            "decoder_cached_states" not in kwargs
        ), "Please use past_key_values to cache intermediate outputs"  # from modeling_tf_bart.py

        do_marginalize = do_marginalize if do_marginalize else self.config.do_marginalize
        reduce_loss = reduce_loss if reduce_loss else self.config.reduce_loss

        if labels is not None:
            if decoder_input_ids is None:
                decoder_input_ids = labels
            use_cache = False
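            # With labels provided, the labels are reused as decoder inputs (teacher forcing) and
            # caching is disabled, since the whole target sequence is scored in a single pass.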
        outputs = self.rag(
            input_ids,
            attention_mask=attention_mask,
            encoder_outputs=encoder_outputs,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            context_input_ids=context_input_ids,
            context_attention_mask=context_attention_mask,
            doc_scores=doc_scores,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            output_retrieved=output_retrieved,
            n_docs=n_docs,
            training=training,
        )

        loss = None
        logits = outputs.logits
        if labels is not None:
            assert decoder_input_ids is not None
            loss = self.get_nll(
                outputs.logits,
                outputs.doc_scores,
                labels,
                reduce_loss=reduce_loss,
                epsilon=self.config.label_smoothing,
                n_docs=n_docs,
            )

        if do_marginalize:
            logits = self.marginalize(logits, outputs.doc_scores, n_docs)

        return TFRetrievAugLMMarginOutput(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            doc_scores=outputs.doc_scores,
            context_input_ids=outputs.context_input_ids,
            context_attention_mask=outputs.context_attention_mask,
            retrieved_doc_embeds=outputs.retrieved_doc_embeds,
            retrieved_doc_ids=outputs.retrieved_doc_ids,
            question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
            question_enc_hidden_states=outputs.question_enc_hidden_states,
            question_enc_attentions=outputs.question_enc_attentions,
            generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
            generator_enc_hidden_states=outputs.generator_enc_hidden_states,
            generator_enc_attentions=outputs.generator_enc_attentions,
            generator_dec_hidden_states=outputs.generator_dec_hidden_states,
            generator_dec_attentions=outputs.generator_dec_attentions,
        )
    def generate(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: tf.Tensor | None = None,
        context_input_ids=None,
        context_attention_mask=None,
        doc_scores=None,
        n_docs=None,
        generation_config=None,
        logits_processor=TFLogitsProcessorList(),
        **kwargs,
    ):
        """
        Implements TFRAG token decoding.

        Args:
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                The sequence used as a prompt for the generation. If `input_ids` is not passed, then
                `context_input_ids` has to be provided.
            attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
                Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
                retriever.

                If the model is not initialized with a `retriever`, `context_input_ids` has to be provided to the
                forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
            context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
                Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by
                the retriever.

                If the model is not initialized with a `retriever`, `context_attention_mask` has to be provided to
                the forward pass. `context_attention_mask` are returned by [`~RagRetriever.__call__`].
            doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
                Score between each retrieved document embedding (see `retrieved_doc_embeds`) and
                `question_encoder_last_hidden_state`.

                If the model is not initialized with a `retriever`, `doc_scores` has to be provided to the forward
                pass. `doc_scores` are returned by [`~RagRetriever.__call__`].
            n_docs (`int`, *optional*, defaults to `config.n_docs`):
                Number of documents to retrieve and/or number of documents for which to generate an answer.
            generation_config (`~generation.GenerationConfig`, *optional*):
                The generation configuration to be used as base parametrization for the generation call. `**kwargs`
                passed to generate matching the attributes of `generation_config` will override them. If
                `generation_config` is not provided, the default will be used, which has the following loading
                priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
                configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
                default values, whose documentation should be checked to parameterize generation.
            logits_processor (`TFLogitsProcessorList`, *optional*):
                Custom logits processors that complement the default logits processors built from arguments and a
                model's config. If a logits processor is passed that is already created with the arguments or a
                model's config, an error is thrown.
            kwargs (`Dict[str, Any]`, *optional*):
                Ad hoc parametrization of `generation_config` and/or additional model-specific kwargs that will be
                forwarded to the `call` function of the model.

        Return:
            `tf.Tensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The
            second dimension (sequence_length) is either equal to `max_length` or shorter if all batches finished
            early due to the `eos_token_id`.
        """
        # Handle `generation_config` and kwargs that might update it
        if generation_config is None:
            generation_config = self.generation_config
        generation_config = copy.deepcopy(generation_config)
        model_kwargs = generation_config.update(**kwargs)  # All unused kwargs must be model kwargs

        # set default parameters
        n_docs = n_docs if n_docs is not None else self.config.n_docs

        # retrieve docs
        if self.retriever is not None and context_input_ids is None:
            question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
            out = self.retriever(
                input_ids,
                question_hidden_states.numpy().astype(np.float32),
                prefix=self.generator.config.prefix,
                n_docs=n_docs,
                return_tensors="tf",
            )
            context_input_ids, context_attention_mask, retrieved_doc_embeds = (
                out["context_input_ids"],
                out["context_attention_mask"],
                out["retrieved_doc_embeds"],
            )

            context_input_ids = tf.cast(context_input_ids, tf.int32)
            context_attention_mask = tf.cast(context_attention_mask, tf.int32)
            retrieved_doc_embeds = tf.cast(retrieved_doc_embeds, tf.float32)

            # compute doc_scores
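            # question_hidden_states: (batch_size, hidden); retrieved_doc_embeds: (batch_size, n_docs, hidden).
            # Expanding the question states to (batch_size, 1, hidden) and matmul'ing against the transposed
            # document embeddings gives (batch_size, 1, n_docs) inner-product scores, squeezed to (batch_size, n_docs).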
            doc_scores = tf.matmul(
                tf.expand_dims(question_hidden_states, axis=1), retrieved_doc_embeds, transpose_b=True
            )
            doc_scores = tf.squeeze(doc_scores, axis=1)

        assert (context_input_ids.shape[0] % n_docs) == 0, (
            f" The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is"
            f" {context_input_ids.shape[0]}."
        )
        batch_size = context_input_ids.shape[0] // n_docs

        encoder = self.rag.generator.get_encoder()
        encoder_outputs = encoder(
            input_ids=context_input_ids,
            attention_mask=context_attention_mask,
            output_attentions=generation_config.output_attentions,
            output_hidden_states=generation_config.output_hidden_states,
            return_dict=True,
        )

        decoder_input_ids = tf.fill(
            (batch_size * generation_config.num_beams, 1),
            tf.cast(generation_config.decoder_start_token_id, tf.int32),
        )
        last_hidden_state = encoder_outputs["last_hidden_state"]

        def extend_enc_output(tensor, num_beams=None):
            """
            Broadcast tensor with `num_beams` replicas, preserving document order.

            Input: tensor of shape (batch_size * n_docs, d)
            Output: tensor of shape (batch_size * num_beams * n_docs, d)
            """
            # expand batch_size & num_beams dimensions
            d_shape_list = tensor.shape[1:]

            # split n_docs dimensions
            new_shape = (batch_size, 1, n_docs) + d_shape_list
            tensor = tf.reshape(tensor, new_shape)

            # repeat same last hidden states over `num_beams` dimension
            new_shape = (batch_size, num_beams, n_docs) + d_shape_list
            tensor = tf.broadcast_to(tensor, new_shape)

            # merge `batch_size`, `num_beams`, `n_docs` dims again
            new_shape = (batch_size * num_beams * n_docs,) + d_shape_list
            return tf.reshape(tensor, new_shape)
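        # e.g. with batch_size=2, num_beams=3, n_docs=5: (10, d) -> (30, d), where each batch
        # element's block of 5 document encodings is repeated 3 times (once per beam) before
        # the next batch element's block starts.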
        # correctly extend last_hidden_state and attention mask
        context_attention_mask = extend_enc_output(context_attention_mask, num_beams=generation_config.num_beams)
        encoder_outputs["last_hidden_state"] = extend_enc_output(
            last_hidden_state, num_beams=generation_config.num_beams
        )

        doc_scores = tf.repeat(doc_scores, generation_config.num_beams, axis=0)

        # define start_len & additional parameters
        model_kwargs["doc_scores"] = doc_scores
        model_kwargs["encoder_outputs"] = encoder_outputs
        model_kwargs["attention_mask"] = context_attention_mask
        model_kwargs["n_docs"] = n_docs

        pre_processor = self._get_logits_processor(
            generation_config=generation_config,
            input_ids_seq_length=tf.shape(decoder_input_ids)[-1],
            logits_processor=logits_processor,
        )
        if generation_config.num_beams == 1:
            return self.greedy_search(
                input_ids=decoder_input_ids,
                max_length=generation_config.max_length,
                pad_token_id=generation_config.pad_token_id,
                eos_token_id=generation_config.eos_token_id,
                logits_processor=pre_processor,
                output_attentions=generation_config.output_attentions,
                output_hidden_states=generation_config.output_hidden_states,
                output_scores=generation_config.output_scores,
                return_dict_in_generate=generation_config.return_dict_in_generate,
                **model_kwargs,
            )
        elif generation_config.num_beams > 1:
            if generation_config.num_beams < generation_config.num_return_sequences:
                raise ValueError(
                    "Beam search decoding cannot return more sequences than it has beams. Please set num_beams >="
                    f" num_return_sequences, got {generation_config.num_beams} and"
                    f" {generation_config.num_return_sequences} (respectively)"
                )

            def unflatten_beam_dim(tensor):
                """Unflattens the first, flat batch*beam dimension of a non-scalar array."""
                shape = shape_list(tensor)
                return tf.reshape(tensor, [-1, generation_config.num_beams] + shape[1:])

            decoder_input_ids = unflatten_beam_dim(decoder_input_ids)
            model_kwargs["attention_mask"] = unflatten_beam_dim(model_kwargs["attention_mask"])
            model_kwargs["encoder_outputs"]["last_hidden_state"] = unflatten_beam_dim(
                model_kwargs["encoder_outputs"]["last_hidden_state"]
            )

            return self.beam_search(
                input_ids=decoder_input_ids,
                max_length=generation_config.max_length,
                pad_token_id=generation_config.pad_token_id,
                eos_token_id=generation_config.eos_token_id,
                logits_processor=pre_processor,
                output_attentions=generation_config.output_attentions,
                output_hidden_states=generation_config.output_hidden_states,
                output_scores=generation_config.output_scores,
                return_dict_in_generate=generation_config.return_dict_in_generate,
                **model_kwargs,
            )
        else:
            raise ValueError(
                f"`num_beams` has to be a strictly positive integer (≥ 1), but is {generation_config.num_beams}"
            )

    def get_input_embeddings(self):
        return self.rag.generator.get_input_embeddings()

    def get_output_embeddings(self):
        return self.rag.generator.get_output_embeddings()

    # Adapted from tf_t5's & tf_bart's _shift_right
    def shift_tokens_right(self, input_ids, start_token_id=None):
        """Shift input ids one token to the right, and pad with start_token_id"""
        if start_token_id is None:
            start_token_id = self.generator.config.decoder_start_token_id
            assert start_token_id is not None, (
                "self.generator.config.decoder_start_token_id has to be defined. In Rag we commonly use Bart as"
                " generator, see Bart docs for more information"
            )

        pad_token_id = self.generator.config.pad_token_id
        assert pad_token_id is not None, "self.generator.config.pad_token_id has to be defined."

        start_tokens = tf.fill((shape_list(input_ids)[0], 1), tf.cast(start_token_id, input_ids.dtype))
        shifted_input_ids = tf.concat([start_tokens, input_ids[:, :-1]], -1)

        # replace possible -100 values in labels by `pad_token_id`
        shifted_input_ids = tf.where(
            shifted_input_ids == -100,
            tf.fill(shape_list(shifted_input_ids), tf.cast(pad_token_id, input_ids.dtype)),
            shifted_input_ids,
        )

        # Verify that the shifted ids contain only non-negative values
        assert_gte0 = tf.debugging.assert_greater_equal(shifted_input_ids, tf.cast(0, shifted_input_ids.dtype))

        # Make sure the assertion op is called by wrapping the result in an identity no-op
        with tf.control_dependencies([assert_gte0]):
            shifted_input_ids = tf.identity(shifted_input_ids)

        return shifted_input_ids

    # nll stands for 'negative log likelihood'
    def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None):
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        # shift tokens left (as in the original PyTorch version)
        target = tf.concat(
            [target[:, 1:], tf.fill([target.shape[0], 1], tf.cast(self.config.generator.pad_token_id, target.dtype))],
            axis=1,
        )
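        # e.g. target [[bos, y1, y2]] -> [[y1, y2, pad]], so that position i of the logits is
        # scored against the token the decoder should emit next.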
        rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs)
        loss = self.hf_compute_loss(target, rag_logprobs, from_logits=True, reduce_loss=reduce_loss)

        return loss

    # Adapted from modeling_tf_bart; adds smooth_loss to match the PyTorch version
    def hf_compute_loss(self, labels, y_pred, smooth_epsilon=0.0, from_logits=True, reduce_loss=False):
        """CrossEntropyLoss that ignores pad tokens"""
        # Matt: As written, this loss is not XLA-compatible, but it's doing some very weird things
        # and I don't feel comfortable converting it.
        loss_fn = keras.losses.SparseCategoricalCrossentropy(
            from_logits=True,
            reduction=keras.losses.Reduction.SUM,
        )

        if from_logits is False:  # convert to logits
            eps = 1e-9
            y_pred = tf.clip_by_value(y_pred, clip_value_min=eps, clip_value_max=1 - eps)
            y_pred = tf.math.log(y_pred)

        logits = y_pred
        melted_labels = tf.reshape(labels, (-1,))
        active_loss = tf.not_equal(melted_labels, self.config.generator.pad_token_id)

        reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, logits.shape[2])), active_loss)
        labels = tf.boolean_mask(melted_labels, active_loss)
        nll_loss = loss_fn(labels, reduced_logits)

        smooth_loss = -tf.reduce_sum(reduced_logits, axis=-1)
        smooth_loss = tf.reduce_sum(smooth_loss)  # sum and squeeze like torch
        eps_i = smooth_epsilon / reduced_logits.shape[-1]
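        # Label smoothing: interpolate between the gold-token NLL and the average negative
        # log-probability over the whole vocabulary (eps_i = smooth_epsilon / vocab_size),
        # weighted by smooth_epsilon.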
        loss = (1.0 - smooth_epsilon) * nll_loss + eps_i * smooth_loss

        return loss

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "rag", None) is not None:
            with tf.name_scope(self.rag.name):
                self.rag.build(None)


@add_start_docstrings_to_model_forward(
    """
    A TF RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass.
    """,
    RAG_START_DOCSTRING,
)
class TFRagSequenceForGeneration(TFRagPreTrainedModel, TFCausalLanguageModelingLoss):
    load_weight_prefix = "tf_rag_sequence_for_generation_1/rag"

    def __init__(
        self,
        config: Optional[PretrainedConfig] = None,
        question_encoder: Optional[TFPreTrainedModel] = None,
        generator: Optional[TFPreTrainedModel] = None,
        retriever: Optional[RagRetriever] = None,
        **kwargs,
    ):
        assert config is not None or (
            question_encoder is not None and generator is not None
        ), "Either a configuration or an encoder and a generator has to be provided."

        if config is None:
            config = RagConfig.from_question_encoder_generator_configs(
                question_encoder.config, generator.config, **kwargs
            )

        super().__init__(config)

        # instantiate model
        self.rag = TFRagModel(
            config=config,
            question_encoder=question_encoder,
            generator=generator,
            retriever=retriever,
            load_weight_prefix=self.load_weight_prefix,
            name="rag",
        )

    def set_retriever(self, retriever: RagRetriever):
        self.rag.retriever = retriever

    @property
    def retriever(self):
        return self.rag.retriever

    @property
    def generator(self):
        return self.rag.generator

    @property
    def question_encoder(self):
        return self.rag.question_encoder

    @unpack_inputs
    @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=TFRetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
    def call(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: np.ndarray | tf.Tensor | None = None,
        decoder_input_ids: np.ndarray | tf.Tensor | None = None,
        decoder_attention_mask: np.ndarray | tf.Tensor | None = None,
        encoder_outputs: np.ndarray | tf.Tensor | None = None,
        past_key_values: Optional[Tuple[Tuple[Union[np.ndarray, tf.Tensor]]]] = None,
        doc_scores: np.ndarray | tf.Tensor | None = None,
        context_input_ids: np.ndarray | tf.Tensor | None = None,
        context_attention_mask: np.ndarray | tf.Tensor | None = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_retrieved: Optional[bool] = None,
        n_docs: Optional[int] = None,
        exclude_bos_score: Optional[bool] = None,
        labels: np.ndarray | tf.Tensor | None = None,
        reduce_loss: Optional[bool] = None,
        return_dict: Optional[bool] = None,
        training: bool = False,
        **kwargs,  # needs kwargs for generation
    ) -> Union[Tuple[tf.Tensor], TFRetrievAugLMMarginOutput]:
        r"""
        exclude_bos_score (`bool`, *optional*):
            Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing
            the loss.
        labels (`tf.Tensor` or `np.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the cross entropy classification loss according to the RAG-Sequence model
            formulation. See https://arxiv.org/pdf/2005.11401.pdf, Section 2.1, for details about the RAG-Sequence
            formulation. Indices should be in `[0, ..., config.vocab_size - 1]`.
        reduce_loss (`bool`, *optional*):
            Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `tf.Tensor.sum`
            operation.
        kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
            Legacy dictionary, required so that the model can use the *generate()* function.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, RagRetriever, TFRagSequenceForGeneration

        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq")
        >>> retriever = RagRetriever.from_pretrained(
        ...     "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
        ... )
        >>> # initialize with RagRetriever to do everything in one forward call
        >>> model = TFRagSequenceForGeneration.from_pretrained(
        ...     "facebook/rag-sequence-nq", retriever=retriever, from_pt=True
        ... )

        >>> input_dict = tokenizer.prepare_seq2seq_batch(
        ...     "How many people live in Paris?", "In Paris, there are 10 million people.", return_tensors="tf"
        ... )
        >>> outputs = model(input_dict, output_retrieved=True)

        >>> # or use retriever separately
        >>> # 1. Encode
        >>> input_ids = input_dict["input_ids"]
        >>> question_hidden_states = model.question_encoder(input_ids)[0]
        >>> # 2. Retrieve
        >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.numpy(), return_tensors="tf")
        >>> doc_scores = tf.squeeze(
        ...     tf.matmul(
        ...         tf.expand_dims(question_hidden_states, axis=1), docs_dict["retrieved_doc_embeds"], transpose_b=True
        ...     ),
        ...     axis=1,
        ... )
        >>> # 3. Forward to generator
        >>> outputs = model(
        ...     inputs=None,
        ...     context_input_ids=docs_dict["context_input_ids"],
        ...     context_attention_mask=docs_dict["context_attention_mask"],
        ...     doc_scores=doc_scores,
        ...     decoder_input_ids=input_dict["labels"],
        ... )

        >>> # or directly generate
        >>> generated = model.generate(
        ...     context_input_ids=docs_dict["context_input_ids"],
        ...     context_attention_mask=docs_dict["context_attention_mask"],
        ...     doc_scores=doc_scores,
        ... )
        >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)
        ```"""
        assert (
            "decoder_cached_states" not in kwargs
        ), "Please use past_key_values to cache intermediate outputs"  # from modeling_tf_bart.py

        exclude_bos_score = exclude_bos_score if exclude_bos_score is not None else self.config.exclude_bos_score
        reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss

        if labels is not None:
            if decoder_input_ids is None:
                decoder_input_ids = labels
            use_cache = False
        outputs = self.rag(
            input_ids,
            attention_mask=attention_mask,
            encoder_outputs=encoder_outputs,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            context_input_ids=context_input_ids,
            context_attention_mask=context_attention_mask,
            doc_scores=doc_scores,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            output_retrieved=output_retrieved,
            n_docs=n_docs,
            training=training,
        )

        loss = None
        if labels is not None:
            loss = self.get_nll(
                outputs.logits,
                outputs.doc_scores,
                labels,
                reduce_loss=reduce_loss,
                epsilon=self.config.label_smoothing,
                exclude_bos_score=exclude_bos_score,
                n_docs=n_docs,
            )
        return TFRetrievAugLMMarginOutput(
            loss=loss,
            logits=outputs.logits,
            doc_scores=outputs.doc_scores,
            past_key_values=outputs.past_key_values,
            context_input_ids=outputs.context_input_ids,
            context_attention_mask=outputs.context_attention_mask,
            retrieved_doc_embeds=outputs.retrieved_doc_embeds,
            retrieved_doc_ids=outputs.retrieved_doc_ids,
            question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
            question_enc_hidden_states=outputs.question_enc_hidden_states,
            question_enc_attentions=outputs.question_enc_attentions,
            generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
            generator_enc_hidden_states=outputs.generator_enc_hidden_states,
            generator_enc_attentions=outputs.generator_enc_attentions,
            generator_dec_hidden_states=outputs.generator_dec_hidden_states,
            generator_dec_attentions=outputs.generator_dec_attentions,
        )

    def get_nll(
        self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None
    ):
        # shift tokens left
        target = tf.concat(
            [target[:, 1:], tf.fill([target.shape[0], 1], tf.cast(self.config.generator.pad_token_id, target.dtype))],
            axis=1,
        )

        # bos_token_id is None for T5
        bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        # short-circuit so tf.equal is never evaluated with a `None` bos_token_id (e.g. for T5)
        use_bos = bos_token_id is not None and tf.reduce_all(tf.equal(target[:, 0], bos_token_id))

        def _mask_pads(ll, smooth_obj):
            pad_mask = tf.equal(target, tf.cast(self.config.generator.pad_token_id, target.dtype))
            if tf.reduce_any(pad_mask):
                ll = tf.where(pad_mask, 0.0, ll)
                smooth_obj = tf.where(pad_mask, 0.0, smooth_obj)
            return tf.squeeze(ll, axis=-1), tf.squeeze(smooth_obj, axis=-1)
        # seq_logits.shape = (batch_size * n_docs, tgt_len, vocab_size)
        seq_logprobs = tf.nn.log_softmax(seq_logits, axis=-1)
        seq_logprobs = tf.reshape(
            seq_logprobs, (seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.shape[-1])
        )  # (batch_size, n_docs, tgt_len, vocab_size)
        doc_logprobs = tf.nn.log_softmax(doc_scores, axis=1)
        doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1)
        doc_logprobs = tf.expand_dims(doc_logprobs, axis=-1)  # expanded twice to broadcast over (tgt_len, vocab_size)

        # RAG-sequence marginalization
        first_token_scores = seq_logprobs[:, :, :1, :]
        second_token_scores = seq_logprobs[:, :, 1:2, :]
        remainder = seq_logprobs[:, :, 2:, :]
        rag_logprobs = tf.concat([first_token_scores, second_token_scores + doc_logprobs, remainder], axis=2)
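        # Adding the document log-prior once (at the second token position, since the first
        # position holds the BOS/decoder-start score) means that summing log-probs over tokens
        # later yields log p(z|x) + log p(y|x, z) per document; the logsumexp over the document
        # axis below then completes the marginalization p(y|x) = sum_z p(z|x) p(y|x, z).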
        # calculate loss
        target = tf.expand_dims(target, axis=1)  # n_docs dimension
        target = tf.expand_dims(target, axis=-1)  # logits dimension
        target = tf.repeat(target, n_docs, axis=1)
        assert len(target.shape) == len(rag_logprobs.shape)
        # last-axis gathering only - use 2D-reshape-trick for Torch's style nD gathering
        def torch_gather(param, id_tensor):
            # 2d-gather torch equivalent: https://stackoverflow.com/questions/52129909/tensorflow-equivalent-of-torch-gather
            def gather2d(target, id_tensor):
                idx = tf.stack([tf.range(tf.shape(id_tensor)[0], dtype=id_tensor.dtype), id_tensor[:, 0]], axis=-1)
                result = tf.gather_nd(target, idx)
                return tf.expand_dims(result, axis=-1)

            target = tf.reshape(param, (-1, param.shape[-1]))  # reshape 2D
            target_shape = id_tensor.shape
            id_tensor = tf.reshape(id_tensor, (-1, 1))  # also 2D-index
            result = gather2d(target, id_tensor)
            return tf.reshape(result, target_shape)
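        # In effect this mimics torch.gather(rag_logprobs, dim=-1, index=target): `ll` picks,
        # for every (batch, doc, position), the log-prob assigned to the gold token, keeping
        # shape (batch_size, n_docs, tgt_len, 1).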
        ll = torch_gather(rag_logprobs, id_tensor=target)
        smooth_obj = tf.reduce_sum(rag_logprobs, axis=-1, keepdims=True)  # total sum of all (normalised) logits

        ll, smooth_obj = _mask_pads(ll, smooth_obj)

        # sum over tokens, exclude bos while scoring
        if exclude_bos_score and use_bos:
            ll = tf.reduce_sum(ll[:, :, 1:], axis=2)
        else:
            ll = tf.reduce_sum(ll, axis=2)

        smooth_obj = tf.reduce_sum(smooth_obj, axis=2)
        ll = tf.math.reduce_logsumexp(ll, axis=1)  # logsumexp over docs
        smooth_obj = tf.math.reduce_logsumexp(smooth_obj, axis=1)

        nll_loss = -ll
        smooth_loss = -smooth_obj

        if reduce_loss:
            nll_loss = tf.reduce_sum(nll_loss)
            smooth_loss = tf.reduce_sum(smooth_loss)

        eps_i = epsilon / rag_logprobs.shape[-1]
        loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
        return loss

    def generate(
        self,
        input_ids: TFModelInputType | None = None,
        attention_mask: tf.Tensor | None = None,
        context_input_ids=None,
        context_attention_mask=None,
        doc_scores=None,
        do_deduplication=None,  # defaults to True
        num_return_sequences=None,  # defaults to 1
        num_beams=None,  # defaults to 1
        n_docs=None,
        **model_kwargs,
    ):
        """
        Implements RAG sequence "thorough" decoding. Read the [`~generation.GenerationMixin.generate`] documentation
        for more information on how to set other generate input parameters.

        Args:
            input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                The sequence used as a prompt for the generation. If `input_ids` is not passed, then
                `context_input_ids` has to be provided.
            attention_mask (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
                Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

                - 1 for tokens that are **not masked**,
                - 0 for tokens that are **masked**.

                [What are attention masks?](../glossary#attention-mask)
            context_input_ids (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
                Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
                retriever.
            context_attention_mask (`tf.Tensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
                Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
                retriever. If the model is not initialized with a `retriever` or `input_ids` is not given,
                `context_input_ids` and `context_attention_mask` have to be provided to the forward pass. They are
                returned by [`~RagRetriever.__call__`].
            doc_scores (`tf.Tensor` of shape `(batch_size, config.n_docs)`):
                Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
                `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever` or
                `input_ids` is not given, `doc_scores` has to be provided to the forward pass. `doc_scores` are
                returned by [`~RagRetriever.__call__`].
            do_deduplication (`bool`, *optional*):
                Whether or not to deduplicate the generations from different context documents for a given input. Has
                to be set to `False` if used while training with a distributed backend.
            num_return_sequences (`int`, *optional*, defaults to 1):
                The number of independently computed returned sequences for each element in the batch. Note that this
                is not the value we pass to the `generator`'s [`~generation.GenerationMixin.generate`] function,
                where we set `num_return_sequences` to `num_beams`.
            num_beams (`int`, *optional*, defaults to 1):
                Number of beams for beam search. 1 means no beam search.
            n_docs (`int`, *optional*, defaults to `config.n_docs`):
                Number of documents to retrieve and/or number of documents for which to generate an answer.
            kwargs (`Dict[str, Any]`, *optional*):
                Additional kwargs will be passed to [`~generation.GenerationMixin.generate`]

        Return:
            `tf.Tensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated sequences. The
            second dimension (sequence length) is either equal to `max_length` or shorter if all batches finished early
            due to the `eos_token_id`.
        """
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication
        num_doc_return_sequences = (
            num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
        )
        num_beams = num_beams if num_beams is not None else self.config.num_beams

        assert (
            input_ids is not None or context_input_ids is not None
        ), "At least one of input_ids or context_input_ids must be given"

        if self.retriever is not None and context_input_ids is None:
            question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
            context_input_ids = self.retriever(
                input_ids,
                question_hidden_states.numpy(),
                prefix=self.generator.config.prefix,
                n_docs=n_docs,
                return_tensors="tf",
            )["context_input_ids"]

        hypos = []
        model_kwargs["num_beams"] = num_beams
        model_kwargs["num_return_sequences"] = num_beams  # so that it's not confused with num_doc_return_sequences
        model_kwargs["attention_mask"] = None

        batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs

        for index in range(batch_size):
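            # "Thorough" decoding: for each example, generate num_beams candidates conditioned
            # on each of its n_docs retrieved documents, then re-score every candidate with the
            # full RAG-sequence NLL (forward pass below) and keep the best-scoring ones.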
            # first, generate beams from documents:
            generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs]  # (n_docs, max_len)

            output_sequences = self.generator.generate(
                generator_input_ids,
                **model_kwargs,
            )  # n_docs * n_beam, tgt_len
            if do_deduplication:
                # do_deduplication -- for TF, works in eager mode only!
                output_sequences = tf.stack(list({str(k.numpy().tolist()): k for k in output_sequences}.values()))

            num_candidates = output_sequences.shape[0]  # after deduplication, this can be less than n_docs * n_beam
            # then, run model forwards to get nll scores:
            if input_ids is not None:
                new_input_ids = tf.tile(input_ids[index : index + 1], (num_candidates, 1))
                outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True)
            else:  # input_ids is None, need context_input_ids/mask and doc_scores
                assert context_attention_mask is not None, (
                    "Make sure that `context_attention_mask` is passed, if no `input_ids` is set. Alternatively, you"
                    " can set a retriever using the `set_retriever(...)` function."
                )
                assert doc_scores is not None, (
                    "Make sure that `doc_scores` is passed, if no `input_ids` is set. Alternatively, you can set a"
                    " retriever using the `set_retriever(...)` function."
                )

                individual_input_ids = tf.tile(
                    generator_input_ids, (num_candidates, 1)
                )  # (num_candidates*n_docs, max_len)

                individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs]
                individual_attention_mask = tf.tile(individual_attention_mask, (num_candidates, 1))

                individual_doc_scores = doc_scores[index : (index + 1), :]  # doc_scores.shape = [batch, n_docs]
                individual_doc_scores = tf.tile(individual_doc_scores, (num_candidates, 1))  # [num_candidates, n_docs]

                outputs = self(
                    input_ids=None,
                    context_input_ids=individual_input_ids,
                    context_attention_mask=individual_attention_mask,
                    doc_scores=individual_doc_scores,
                    labels=output_sequences,
                    exclude_bos_score=True,
                )
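            # outputs["loss"] holds one NLL per candidate (the loss is unreduced by default), so
            # top-k of the negated loss selects the num_doc_return_sequences lowest-NLL candidates.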
            top_cand_inds = tf.math.top_k((-outputs["loss"]), k=num_doc_return_sequences)[1]

            # add hypothesis
            hypos.append(tf.gather(output_sequences, top_cand_inds))

        return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id)

    @staticmethod
    def _cat_and_pad(tensors, pad_token_id):
        # used by generate(): `tensors` is a list of (num_candidates, len) tensors; lengths vary across the batch
        # Initialize the padded tensor with shape (all_candidates, max_candidate_length),
        # where all_candidates is summed over all inputs
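        # e.g. tensors of shapes (2, 3) and (1, 5) are stacked into a single (3, 5) tensor,
        # with the shorter rows right-padded with pad_token_id.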
        new_shape = sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors])
        output = tf.fill(new_shape, pad_token_id)

        # Normal tensor doesn't support slice assignment, so we need tf.Variable
        output = tf.Variable(output)

        # Assign, and then convert back to tensor
        ind = 0
        for t in tensors:
            output[ind : ind + t.shape[0], : t.shape[1]].assign(t)
            ind += t.shape[0]

        output = tf.convert_to_tensor(output)
        return tf.cast(output, tensors[0][0][0].dtype)

    def build(self, input_shape=None):
        if self.built:
            return
        self.built = True
        if getattr(self, "rag", None) is not None:
            with tf.name_scope(self.rag.name):
                self.rag.build(None)