# coding=utf-8
# Copyright 2020, The RAG Authors and The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RAG model implementation."""

import copy
from dataclasses import dataclass
from typing import Callable, List, Optional, Tuple, Union

import torch
from torch import nn

from ...configuration_utils import PretrainedConfig
from ...generation import BeamSearchScorer, GenerationConfig, LogitsProcessorList, StoppingCriteriaList
from ...modeling_outputs import ModelOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings_to_model_forward, logging, replace_return_docstrings
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever


logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "RagConfig"

@dataclass
class RetrievAugLMMarginOutput(ModelOutput):
    """
    Base class for retriever augmented marginalized models outputs.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `labels` is provided):
            Language modeling loss.
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
            each vocabulary token.
        doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
            Score between each retrieved document embedding (see `retrieved_doc_embeds`) and
            `question_encoder_last_hidden_state`.
        past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
            num_heads, sequence_length, embed_size_per_head)`.

            Contains precomputed hidden states (keys and values in the attention blocks) of the decoder that can be
            used (see `past_key_values` input) to speed up sequential decoding.
        retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
            Embedded documents retrieved by the retriever. Used with `question_encoder_last_hidden_state` to compute
            the `doc_scores`.
        retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
            The indices of the embedded documents retrieved by the retriever.
        context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
            Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
            retriever.
        context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
            Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
            retriever.
        question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden states at the output of the last layer of the question encoder of the model.
        question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer)
            of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
        question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the question encoder, after the attention softmax, used to compute the weighted
            average in the self-attention heads.
        generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden states at the output of the last layer of the generator encoder of the model.
        generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer)
            of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
        generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the generator encoder, after the attention softmax, used to compute the weighted
            average in the self-attention heads.
        generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer)
            of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
        generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the generator decoder, after the attention softmax, used to compute the weighted
            average in the self-attention heads.
        generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Cross-attention weights of the generator decoder, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
    """

    loss: Optional[torch.FloatTensor] = None
    logits: torch.FloatTensor = None
    doc_scores: torch.FloatTensor = None
    past_key_values: Optional[List[torch.FloatTensor]] = None
    retrieved_doc_embeds: Optional[torch.FloatTensor] = None
    retrieved_doc_ids: Optional[torch.LongTensor] = None
    context_input_ids: Optional[torch.LongTensor] = None
    context_attention_mask: Optional[torch.LongTensor] = None
    question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    question_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    question_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None
    generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    generator_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    generator_dec_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    generator_cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None

@dataclass
class RetrievAugLMOutput(ModelOutput):
    """
    Args:
        logits (`torch.FloatTensor` of shape `(batch_size, sequence_length, config.vocab_size)`):
            Prediction scores of the language modeling head. The score is possibly marginalized over all documents for
            each vocabulary token.
        doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
            Score between each retrieved document embedding (see `retrieved_doc_embeds`) and
            `question_encoder_last_hidden_state`.
        past_key_values (`List[torch.FloatTensor]`, *optional*, returned when `use_cache=True` is passed or when `config.use_cache=True`):
            List of `torch.FloatTensor` of length `config.n_layers`, with each tensor of shape `(2, batch_size,
            num_heads, sequence_length, embed_size_per_head)`.

            Contains precomputed hidden states (keys and values in the attention blocks) of the decoder that can be
            used (see `past_key_values` input) to speed up sequential decoding.
        retrieved_doc_embeds (`torch.FloatTensor` of shape `(batch_size, config.n_docs, hidden_size)`, *optional*, returned when *output_retrieved=True*):
            Embedded documents retrieved by the retriever. Used with `question_encoder_last_hidden_state` to compute
            the `doc_scores`.
        retrieved_doc_ids (`torch.LongTensor` of shape `(batch_size, config.n_docs)`, *optional*, returned when *output_retrieved=True*):
            The indices of the embedded documents retrieved by the retriever.
        context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
            Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
            retriever.
        context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
            Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
            retriever.
        question_encoder_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden states at the output of the last layer of the question encoder of the model.
        question_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer)
            of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden states of the question encoder at the output of each layer plus the initial embedding outputs.
        question_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the question encoder, after the attention softmax, used to compute the weighted
            average in the self-attention heads.
        generator_enc_last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`, *optional*):
            Sequence of hidden states at the output of the last layer of the generator encoder of the model.
        generator_enc_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer)
            of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden states of the generator encoder at the output of each layer plus the initial embedding outputs.
        generator_enc_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the generator encoder, after the attention softmax, used to compute the weighted
            average in the self-attention heads.
        generator_dec_hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of the embeddings and one for the output of each layer)
            of shape `(batch_size, sequence_length, hidden_size)`.

            Hidden states of the generator decoder at the output of each layer plus the initial embedding outputs.
        generator_dec_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Attention weights of the generator decoder, after the attention softmax, used to compute the weighted
            average in the self-attention heads.
        generator_cross_attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
            Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
            sequence_length)`.

            Cross-attention weights of the generator decoder, after the attention softmax, used to compute the
            weighted average in the cross-attention heads.
    """

    logits: torch.FloatTensor = None
    doc_scores: torch.FloatTensor = None
    past_key_values: Optional[List[torch.FloatTensor]] = None
    retrieved_doc_embeds: Optional[torch.FloatTensor] = None
    retrieved_doc_ids: Optional[torch.LongTensor] = None
    context_input_ids: Optional[torch.LongTensor] = None
    context_attention_mask: Optional[torch.LongTensor] = None
    question_encoder_last_hidden_state: Optional[torch.FloatTensor] = None
    question_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    question_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    generator_enc_last_hidden_state: Optional[torch.FloatTensor] = None
    generator_enc_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    generator_enc_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    generator_dec_hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
    generator_dec_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
    generator_cross_attentions: Optional[Tuple[torch.FloatTensor, ...]] = None

class RagPreTrainedModel(PreTrainedModel):
    r"""
    RAG models were released with the paper [Retrieval-Augmented Generation for Knowledge-Intensive NLP
    Tasks](https://arxiv.org/abs/2005.11401) by Patrick Lewis, Ethan Perez, Aleksandra Piktus et al.

    RAG is a retriever augmented model that encapsulates three components: a question encoder, a dataset retriever
    and a generator. The encoder and generator are trainable, while the retriever is just an indexed dataset.
    """

    config_class = RagConfig
    base_model_prefix = "rag"
    _supports_flash_attn_2 = True
    _supports_sdpa = True

    @classmethod
    def from_pretrained(cls, *args, **kwargs):
        # At the moment fast initialization is not supported
        # for composite models
        kwargs["_fast_init"] = False
        return super().from_pretrained(*args, **kwargs)

    @classmethod
    def from_pretrained_question_encoder_generator(
        cls,
        question_encoder_pretrained_model_name_or_path: Optional[str] = None,
        generator_pretrained_model_name_or_path: Optional[str] = None,
        retriever: Optional[RagRetriever] = None,
        **kwargs,
    ) -> PreTrainedModel:
        r"""
        Instantiates a question encoder and a generator from one or two base classes of the library from pretrained
        model checkpoints.

        The model is set in evaluation mode by default using `model.eval()` (Dropout modules are deactivated). To
        train the model, you need to first set it back in training mode with `model.train()`.

        Params:
            question_encoder_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
                Information necessary to initiate the question encoder. Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or url to a *tensorflow index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In
                      this case, `from_tf` should be set to `True` and a configuration object should be provided as
                      `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
                      PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.

            generator_pretrained_model_name_or_path (`str`, *optional*, defaults to `None`):
                Information necessary to initiate the generator. Can be either:

                    - A string, the *model id* of a pretrained model hosted inside a model repo on huggingface.co.
                    - A path to a *directory* containing model weights saved using
                      [`~PreTrainedModel.save_pretrained`], e.g., `./my_model_directory/`.
                    - A path or url to a *tensorflow index checkpoint file* (e.g., `./tf_model/model.ckpt.index`). In
                      this case, `from_tf` should be set to `True` and a configuration object should be provided as
                      `config` argument. This loading path is slower than converting the TensorFlow checkpoint in a
                      PyTorch model using the provided conversion scripts and loading the PyTorch model afterwards.

            model_args (remaining positional arguments, *optional*):
                All remaining positional arguments will be passed to the underlying model's `__init__` method.
            retriever ([`RagRetriever`], *optional*):
                The retriever to use.
            kwargs (remaining dictionary of keyword arguments, *optional*):
                Can be used to update the configuration object (after it is loaded) and initiate the model (e.g.,
                `output_attentions=True`).

                    - To update the question_encoder configuration, use the prefix *question_encoder_* for each
                      configuration parameter.
                    - To update the generator configuration, use the prefix *generator_* for each configuration
                      parameter.
                    - To update the parent model configuration, do not use a prefix for each configuration parameter.

                Behaves differently depending on whether a `config` is provided or automatically loaded.

        Example:

        ```python
        >>> from transformers import RagModel

        >>> # initialize a RAG from two pretrained models.
        >>> model = RagModel.from_pretrained_question_encoder_generator(
        ...     "facebook/dpr-question_encoder-single-nq-base", "google-t5/t5-small"
        ... )
        >>> # saving model after fine-tuning
        >>> model.save_pretrained("./rag")
        >>> # load fine-tuned model
        >>> model = RagModel.from_pretrained("./rag")
        ```"""
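        # Split the incoming kwargs by prefix: "question_encoder_*" keys are routed to the question
        # encoder and "generator_*" keys to the generator, with the prefix stripped. For example, a
        # hypothetical `generator_min_length=10` would reach the generator config as `min_length=10`.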
        kwargs_question_encoder = {
            argument[len("question_encoder_") :]: value
            for argument, value in kwargs.items()
            if argument.startswith("question_encoder_")
        }

        kwargs_generator = {
            argument[len("generator_") :]: value
            for argument, value in kwargs.items()
            if argument.startswith("generator_")
        }

        # remove question_encoder, generator kwargs from kwargs
        for key in kwargs_question_encoder.keys():
            del kwargs["question_encoder_" + key]
        for key in kwargs_generator.keys():
            del kwargs["generator_" + key]

        # Load and initialize the question_encoder and generator
        # The distinction between question_encoder and generator at the model level is made
        # by the value of the flag `is_generator` that we need to set correctly.
        question_encoder = kwargs_question_encoder.pop("model", None)
        if question_encoder is None:
            assert question_encoder_pretrained_model_name_or_path is not None, (
                "If `model` is not defined as an argument, a `question_encoder_pretrained_model_name_or_path` has to"
                " be defined"
            )
            from ..auto.modeling_auto import AutoModel

            if "config" not in kwargs_question_encoder:
                from ..auto.configuration_auto import AutoConfig

                question_encoder_config, kwargs_question_encoder = AutoConfig.from_pretrained(
                    question_encoder_pretrained_model_name_or_path,
                    **kwargs_question_encoder,
                    return_unused_kwargs=True,
                )
                kwargs_question_encoder["config"] = question_encoder_config

            question_encoder = AutoModel.from_pretrained(
                question_encoder_pretrained_model_name_or_path, **kwargs_question_encoder
            )

        generator = kwargs_generator.pop("model", None)
        if generator is None:
            assert generator_pretrained_model_name_or_path is not None, (
                "If `generator_model` is not defined as an argument, a `generator_pretrained_model_name_or_path` has"
                " to be defined"
            )
            from ..auto.modeling_auto import AutoModelForSeq2SeqLM

            if "config" not in kwargs_generator:
                from ..auto.configuration_auto import AutoConfig

                generator_config, kwargs_generator = AutoConfig.from_pretrained(
                    generator_pretrained_model_name_or_path, **kwargs_generator, return_unused_kwargs=True
                )
                kwargs_generator["config"] = generator_config

            generator = AutoModelForSeq2SeqLM.from_pretrained(
                generator_pretrained_model_name_or_path, **kwargs_generator
            )

        # instantiate config with corresponding kwargs
        config = kwargs.get("config", None)
        if config is None:
            config = RagConfig.from_question_encoder_generator_configs(
                question_encoder.config, generator.config, **kwargs
            )

        return cls(question_encoder=question_encoder, generator=generator, config=config, retriever=retriever)


RAG_START_DOCSTRING = r"""
    RAG is a seq2seq model which encapsulates two core components: a question encoder and a generator. During a
    forward pass, we encode the input with the question encoder and pass it to the retriever to extract relevant
    context documents. The documents are then prepended to the input. Such contextualized inputs are passed to the
    generator.

    The question encoder can be any *autoencoding* model, preferably [`DPRQuestionEncoder`], and the generator can be
    any *seq2seq* model, preferably [`BartForConditionalGeneration`].

    The model can be initialized with a [`RagRetriever`] for end-to-end generation or used in combination with the
    outputs of a retriever in multiple steps---see examples for more details. The model is compatible with any
    *autoencoding* model as the `question_encoder` and any *seq2seq* model with a language model head as the
    `generator`. It has been tested with [`DPRQuestionEncoder`] as the `question_encoder` and
    [`BartForConditionalGeneration`] or [`T5ForConditionalGeneration`] as the `generator`.

    This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning
    heads etc.)

    This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
    Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general
    usage and behavior.

    Args:
        config ([`RagConfig`]):
            Model configuration class with all the parameters of the model. Initializing with a config file does not
            load the weights associated with the model, only the configuration. Check out the
            [`~PreTrainedModel.from_pretrained`] method to load the model weights.
        question_encoder ([`PreTrainedModel`]):
            An encoder model compatible with the faiss index encapsulated by the `retriever`.
        generator ([`PreTrainedModel`]):
            A seq2seq model used as the generator in the RAG architecture.
        retriever ([`RagRetriever`]):
            A retriever class encapsulating a faiss index queried to obtain context documents for current inputs.
"""

RAG_FORWARD_INPUTS_DOCSTRING = r"""
    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. [`RagConfig`], used to initialize the model, specifies
            which generator to use; it also specifies a compatible generator tokenizer. Use that tokenizer class to
            obtain the indices.

            [What are input IDs?](../glossary#input-ids)
        attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
            Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:

            - 1 for tokens that are **not masked**,
            - 0 for tokens that are **masked**.

            [What are attention masks?](../glossary#attention-mask)
        encoder_outputs (`tuple(tuple(torch.FloatTensor))`, *optional*):
            Tuple consists of (`generator_enc_last_hidden_state`, *optional*: `generator_enc_hidden_states`,
            *optional*: `generator_enc_attentions`). `generator_enc_last_hidden_state` of shape `(batch_size, n_docs *
            sequence_length, hidden_size)` is a sequence of hidden states at the output of the last layer of the
            generator's encoder.

            Used by the ([`RagModel`]) model during decoding.
        decoder_input_ids (`torch.LongTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Provide for generation tasks. `None` by default; construct as per instructions for the generator model
            you're using with your RAG instance.
        decoder_attention_mask (`torch.BoolTensor` of shape `(batch_size, target_sequence_length)`, *optional*):
            Default behavior: generate a tensor that ignores pad tokens in `decoder_input_ids`. A causal mask will
            also be used by default.
        past_key_values (`tuple(tuple(torch.FloatTensor))`):
            Tuple consists of two elements: `encoder_outputs` of the RAG model (see `encoder_outputs`) and
            `past_key_values` of the underlying generator. Can be used to speed up decoding. `past_key_values` are
            used in the ([`RagTokenForGeneration`]) model during decoding.
        doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
            Score between each retrieved document embedding (see `retrieved_doc_embeds`) and
            `question_encoder_last_hidden_state`. If the model is not initialized with a `retriever`, `doc_scores`
            has to be provided to the forward pass. `doc_scores` can be computed via
            `question_encoder_last_hidden_state` and `retrieved_doc_embeds`; see examples for more information.
        context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
            Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
            retriever. If the model was not initialized with a `retriever`, `context_input_ids` has to be provided to
            the forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
        context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
            Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
            retriever. If the model is not initialized with a `retriever`, `context_attention_mask` has to be
            provided to the forward pass. `context_attention_mask` is returned by [`~RagRetriever.__call__`].
        use_cache (`bool`, *optional*, defaults to `True`):
            If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
            (see `past_key_values`).
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        output_retrieved (`bool`, *optional*):
            Whether or not to return the `retrieved_doc_embeds`, `retrieved_doc_ids`, `context_input_ids` and
            `context_attention_mask`. See returned tensors for more detail.
        n_docs (`int`, *optional*, defaults to `config.n_docs`):
            Number of documents to retrieve and/or number of documents for which to generate an answer.
"""

@add_start_docstrings_to_model_forward(RAG_START_DOCSTRING)
class RagModel(RagPreTrainedModel):
    def __init__(
        self,
        config: Optional[PretrainedConfig] = None,
        question_encoder: Optional[PreTrainedModel] = None,
        generator: Optional[PreTrainedModel] = None,
        retriever: Optional[RagRetriever] = None,  # or maybe just use a `set_retriever(...)` method
        **kwargs,
    ):
        assert config is not None or (
            question_encoder is not None and generator is not None
        ), "Either a configuration or a question_encoder and a generator have to be provided."

        if config is None:
            config = RagConfig.from_question_encoder_generator_configs(
                question_encoder.config, generator.config, **kwargs
            )
        else:
            assert isinstance(config, self.config_class), f"config: {config} has to be of type {self.config_class}"
        super().__init__(config)
        if question_encoder is None:
            from ..auto.modeling_auto import AutoModel

            question_encoder = AutoModel.from_config(config.question_encoder)

        if generator is None:
            from ..auto.modeling_auto import AutoModelForSeq2SeqLM

            generator = AutoModelForSeq2SeqLM.from_config(config.generator)

        self.retriever = retriever
        if self.retriever is not None:
            assert isinstance(
                retriever, RagRetriever
            ), f"`self.retriever` is of type {type(self.retriever)}, but should be of type `RagRetriever`"

        self.question_encoder = question_encoder
        self.generator = generator
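        # `ctx_encoder` stays `None` unless `set_context_encoder_for_training(...)` is called on the
        # wrapper model (e.g. `RagSequenceForGeneration`), which also flips `context_encoder_training`
        # so the forward pass re-embeds the retrieved documents and gradients reach the document encoder.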
        self.ctx_encoder = None
        self.context_encoder_training = False

    @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=RetrievAugLMOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.FloatTensor]]] = None,
        doc_scores: Optional[torch.FloatTensor] = None,
        context_input_ids: Optional[torch.LongTensor] = None,
        context_attention_mask: Optional[torch.LongTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_retrieved: Optional[bool] = None,
        n_docs: Optional[int] = None,
    ) -> Union[Tuple[torch.Tensor], RetrievAugLMOutput]:
        r"""
        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, RagRetriever, RagModel
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-base")
        >>> retriever = RagRetriever.from_pretrained(
        ...     "facebook/rag-token-base", index_name="exact", use_dummy_dataset=True
        ... )
        >>> # initialize with RagRetriever to do everything in one forward call
        >>> model = RagModel.from_pretrained("facebook/rag-token-base", retriever=retriever)

        >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
        >>> outputs = model(input_ids=inputs["input_ids"])
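        >>> # outputs.doc_scores holds one retrieval score per document: shape (batch_size, n_docs)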
        ```"""
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        output_retrieved = output_retrieved if output_retrieved is not None else self.config.output_retrieved

        # whether retriever has to be used
        has_to_retrieve = (
            self.retriever is not None
            and (context_input_ids is None or context_attention_mask is None or doc_scores is None)
            and encoder_outputs is None
        )
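        # Retrieval only happens when a retriever is attached, the caller has not already supplied the
        # retrieved context (`context_input_ids`/`context_attention_mask`/`doc_scores`), and no
        # precomputed `encoder_outputs` were passed in (as during incremental generation).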
        # encoder_outputs are pre-computed during RAG-token generation
        if encoder_outputs is None:
            if has_to_retrieve:
                question_enc_outputs = self.question_encoder(
                    input_ids, attention_mask=attention_mask, return_dict=True
                )
                question_encoder_last_hidden_state = question_enc_outputs[0]  # hidden states of question encoder

                retriever_outputs = self.retriever(
                    input_ids,
                    question_encoder_last_hidden_state.cpu().detach().to(torch.float32).numpy(),
                    prefix=self.generator.config.prefix,
                    n_docs=n_docs,
                    return_tensors="pt",
                )
                if self.context_encoder_training:
                    (
                        context_input_ids,
                        context_attention_mask,
                        retrieved_doc_embeds,
                        retrieved_doc_input_ids,
                        retrieved_doc_attention_mask,
                        retrieved_doc_ids,
                    ) = (
                        retriever_outputs["context_input_ids"],
                        retriever_outputs["context_attention_mask"],
                        retriever_outputs["retrieved_doc_embeds"],
                        retriever_outputs["tokenized_doc_ids"],
                        retriever_outputs["tokenized_doc_attention_mask"],
                        retriever_outputs["doc_ids"],
                    )

                    context_input_ids = context_input_ids.to(input_ids)
                    context_attention_mask = context_attention_mask.to(input_ids)

                    retrieved_doc_input_ids = retrieved_doc_input_ids.to(input_ids)
                    retrieved_doc_attention_mask = retrieved_doc_attention_mask.to(input_ids)
                    retrieved_doc_embeds = self.ctx_encoder(
                        retrieved_doc_input_ids, attention_mask=retrieved_doc_attention_mask, return_dict=True
                    ).pooler_output
                    retrieved_doc_embeds = retrieved_doc_embeds.view(
                        -1, n_docs, question_encoder_last_hidden_state.shape[1]
                    )  # reshaping

                    # compute doc_scores involving ctx_encoder
                    doc_scores = torch.bmm(
                        question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)
                    ).squeeze(1)

                else:
                    context_input_ids, context_attention_mask, retrieved_doc_embeds, retrieved_doc_ids = (
                        retriever_outputs["context_input_ids"],
                        retriever_outputs["context_attention_mask"],
                        retriever_outputs["retrieved_doc_embeds"],
                        retriever_outputs["doc_ids"],
                    )

                    # set to correct device
                    retrieved_doc_embeds = retrieved_doc_embeds.to(question_encoder_last_hidden_state)
                    context_input_ids = context_input_ids.to(input_ids)
                    context_attention_mask = context_attention_mask.to(input_ids)

                    # compute doc_scores
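                    # Shape walk-through: `question_encoder_last_hidden_state` is `(batch_size, hidden_size)`,
                    # unsqueezed to `(batch_size, 1, hidden_size)`; `retrieved_doc_embeds` is
                    # `(batch_size, n_docs, hidden_size)`, transposed to `(batch_size, hidden_size, n_docs)`;
                    # the batched matmul plus squeeze yields `doc_scores` of shape `(batch_size, n_docs)`.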
                    doc_scores = torch.bmm(
                        question_encoder_last_hidden_state.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)
                    ).squeeze(1)
            else:
                assert context_input_ids is not None, (
                    "Make sure that `context_input_ids` are passed, if no `retriever` is set. Alternatively, you can"
                    " set a retriever using the `set_retriever(...)` function."
                )
                assert context_attention_mask is not None, (
                    "Make sure that `context_attention_mask` are passed, if no `retriever` is set. Alternatively, you"
                    " can set a retriever using the `set_retriever(...)` function."
                )
                assert doc_scores is not None, (
                    "Make sure that `doc_scores` are passed, if no `retriever` is set. Alternatively, you can set a"
                    " retriever using the `set_retriever(...)` function."
                )

        assert (
            doc_scores is not None
        ), "Make sure that `doc_scores` are passed when passing `encoder_outputs` to the forward function."
        assert (doc_scores.shape[1] % n_docs) == 0, (
            f"The second dimension of `doc_scores` should be a multiple of `n_docs`={n_docs}, but is"
            f" {doc_scores.shape[1]}."
        )
        # Decoder input without context documents
        if decoder_input_ids is not None:
            decoder_input_ids = decoder_input_ids.repeat_interleave(n_docs, dim=0)

        if decoder_attention_mask is not None:
            decoder_attention_mask = decoder_attention_mask.repeat_interleave(n_docs, dim=0)
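        # The generator consumes `batch_size * n_docs` context sequences (one per retrieved document),
        # so the decoder inputs above are replicated `n_docs` times to line up with them row for row.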
        gen_outputs = self.generator(
            input_ids=context_input_ids,
            attention_mask=context_attention_mask,
            encoder_outputs=encoder_outputs,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            return_dict=True,
        )
        if not has_to_retrieve:
            question_encoder_last_hidden_state = None
            question_enc_hidden_states = None
            question_enc_attentions = None
            retrieved_doc_embeds = None
            retrieved_doc_ids = None
        else:
            question_enc_hidden_states = question_enc_outputs.hidden_states
            question_enc_attentions = question_enc_outputs.attentions

        if not has_to_retrieve or not output_retrieved:
            # don't output retrieved docs
            context_input_ids = None
            context_attention_mask = None
            retrieved_doc_embeds = None
            retrieved_doc_ids = None

        return RetrievAugLMOutput(
            logits=gen_outputs.logits,
            doc_scores=doc_scores,
            past_key_values=gen_outputs.past_key_values,
            context_input_ids=context_input_ids,
            context_attention_mask=context_attention_mask,
            retrieved_doc_embeds=retrieved_doc_embeds,
            retrieved_doc_ids=retrieved_doc_ids,
            question_encoder_last_hidden_state=question_encoder_last_hidden_state,
            question_enc_hidden_states=question_enc_hidden_states,
            question_enc_attentions=question_enc_attentions,
            generator_enc_last_hidden_state=gen_outputs.encoder_last_hidden_state,
            generator_enc_hidden_states=gen_outputs.encoder_hidden_states,
            generator_enc_attentions=gen_outputs.encoder_attentions,
            generator_dec_hidden_states=gen_outputs.decoder_hidden_states,
            generator_dec_attentions=gen_outputs.decoder_attentions,
            generator_cross_attentions=gen_outputs.cross_attentions,
        )


@add_start_docstrings_to_model_forward(
    """
    A RAG-sequence model implementation. It performs RAG-sequence specific marginalization in the forward pass.
    """,
    RAG_START_DOCSTRING,
)
class RagSequenceForGeneration(RagPreTrainedModel):
    def __init__(
        self,
        config: Optional[PretrainedConfig] = None,
        question_encoder: Optional[PreTrainedModel] = None,
        generator: Optional[PreTrainedModel] = None,
        retriever: Optional[RagRetriever] = None,
        **kwargs,
    ):
        assert config is not None or (
            question_encoder is not None and generator is not None
        ), "Either a configuration or an encoder and a generator have to be provided."

        if config is None:
            config = RagConfig.from_question_encoder_generator_configs(
                question_encoder.config, generator.config, **kwargs
            )
        super().__init__(config)

        # instantiate model
        self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever)

    def set_retriever(self, retriever: RagRetriever):
        self.rag.retriever = retriever

    def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel):
        self.rag.context_encoder_training = True
        self.rag.ctx_encoder = ctx_encoder

    @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        context_input_ids: Optional[torch.LongTensor] = None,
        context_attention_mask: Optional[torch.LongTensor] = None,
        doc_scores: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_retrieved: Optional[bool] = None,
        exclude_bos_score: Optional[bool] = None,
        reduce_loss: Optional[bool] = None,
        labels: Optional[torch.LongTensor] = None,
        n_docs: Optional[int] = None,
        **kwargs,  # needs kwargs for generation
    ) -> RetrievAugLMMarginOutput:
        r"""
        exclude_bos_score (`bool`, *optional*):
            Only relevant if `labels` is passed. If `True`, the score of the BOS token is disregarded when computing
            the loss.
        reduce_loss (`bool`, *optional*):
            Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum`
            operation.
        kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
            Legacy dictionary, which is required so that the model can use the *generate()* function.

        Returns:

        Example:

        ```python
        >>> from transformers import AutoTokenizer, RagRetriever, RagSequenceForGeneration
        >>> import torch

        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-sequence-nq")
        >>> retriever = RagRetriever.from_pretrained(
        ...     "facebook/rag-sequence-nq", index_name="exact", use_dummy_dataset=True
        ... )
        >>> # initialize with RagRetriever to do everything in one forward call
        >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", retriever=retriever)

        >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
        >>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt")
        >>> input_ids = inputs["input_ids"]
        >>> labels = targets["input_ids"]
        >>> outputs = model(input_ids=input_ids, labels=labels)

        >>> # or use retriever separately
        >>> model = RagSequenceForGeneration.from_pretrained("facebook/rag-sequence-nq", use_dummy_dataset=True)
        >>> # 1. Encode
        >>> question_hidden_states = model.question_encoder(input_ids)[0]
        >>> # 2. Retrieve
        >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt")
        >>> doc_scores = torch.bmm(
        ...     question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)
        ... ).squeeze(1)
        >>> # 3. Forward to generator
        >>> outputs = model(
        ...     context_input_ids=docs_dict["context_input_ids"],
        ...     context_attention_mask=docs_dict["context_attention_mask"],
        ...     doc_scores=doc_scores,
        ...     decoder_input_ids=labels,
        ... )
        ```"""
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        exclude_bos_score = exclude_bos_score if exclude_bos_score is not None else self.config.exclude_bos_score
        reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss

        if labels is not None:
            if decoder_input_ids is None:
                decoder_input_ids = labels
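            # the cache only helps incremental decoding at generation time; for a teacher-forced
            # training pass it would just cost memory, so it is disabled when labels are provided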
            use_cache = False
        outputs = self.rag(
            input_ids=input_ids,
            attention_mask=attention_mask,
            encoder_outputs=encoder_outputs,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            context_input_ids=context_input_ids,
            context_attention_mask=context_attention_mask,
            doc_scores=doc_scores,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            output_retrieved=output_retrieved,
            n_docs=n_docs,
        )
        loss = None
        if labels is not None:
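            # RAG-sequence marginalization: `get_nll` combines the per-document target log-likelihoods
            # with the log-softmaxed `doc_scores` and reduces over the document axis (log-sum-exp), so
            # the loss reflects the probability of the target under the whole document mixture.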
            loss = self.get_nll(
                outputs.logits,
                outputs.doc_scores,
                decoder_input_ids,
                reduce_loss=reduce_loss,
                epsilon=self.config.label_smoothing,
                exclude_bos_score=exclude_bos_score,
                n_docs=n_docs,
            )

        return RetrievAugLMMarginOutput(
            loss=loss,
            logits=outputs.logits,
            doc_scores=outputs.doc_scores,
            past_key_values=outputs.past_key_values,
            context_input_ids=outputs.context_input_ids,
            context_attention_mask=outputs.context_attention_mask,
            retrieved_doc_embeds=outputs.retrieved_doc_embeds,
            retrieved_doc_ids=outputs.retrieved_doc_ids,
            question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
            question_enc_hidden_states=outputs.question_enc_hidden_states,
            question_enc_attentions=outputs.question_enc_attentions,
            generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
            generator_enc_hidden_states=outputs.generator_enc_hidden_states,
            generator_enc_attentions=outputs.generator_enc_attentions,
            generator_dec_hidden_states=outputs.generator_dec_hidden_states,
            generator_dec_attentions=outputs.generator_dec_attentions,
            generator_cross_attentions=outputs.generator_cross_attentions,
        )

    @property
    def retriever(self):
        return self.rag.retriever

    @property
    def generator(self):
        return self.rag.generator

    @property
    def question_encoder(self):
        return self.rag.question_encoder

    @torch.no_grad()
    def generate(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        context_input_ids: Optional[torch.LongTensor] = None,
        context_attention_mask: Optional[torch.LongTensor] = None,
        doc_scores: Optional[torch.FloatTensor] = None,
        do_deduplication: Optional[bool] = None,  # defaults to True
        num_return_sequences: Optional[int] = None,  # defaults to 1
        num_beams: Optional[int] = None,  # defaults to 1
        n_docs: Optional[int] = None,
        **model_kwargs,
    ) -> torch.LongTensor:
  791. """
  792. Implements RAG sequence "thorough" decoding. Read the [`~generation.GenerationMixin.generate`]` documentation
  793. for more information on how to set other generate input parameters.
  794. Args:
  795. input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
  796. The sequence used as a prompt for the generation. If `input_ids` is not passed, then
  797. `context_input_ids` has to be provided.
  798. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
  799. Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
  800. - 1 for tokens that are **not masked**,
  801. - 0 for tokens that are **masked**.
  802. [What are attention masks?](../glossary#attention-mask)
  803. context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
  804. Input IDs post-processed from the retrieved documents and the question encoder input_ids by the
  805. retriever.
  806. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
  807. Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
  808. retriever.
  809. If the model is not initialized with a `retriever` or `input_ids` is not given, `context_input_ids` and
  810. `context_attention_mask` have to be provided to the forward pass. They are returned by
  811. [`~RagRetriever.__call__`].
  812. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
  813. Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
  814. `question_encoder_last_hidden_state`.
  815. If the model is not initialized with a `retriever` or `input_ids` is not given, `doc_scores` has to be
  816. provided to the forward pass. `doc_scores` are returned by [`~RagRetriever.__call__`].
  817. do_deduplication (`bool`, *optional*):
  818. Whether or not to deduplicate the generations from different context documents for a given input. Has
  819. to be set to `False` if used while training with distributed backend.
  820. num_return_sequences(`int`, *optional*, defaults to 1):
  821. The number of independently computed returned sequences for each element in the batch. Note that this
  822. is not the value we pass to the `generator`'s `[`~generation.GenerationMixin.generate`]` function,
  823. where we set `num_return_sequences` to `num_beams`.
  824. num_beams (`int`, *optional*, defaults to 1):
  825. Number of beams for beam search. 1 means no beam search.
  826. n_docs (`int`, *optional*, defaults to `config.n_docs`)
  827. Number of documents to retrieve and/or number of documents for which to generate an answer.
  828. kwargs (`Dict[str, Any]`, *optional*):
  829. Additional kwargs will be passed to [`~generation.GenerationMixin.generate`].
  830. Return:
  831. `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated
  832. sequences. The second dimension (sequence length) is either equal to `max_length` or shorter if all batches
  833. finished early due to the `eos_token_id`.
  834. """
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        do_deduplication = do_deduplication if do_deduplication is not None else self.config.do_deduplication
        num_doc_return_sequences = (
            num_return_sequences if num_return_sequences is not None else self.config.num_return_sequences
        )
        num_beams = num_beams if num_beams is not None else self.config.num_beams

        assert (
            input_ids is not None or context_input_ids is not None
        ), "At least one of `input_ids` or `context_input_ids` must be given"

        if self.retriever is not None and context_input_ids is None:
            question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
            context_input_ids = self.retriever(
                input_ids,
                question_hidden_states.cpu().detach().to(torch.float32).numpy(),
                prefix=self.generator.config.prefix,
                n_docs=n_docs,
                return_tensors="pt",
            )["context_input_ids"]

            # set to correct device
            context_input_ids = context_input_ids.to(input_ids)

        hypos = []
        model_kwargs["num_beams"] = num_beams
        model_kwargs["num_return_sequences"] = num_beams
        model_kwargs["attention_mask"] = None

        batch_size = input_ids.shape[0] if input_ids is not None else context_input_ids.shape[0] // n_docs

        for index in range(batch_size):
            # first, generate beams from documents:
            generator_input_ids = context_input_ids[index * n_docs : (index + 1) * n_docs]  # (n_docs, max_len)

            output_sequences = self.generator.generate(
                generator_input_ids,
                **model_kwargs,
            )  # n_docs * n_beam, tgt_len
            if do_deduplication:
                # deduplicate output sequences by their stringified token ids (the dict keeps one copy of each)
                output_sequences = torch.stack(list({str(k.tolist()): k for k in output_sequences}.values()))

            num_candidates = output_sequences.shape[0]  # after deduplication, this can be less than n_docs * n_beam

            # then, run model forwards to get nll scores:
            if input_ids is not None:
                new_input_ids = input_ids[index : index + 1].repeat(num_candidates, 1)
                outputs = self(new_input_ids, labels=output_sequences, exclude_bos_score=True)
            else:  # input_ids is None, need context_input_ids/mask and doc_scores
                assert context_attention_mask is not None, (
                    "Make sure that `context_attention_mask` is passed if no `input_ids` is set. Alternatively, you"
                    " can set a retriever using the `set_retriever(...)` function."
                )
                assert doc_scores is not None, (
                    "Make sure that `doc_scores` are passed if no `input_ids` is set. Alternatively, you can set a"
                    " retriever using the `set_retriever(...)` function."
                )

                individual_input_ids = generator_input_ids.repeat(
                    num_candidates, 1
                )  # (num_candidates*n_docs, max_len)

                individual_attention_mask = context_attention_mask[index * n_docs : (index + 1) * n_docs]
                individual_attention_mask = individual_attention_mask.repeat(num_candidates, 1)

                individual_doc_scores = doc_scores[index : (index + 1), :]  # doc_scores.shape = [batch, n_docs]
                individual_doc_scores = individual_doc_scores.repeat(num_candidates, 1)  # [num_candidates, n_docs]

                outputs = self(
                    context_input_ids=individual_input_ids,
                    context_attention_mask=individual_attention_mask,
                    doc_scores=individual_doc_scores,
                    labels=output_sequences,
                    exclude_bos_score=True,
                )

            top_cand_inds = (-outputs["loss"]).topk(num_doc_return_sequences)[1]

            # add hypothesis
            hypos.append(output_sequences[top_cand_inds])

        return self._cat_and_pad(hypos, pad_token_id=self.config.generator.pad_token_id)

    def get_nll(
        self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, exclude_bos_score=False, n_docs=None
    ):
        # shift tokens left
        target = torch.cat(
            [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1
        )

        n_docs = n_docs if n_docs is not None else self.config.n_docs

        # bos_token_id is None for T5
        bos_token_id = self.config.bos_token_id or self.config.generator.bos_token_id
        use_bos = bos_token_id is not None and target[:, 0].eq(bos_token_id).all()

        def _mask_pads(ll, smooth_obj):
            pad_mask = target.eq(self.config.generator.pad_token_id)
            if pad_mask.any():
                ll.masked_fill_(pad_mask, 0.0)
                smooth_obj.masked_fill_(pad_mask, 0.0)
            return ll.squeeze(-1), smooth_obj.squeeze(-1)

        # seq_logits dim = (batch*n_docs, tgt_len, #vocabs)
        seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view(
            seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1)
        )  # batch_size x n_docs x tgt_len x #vocab_size
        doc_logprobs = nn.functional.log_softmax(doc_scores, dim=1).unsqueeze(-1).unsqueeze(-1)

        # RAG-sequence marginalization
        first_token_scores = seq_logprobs[:, :, :1, :]
        second_token_scores = seq_logprobs[:, :, 1:2, :]
        remainder = seq_logprobs[:, :, 2:, :]
        rag_logprobs = torch.cat([first_token_scores, second_token_scores + doc_logprobs, remainder], dim=2)

        # calculate loss
        target = target.unsqueeze(1).unsqueeze(-1).repeat(1, n_docs, 1, 1)
        assert target.dim() == rag_logprobs.dim()

        ll = rag_logprobs.gather(dim=-1, index=target)
        smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True)  # total sum of all (normalised) log-probs

        ll, smooth_obj = _mask_pads(ll, smooth_obj)

        # sum over tokens, excluding bos while scoring
        ll = ll[:, :, 1:].sum(2) if exclude_bos_score and use_bos else ll.sum(2)
        smooth_obj = smooth_obj.sum(2)
        ll = ll.logsumexp(1)  # logsumexp over docs
        smooth_obj = smooth_obj.logsumexp(1)

        nll_loss = -ll
        smooth_loss = -smooth_obj

        if reduce_loss:
            nll_loss = nll_loss.sum()
            smooth_loss = smooth_loss.sum()

        eps_i = epsilon / rag_logprobs.size(-1)
        loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
        return loss
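
    # A sketch of the objective computed by `get_nll` above (explanatory note, not part of the original
    # implementation): RAG-sequence marginalizes over the retrieved documents at the *sequence* level,
    #
    #     log p(y | x) = logsumexp_z [ log p_retriever(z | x) + log p_generator(y | x, z) ]
    #
    # where z ranges over the n_docs retrieved documents. Because log p_generator(y | x, z) is a sum of
    # per-token log-probabilities, adding `doc_logprobs` once, at the second token position (skipping the
    # document-independent bos token), before summing over `tgt_len` and applying `logsumexp` over the doc
    # dimension is equivalent to the formula above.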

    @staticmethod
    def _cat_and_pad(tensors, pad_token_id):
        output = (
            tensors[0].new(sum([t.shape[0] for t in tensors]), max([t.shape[1] for t in tensors])).fill_(pad_token_id)
        )
        ind = 0
        for t in tensors:
            output[ind : ind + t.shape[0], : t.shape[1]] = t
            ind += t.shape[0]
        return output
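
    # Behavior sketch for `_cat_and_pad` (illustrative comment, not in the original source): given hypothesis
    # tensors of shapes (2, 3) and (1, 5), the result has shape (3, 5); each input is copied in top-to-bottom
    # and the remaining positions are filled with `pad_token_id`, e.g.
    #
    #     _cat_and_pad([torch.ones(2, 3, dtype=torch.long), torch.ones(1, 5, dtype=torch.long)], pad_token_id=0)
    #     # tensor([[1, 1, 1, 0, 0],
    #     #         [1, 1, 1, 0, 0],
    #     #         [1, 1, 1, 1, 1]])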


@add_start_docstrings_to_model_forward(
    """
    A RAG-token model implementation. It performs RAG-token specific marginalization in the forward pass.
    """,
    RAG_START_DOCSTRING,
)
class RagTokenForGeneration(RagPreTrainedModel):
    def __init__(
        self,
        config: Optional[PretrainedConfig] = None,
        question_encoder: Optional[PreTrainedModel] = None,
        generator: Optional[PreTrainedModel] = None,
        retriever: Optional[RagRetriever] = None,
        **kwargs,
    ):
        assert config is not None or (
            question_encoder is not None and generator is not None
        ), "Either a configuration or an encoder and a generator has to be provided."

        if config is None:
            config = RagConfig.from_question_encoder_generator_configs(
                question_encoder.config, generator.config, **kwargs
            )

        super().__init__(config)

        # instantiate model
        self.rag = RagModel(config=config, question_encoder=question_encoder, generator=generator, retriever=retriever)

    def set_retriever(self, retriever: RagRetriever):
        self.rag.retriever = retriever

    def set_context_encoder_for_training(self, ctx_encoder: PreTrainedModel):
        self.rag.context_encoder_training = True
        self.rag.ctx_encoder = ctx_encoder

    def prepare_inputs_for_generation(
        self,
        decoder_input_ids,
        past_key_values=None,
        attention_mask=None,
        use_cache=None,
        encoder_outputs=None,
        doc_scores=None,
        n_docs=None,
        **kwargs,
    ):
        # Overwritten -- `do_marginalize` is explicitly set in the output

        if past_key_values is not None:
            # if past is defined use only last decoder_input_ids
            decoder_input_ids = decoder_input_ids[:, -1:]

        return {
            "input_ids": None,
            "encoder_outputs": encoder_outputs,
            "doc_scores": doc_scores,
            "context_attention_mask": attention_mask,
            "decoder_input_ids": decoder_input_ids,
            "past_key_values": past_key_values,
            "use_cache": use_cache,
            "do_marginalize": True,
            "n_docs": n_docs,
        }

    @property
    def retriever(self):
        return self.rag.retriever

    @property
    def generator(self):
        return self.rag.generator

    @property
    def question_encoder(self):
        return self.rag.question_encoder

    @staticmethod
    def _reorder_cache(past_key_values, beam_idx):
        """Reorders cache for generation. BART-inspired but we need to take care of the extra dimension for docs"""

        def _reorder_stacked(hidden_states, new_order):
            n_docs = hidden_states.shape[0] // new_order.shape[0]
            hidden_states = hidden_states.view(-1, n_docs, *hidden_states.shape[1:])
            hidden_states = hidden_states.index_select(0, new_order)
            result = hidden_states.view(-1, *hidden_states.shape[2:])
            return result

        reordered_past = ()
        for layer_past in past_key_values:
            # get the correct batch idx from decoder layer's batch dim for cross and self-attn
            reordered_past += (
                tuple(_reorder_stacked(past_state, beam_idx.to(past_state.device)) for past_state in layer_past),
            )

        return reordered_past
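
    # Shape walkthrough for `_reorder_stacked` above (explanatory comment, not in the original source): cache
    # tensors carry the doc dimension folded into the batch dimension, i.e. (effective_batch * n_docs, ...).
    # To reorder by `beam_idx` (one index per entry of the effective batch), the tensor is first unfolded to
    # (effective_batch, n_docs, ...), the batch dimension is permuted with `index_select`, and the result is
    # folded back to (effective_batch * n_docs, ...), so each beam keeps its full block of per-document cache.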

    def marginalize(self, seq_logits, doc_scores, n_docs=None):
        n_docs = n_docs if n_docs is not None else self.config.n_docs

        # RAG-token marginalization
        seq_logprobs = nn.functional.log_softmax(seq_logits, dim=-1).view(
            seq_logits.shape[0] // n_docs, n_docs, -1, seq_logits.size(-1)
        )
        doc_logprobs = torch.log_softmax(doc_scores, dim=1)
        log_prob_sum = seq_logprobs + doc_logprobs.unsqueeze(-1).unsqueeze(-1)
        return torch.logsumexp(log_prob_sum, dim=1)
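
    # A sketch of the quantity computed by `marginalize` above (explanatory note, not part of the original
    # implementation): in contrast to RAG-sequence, RAG-token marginalizes over documents at *every* token
    # position,
    #
    #     log p(y_t | x, y_<t) = logsumexp_z [ log p_retriever(z | x) + log p_generator(y_t | x, z, y_<t) ]
    #
    # which is why the doc log-probabilities are broadcast over the full (tgt_len, vocab) grid here rather
    # than being added at a single token position as in `RagSequenceForGeneration.get_nll`.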

    @add_start_docstrings_to_model_forward(RAG_FORWARD_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=RetrievAugLMMarginOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_outputs: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        decoder_input_ids: Optional[torch.LongTensor] = None,
        decoder_attention_mask: Optional[torch.BoolTensor] = None,
        past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None,
        context_input_ids: Optional[torch.LongTensor] = None,
        context_attention_mask: Optional[torch.LongTensor] = None,
        doc_scores: Optional[torch.FloatTensor] = None,
        use_cache: Optional[bool] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        output_retrieved: Optional[bool] = None,
        do_marginalize: Optional[bool] = None,
        reduce_loss: Optional[bool] = None,
        labels: Optional[torch.LongTensor] = None,
        n_docs: Optional[int] = None,
        **kwargs,  # needs kwargs for generation
    ) -> RetrievAugLMMarginOutput:
  1073. r"""
  1074. do_marginalize (`bool`, *optional*):
  1075. If `True`, the logits are marginalized over all documents by making use of
  1076. `torch.nn.functional.log_softmax`.
  1077. reduce_loss (`bool`, *optional*):
  1078. Only relevant if `labels` is passed. If `True`, the NLL loss is reduced using the `torch.Tensor.sum`
  1079. operation.
  1080. kwargs (`Dict[str, any]`, *optional*, defaults to `{}`):
  1081. Legacy dictionary, which is required so that model can use *generate()* function.
  1082. Returns:
  1083. Example:
  1084. ```python
  1085. >>> from transformers import AutoTokenizer, RagRetriever, RagTokenForGeneration
  1086. >>> import torch
  1087. >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq")
  1088. >>> retriever = RagRetriever.from_pretrained(
  1089. ... "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
  1090. ... )
  1091. >>> # initialize with RagRetriever to do everything in one forward call
  1092. >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)
  1093. >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
  1094. >>> targets = tokenizer(text_target="In Paris, there are 10 million people.", return_tensors="pt")
  1095. >>> input_ids = inputs["input_ids"]
  1096. >>> labels = targets["input_ids"]
  1097. >>> outputs = model(input_ids=input_ids, labels=labels)
  1098. >>> # or use retriever separately
  1099. >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", use_dummy_dataset=True)
  1100. >>> # 1. Encode
  1101. >>> question_hidden_states = model.question_encoder(input_ids)[0]
  1102. >>> # 2. Retrieve
  1103. >>> docs_dict = retriever(input_ids.numpy(), question_hidden_states.detach().numpy(), return_tensors="pt")
  1104. >>> doc_scores = torch.bmm(
  1105. ... question_hidden_states.unsqueeze(1), docs_dict["retrieved_doc_embeds"].float().transpose(1, 2)
  1106. ... ).squeeze(1)
  1107. >>> # 3. Forward to generator
  1108. >>> outputs = model(
  1109. ... context_input_ids=docs_dict["context_input_ids"],
  1110. ... context_attention_mask=docs_dict["context_attention_mask"],
  1111. ... doc_scores=doc_scores,
  1112. ... decoder_input_ids=labels,
  1113. ... )
  1114. >>> # or directly generate
  1115. >>> generated = model.generate(
  1116. ... context_input_ids=docs_dict["context_input_ids"],
  1117. ... context_attention_mask=docs_dict["context_attention_mask"],
  1118. ... doc_scores=doc_scores,
  1119. ... )
  1120. >>> generated_string = tokenizer.batch_decode(generated, skip_special_tokens=True)
  1121. ```"""
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        do_marginalize = do_marginalize if do_marginalize is not None else self.config.do_marginalize
        reduce_loss = reduce_loss if reduce_loss is not None else self.config.reduce_loss

        if labels is not None:
            if decoder_input_ids is None:
                decoder_input_ids = labels
            use_cache = False

        outputs = self.rag(
            input_ids=input_ids,
            attention_mask=attention_mask,
            encoder_outputs=encoder_outputs,
            decoder_input_ids=decoder_input_ids,
            decoder_attention_mask=decoder_attention_mask,
            context_input_ids=context_input_ids,
            context_attention_mask=context_attention_mask,
            doc_scores=doc_scores,
            past_key_values=past_key_values,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            output_retrieved=output_retrieved,
            n_docs=n_docs,
        )

        loss = None
        logits = outputs.logits
        if labels is not None:
            assert decoder_input_ids is not None
            loss = self.get_nll(
                outputs.logits,
                outputs.doc_scores,
                labels,
                reduce_loss=reduce_loss,
                epsilon=self.config.label_smoothing,
                n_docs=n_docs,
            )

        if do_marginalize:
            logits = self.marginalize(logits, outputs.doc_scores, n_docs)

        return RetrievAugLMMarginOutput(
            loss=loss,
            logits=logits,
            doc_scores=outputs.doc_scores,
            past_key_values=outputs.past_key_values,
            context_input_ids=outputs.context_input_ids,
            context_attention_mask=outputs.context_attention_mask,
            retrieved_doc_embeds=outputs.retrieved_doc_embeds,
            retrieved_doc_ids=outputs.retrieved_doc_ids,
            question_encoder_last_hidden_state=outputs.question_encoder_last_hidden_state,
            question_enc_hidden_states=outputs.question_enc_hidden_states,
            question_enc_attentions=outputs.question_enc_attentions,
            generator_enc_last_hidden_state=outputs.generator_enc_last_hidden_state,
            generator_enc_hidden_states=outputs.generator_enc_hidden_states,
            generator_enc_attentions=outputs.generator_enc_attentions,
            generator_dec_hidden_states=outputs.generator_dec_hidden_states,
            generator_dec_attentions=outputs.generator_dec_attentions,
            generator_cross_attentions=outputs.generator_cross_attentions,
        )

    @torch.no_grad()
    def generate(
        self,
        input_ids: Optional[torch.LongTensor] = None,
        attention_mask: Optional[torch.LongTensor] = None,
        context_input_ids: Optional[torch.LongTensor] = None,
        context_attention_mask: Optional[torch.LongTensor] = None,
        doc_scores: Optional[torch.FloatTensor] = None,
        n_docs: Optional[int] = None,
        generation_config: Optional[GenerationConfig] = None,
        prefix_allowed_tokens_fn: Optional[Callable[[int, torch.Tensor], List[int]]] = None,
        logits_processor: Optional[LogitsProcessorList] = LogitsProcessorList(),
        stopping_criteria: Optional[StoppingCriteriaList] = StoppingCriteriaList(),
        **kwargs,
    ) -> torch.LongTensor:
  1193. """
  1194. Implements RAG token decoding.
  1195. Args:
  1196. input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
  1197. The sequence used as a prompt for the generation. If `input_ids` is not passed, then
  1198. `context_input_ids` has to be provided.
  1199. attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
  1200. Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
  1201. - 1 for tokens that are **not masked**,
  1202. - 0 for tokens that are **masked**.
  1203. [What are attention masks?](../glossary#attention-mask)
  1204. context_input_ids (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
  1205. Input IDs post-processed from the retrieved documents and the question encoder `input_ids` by the
  1206. retriever.
  1207. If the model has is not initialized with a `retriever`, `context_input_ids` has to be provided to the
  1208. forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
  1209. context_attention_mask (`torch.LongTensor` of shape `(batch_size * config.n_docs, config.max_combined_length)`, *optional*, returned when *output_retrieved=True*):
  1210. Attention mask post-processed from the retrieved documents and the question encoder `input_ids` by the
  1211. retriever.
  1212. If the model has is not initialized with a `retriever`, `context_input_ids` has to be provided to the
  1213. forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
  1214. doc_scores (`torch.FloatTensor` of shape `(batch_size, config.n_docs)`):
  1215. Score between each retrieved document embeddings (see `retrieved_doc_embeds`) and
  1216. `question_encoder_last_hidden_state`.
  1217. If the model has is not initialized with a `retriever`, `context_input_ids` has to be provided to the
  1218. forward pass. `context_input_ids` are returned by [`~RagRetriever.__call__`].
  1219. n_docs (`int`, *optional*, defaults to `config.n_docs`)
  1220. Number of documents to retrieve and/or number of documents for which to generate an answer.
  1221. generation_config (`~generation.GenerationConfig`, *optional*):
  1222. The generation configuration to be used as base parametrization for the generation call. `**kwargs`
  1223. passed to generate matching the attributes of `generation_config` will override them. If
  1224. `generation_config` is not provided, the default will be used, which has the following loading
  1225. priority: 1) from the `generation_config.json` model file, if it exists; 2) from the model
  1226. configuration. Please note that unspecified parameters will inherit [`~generation.GenerationConfig`]'s
  1227. default values, whose documentation should be checked to parameterize generation.
  1228. prefix_allowed_tokens_fn (`Callable[[int, torch.Tensor], List[int]]`, *optional*):
  1229. If provided, this function constraints the beam search to allowed tokens only at each step. If not
  1230. provided no constraint is applied. This function takes 2 arguments `inputs_ids` and the batch ID
  1231. `batch_id`. It has to return a list with the allowed tokens for the next generation step conditioned on
  1232. the previously generated tokens `inputs_ids` and the batch ID `batch_id`. This argument is useful for
  1233. constrained generation conditioned on the prefix, as described in [Autoregressive Entity
  1234. Retrieval](https://arxiv.org/abs/2010.00904).
  1235. logits_processor (`LogitsProcessorList`, *optional*):
  1236. Custom logits processors that complement the default logits processors built from arguments and a
  1237. model's config. If a logit processor is passed that is already created with the arguments or a model's
  1238. config an error is thrown.
  1239. stopping_criteria (`StoppingCriteriaList`, *optional*):
  1240. Custom stopping criteria that complement the default stopping criteria built from arguments and a
  1241. model's config. If a stopping criteria is passed that is already created with the arguments or a
  1242. model's config an error is thrown.
  1243. kwargs (`Dict[str, Any]`, *optional*):
  1244. Ad hoc parametrization of `generate_config` and/or additional model-specific kwargs that will be
  1245. forwarded to the `forward` function of the model.
  1246. Return:
  1247. `torch.LongTensor` of shape `(batch_size * num_return_sequences, sequence_length)`: The generated
  1248. sequences. The second dimension (sequence_length) is either equal to `max_length` or shorter if all batches
  1249. finished early due to the `eos_token_id`.
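
        Example (a minimal usage sketch; the `facebook/rag-token-nq` checkpoint and the dummy index are assumed here
        purely for illustration):

        ```python
        >>> from transformers import AutoTokenizer, RagRetriever, RagTokenForGeneration

        >>> # illustrative checkpoint and index; adjust to your own model/index
        >>> tokenizer = AutoTokenizer.from_pretrained("facebook/rag-token-nq")
        >>> retriever = RagRetriever.from_pretrained(
        ...     "facebook/rag-token-nq", index_name="exact", use_dummy_dataset=True
        ... )
        >>> model = RagTokenForGeneration.from_pretrained("facebook/rag-token-nq", retriever=retriever)

        >>> inputs = tokenizer("How many people live in Paris?", return_tensors="pt")
        >>> generated = model.generate(input_ids=inputs["input_ids"], num_beams=4, num_return_sequences=2)
        >>> generated_strings = tokenizer.batch_decode(generated, skip_special_tokens=True)
        ```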
  1250. """
        # Handle `generation_config` and kwargs that might update it
        if generation_config is None:
            generation_config = self.generation_config
        generation_config = copy.deepcopy(generation_config)
        model_kwargs = generation_config.update(**kwargs)  # All unused kwargs must be model kwargs
        kwargs_has_attention_mask = model_kwargs.get("attention_mask", None) is not None
        self._prepare_special_tokens(generation_config, kwargs_has_attention_mask)

        # set default parameters
        n_docs = n_docs if n_docs is not None else self.config.n_docs

        # retrieve docs
        if self.retriever is not None and context_input_ids is None:
            question_hidden_states = self.question_encoder(input_ids, attention_mask=attention_mask)[0]
            out = self.retriever(
                input_ids,
                question_hidden_states.cpu().detach().to(torch.float32).numpy(),
                prefix=self.generator.config.prefix,
                n_docs=n_docs,
                return_tensors="pt",
            )
            context_input_ids, context_attention_mask, retrieved_doc_embeds = (
                out["context_input_ids"],
                out["context_attention_mask"],
                out["retrieved_doc_embeds"],
            )

            # set to correct device
            retrieved_doc_embeds = retrieved_doc_embeds.to(question_hidden_states)
            context_input_ids = context_input_ids.to(input_ids)
            context_attention_mask = context_attention_mask.to(input_ids)

            # compute doc_scores
            doc_scores = torch.bmm(question_hidden_states.unsqueeze(1), retrieved_doc_embeds.transpose(1, 2)).squeeze(
                1
            )

        assert (context_input_ids.shape[0] % n_docs) == 0, (
            f"The first dimension of `context_input_ids` should be a multiple of `n_docs`={n_docs}, but is"
            f" {context_input_ids.shape[0]}."
        )

        # batch_size
        batch_size = context_input_ids.shape[0] // n_docs

        encoder = self.rag.generator.get_encoder()
        encoder_outputs = encoder(input_ids=context_input_ids, attention_mask=context_attention_mask, return_dict=True)

        input_ids = torch.full(
            (batch_size * generation_config.num_beams, 1),
            generation_config.decoder_start_token_id,
            dtype=torch.long,
            device=next(self.parameters()).device,
        )
        input_ids_seq_length = input_ids.shape[-1]
        last_hidden_state = encoder_outputs["last_hidden_state"]

        def extend_enc_output(tensor, num_beams=None):
            # split into `batch_size`, `num_beams`, `n_docs`
            tensor = tensor[None, None, :].reshape((batch_size, 1, n_docs) + tensor.shape[1:])
            # repeat the same last hidden states over the `num_beams` dimension
            tensor = tensor.expand((batch_size, num_beams, n_docs) + tensor.shape[3:])
            # merge `batch_size`, `num_beams`, `n_docs` dims again
            return tensor.reshape((batch_size * num_beams * n_docs,) + tensor.shape[3:])
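
        # Shape sketch for `extend_enc_output` (explanatory comment, not in the original source): for an input
        # of shape (batch_size * n_docs, ...) the transformation is
        #     (batch_size * n_docs, ...) -> (batch_size, 1, n_docs, ...)         # reshape
        #                                -> (batch_size, num_beams, n_docs, ...)  # expand (no copy)
        #                                -> (batch_size * num_beams * n_docs, ...)
        # so every beam of a given example sees the same n_docs encoder states.
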
        # correctly extend last_hidden_state and attention mask
        context_attention_mask = extend_enc_output(context_attention_mask, num_beams=generation_config.num_beams)
        encoder_outputs["last_hidden_state"] = extend_enc_output(
            last_hidden_state, num_beams=generation_config.num_beams
        )

        doc_scores = doc_scores.repeat_interleave(generation_config.num_beams, dim=0)

        # define start_len & additional parameters
        model_kwargs["doc_scores"] = doc_scores
        model_kwargs["encoder_outputs"] = encoder_outputs
        model_kwargs["attention_mask"] = context_attention_mask
        model_kwargs["n_docs"] = n_docs

        pre_processor = self._get_logits_processor(
            generation_config=generation_config,
            input_ids_seq_length=input_ids_seq_length,
            encoder_input_ids=context_input_ids,
            prefix_allowed_tokens_fn=prefix_allowed_tokens_fn,
            logits_processor=logits_processor,
            device=input_ids.device,
        )

        prepared_stopping_criteria = self._get_stopping_criteria(
            generation_config=generation_config, stopping_criteria=stopping_criteria
        )

        if generation_config.num_beams == 1:
            if generation_config.num_return_sequences > 1:
                raise ValueError(
                    f"num_return_sequences has to be 1, but is {generation_config.num_return_sequences} when doing"
                    " greedy search."
                )
            return self._sample(
                input_ids,
                logits_processor=pre_processor,
                stopping_criteria=prepared_stopping_criteria,
                generation_config=generation_config,
                synced_gpus=False,
                streamer=None,
                **model_kwargs,
            )
        elif generation_config.num_beams > 1:
            if generation_config.num_return_sequences > generation_config.num_beams:
                raise ValueError("`num_return_sequences` has to be smaller or equal to `num_beams`.")
            beam_scorer = BeamSearchScorer(
                batch_size=batch_size,
                num_beams=generation_config.num_beams,
                device=self.device,
                length_penalty=generation_config.length_penalty,
                do_early_stopping=generation_config.early_stopping,
                num_beam_hyps_to_keep=generation_config.num_return_sequences,
                max_length=generation_config.max_length,
            )
            return self._beam_search(
                input_ids,
                beam_scorer,
                logits_processor=pre_processor,
                stopping_criteria=prepared_stopping_criteria,
                generation_config=generation_config,
                synced_gpus=False,
                **model_kwargs,
            )
        else:
            raise ValueError(
                f"`num_beams` has to be a strictly positive integer (>= 1), but is {generation_config.num_beams}"
            )

    def get_input_embeddings(self):
        return self.rag.generator.get_input_embeddings()

    def get_output_embeddings(self):
        return self.rag.generator.get_output_embeddings()

    def set_output_embeddings(self, new_embeddings):
        return self.rag.generator.set_output_embeddings(new_embeddings)

    def shift_tokens_right(self, input_ids, start_token_id=None):
        """Shift input ids one token to the right, and pad with start_token_id."""
        if start_token_id is None:
            start_token_id = self.config.decoder_start_token_id
        shifted_input_ids = input_ids.new_zeros(input_ids.shape)
        shifted_input_ids[:, 1:] = input_ids[:, :-1].clone()
        shifted_input_ids[:, 0] = start_token_id
        return shifted_input_ids
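
    # Behavior sketch for `shift_tokens_right` (illustrative comment, not in the original source): with
    # start_token_id = 0, a row [5, 6, 7] becomes [0, 5, 6]; the last token is dropped and the start token
    # is prepended, which turns `labels` into the matching `decoder_input_ids` for teacher forcing.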

    def get_nll(self, seq_logits, doc_scores, target, reduce_loss=False, epsilon=0.0, n_docs=None):
        n_docs = n_docs if n_docs is not None else self.config.n_docs
        # shift tokens left
        target = torch.cat(
            [target[:, 1:], target.new(target.shape[0], 1).fill_(self.config.generator.pad_token_id)], 1
        )

        def _mask_pads(ll, smooth_obj):
            pad_mask = target.eq(self.config.generator.pad_token_id)
            if pad_mask.any():
                ll.masked_fill_(pad_mask, 0.0)
                smooth_obj.masked_fill_(pad_mask, 0.0)
            return ll.squeeze(-1), smooth_obj.squeeze(-1)

        rag_logprobs = self.marginalize(seq_logits, doc_scores, n_docs)

        target = target.unsqueeze(-1)
        assert target.dim() == rag_logprobs.dim()

        ll = rag_logprobs.gather(dim=-1, index=target)
        smooth_obj = rag_logprobs.sum(dim=-1, keepdim=True)  # total sum of all (normalised) log-probs
        ll, smooth_obj = _mask_pads(ll, smooth_obj)
        ll = ll.sum(1)  # sum over tokens
        smooth_obj = smooth_obj.sum(1)

        nll_loss = -ll
        smooth_loss = -smooth_obj

        if reduce_loss:
            nll_loss = nll_loss.sum()
            smooth_loss = smooth_loss.sum()

        eps_i = epsilon / rag_logprobs.size(-1)
        loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
        return loss
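
    # Label-smoothing sketch for the loss above (explanatory note, not part of the original implementation):
    # `nll_loss` is the negative log-likelihood of the gold tokens and `smooth_loss` is minus the sum of the
    # log-probabilities over the whole vocabulary; mixing them as
    #     loss = (1 - epsilon) * nll_loss + (epsilon / vocab_size) * smooth_loss
    # is the usual label-smoothing objective, spreading a fraction `epsilon` of the target mass uniformly
    # over the vocabulary.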