# coding=utf-8
# Copyright 2018 The HuggingFace Inc. team, The Hugging Face Team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tokenization classes for DPR."""

import collections
from typing import List, Optional, Tuple, Union

from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}


class DPRContextEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPRContextEncoder tokenizer.

    [`DPRContextEncoderTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation
    splitting and wordpiece.

    Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters.
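
    Example (an illustrative sketch; assumes the public `facebook/dpr-ctx_encoder-single-nq-base` checkpoint is
    available on the Hub):

    ```python
    >>> from transformers import DPRContextEncoderTokenizer

    >>> tokenizer = DPRContextEncoderTokenizer.from_pretrained("facebook/dpr-ctx_encoder-single-nq-base")
    >>> inputs = tokenizer("Paris is the capital of France.")
    >>> list(inputs.keys())
    ['input_ids', 'token_type_ids', 'attention_mask']
    ```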
  29. """
  30. vocab_files_names = VOCAB_FILES_NAMES


class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPRQuestionEncoder tokenizer.

    [`DPRQuestionEncoderTokenizer`] is identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation
    splitting and wordpiece.

    Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters.
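
    Example (an illustrative sketch; assumes the public `facebook/dpr-question_encoder-single-nq-base` checkpoint is
    available on the Hub):

    ```python
    >>> from transformers import DPRQuestionEncoderTokenizer

    >>> tokenizer = DPRQuestionEncoderTokenizer.from_pretrained("facebook/dpr-question_encoder-single-nq-base")
    >>> inputs = tokenizer("What is the capital of France?")
    >>> list(inputs.keys())
    ['input_ids', 'token_type_ids', 'attention_mask']
    ```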
  37. """
  38. vocab_files_names = VOCAB_FILES_NAMES


DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = r"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) into a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    ```
    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
    ```

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              is provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a
              batch of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              first sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length is
            required by one of the truncation/padding parameters. If the model has no specific maximum input length
            (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Returns:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
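
    Example (an illustrative sketch of the question-broadcasting behavior; the second passage title and text are
    invented for illustration, and the public `facebook/dpr-reader-single-nq-base` checkpoint is assumed):

    ```python
    >>> from transformers import DPRReaderTokenizer

    >>> tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
    >>> # A single question is duplicated across both passages, so `input_ids` has two rows.
    >>> encoded = tokenizer(
    ...     questions="What is love ?",
    ...     titles=["Haddaway", "What Is Love (disambiguation)"],
    ...     texts=["'What Is Love' is a song recorded by the artist Haddaway", "'What Is Love' may refer to a song"],
    ... )
    >>> len(encoded["input_ids"])
    2
    ```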
  100. """


@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions: Union[str, List[str]],
        titles: Optional[Union[str, List[str]]] = None,
        texts: Optional[Union[str, List[str]]] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            # Only one of titles/texts was given: encode it as a regular text pair with the questions.
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # A single question is broadcast to all passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles as texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """
        Get the span predictions for the extractive Q&A model.

        Returns: *List* of *DPRSpanPrediction* sorted by descending *(relevance_score, span_score)*. Each
        *DPRSpanPrediction* is a *Tuple* with:

        - **span_score**: `float` that corresponds to the score given by the reader for this span compared to other
          spans in the same passage. It corresponds to the sum of the start and end logits of the span.
        - **relevance_score**: `float` that corresponds to the score of each passage to answer the question, compared
          to all the other passages. It corresponds to the output of the QA classifier of the DPRReader.
        - **doc_id**: `int` the id of the passage.
        - **start_index**: `int` the start index of the span (inclusive).
        - **end_index**: `int` the end index of the span (inclusive).
        - **text**: `str` the text of the predicted span, decoded from the passage tokens.

        Examples:

        ```python
        >>> from transformers import DPRReader, DPRReaderTokenizer

        >>> tokenizer = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
        >>> model = DPRReader.from_pretrained("facebook/dpr-reader-single-nq-base")
        >>> encoded_inputs = tokenizer(
        ...     questions=["What is love ?"],
        ...     titles=["Haddaway"],
        ...     texts=["'What Is Love' is a song recorded by the artist Haddaway"],
        ...     return_tensors="pt",
        ... )
        >>> outputs = model(**encoded_inputs)
        >>> predicted_spans = tokenizer.decode_best_spans(encoded_inputs, outputs)
        >>> print(predicted_spans[0].text)  # best span
        a song
        ```"""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits: List[float],
        end_logits: List[float],
        max_answer_length: int,
        top_spans: int,
    ) -> List[Tuple[int, int]]:
        """
        Finds the best answer spans for the extractive Q&A model for one passage. It returns the best spans by
        descending `span_score` order, keeping at most `top_spans` spans. Spans longer than `max_answer_length` are
        ignored.
        """
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            # Skip candidates that overlap an already chosen, higher-scoring span.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals


@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    r"""
    Construct a DPRReader tokenizer.

    [`DPRReaderTokenizer`] is almost identical to [`BertTokenizer`] and runs end-to-end tokenization: punctuation
    splitting and wordpiece. The difference is that it has three input strings: question, titles and texts that are
    combined to be fed to the [`DPRReader`] model.

    Refer to superclass [`BertTokenizer`] for usage examples and documentation concerning parameters.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    model_input_names = ["input_ids", "attention_mask"]