# coding=utf-8
# Copyright 2022 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
  15. """
  16. Processor class for Donut.
  17. """
  18. import re
  19. import warnings
  20. from contextlib import contextmanager
  21. from typing import List, Optional, Union
  22. from ...image_utils import ImageInput
  23. from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack
  24. from ...tokenization_utils_base import PreTokenizedInput, TextInput


class DonutProcessorKwargs(ProcessingKwargs, total=False):
    _defaults = {}


class DonutProcessor(ProcessorMixin):
    r"""
    Constructs a Donut processor which wraps a Donut image processor and an XLM-RoBERTa tokenizer into a single
    processor.

    [`DonutProcessor`] offers all the functionalities of [`DonutImageProcessor`] and
    [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. See [`~DonutProcessor.__call__`] and
    [`~DonutProcessor.decode`] for more information.

    Args:
        image_processor ([`DonutImageProcessor`], *optional*):
            An instance of [`DonutImageProcessor`]. The image processor is a required input.
        tokenizer ([`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`], *optional*):
            An instance of [`XLMRobertaTokenizer`/`XLMRobertaTokenizerFast`]. The tokenizer is a required input.
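
    Example, a minimal sketch of loading the processor from a pretrained checkpoint (the checkpoint name is
    illustrative; any Donut checkpoint on the Hub works):

    ```python
    >>> from transformers import DonutProcessor

    >>> processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")  # doctest: +SKIP
    >>> processor.image_processor, processor.tokenizer  # the two wrapped components  # doctest: +SKIP
    ```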
  39. """
  40. attributes = ["image_processor", "tokenizer"]
  41. image_processor_class = "AutoImageProcessor"
  42. tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(
        self,
        images: ImageInput = None,
        text: Optional[Union[str, List[str], TextInput, PreTokenizedInput]] = None,
        audio=None,
        videos=None,
        **kwargs: Unpack[DonutProcessorKwargs],
    ):
  68. """
  69. When used in normal mode, this method forwards all its arguments to AutoImageProcessor's
  70. [`~AutoImageProcessor.__call__`] and returns its output. If used in the context
  71. [`~DonutProcessor.as_target_processor`] this method forwards all its arguments to DonutTokenizer's
  72. [`~DonutTokenizer.__call__`]. Please refer to the doctsring of the above two methods for more information.
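
        Example, a minimal sketch of the normal mode (the checkpoint name and the blank test image are
        illustrative assumptions, not part of this module):

        ```python
        >>> from PIL import Image
        >>> from transformers import DonutProcessor

        >>> processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base")  # doctest: +SKIP
        >>> image = Image.new("RGB", (1920, 2560))  # stand-in for a real document image
        >>> inputs = processor(images=image, text="<s_cord-v2>", return_tensors="pt")  # doctest: +SKIP
        >>> sorted(inputs.keys())  # doctest: +SKIP
        ['input_ids', 'labels', 'pixel_values']
        ```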
  73. """
  74. # For backward compatibility
  75. if self._in_target_context_manager:
  76. return self.current_processor(images, text, **kwargs)
  77. if images is None and text is None:
  78. raise ValueError("You need to specify either an `images` or `text` input to process.")
  79. output_kwargs = self._merge_kwargs(
  80. DonutProcessorKwargs,
  81. tokenizer_init_kwargs=self.tokenizer.init_kwargs,
  82. **kwargs,
  83. )
  84. if images is not None:
  85. inputs = self.image_processor(images, **output_kwargs["images_kwargs"])
  86. if text is not None:
  87. encodings = self.tokenizer(text, **output_kwargs["text_kwargs"])
  88. if text is None:
  89. return inputs
  90. elif images is None:
  91. return encodings
  92. else:
  93. inputs["labels"] = encodings["input_ids"] # for BC
  94. inputs["input_ids"] = encodings["input_ids"]
  95. return inputs

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to DonutTokenizer's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to DonutTokenizer's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        """
        Temporarily sets the tokenizer for processing the input. Useful for encoding the labels when fine-tuning
        Donut.
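
        Example, the preferred replacement (a sketch; `processor` is assumed to be an already loaded
        [`DonutProcessor`]):

        ```python
        >>> # deprecated: encode labels inside the context manager
        >>> with processor.as_target_processor():  # doctest: +SKIP
        ...     labels = processor("target sequence")["input_ids"]

        >>> # preferred: pass `text=` to the regular call
        >>> labels = processor(text="target sequence")["input_ids"]  # doctest: +SKIP
        ```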
  112. """
  113. warnings.warn(
  114. "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
  115. "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
  116. "your images inputs, or in a separate call."
  117. )
  118. self._in_target_context_manager = True
  119. self.current_processor = self.tokenizer
  120. yield
  121. self.current_processor = self.image_processor
  122. self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        """
        Convert a (generated) token sequence into an ordered JSON format.
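
        Example, a hand-traceable sketch (the token string and `processor` are illustrative; a real sequence
        would come from model generation):

        ```python
        >>> processor.token2json("<s_menu><s_nm>Latte</s_nm><s_cnt>2</s_cnt></s_menu>")  # doctest: +SKIP
        {'menu': {'nm': 'Latte', 'cnt': '2'}}
        ```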
  126. """
  127. if added_vocab is None:
  128. added_vocab = self.tokenizer.get_added_vocab()
  129. output = {}
  130. while tokens:
  131. start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
  132. if start_token is None:
  133. break
  134. key = start_token.group(1)
  135. key_escaped = re.escape(key)
  136. end_token = re.search(rf"</s_{key_escaped}>", tokens, re.IGNORECASE)
  137. start_token = start_token.group()
  138. if end_token is None:
  139. tokens = tokens.replace(start_token, "")
  140. else:
  141. end_token = end_token.group()
  142. start_token_escaped = re.escape(start_token)
  143. end_token_escaped = re.escape(end_token)
  144. content = re.search(
  145. f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE | re.DOTALL
  146. )
  147. if content is not None:
  148. content = content.group(1).strip()
  149. if r"<s_" in content and r"</s_" in content: # non-leaf node
  150. value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
  151. if value:
  152. if len(value) == 1:
  153. value = value[0]
  154. output[key] = value
  155. else: # leaf nodes
  156. output[key] = []
  157. for leaf in content.split(r"<sep/>"):
  158. leaf = leaf.strip()
  159. if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
  160. leaf = leaf[1:-2] # for categorical special tokens
  161. output[key].append(leaf)
  162. if len(output[key]) == 1:
  163. output[key] = output[key][0]
  164. tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
  165. if tokens[:6] == r"<sep/>": # non-leaf nodes
  166. return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
  167. if len(output):
  168. return [output] if is_inner_value else output
  169. else:
  170. return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor