# text2text_generation.py
import enum
import warnings

from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import Pipeline, build_pipeline_init_args


if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES

logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
class Text2TextGenerationPipeline(Pipeline):
    """
    Pipeline for text-to-text generation using seq2seq models.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> generator = pipeline(model="mrm8488/t5-base-finetuned-question-generation-ap")
    >>> generator(
    ...     "answer: Manuel context: Manuel has created RuPERTa-base with the support of HF-Transformers and Google"
    ... )
    [{'generated_text': 'question: Who created the RuPERTa-base?'}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text
    generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about
    text generation parameters in [Text generation strategies](../generation_strategies) and [Text
    generation](text_generation).

    This Text2TextGenerationPipeline pipeline can currently be loaded from [`pipeline`] using the following task
    identifier: `"text2text-generation"`.

    The models that this pipeline can use are models that have been fine-tuned on a text-to-text generation task. See
    the up-to-date list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=text2text-generation). For a list of available
    parameters, see the [following
    documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate).

    Usage:

    ```python
    text2text_generator = pipeline("text2text-generation")
    text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything")
    ```"""

    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES
        )

    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple-token sequence is not yet supported in transformers. Only the first"
                    " token of the stop sequence will be used as the stop token in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
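
    # A minimal usage sketch (the model name is illustrative, not prescriptive): a
    # `stop_sequence` passed at call time is routed through `_sanitize_parameters`,
    # which encodes the string and forwards its first token id as `eos_token_id`
    # to `generate`:
    #
    #     generator = pipeline("text2text-generation", model="google-t5/t5-small")
    #     generator("translate English to German: How are you?", stop_sequence=".")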

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """
        Checks whether there might be something wrong with the given input with regard to the model.
        """
        return True

    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.prefix if self.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True
        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is not a valid generate kwarg
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        r"""
        Generate the output text(s) using text(s) given as inputs.

        Args:
            args (`str` or `List[str]`):
                Input text for the encoder.
            return_tensors (`bool`, *optional*, defaults to `False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            return_text (`bool`, *optional*, defaults to `True`):
                Whether or not to include the decoded texts in the outputs.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the potential extra spaces in the text output.
            truncation (`TruncationStrategy`, *optional*, defaults to `TruncationStrategy.DO_NOT_TRUNCATE`):
                The truncation strategy for the tokenization within the pipeline. `TruncationStrategy.DO_NOT_TRUNCATE`
                (default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's
                max_length instead of throwing an error down the line.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./text_generation)).

        Return:
            A list or a list of lists of `dict`: Each result comes as a dictionary with the following keys:

            - **generated_text** (`str`, present when `return_text=True`) -- The generated text.
            - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
              ids of the generated text.
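
        Example (a minimal sketch; the checkpoint name is illustrative and the output is model-dependent):

        ```python
        >>> from transformers import pipeline

        >>> generator = pipeline("text2text-generation", model="google-t5/t5-small")
        >>> generator("translate English to German: How old are you?")
        ```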
  131. """
  132. result = super().__call__(*args, **kwargs)
  133. if (
  134. isinstance(args[0], list)
  135. and all(isinstance(el, str) for el in args[0])
  136. and all(len(res) == 1 for res in result)
  137. ):
  138. return [res[0] for res in result]
  139. return result
  140. def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
  141. inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
  142. return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        self.check_inputs(
            input_length,
            generate_kwargs.get("min_length", self.generation_config.min_length),
            generate_kwargs.get("max_length", self.generation_config.max_length),
        )

        # A user-defined `generation_config` passed to the pipeline call takes precedence
        if "generation_config" not in generate_kwargs:
            generate_kwargs["generation_config"] = self.generation_config

        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
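        # `generate` returns one row per generated sequence, so `out_b` equals
        # `in_b * num_return_sequences`; the reshape below groups the sequences
        # generated for each input along a new second dimension.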
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
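
    # A minimal sketch (the model name is an assumption): with `num_return_sequences=2`,
    # `_forward` groups both generations under the single input, and `postprocess`
    # emits one record per generated sequence:
    #
    #     generator = pipeline("text2text-generation", model="google-t5/t5-small")
    #     generator("translate English to German: hi", num_beams=2, num_return_sequences=2)
    #     # -> [{'generated_text': ...}, {'generated_text': ...}]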


@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
class SummarizationPipeline(Text2TextGenerationPipeline):
    """
    Summarize news articles and other documents.

    This summarizing pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"summarization"`.

    The models that this pipeline can use are models that have been fine-tuned on a summarization task, which are
    currently '*bart-large-cnn*', '*google-t5/t5-small*', '*google-t5/t5-base*', '*google-t5/t5-large*',
    '*google-t5/t5-3b*' and '*google-t5/t5-11b*'. See the up-to-date list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=summarization). For a list of available parameters,
    see the [following
    documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate).

    Usage:

    ```python
    # use bart in pytorch
    summarizer = pipeline("summarization")
    summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)

    # use t5 in tf
    summarizer = pipeline("summarization", model="google-t5/t5-base", tokenizer="google-t5/t5-base", framework="tf")
    summarizer("An apple a day, keeps the doctor away", min_length=5, max_length=20)
    ```"""

    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        r"""
        Summarize the text(s) given as inputs.

        Args:
            documents (*str* or `List[str]`):
                One or several articles (or one list of articles) to summarize.
            return_text (`bool`, *optional*, defaults to `True`):
                Whether or not to include the decoded texts in the outputs.
            return_tensors (`bool`, *optional*, defaults to `False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the potential extra spaces in the text output.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./text_generation)).

        Return:
            A list or a list of lists of `dict`: Each result comes as a dictionary with the following keys:

            - **summary_text** (`str`, present when `return_text=True`) -- The summary of the corresponding input.
            - **summary_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
              ids of the summary.
        """
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """
        Checks whether there might be something wrong with the given input with regard to the model.
        """
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be smaller than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length // 2})"
            )
        return True
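
    # For example, an input of 100 tokens summarized with max_length=200 triggers the
    # second warning and suggests retrying with max_length=50 (i.e. 100 // 2).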


@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
class TranslationPipeline(Text2TextGenerationPipeline):
    """
    Translates from one language to another.

    This translation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"translation_xx_to_yy"`.

    The models that this pipeline can use are models that have been fine-tuned on a translation task. See the
    up-to-date list of available models on [huggingface.co/models](https://huggingface.co/models?filter=translation).
    For a list of available parameters, see the [following
    documentation](https://huggingface.co/docs/transformers/en/main_classes/text_generation#transformers.generation.GenerationMixin.generate).

    Usage:

    ```python
    en_fr_translator = pipeline("translation_en_to_fr")
    en_fr_translator("How old are you?")
    ```"""

    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {0.9 * max_length}. You might "
                "consider increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)
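
    # Note: `_build_translation_inputs` is only defined on multilingual tokenizers (e.g.
    # MBart- or M2M100-style tokenizers) that need `src_lang`/`tgt_lang` to insert language
    # tokens; tokenizers without it fall back to the generic `_parse_and_tokenize` path.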

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility; passing the arguments directly is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # task has the form "translation_XX_to_YY"
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params
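
    # For example (legacy task-identifier form): pipeline("translation_en_to_fr") sets
    # task = "translation_en_to_fr", which splits into ["translation", "en", "to", "fr"],
    # so src_lang="en" and tgt_lang="fr" are recovered when not passed explicitly.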

    def __call__(self, *args, **kwargs):
        r"""
        Translate the text(s) given as inputs.

        Args:
            args (`str` or `List[str]`):
                Texts to be translated.
            return_tensors (`bool`, *optional*, defaults to `False`):
                Whether or not to include the tensors of predictions (as token indices) in the outputs.
            return_text (`bool`, *optional*, defaults to `True`):
                Whether or not to include the decoded texts in the outputs.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
                Whether or not to clean up the potential extra spaces in the text output.
            src_lang (`str`, *optional*):
                The language of the input. Might be required for multilingual models. Will not have any effect for
                single pair translation models.
            tgt_lang (`str`, *optional*):
                The language of the desired output. Might be required for multilingual models. Will not have any effect
                for single pair translation models.
            generate_kwargs:
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./text_generation)).

        Return:
            A list or a list of lists of `dict`: Each result comes as a dictionary with the following keys:

            - **translation_text** (`str`, present when `return_text=True`) -- The translation.
            - **translation_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The
              token ids of the translation.
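
        Example (a sketch; the multilingual checkpoint and language codes are illustrative):

        ```python
        >>> from transformers import pipeline

        >>> translator = pipeline("translation", model="facebook/nllb-200-distilled-600M")
        >>> translator("How old are you?", src_lang="eng_Latn", tgt_lang="fra_Latn")
        ```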
  306. """
  307. return super().__call__(*args, **kwargs)