text_generation.py

import enum
import warnings
from typing import Dict

from ..utils import add_end_docstrings, is_tf_available, is_torch_available
from .base import Pipeline, build_pipeline_init_args


if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
    from .pt_utils import KeyDataset

if is_tf_available():
    import tensorflow as tf

    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES


class ReturnType(enum.Enum):
    TENSORS = 0
    NEW_TEXT = 1
    FULL_TEXT = 2


class Chat:
    """This class is intended to just be used internally in this pipeline and not exposed to users. We convert chats
    to this format because the rest of the pipeline code tends to assume that lists of messages are
    actually a batch of samples rather than messages in the same conversation."""

    def __init__(self, messages: Dict):
        for message in messages:
            if not ("role" in message and "content" in message):
                raise ValueError("When passing chat dicts as input, each dict must have a 'role' and 'content' key.")
        self.messages = messages
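
# Illustrative sketch (not exposed as public API): `Chat` only validates and wraps a single
# conversation so that downstream code can tell "one chat" apart from "a batch of string prompts".
#
#   messages = [
#       {"role": "user", "content": "Hi!"},
#       {"role": "assistant", "content": "Hello, how can I help?"},
#   ]
#   chat = Chat(messages)
#   chat.messages[-1]["role"]            # "assistant" -> later treated as a prefill by default
#   Chat([{"content": "missing role"}])  # raises ValueError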

@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True))
class TextGenerationPipeline(Pipeline):
    """
    Language generation pipeline using any `ModelWithLMHead`. This pipeline predicts the words that will follow a
    specified text prompt. When the underlying model is a conversational model, it can also accept one or more chats,
    in which case the pipeline will operate in chat mode and will continue the chat(s) by adding its response(s).
    Each chat takes the form of a list of dicts, where each dict contains "role" and "content" keys.

    Examples:

    ```python
    >>> from transformers import pipeline

    >>> generator = pipeline(model="openai-community/gpt2")
    >>> generator("I can't believe you did such a ", do_sample=False)
    [{'generated_text': "I can't believe you did such a icky thing to me. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I'm so sorry. I"}]

    >>> # These parameters will return 4 suggestions containing only the newly generated text, which makes prompting easier.
    >>> outputs = generator("My tart needs some", num_return_sequences=4, return_full_text=False)
    ```

    ```python
    >>> from transformers import pipeline

    >>> generator = pipeline(model="HuggingFaceH4/zephyr-7b-beta")
    >>> # Zephyr-beta is a conversational model, so let's pass it a chat instead of a single string
    >>> generator([{"role": "user", "content": "What is the capital of France? Answer in one word."}], do_sample=False, max_new_tokens=2)
    [{'generated_text': [{'role': 'user', 'content': 'What is the capital of France? Answer in one word.'}, {'role': 'assistant', 'content': 'Paris'}]}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial). You can pass text
    generation parameters to this pipeline to control stopping criteria, decoding strategy, and more. Learn more about
    text generation parameters in [Text generation strategies](../generation_strategies) and [Text
    generation](text_generation).

    This language generation pipeline can currently be loaded from [`pipeline`] using the following task identifier:
    `"text-generation"`.

    The models that this pipeline can use are models that have been trained with an autoregressive language modeling
    objective. See the list of available [text completion models](https://huggingface.co/models?filter=text-generation)
    and the list of [conversational models](https://huggingface.co/models?other=conversational)
    on [huggingface.co/models](https://huggingface.co/models).
    """

    # Prefix text to help Transformer-XL and XLNet with short prompts as proposed by Aman Rusia
    # in https://github.com/rusiaaman/XLNet-gen#methodology
    # and https://medium.com/@amanrusia/xlnet-speaks-comparison-to-gpt-2-ea1a4e9ba39e

    XL_PREFIX = """
    In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The
    voice of Nicholas's young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western
    Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision
    and denounces one of the men as a horse thief. Although his father initially slaps him for making such an
    accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of
    the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,
    begging for his blessing. <eod> </s> <eos>
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_CAUSAL_LM_MAPPING_NAMES if self.framework == "tf" else MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        )
        if "prefix" not in self._preprocess_params:
            # This is very specific. The logic is quite complex and needs to be done
            # as a "default".
            # It also defines both some preprocess_kwargs and generate_kwargs
            # which is why we cannot put them in their respective methods.
            prefix = None
            if self.prefix is not None:
                prefix = self.prefix
            if prefix is None and self.model.__class__.__name__ in [
                "XLNetLMHeadModel",
                "TransfoXLLMHeadModel",
                "TFXLNetLMHeadModel",
                "TFTransfoXLLMHeadModel",
            ]:
                # For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
                prefix = self.XL_PREFIX
            if prefix is not None:
                # Recalculate some generate_kwargs linked to prefix.
                preprocess_params, forward_params, _ = self._sanitize_parameters(prefix=prefix, **self._forward_params)
                self._preprocess_params = {**self._preprocess_params, **preprocess_params}
                self._forward_params = {**self._forward_params, **forward_params}
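
    # A rough sketch of the default-prefix flow above, assuming an XLNet checkpoint such as
    # "xlnet/xlnet-base-cased" (illustrative only):
    #
    #   pipe = pipeline("text-generation", model="xlnet/xlnet-base-cased")
    #   # self.prefix is None, so prefix falls back to XL_PREFIX; _sanitize_parameters then tokenizes
    #   # it once and stores generate_kwargs["prefix_length"], which _forward later uses to extend
    #   # max_length so the prefix does not eat into the generation budget.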

    def _sanitize_parameters(
        self,
        return_full_text=None,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        prefix=None,
        handle_long_generation=None,
        stop_sequence=None,
        truncation=None,
        max_length=None,
        continue_final_message=None,
        **generate_kwargs,
    ):
        preprocess_params = {}

        add_special_tokens = False
        if "add_special_tokens" in generate_kwargs:
            add_special_tokens = preprocess_params["add_special_tokens"] = generate_kwargs.pop("add_special_tokens")

        if "padding" in generate_kwargs:
            preprocess_params["padding"] = generate_kwargs.pop("padding")

        if truncation is not None:
            preprocess_params["truncation"] = truncation

        if max_length is not None:
            preprocess_params["max_length"] = max_length
            generate_kwargs["max_length"] = max_length

        if prefix is not None:
            preprocess_params["prefix"] = prefix
        if prefix:
            prefix_inputs = self.tokenizer(
                prefix, padding=False, add_special_tokens=add_special_tokens, return_tensors=self.framework
            )
            generate_kwargs["prefix_length"] = prefix_inputs["input_ids"].shape[-1]

        if handle_long_generation is not None:
            if handle_long_generation not in {"hole"}:
                raise ValueError(
                    f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
                    " [None, 'hole']"
                )
            preprocess_params["handle_long_generation"] = handle_long_generation

        if continue_final_message is not None:
            preprocess_params["continue_final_message"] = continue_final_message

        preprocess_params.update(generate_kwargs)
        forward_params = generate_kwargs

        postprocess_params = {}
        if return_full_text is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_full_text`")
            if return_tensors is not None:
                raise ValueError("`return_full_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
        if return_tensors is not None and return_type is None:
            if return_text is not None:
                raise ValueError("`return_text` is mutually exclusive with `return_tensors`")
            return_type = ReturnType.TENSORS
        if return_type is not None:
            postprocess_params["return_type"] = return_type
        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if continue_final_message is not None:
            postprocess_params["continue_final_message"] = continue_final_message

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
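
    # Illustrative sketch of the three-way split performed above, assuming `pipe` is an already-built
    # text-generation pipeline (the exact dict contents are indicative, not a guaranteed contract):
    #
    #   pre, fwd, post = pipe._sanitize_parameters(return_full_text=False, prefix="Once upon a time", max_new_tokens=20)
    #   # pre  ~ {"prefix": "Once upon a time", "max_new_tokens": 20, "prefix_length": ...}  (preprocess kwargs)
    #   # fwd  ~ {"max_new_tokens": 20, "prefix_length": ...}                                (passed to generate)
    #   # post ~ {"return_type": ReturnType.NEW_TEXT}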

    # overriding _parse_and_tokenize to allow for unusual language-modeling tokenizer arguments
    def _parse_and_tokenize(self, *args, **kwargs):
        """
        Parse arguments and tokenize
        """
        # Parse arguments
        if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
            kwargs.update({"add_space_before_punct_symbol": True})

        return super()._parse_and_tokenize(*args, **kwargs)

    def __call__(self, text_inputs, **kwargs):
        """
        Complete the prompt(s) given as inputs.

        Args:
            text_inputs (`str`, `List[str]`, `List[Dict[str, str]]`, or `List[List[Dict[str, str]]]`):
                One or several prompts (or one list of prompts) to complete. If strings or a list of strings are
                passed, this pipeline will continue each prompt. Alternatively, a "chat", in the form of a list
                of dicts with "role" and "content" keys, can be passed, or a list of such chats. When chats are passed,
                the model's chat template will be used to format them before passing them to the model.
            return_tensors (`bool`, *optional*, defaults to `False`):
                Returns the tensors of predictions (as token indices) in the outputs. If set to
                `True`, the decoded text is not returned.
            return_text (`bool`, *optional*):
                Returns the decoded texts in the outputs.
            return_full_text (`bool`, *optional*, defaults to `True`):
                If set to `False` only added text is returned, otherwise the full text is returned. Cannot be
                specified at the same time as `return_text`.
            clean_up_tokenization_spaces (`bool`, *optional*, defaults to `True`):
                Whether or not to clean up the potential extra spaces in the text output.
            continue_final_message (`bool`, *optional*):
                This indicates that you want the model to continue the last message in the input chat rather than
                starting a new one, allowing you to "prefill" its response. By default this is `True` when the final
                message in the input chat has the `assistant` role and `False` otherwise, but you can manually
                override that behaviour by setting this flag.
            prefix (`str`, *optional*):
                Prefix added to the prompt.
            handle_long_generation (`str`, *optional*):
                By default, this pipeline does not handle long generation (generation that, in one form or another,
                exceeds the model's maximum length). There is no perfect way to address this (more info:
                https://github.com/huggingface/transformers/issues/14033#issuecomment-948385227). This provides common
                strategies to work around the problem depending on your use case.

                - `None`: default strategy, where nothing in particular happens
                - `"hole"`: Truncates the left of the input and leaves a gap wide enough to let generation happen
                  (this might truncate a lot of the prompt, and is not suitable when generation exceeds the model
                  capacity)
            generate_kwargs (`dict`, *optional*):
                Additional keyword arguments to pass along to the generate method of the model (see the generate method
                corresponding to your framework [here](./text_generation)).

        Return:
            A list or a list of lists of `dict`: Returns one of the following dictionaries (cannot return a combination
            of both `generated_text` and `generated_token_ids`):

            - **generated_text** (`str`, present when `return_text=True`) -- The generated text.
            - **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
              ids of the generated text.
        """
        if isinstance(
            text_inputs, (list, tuple, KeyDataset) if is_torch_available() else (list, tuple)
        ) and isinstance(text_inputs[0], (list, tuple, dict)):
            # We have one or more prompts in list-of-dicts format, so this is chat mode
            if isinstance(text_inputs[0], dict):
                return super().__call__(Chat(text_inputs), **kwargs)
            else:
                chats = [Chat(chat) for chat in text_inputs]  # 🐈 🐈 🐈
                return super().__call__(chats, **kwargs)
        else:
            return super().__call__(text_inputs, **kwargs)
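
    # Dispatch sketch for __call__ (illustrative; the model name is only an example of a chat model):
    #
    #   pipe = pipeline("text-generation", model="HuggingFaceH4/zephyr-7b-beta")
    #   pipe("Once upon a time")                                  # plain completion -> super().__call__(str)
    #   pipe([{"role": "user", "content": "Hi"}])                 # a single chat    -> super().__call__(Chat(...))
    #   pipe([[{"role": "user", "content": "Hi"}],
    #         [{"role": "user", "content": "Bye"}]])              # a batch of chats -> [Chat(...), Chat(...)]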

    def preprocess(
        self,
        prompt_text,
        prefix="",
        handle_long_generation=None,
        add_special_tokens=None,
        truncation=None,
        padding=None,
        max_length=None,
        continue_final_message=None,
        **generate_kwargs,
    ):
        # Only set non-None tokenizer kwargs, so as to rely on the tokenizer's defaults
        tokenizer_kwargs = {
            "add_special_tokens": add_special_tokens,
            "truncation": truncation,
            "padding": padding,
            "max_length": max_length,
        }
        tokenizer_kwargs = {key: value for key, value in tokenizer_kwargs.items() if value is not None}

        if isinstance(prompt_text, Chat):
            tokenizer_kwargs.pop("add_special_tokens", None)  # ignore add_special_tokens on chats
            # If the user passes a chat that ends in an assistant message, we treat it as a prefill by default
            # because very few models support multiple separate, consecutive assistant messages
            if continue_final_message is None:
                continue_final_message = prompt_text.messages[-1]["role"] == "assistant"
            inputs = self.tokenizer.apply_chat_template(
                prompt_text.messages,
                add_generation_prompt=not continue_final_message,
                continue_final_message=continue_final_message,
                return_dict=True,
                return_tensors=self.framework,
                **tokenizer_kwargs,
            )
        else:
            inputs = self.tokenizer(prefix + prompt_text, return_tensors=self.framework, **tokenizer_kwargs)

        inputs["prompt_text"] = prompt_text

        if handle_long_generation == "hole":
            cur_len = inputs["input_ids"].shape[-1]
            if "max_new_tokens" in generate_kwargs:
                new_tokens = generate_kwargs["max_new_tokens"]
            else:
                new_tokens = generate_kwargs.get("max_length", self.generation_config.max_length) - cur_len
                if new_tokens < 0:
                    raise ValueError("We cannot infer how many new tokens are expected")
            if cur_len + new_tokens > self.tokenizer.model_max_length:
                keep_length = self.tokenizer.model_max_length - new_tokens
                if keep_length <= 0:
                    raise ValueError(
                        "We cannot use `hole` to handle this generation: the number of desired tokens exceeds the"
                        " model's max length"
                    )

                inputs["input_ids"] = inputs["input_ids"][:, -keep_length:]
                if "attention_mask" in inputs:
                    inputs["attention_mask"] = inputs["attention_mask"][:, -keep_length:]

        return inputs
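
    # Worked example for the "hole" strategy above (numbers are illustrative): with
    # tokenizer.model_max_length = 1024, a prompt of cur_len = 1100 tokens and max_new_tokens = 100,
    # cur_len + new_tokens = 1200 > 1024, so the prompt is left-truncated to
    # keep_length = 1024 - 100 = 924 tokens, leaving exactly enough room for the requested generation.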

    def _forward(self, model_inputs, **generate_kwargs):
        input_ids = model_inputs["input_ids"]
        attention_mask = model_inputs.get("attention_mask", None)
        # Allow empty prompts
        if input_ids.shape[1] == 0:
            input_ids = None
            attention_mask = None
            in_b = 1
        else:
            in_b = input_ids.shape[0]
        prompt_text = model_inputs.pop("prompt_text")

        # If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
        # generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
        prefix_length = generate_kwargs.pop("prefix_length", 0)
        if prefix_length > 0:
            has_max_new_tokens = "max_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].max_new_tokens is not None
            )
            if not has_max_new_tokens:
                generate_kwargs["max_length"] = generate_kwargs.get("max_length") or self.generation_config.max_length
                generate_kwargs["max_length"] += prefix_length
            has_min_new_tokens = "min_new_tokens" in generate_kwargs or (
                "generation_config" in generate_kwargs
                and generate_kwargs["generation_config"].min_new_tokens is not None
            )
            if not has_min_new_tokens and "min_length" in generate_kwargs:
                generate_kwargs["min_length"] += prefix_length

        # A user-defined `generation_config` passed to the pipeline call takes precedence
        if "generation_config" not in generate_kwargs:
            generate_kwargs["generation_config"] = self.generation_config

        generated_sequence = self.model.generate(input_ids=input_ids, attention_mask=attention_mask, **generate_kwargs)
        out_b = generated_sequence.shape[0]
        if self.framework == "pt":
            generated_sequence = generated_sequence.reshape(in_b, out_b // in_b, *generated_sequence.shape[1:])
        elif self.framework == "tf":
            generated_sequence = tf.reshape(generated_sequence, (in_b, out_b // in_b, *generated_sequence.shape[1:]))
        return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
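
    # Shape sketch for the reshape above (illustrative): with a batch of in_b = 2 prompts and
    # num_return_sequences = 3, `generate` returns out_b = 6 sequences, which are regrouped into
    # shape (2, 3, sequence_length) so that postprocess can emit one list of candidates per prompt.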

    def postprocess(
        self,
        model_outputs,
        return_type=ReturnType.FULL_TEXT,
        clean_up_tokenization_spaces=True,
        continue_final_message=None,
    ):
        generated_sequence = model_outputs["generated_sequence"][0]
        input_ids = model_outputs["input_ids"]
        prompt_text = model_outputs["prompt_text"]
        generated_sequence = generated_sequence.numpy().tolist()
        records = []
        for sequence in generated_sequence:
            if return_type == ReturnType.TENSORS:
                record = {"generated_token_ids": sequence}
            elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
                # Decode text
                text = self.tokenizer.decode(
                    sequence,
                    skip_special_tokens=True,
                    clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                )

                # Remove the PADDING prompt from the sequence if an XLNet or Transfo-XL model is used
                if input_ids is None:
                    prompt_length = 0
                else:
                    prompt_length = len(
                        self.tokenizer.decode(
                            input_ids[0],
                            skip_special_tokens=True,
                            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                        )
                    )

                all_text = text[prompt_length:]
                if return_type == ReturnType.FULL_TEXT:
                    if isinstance(prompt_text, str):
                        all_text = prompt_text + all_text
                    elif isinstance(prompt_text, Chat):
                        if continue_final_message is None:
                            # If the user passes a chat ending in an assistant message, we treat it as a prefill by
                            # default because very few models support multiple separate, consecutive assistant messages
                            continue_final_message = prompt_text.messages[-1]["role"] == "assistant"
                        if continue_final_message:
                            # With assistant prefill, concat onto the end of the last message
                            all_text = list(prompt_text.messages)[:-1] + [
                                {
                                    "role": prompt_text.messages[-1]["role"],
                                    "content": prompt_text.messages[-1]["content"] + all_text,
                                }
                            ]
                        else:
                            # When we're not starting from a prefill, the output is a new assistant message
                            all_text = list(prompt_text.messages) + [{"role": "assistant", "content": all_text}]
                record = {"generated_text": all_text}
            records.append(record)

        return records
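
# Output sketch for postprocess (illustrative): with the default ReturnType.FULL_TEXT and a chat
# prompt, each record's "generated_text" is the original message list plus either a new
# {"role": "assistant", "content": ...} message or, when prefilling, the final assistant message
# with the newly generated text appended to its "content".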