candidate_generator.py

# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
from typing import TYPE_CHECKING, Any, Dict, Optional, Tuple

import numpy as np
import torch

from ..cache_utils import DynamicCache
from ..pytorch_utils import isin_mps_friendly
from .logits_process import LogitsProcessorList, MinLengthLogitsProcessor


if TYPE_CHECKING:
    from ..modeling_utils import PreTrainedModel
    from ..tokenization_utils_base import PreTrainedTokenizerBase
    from .configuration_utils import GenerationConfig


class CandidateGenerator:
    """Abstract base class for all candidate generators that can be applied during assisted generation."""

    def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:
        """
        Fetches the candidates to be tried for the current input.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)

        Return:
            `torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be
            assessed by the model and, optionally, a `torch.FloatTensor` of shape `(batch_size, candidate_length,
            vocabulary_size)` containing the logits associated with each candidate.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can call `get_candidates`."
        )

    def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
        """
        Updates the candidate generation strategy based on the outcomes.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
            scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
                Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
                using beam search or log softmax for each vocabulary token when using beam search.
            num_matches (`int`):
                The number of matches between the candidate sequences and the model predictions.
        """
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can call "
            "`update_candidate_strategy`."
        )


class AssistedCandidateGenerator(CandidateGenerator):
    """
    `CandidateGenerator` class to be used for assisted generation and speculative decoding. This class generates
    candidates through the use of a smaller model. Read the following blog post for more information:
    https://huggingface.co/blog/assisted-generation

    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
        assistant_model (`PreTrainedModel`):
            The model to be used for generating candidates. This model should be smaller than the main model.
        generation_config (`~generation.GenerationConfig`, *optional*):
            The generation configuration to be used as base parametrization for the generation call.
        logits_processor (`LogitsProcessorList`):
            An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
            used to modify the prediction scores of the language modeling head applied at each generation step.
        model_kwargs (`Dict`):
            The keyword arguments that will be passed to the main model, and are used as base inputs for the assistant
            model as well.
        inputs_tensor (`torch.Tensor`, *optional*):
            The model input tensor. In encoder-decoder models, this is the encoder input.
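
    Example (usage sketch; the checkpoint names are illustrative placeholders, and this generator is normally
    instantiated internally by `generate` when an `assistant_model` is passed):

    ```python
    >>> from transformers import AutoModelForCausalLM, AutoTokenizer

    >>> tokenizer = AutoTokenizer.from_pretrained("EleutherAI/pythia-1.4b-deduped")
    >>> model = AutoModelForCausalLM.from_pretrained("EleutherAI/pythia-1.4b-deduped")
    >>> assistant_model = AutoModelForCausalLM.from_pretrained("EleutherAI/pythia-160m-deduped")

    >>> inputs = tokenizer("Alice and Bob", return_tensors="pt")
    >>> # the assistant drafts several candidate tokens per step; the main model verifies them in one forward pass
    >>> outputs = model.generate(**inputs, assistant_model=assistant_model, max_new_tokens=20)
    >>> print(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])
    ```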
    """

    def __init__(
        self,
        input_ids: torch.LongTensor,
        assistant_model: "PreTrainedModel",
        generation_config: "GenerationConfig",
        model_kwargs: Dict,
        inputs_tensor: Optional[torch.Tensor] = None,
        logits_processor: "LogitsProcessorList" = None,
    ):
        # Make sure all data at the same device as assistant model
        device = assistant_model.device
        input_ids = input_ids.to(device)
        if inputs_tensor is not None:
            inputs_tensor = inputs_tensor.to(device)

        # Prepare the assistant and the starting number of candidate tokens
        self.assistant_model = assistant_model
        self.num_assistant_tokens = assistant_model.generation_config.num_assistant_tokens
        self.assistant_confidence_threshold = assistant_model.generation_config.assistant_confidence_threshold

        # Set eos in assistant same as in target model
        self.assistant_model.generation_config.eos_token_id = generation_config.eos_token_id

        # Prepare the kwargs for the assistant model
        assistant_kwargs = {}
        for key, value in model_kwargs.items():  # deepcopy crashes if we attempt to copy encoder outputs with grads
            if key not in ("encoder_outputs", "assistant_encoder_outputs", "past_key_values"):
                assistant_kwargs[key] = (
                    value.detach().to(device) if isinstance(value, torch.Tensor) else copy.deepcopy(value)
                )

        # Remove potential default "num_logits_to_keep" key
        if "num_logits_to_keep" in assistant_kwargs.keys() and not assistant_model._supports_num_logits_to_keep():
            del assistant_kwargs["num_logits_to_keep"]

        if "assistant_encoder_outputs" in model_kwargs:
            assistant_kwargs["encoder_outputs"] = model_kwargs["assistant_encoder_outputs"]
        elif assistant_model.config.is_encoder_decoder:
            inputs_tensor, model_input_name, assistant_kwargs = assistant_model._prepare_model_inputs(
                inputs_tensor, assistant_model.generation_config.bos_token_id, assistant_kwargs
            )
            assistant_kwargs = assistant_model._prepare_encoder_decoder_kwargs_for_generation(
                inputs_tensor, assistant_kwargs, model_input_name, assistant_model.generation_config
            )
        elif "encoder_outputs" in model_kwargs:
            assistant_kwargs["encoder_outputs"] = model_kwargs["encoder_outputs"]
        self.assistant_kwargs = assistant_kwargs

        # Prepare assistant model's keys of inputs
        if assistant_model.config.is_encoder_decoder:
            # both are encoder-decoder
            self.input_ids_key = "decoder_input_ids"
        elif "encoder_outputs" in assistant_kwargs:
            # special case for encoder-decoder with decoder-only assistant (like DistilWhisper)
            self.input_ids_key = "input_ids"
            self.assistant_kwargs["attention_mask"] = self.assistant_kwargs.get(
                "decoder_attention_mask",
                torch.ones((input_ids.shape[0], 1), device=input_ids.device, dtype=torch.long),
            )
        else:
            # both are decoder-only
            self.input_ids_key = "input_ids"

        # Prepare generation-related options.
        self.logits_processor = logits_processor if logits_processor is not None else LogitsProcessorList()
        self.generation_config = copy.deepcopy(generation_config)
        self.generation_config.return_dict_in_generate = True
        self.generation_config.output_scores = True
        self.generation_config.assistant_confidence_threshold = self.assistant_confidence_threshold
        # this flag allows us to set the confidence stopping criteria for assistant model generation
        self.generation_config.is_assistant = True

        # avoid unnecessary warnings that min_length is larger than max_new_tokens
        # remove the `MinLengthLogitsProcessor` if exists (NOTE: no need to check for `MinNewTokensLogitsProcessor`)
        self.main_model_min_length = self.generation_config.min_length
        self.generation_config.min_length = 0
        self.generation_config.min_new_tokens = None
        for processor in self.logits_processor:
            if isinstance(processor, MinLengthLogitsProcessor):
                raise ValueError(
                    "Passing `MinLengthLogitsProcessor` when using `assisted_generation` is disabled. "
                    "Please pass in `min_length` into `.generate()` instead"
                )

        # We need to roll back the cache in assisted generation, only DynamicCache is supported
        self.generation_config.cache_implementation = None

    def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:
        """
        Fetches the candidates to be tried for the current input.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)

        Return:
            `torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be
            assessed by the model and a `torch.FloatTensor` of shape `(batch_size, candidate_length,
            vocabulary_size)` containing the logits associated with each candidate.
        """
        input_ids = input_ids.to(self.assistant_model.device)

        # Don't generate more than `max_length - 1` candidates since the target model generates one extra token.
        new_cur_len = input_ids.shape[-1]
        max_new_tokens = min(int(self.num_assistant_tokens), self.generation_config.max_length - new_cur_len - 1)
        min_new_tokens = max(min(max_new_tokens, self.main_model_min_length - new_cur_len), 0)
        if max_new_tokens == 0:
            return input_ids, None

        # 1. If it is not the first round of candidate generation, prepare the inputs based on the input_ids length
        # (which implicitly contains the number of accepted candidates from the previous round)
        has_past_key_values = self.assistant_kwargs.get("past_key_values", None) is not None
        if has_past_key_values:
            new_cache_size = new_cur_len - 1
            self.assistant_kwargs["past_key_values"] = _crop_past_key_values(
                self.assistant_model, self.assistant_kwargs["past_key_values"], new_cache_size - 1
            )  # the assistant does not have the token after the last match, hence the -1

            self.assistant_kwargs = _prepare_attention_mask(
                self.assistant_kwargs, new_cur_len, self.assistant_model.config.is_encoder_decoder
            )
            self.assistant_kwargs = _prepare_token_type_ids(self.assistant_kwargs, new_cur_len)

        # 2. Forecast next N tokens using the assistant model.
        assistant_generation_kwargs = {
            self.input_ids_key: input_ids,
            "min_new_tokens": min_new_tokens,
            "max_new_tokens": max_new_tokens,
            "generation_config": self.generation_config,
            "logits_processor": self.logits_processor,
        }

        assistant_output = self.assistant_model.generate(**assistant_generation_kwargs, **self.assistant_kwargs)

        # 3. Update variables for the next round of candidate generation
        self.assistant_kwargs["past_key_values"] = assistant_output.past_key_values

        # 4. Prepare variables for output
        candidate_logits = torch.stack(assistant_output.scores, dim=1)
        candidate_ids = assistant_output.sequences
        return candidate_ids, candidate_logits

    def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
        """
        Updates the candidate generation strategy based on the outcomes.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
            scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
                Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
                using beam search or log softmax for each vocabulary token when using beam search.
            num_matches (`int`):
                The number of matches between the candidate sequences and the model predictions.
        """
        # Adjust the max number of assistant tokens to use in the next iteration. This is a simple heuristic,
        # probably can be improved -- we want to balance the benefits of getting assistant tokens correct with the
        # cost of forecasting incorrect assistant tokens.
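        # Illustrative behaviour (hypothetical numbers): starting from a budget of 5 assistant tokens, a round in
        # which all 5 candidates are accepted raises the budget to 7; any round with a rejection lowers it by 1,
        # never going below 1.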
        if self.assistant_model.generation_config.num_assistant_tokens_schedule in {
            "heuristic",
            "heuristic_transient",
        }:
            if num_matches == int(self.num_assistant_tokens):
                self.num_assistant_tokens += 2.0
            else:
                self.num_assistant_tokens = max(1.0, self.num_assistant_tokens - 1.0)


class AssistedCandidateGeneratorDifferentTokenizers(AssistedCandidateGenerator):
    """
    `CandidateGenerator` class to be used for Universal Assisted Generation (UAD): assisted generation with different
    tokenizers for the assistant and main models. This class generates candidates through the use of a smaller model.

    The main model input tokens are re-encoded into assistant model tokens, then candidate tokens are generated in the
    assistant encoding, which are in turn re-encoded into main model candidate tokens. Validation then proceeds as
    explained above.
    The re-encoding steps involve decoding token ids into text and then encoding the text using a different tokenizer.
    Since re-encoding the tokens may result in tokenization discrepancies, UAD finds the longest common subsequence
    between the source and target encodings, to ensure the new tokens include the correct prompt suffix.

    Args:
        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
            Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
        assistant_model (`PreTrainedModel`):
            The model to be used for generating candidates. This model should be smaller than the main model.
        target_tokenizer (`PreTrainedTokenizerBase`):
            The tokenizer used for the target model.
        assistant_tokenizer (`PreTrainedTokenizerBase`):
            The tokenizer used for the assistant model.
        generation_config (`~generation.GenerationConfig`, *optional*):
            The generation configuration to be used as base parametrization for the generation call.
        logits_processor (`LogitsProcessorList`):
            An instance of [`LogitsProcessorList`]. List of instances of class derived from [`LogitsProcessor`]
            used to modify the prediction scores of the language modeling head applied at each generation step.
        model_kwargs (`Dict`):
            The keyword arguments that will be passed to the main model, and are used as base inputs for the assistant
            model as well.
        inputs_tensor (`torch.Tensor`, *optional*):
            The model input tensor. In encoder-decoder models, this is the encoder input.
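
    Example (usage sketch; checkpoint names are illustrative, and `generate` builds this generator internally when
    `tokenizer` and `assistant_tokenizer` are passed together with `assistant_model`):

    ```python
    >>> from transformers import AutoModelForCausalLM, AutoTokenizer

    >>> tokenizer = AutoTokenizer.from_pretrained("google/gemma-2-9b")
    >>> model = AutoModelForCausalLM.from_pretrained("google/gemma-2-9b")
    >>> # the drafter may use a completely different tokenizer and vocabulary
    >>> assistant_tokenizer = AutoTokenizer.from_pretrained("double7/vicuna-68m")
    >>> assistant_model = AutoModelForCausalLM.from_pretrained("double7/vicuna-68m")

    >>> inputs = tokenizer("Alice and Bob", return_tensors="pt")
    >>> outputs = model.generate(
    ...     **inputs,
    ...     assistant_model=assistant_model,
    ...     tokenizer=tokenizer,
    ...     assistant_tokenizer=assistant_tokenizer,
    ...     max_new_tokens=20,
    ... )
    ```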
    """

    def __init__(
        self,
        input_ids: torch.LongTensor,
        assistant_model: "PreTrainedModel",
        target_tokenizer: "PreTrainedTokenizerBase",
        assistant_tokenizer: "PreTrainedTokenizerBase",
        generation_config: "GenerationConfig",
        model_kwargs: Dict,
        inputs_tensor: Optional[torch.Tensor] = None,
        logits_processor: "LogitsProcessorList" = None,
    ):
        super().__init__(input_ids, assistant_model, generation_config, model_kwargs, inputs_tensor, logits_processor)

        self.target_tokenizer = target_tokenizer
        self.assistant_tokenizer = assistant_tokenizer
        self.prev_tokens = None
        self.prev_assistant_ids = None
        self.target_lookbehind = 10
        self.assistant_lookbehind = 10

    @staticmethod
    def _get_longest_diag_dict(input_matrix, nonzero_idx):
        """
        Calculates the length of the longest diagonal sequence in a given matrix.

        Args:
            input_matrix (torch.Tensor): The input matrix.
            nonzero_idx (torch.Tensor): The indices of the non-zero elements in the matrix.

        Returns:
            dict: A dictionary where the keys are the indices of the non-zero elements and the values are the lengths
            of the longest diagonal sequences starting from those indices.
        """
        visited = set()
        diags = {}
        for idx in nonzero_idx:
            start_idx = torch.clone(idx)
            tuple_start_idx = tuple(start_idx.tolist())

            if tuple_start_idx in visited:
                continue

            visited.add(tuple_start_idx)
            cur_diag_len = 1
            start_idx += 1
            while start_idx[0] < input_matrix.shape[0] and start_idx[1] < input_matrix.shape[1]:
                tuple_start_idx = tuple(start_idx.tolist())
                visited.add(tuple_start_idx)

                if input_matrix[start_idx[0], start_idx[1]] == 1:
                    cur_diag_len += 1
                    start_idx += 1
                else:
                    break

            diags[idx] = cur_diag_len
        return diags

    @staticmethod
    def _get_longest_diag_index(input_matrix):
        """
        Returns the start index and length of the longest diagonal in the given input.

        Args:
            input_matrix (numpy.ndarray): The input matrix.

        Returns:
            tuple: A tuple containing the start index and length of the longest diagonal.
        """
        diags = AssistedCandidateGeneratorDifferentTokenizers._get_longest_diag_dict(
            input_matrix, input_matrix.nonzero()
        )
        diags_values = list(diags.values())
        diags_keys = list(diags.keys())
        best_diag = np.argmax(diags_values)
        diag_start_index = diags_keys[best_diag]
        diag_start_length = diags_values[best_diag]
        return diag_start_index, diag_start_length

    @staticmethod
    def _get_tokens_diag(prompt, prompt_plus_new_tokens):
        """
        Input:
            prompt: 2D array of shape (batch_size, prompt_length), represents the original prompt tokens
            prompt_plus_new_tokens: 2D array of shape (batch_size, prompt_length), represents the suffix of the
                original prompt, with additional new tokens.
        Output:
            discrepancy_length: int, represents the number of tokens that need to be replaced from prompt
            new_tokens_only: 2D array of shape (batch_size, new_token_length), represents the new tokens that are not
                in prompt
            discrepancy_only: 2D array of shape (batch_size, discrepancy_length), represents the tokens in
                prompt_plus_new_tokens that overlap with the end of prompt but differ from it
        """
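        # Illustrative walk-through (hypothetical values): with prompt = [[1, 2, 3, 4]] and
        # prompt_plus_new_tokens = [[3, 4, 5, 6]], the longest diagonal of matches aligns the prompt suffix [3, 4]
        # with the start of the re-encoded ids, so this returns discrepancy_length=0, new_tokens_only=[[5, 6]] and
        # an empty discrepancy_only.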
        compare_mat = prompt_plus_new_tokens.T == prompt
        if not torch.is_tensor(compare_mat):
            compare_mat = torch.tensor(compare_mat)

        compare_mat_int = compare_mat.to(int)

        if not compare_mat_int.any().item():
            # empty intersection between prompt and prompt_plus_new_tokens
            return None, None, None

        longest_location, longest_diag_length = AssistedCandidateGeneratorDifferentTokenizers._get_longest_diag_index(
            compare_mat_int
        )
        new_token_start_index = longest_location[0] + longest_diag_length
        discrepancy_with_old = longest_location[1] + longest_diag_length
        discrepancy_length = (prompt.shape[1] - discrepancy_with_old).item()
        new_tokens_only = prompt_plus_new_tokens[:, new_token_start_index + discrepancy_length :]
        discrepancy_only = prompt_plus_new_tokens[
            :, new_token_start_index : new_token_start_index + discrepancy_length
        ]
        return discrepancy_length, new_tokens_only, discrepancy_only

    def convert_source_tokens_to_target_tokens(
        self,
        input_ids,
        source_tokenizer,
        destination_tokenizer,
    ):
        """
        Convert token IDs from one tokenizer to another.

        Args:
            input_ids: The input token IDs.
            source_tokenizer: The source tokenizer.
            destination_tokenizer: The destination tokenizer.

        Returns:
            The converted token IDs.
        """
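        # Illustrative flow (hypothetical ids): the source ids are decoded to text with the source tokenizer
        # (e.g. "The cat sat"), then that text is re-encoded with the destination tokenizer; the resulting id
        # sequence generally differs from the source ids in both values and length.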
        text = source_tokenizer.batch_decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=True)
        dest_ids = destination_tokenizer(text, add_special_tokens=True, return_tensors="pt")["input_ids"]
        return dest_ids.to(input_ids.device)

    def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:
        """
        Fetches the candidates to be tried for the current input.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)

        Return:
            `torch.LongTensor` of shape `(batch_size, candidate_length)` containing the candidate sequences to be
            assessed by the model and a `torch.FloatTensor` of shape `(batch_size, candidate_length,
            vocabulary_size)` containing the logits associated with each candidate.
        """
        max_new_tokens = int(self.num_assistant_tokens)
        if max_new_tokens == 0:
            return input_ids, None

        input_ids = input_ids.to(self.assistant_model.device)
        convert_kwargs = {
            "source_tokenizer": self.target_tokenizer,
            "destination_tokenizer": self.assistant_tokenizer,
        }
        remove_from_pkv = 0

        # Since re-encoding the tokens may result in tokenization discrepancies, we use 2 look behind values
        # (one for each conversion) which mark where to start looking for the overlap between the
        # source and target encodings, to ensure the new tokens include the correct prompt suffix.
        if self.prev_tokens is not None and self.prev_target_ids.shape[1] > self.target_lookbehind:
            # input_ids contains all target prompt input ids and some new target input ids
            start_index_in_target_window = self.prev_target_ids.shape[1] - self.target_lookbehind

            new_assistant_ids = self.convert_source_tokens_to_target_tokens(
                input_ids[:, start_index_in_target_window:], **convert_kwargs
            )
            prompt_use_length = new_assistant_ids.shape[1]
            prompt_use = self.prev_assistant_ids[:, -prompt_use_length:]

            discrepancy_length, new_tokens_only, discrepancy_only = (
                AssistedCandidateGeneratorDifferentTokenizers._get_tokens_diag(prompt_use, new_assistant_ids)
            )
            assistant_input_ids = self.prev_assistant_ids

            if new_tokens_only is not None:
                if discrepancy_length > 0 and discrepancy_only.shape[1] > 0:
                    if discrepancy_length == discrepancy_only.shape[1]:
                        assistant_input_ids[:, -discrepancy_length:] = discrepancy_only

                    elif discrepancy_length > discrepancy_only.shape[1]:
                        discrepancy_length_diff = discrepancy_length - discrepancy_only.shape[1]
                        assistant_input_ids = assistant_input_ids[:, :-discrepancy_length_diff]
                        assistant_input_ids[:, -discrepancy_only.shape[1] :] = discrepancy_only

                    remove_from_pkv = discrepancy_length

                if new_tokens_only.shape[1] > 0:
                    assistant_input_ids = torch.cat([assistant_input_ids, new_tokens_only], dim=-1)
            else:
                # edge case: in case of no intersection between prompt and new_assistant_ids
                assistant_input_ids = torch.cat([assistant_input_ids, new_assistant_ids], dim=-1)
        else:
            assistant_input_ids = self.convert_source_tokens_to_target_tokens(input_ids, **convert_kwargs)
            self.prev_target_ids = input_ids

        self.prev_assistant_ids = assistant_input_ids
        new_cur_len = assistant_input_ids.shape[-1]
        min_new_tokens = max(min(max_new_tokens, self.main_model_min_length - new_cur_len), 0)

        # 1. If it is not the first round of candidate generation, prepare the inputs based on the input_ids length
        # (which implicitly contains the number of accepted candidates from the previous round)
        has_past_key_values = self.assistant_kwargs.get("past_key_values", None) is not None
        if has_past_key_values:
            new_cache_size = new_cur_len - 1 - remove_from_pkv
            self.assistant_kwargs["past_key_values"] = _crop_past_key_values(
                self.assistant_model, self.assistant_kwargs["past_key_values"], new_cache_size - 1
            )  # the assistant does not have the token after the last match, hence the -1

            self.assistant_kwargs = _prepare_attention_mask(
                self.assistant_kwargs, new_cur_len, self.assistant_model.config.is_encoder_decoder
            )
            self.assistant_kwargs = _prepare_token_type_ids(self.assistant_kwargs, new_cur_len)

        # 2. Forecast next N tokens using the assistant model.
        assistant_generation_kwargs = {
            self.input_ids_key: assistant_input_ids,
            "min_new_tokens": min_new_tokens,
            "max_new_tokens": max_new_tokens,
            "generation_config": self.generation_config,
            "logits_processor": self.logits_processor,
        }

        self.assistant_kwargs.pop("attention_mask", None)

        assistant_output = self.assistant_model.generate(**assistant_generation_kwargs, **self.assistant_kwargs)

        num_prev_assistant = self.prev_assistant_ids.shape[1]
        start_assistant_look_index = num_prev_assistant - self.assistant_lookbehind

        new_target_ids_from_window = self.convert_source_tokens_to_target_tokens(
            assistant_output.sequences[:, start_assistant_look_index:],
            source_tokenizer=self.assistant_tokenizer,
            destination_tokenizer=self.target_tokenizer,
        )
        target_prompt_use_length = new_target_ids_from_window.shape[1]
        target_prompt_use = input_ids[:, -target_prompt_use_length:]

        _, target_new_tokens_only, _ = AssistedCandidateGeneratorDifferentTokenizers._get_tokens_diag(
            target_prompt_use, new_target_ids_from_window
        )

        new_target_ids = input_ids

        if target_new_tokens_only is not None:
            if target_new_tokens_only.shape[1] > 0:
                new_target_ids = torch.cat([new_target_ids, target_new_tokens_only], dim=-1)
        else:
            # edge case: in case of no intersection between prompt and new_target_ids
            new_target_ids = torch.cat([new_target_ids, new_target_ids_from_window], dim=-1)

        self.prev_target_ids = input_ids

        if hasattr(self.generation_config, "max_length"):
            new_target_ids = new_target_ids[:, : self.generation_config.max_length]

        # 3. Update variables for the next round of candidate generation
        self.assistant_kwargs["past_key_values"] = assistant_output.past_key_values
        self.prev_tokens = assistant_output.sequences

        # 4. Prepare variables for output
        if input_ids.shape[1] >= new_target_ids.shape[1]:
            return input_ids, None

        return new_target_ids, None


class PromptLookupCandidateGenerator(CandidateGenerator):
    """
    `CandidateGenerator` class to be used for prompt lookup generation. This class generates candidates by looking up
    likely continuations in the provided prompt (input_ids) itself.
    Read the following blog post for more information: https://github.com/apoorvumang/prompt-lookup-decoding

    Args:
        max_matching_ngram_size (`int`):
            The maximum ngram size to be considered for matching in the prompt
        num_output_tokens (`int`):
            The number of tokens to be output as candidate tokens.
        max_length (`int`):
            The number of total maximum tokens that can be generated. For decoder-only models that includes the prompt
            length. Defaults to 20, which is the max length used as default in generation config.
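
    Example (usage sketch; the checkpoint name is an illustrative placeholder, and `generate` builds this generator
    internally when `prompt_lookup_num_tokens` is passed):

    ```python
    >>> from transformers import AutoModelForCausalLM, AutoTokenizer

    >>> tokenizer = AutoTokenizer.from_pretrained("gpt2")
    >>> model = AutoModelForCausalLM.from_pretrained("gpt2")
    >>> inputs = tokenizer("The quick brown fox jumps over the lazy dog. The quick brown", return_tensors="pt")

    >>> # candidate tokens are copied from earlier occurrences of the current n-gram in the prompt itself
    >>> outputs = model.generate(**inputs, prompt_lookup_num_tokens=10, max_new_tokens=20)
    ```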
    """

    def __init__(
        self,
        eos_token_id: torch.Tensor = None,
        num_output_tokens: int = 10,
        max_matching_ngram_size: int = None,
        max_length: int = 20,
    ):
        self.num_output_tokens = num_output_tokens
        self.max_matching_ngram_size = max_matching_ngram_size if max_matching_ngram_size else 2
        self.max_length = max_length
        self.eos_token_id = eos_token_id

        if self.max_matching_ngram_size <= 0 or self.num_output_tokens <= 0:
            raise ValueError("Invalid max_matching_ngram_size or num_output_tokens")

    def get_candidates(self, input_ids: torch.LongTensor) -> Tuple[torch.LongTensor, Optional[torch.FloatTensor]]:
        """
        Fetches the candidates to be tried for the current input.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)

        Return:
            `torch.LongTensor` of shape `(num_candidates, candidate_length)`: The candidate sequences to be tried.
        """
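        # Illustrative behaviour (hypothetical ids): if the prompt ends in the bigram [7, 9] and that bigram also
        # occurred earlier in the prompt, the tokens that followed the earlier occurrence (up to
        # `num_output_tokens` of them) are proposed as the candidate continuation.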
        input_length = input_ids.size(1)

        # Don't generate more than `max_length - 1` candidates since the target model generates one extra token.
        if self.max_length == input_length + 1:
            return input_ids, None

        chosen_ids = None
        match_found = False
        for ngram_size in range(min(self.max_matching_ngram_size, input_length - 1), 0, -1):
            # Create sliding windows of size ngram_size
            windows = input_ids.unfold(dimension=1, size=ngram_size, step=1)

            # Convert ngram to a tensor for comparison
            ngram_tensor = input_ids[0, -ngram_size:]

            # Find where the windows match the ngram
            matches = (windows == ngram_tensor).all(dim=2)

            # Get the indices of matches
            match_indices = matches.nonzero(as_tuple=True)[1]

            # Iterate through match indices to find a valid continuation
            for idx in match_indices:
                start_idx = idx + ngram_size
                end_idx = start_idx + self.num_output_tokens
                end_idx = min(end_idx, input_length, self.max_length)

                if start_idx < end_idx:
                    chosen_ids = input_ids[0, start_idx:end_idx]
                    match_found = True

                    # remove remaining candidate ids if an "eos" token is found, otherwise the target model may
                    # accept eos and the rest as valid, thus not stopping generation after "eos"
                    # NOTE: below code is written based on the fact that assisted decoding supports only bs=1
                    mask = isin_mps_friendly(chosen_ids, self.eos_token_id)
                    match_indices_eos = torch.nonzero(mask)
                    if match_indices_eos.numel() > 0:
                        first_eos_index = match_indices_eos[0].item()
                        chosen_ids = chosen_ids[:first_eos_index]
                    break
            if match_found:
                break

        if chosen_ids is None or len(chosen_ids) == 0:
            # In case we didn't find a match, return the input sequence unchanged, reverting back to autoregressive
            # decoding
            return input_ids, None

        # Now we need to extend input_ids with chosen_ids
        chosen_ids = chosen_ids.unsqueeze(0)
        candidate_input_ids = torch.cat((input_ids, chosen_ids), dim=1)
        # assisted_generation expects logits as well, but we don't have those here, so returning None
        return candidate_input_ids, None

    def update_candidate_strategy(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, num_matches: int):
        """
        Updates the candidate generation strategy based on the outcomes.

        Args:
            input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
                Indices of input sequence tokens in the vocabulary. [What are input IDs?](../glossary#input-ids)
            scores (`torch.FloatTensor` of shape `(batch_size, candidate_length, config.vocab_size)`):
                Prediction scores of a language modeling head. These can be logits for each vocabulary token when not
                using beam search or log softmax for each vocabulary token when using beam search.
            num_matches (`int`):
                The number of matches between the candidate sequences and the model predictions.
        """
        # Currently does nothing
        return


def _crop_past_key_values(model, past_key_values, max_length):
    """Crops the past key values up to a certain maximum length."""
    new_past = []
    if model.config.is_encoder_decoder:
        for idx in range(len(past_key_values)):
            new_past.append(
                (
                    past_key_values[idx][0][:, :, :max_length, :],
                    past_key_values[idx][1][:, :, :max_length, :],
                    past_key_values[idx][2],
                    past_key_values[idx][3],
                )
            )
        past_key_values = tuple(new_past)
    # gptbigcode is special and stores kv in shape (batch_size, seq_len, dim), if it's a multi_query model
    elif "gptbigcode" in model.__class__.__name__.lower() or (
        model.config.architectures is not None and "gptbigcode" in model.config.architectures[0].lower()
    ):
        if model.config.multi_query:
            for idx in range(len(past_key_values)):
                past_key_values[idx] = past_key_values[idx][:, :max_length, :]
        else:
            for idx in range(len(past_key_values)):
                past_key_values[idx] = past_key_values[idx][:, :, :max_length, :]
    elif isinstance(past_key_values, DynamicCache):
        past_key_values.crop(max_length)
    elif past_key_values is not None:
        for idx in range(len(past_key_values)):
            if past_key_values[idx] != ([], []):
                new_past.append(
                    (
                        past_key_values[idx][0][:, :, :max_length, :],
                        past_key_values[idx][1][:, :, :max_length, :],
                    )
                )
            else:
                new_past.append((past_key_values[idx][0], past_key_values[idx][1]))
        past_key_values = tuple(new_past)
    return past_key_values


def _prepare_attention_mask(model_kwargs: Dict[str, Any], new_length: int, is_encoder_decoder: bool) -> Dict[str, Any]:
    """Expands or crops the model's mask for decoding purposes, to the defined length"""
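    # Illustrative behaviour (hypothetical shapes): a (1, 8) mask with new_length=10 gets two columns of ones
    # appended; with new_length=6 it is cropped to its first 6 positions. The cross-attention / image masks below
    # are adjusted the same way along their sequence dimension.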
    mask_key = "decoder_attention_mask" if is_encoder_decoder else "attention_mask"
    if mask_key not in model_kwargs:
        return model_kwargs

    mask = model_kwargs[mask_key]
    mask_length_diff = new_length - mask.shape[1]

    if mask_length_diff < 0:
        model_kwargs[mask_key] = mask[:, :mask_length_diff]
    elif mask_length_diff > 0:
        model_kwargs[mask_key] = torch.cat([mask, mask.new_ones((mask.shape[0], mask_length_diff))], dim=-1)

    # Handle cross attention models
    if "cross_attention_mask" in model_kwargs:
        # Mllama case
        cross_mask = model_kwargs["cross_attention_mask"]
        if mask_length_diff < 0:
            model_kwargs["cross_attention_mask"] = cross_mask[:, :mask_length_diff]
        elif mask_length_diff > 0:
            new_mask = cross_mask[:, -1:, :, :].repeat(1, mask_length_diff, 1, 1)
            model_kwargs["cross_attention_mask"] = torch.cat([cross_mask, new_mask], dim=1)
    elif "image_attention_mask" in model_kwargs:
        # IDEFICS case
        cross_mask = model_kwargs["image_attention_mask"]
        if mask_length_diff < 0:
            model_kwargs["image_attention_mask"] = cross_mask[:, :mask_length_diff]
        elif mask_length_diff > 0:
            new_mask = cross_mask[:, -1:, :].repeat(1, mask_length_diff, 1)
            model_kwargs["image_attention_mask"] = torch.cat([cross_mask, new_mask], dim=1)

    return model_kwargs


def _prepare_token_type_ids(model_kwargs: Dict[str, Any], new_length: int) -> Dict[str, Any]:
    """Expands or crops the model's token_type_ids for decoding purposes, to the defined length"""
    if "token_type_ids" not in model_kwargs or model_kwargs["token_type_ids"] is None:
        return model_kwargs

    token_type_ids = model_kwargs["token_type_ids"]
    final_token_type = token_type_ids[:, -1].unsqueeze(-1)
    type_length_diff = new_length - token_type_ids.shape[1]

    if type_length_diff < 0:
        token_type_ids = token_type_ids[:, :type_length_diff]
    elif type_length_diff > 0:
        token_type_copies = final_token_type.repeat(1, type_length_diff)
        model_kwargs["token_type_ids"] = torch.cat([model_kwargs["token_type_ids"], token_type_copies], dim=-1)
    return model_kwargs