processing_mllama.py
# coding=utf-8
# Copyright 2024 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Processor class for Mllama."""

from typing import List, Optional, Union

import numpy as np

from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import ImagesKwargs, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import (
    PreTokenizedInput,
    TextInput,
)

# TODO: Can we do it this way, or is it better to include it as "Copied from ..."?
from .image_processing_mllama import make_list_of_images


class MllamaImagesKwargs(ImagesKwargs, total=False):
    max_image_tiles: Optional[int]


class MllamaProcessorKwargs(ProcessingKwargs, total=False):
    images_kwargs: MllamaImagesKwargs

    _defaults = {
        "images_kwargs": {
            "max_image_tiles": 4,
        },
    }


def get_cross_attention_token_mask(input_ids: List[int], image_token_id: int) -> List[List[int]]:
    """
    Generate a cross-attention token mask for image tokens in the input sequence.

    This function identifies the positions of image tokens in the input sequence and creates
    a mask that defines which subsequent tokens each image token should attend to.

    Args:
        input_ids (List[int]): A list of token ids representing the input sequence.
        image_token_id (int): The id of the token used to represent images in the sequence.

    Returns:
        List[List[int]]: A list of [start, end] pairs, where each pair represents the range
        of tokens an image token should attend to.

    Notes:
        - If no image tokens are present, an empty list is returned.
        - For a single image token, it attends to all subsequent tokens until the end of the sequence.
        - For multiple image tokens, each attends to tokens up to the next image token or the end of the sequence.
        - Consecutive image tokens are treated as a group and attend to all subsequent tokens together.
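
    Example (illustrative; `9` stands in for the image token id, which is model-specific
    and not defined in this module):

    >>> get_cross_attention_token_mask([1, 9, 3, 4, 9, 6, 7], image_token_id=9)
    [[1, 4], [4, 7]]
    >>> get_cross_attention_token_mask([9, 9, 3, 9, 5], image_token_id=9)
    [[0, 3], [1, 3], [3, 5]]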
  52. """
  53. image_token_locations = [i for i, token in enumerate(input_ids) if token == image_token_id]
  54. if len(image_token_locations) == 0:
  55. return []
  56. # only one image present, unmask until end of sequence
  57. if len(image_token_locations) == 1:
  58. return [[image_token_locations[0], -1]]
  59. vision_masks = [[loc1, loc2] for loc1, loc2 in zip(image_token_locations[:-1], image_token_locations[1:])]
  60. # last image will attend to all subsequent text
  61. vision_masks.append([image_token_locations[-1], len(input_ids)])
  62. # if there are two or more consecutive vision tokens,
  63. # they should all attend to all subsequent
  64. # text present
  65. last_mask_end = vision_masks[-1][1]
  66. for vision_mask in vision_masks[::-1]:
  67. if vision_mask[0] == vision_mask[1] - 1:
  68. vision_mask[1] = last_mask_end
  69. last_mask_end = vision_mask[1]
  70. return vision_masks


def convert_sparse_cross_attention_mask_to_dense(
    cross_attention_token_mask: List[List[List[int]]],
    num_tiles: List[List[int]],
    max_num_tiles: int,
    length: int,
) -> np.ndarray:
    """
    Convert the cross attention mask indices to a cross attention mask 4D array.

    This function takes a sparse representation of cross attention masks and converts it to a dense 4D numpy array.
    The sparse representation is a nested list structure that defines attention ranges for each image in each batch item.

    Args:
        cross_attention_token_mask (List[List[List[int]]]): A nested list structure where:
            - The outer list represents the batch dimension.
            - The middle list represents different images within each batch item.
            - The inner list contains pairs of integers [start, end] representing token ranges for each image.
        num_tiles (List[List[int]]): A nested list structure specifying the number of tiles for each image in each batch item.
        max_num_tiles (int): The maximum possible number of tiles.
        length (int): The total sequence length of the input.

    Returns:
        np.ndarray: A 4D numpy array of shape (batch_size, length, max_num_images, max_num_tiles).
        The array contains `1` where attention is allowed and `0` where it is not.

    Note:
        - Special handling is done for cases where the end token is -1, which is interpreted as attending to the end of the sequence.
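
    Example (illustrative; a batch of two samples, one image each, with hypothetical
    token ranges and tile counts):

    >>> mask = convert_sparse_cross_attention_mask_to_dense(
    ...     cross_attention_token_mask=[[[0, 2]], [[1, -1]]],
    ...     num_tiles=[[2], [3]],
    ...     max_num_tiles=4,
    ...     length=4,
    ... )
    >>> mask.shape
    (2, 4, 1, 4)
    >>> mask[1, :, 0, :].tolist()  # second sample: attends from token 1 to the end, over 3 tiles
    [[0, 0, 0, 0], [1, 1, 1, 0], [1, 1, 1, 0], [1, 1, 1, 0]]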
  94. """
  95. batch_size = len(cross_attention_token_mask)
  96. max_num_images = max([len(masks) for masks in cross_attention_token_mask])
  97. cross_attention_mask = np.zeros(
  98. shape=(batch_size, length, max_num_images, max_num_tiles),
  99. dtype=np.int64,
  100. )
  101. for sample_idx, (sample_masks, sample_num_tiles) in enumerate(zip(cross_attention_token_mask, num_tiles)):
  102. for mask_idx, (locations, mask_num_tiles) in enumerate(zip(sample_masks, sample_num_tiles)):
  103. if len(locations) == 2:
  104. start, end = locations
  105. end = min(end, length)
  106. if end == -1:
  107. end = length
  108. cross_attention_mask[sample_idx, start:end, mask_idx, :mask_num_tiles] = 1
  109. return cross_attention_mask


def build_string_from_input(prompt: str, bos_token: str, image_token: str) -> str:
    """
    Builds a string from the input prompt by adding `bos_token` if not already present.

    Args:
        prompt (`str`):
            The input prompt string.
        bos_token (`str`):
            The beginning of sentence token to be added.
        image_token (`str`):
            The image token used to identify the start of an image sequence.

    Returns:
        str: The modified prompt string with the `bos_token` added if necessary.

    Examples:
        >>> build_string_from_input("Hello world", "<begin_of_text>", "<|image|>")
        '<begin_of_text>Hello world'

        >>> build_string_from_input("<|image|>Hello world", "<begin_of_text>", "<|image|>")
        '<|image|><begin_of_text>Hello world'

        >>> build_string_from_input("<begin_of_text>Hello world", "<begin_of_text>", "<|image|>")
        '<begin_of_text>Hello world'
    """
    if bos_token in prompt:
        return prompt

    num_image_tokens_on_start = 0
    while prompt.startswith(image_token):
        prompt = prompt[len(image_token) :]
        num_image_tokens_on_start += 1

    return f"{image_token * num_image_tokens_on_start}{bos_token}{prompt}"


class MllamaProcessor(ProcessorMixin):
    r"""
    Constructs a Mllama processor which wraps [`MllamaImageProcessor`] and
    [`PreTrainedTokenizerFast`] into a single processor that inherits both the image processor and
    tokenizer functionalities. See the [`~MllamaProcessor.__call__`] and [`~MllamaProcessor.decode`] for more
    information.
    The preferred way of passing kwargs is as a dictionary per modality; see the usage example below.

    ```python
    from transformers import MllamaProcessor
    from PIL import Image

    processor = MllamaProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision")

    processor(
        images=your_pil_image,
        text=["<|image|>If I had to write a haiku for this one"],
        images_kwargs={"size": {"height": 448, "width": 448}},
        text_kwargs={"padding": "right"},
        common_kwargs={"return_tensors": "pt"},
    )
    ```

    Args:
        image_processor ([`MllamaImageProcessor`]):
            The image processor is a required input.
        tokenizer ([`PreTrainedTokenizer`, `PreTrainedTokenizerFast`]):
            The tokenizer is a required input.
    """

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "MllamaImageProcessor"
    tokenizer_class = "PreTrainedTokenizerFast"

    def __init__(self, image_processor, tokenizer):
        self.image_token = "<|image|>"
        self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
        self.python_token = "<|python_tag|>"
        self.python_token_id = tokenizer.convert_tokens_to_ids(self.python_token)
        self.bos_token = tokenizer.bos_token
        self.chat_template = tokenizer.chat_template
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        audio=None,
        videos=None,
        **kwargs: Unpack[MllamaProcessorKwargs],
    ) -> BatchFeature:
        """
        Main method to prepare text(s) and image(s) to be fed as input to the model. This method forwards the `text`
        arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizerFast.__call__`] if `text` is not `None` to encode
        the text. To prepare the image(s), this method forwards the `images` arguments to
        MllamaImageProcessor's [`~MllamaImageProcessor.__call__`] if `images` is not `None`. Please refer
        to the docstring of the above two methods for more information.

        Args:
            images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `List[str]`, `List[List[str]]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
            return_tensors (`str` or [`~utils.TensorType`], *optional*):
                If set, will return tensors of a particular framework. Acceptable values are:
                - `'tf'`: Return TensorFlow `tf.constant` objects.
                - `'pt'`: Return PyTorch `torch.Tensor` objects.
                - `'np'`: Return NumPy `np.ndarray` objects.
                - `'jax'`: Return JAX `jnp.ndarray` objects.

        Returns:
            [`BatchFeature`]: A [`BatchFeature`] with the following fields:

            - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
              `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
              `None`).
            - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.

        TODO: add aspect_ratio_ids and aspect_ratio_mask and cross_attention_mask
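
        Example (illustrative; assumes a local image and access to the pretrained checkpoint,
        so the doctest is skipped):

        >>> from PIL import Image
        >>> processor = MllamaProcessor.from_pretrained("meta-llama/Llama-3.2-11B-Vision")  # doctest: +SKIP
        >>> image = Image.open("path/to/image.jpg")  # doctest: +SKIP
        >>> inputs = processor(images=image, text="<|image|>Describe this image.", return_tensors="np")  # doctest: +SKIP
        >>> sorted(inputs.keys())  # tokenizer outputs, image processor outputs, and the cross attention mask  # doctest: +SKIP
        ['aspect_ratio_ids', 'aspect_ratio_mask', 'attention_mask', 'cross_attention_mask', 'input_ids', 'pixel_values']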
  209. """
  210. if text is None and images is None:
  211. raise ValueError("You must specify either text or images.")
  212. output_kwargs = self._merge_kwargs(
  213. MllamaProcessorKwargs,
  214. tokenizer_init_kwargs=self.tokenizer.init_kwargs,
  215. **kwargs,
  216. )
  217. text_kwargs = output_kwargs["text_kwargs"]
  218. images_kwargs = output_kwargs["images_kwargs"]
  219. common_kwargs = output_kwargs["common_kwargs"]
  220. data = {}
  221. if text is not None:
  222. if isinstance(text, str):
  223. text = [text]
  224. elif not (isinstance(text, (list, tuple)) and all(isinstance(t, str) for t in text)):
  225. raise ValueError("Invalid input text. Please provide a string, or a list of strings")
  226. n_images_in_text = [t.count(self.image_token) for t in text]
  227. text = [build_string_from_input(text_item, self.bos_token, self.image_token) for text_item in text]
  228. _ = text_kwargs.pop("padding_side", None) # hack until padding-side is an accepted kwarg by tokenizers
  229. encoding = self.tokenizer(text, **text_kwargs)
  230. data.update(encoding)
  231. n_images_in_images = [0]
  232. if images is not None:
  233. images = make_list_of_images(images)
  234. n_images_in_images = [len(sample) for sample in images]
  235. if text is not None:
  236. if any(batch_img == 0 for batch_img in n_images_in_text) and not all(
  237. batch_img == 0 for batch_img in n_images_in_text
  238. ):
  239. raise ValueError(
  240. "If a batch of text is provided, there should be either no images or at least one image per sample"
  241. )
  242. if sum(n_images_in_images) != sum(n_images_in_text):
  243. if images is None:
  244. raise ValueError("No image were provided, but there are image tokens in the prompt")
  245. else:
  246. raise ValueError(
  247. f"The number of image token ({sum(n_images_in_text)}) should be the same as in the number of provided images ({sum(n_images_in_images)})"
  248. )
  249. if images is not None:
  250. image_features = self.image_processor(images, **images_kwargs)
  251. num_tiles = image_features.pop("num_tiles")
  252. data.update(image_features)
  253. # Create cross attention mask
  254. if images is not None and text is not None:
  255. cross_attention_token_mask = [
  256. get_cross_attention_token_mask(token_ids, self.image_token_id) for token_ids in encoding["input_ids"]
  257. ]
  258. cross_attention_mask = convert_sparse_cross_attention_mask_to_dense(
  259. cross_attention_token_mask,
  260. num_tiles=num_tiles,
  261. max_num_tiles=self.image_processor.max_image_tiles,
  262. length=max(len(input_ids) for input_ids in encoding["input_ids"]),
  263. )
  264. data["cross_attention_mask"] = cross_attention_mask
  265. return_tensors = common_kwargs.pop("return_tensors", None)
  266. batch_feature = BatchFeature(data=data, tensor_type=return_tensors)
  267. return batch_feature

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to PreTrainedTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(tokenizer_input_names + image_processor_input_names + ["cross_attention_mask"])