processing_fuyu.py

# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image/Text processor class for Fuyu.
"""
import re
from typing import Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_utils import ImageInput
from ...processing_utils import ProcessingKwargs, ProcessorMixin, Unpack, _validate_images_text_input_order
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import is_torch_available, logging, requires_backends


if is_torch_available():
    from .image_processing_fuyu import FuyuBatchFeature


logger = logging.get_logger(__name__)


if is_torch_available():
    import torch


TEXT_REPR_BBOX_OPEN = "<box>"
TEXT_REPR_BBOX_CLOSE = "</box>"
TEXT_REPR_POINT_OPEN = "<point>"
TEXT_REPR_POINT_CLOSE = "</point>"

TOKEN_BBOX_OPEN_STRING = "<0x00>"  # <bbox>
TOKEN_BBOX_CLOSE_STRING = "<0x01>"  # </bbox>
TOKEN_POINT_OPEN_STRING = "<0x02>"  # <point>
TOKEN_POINT_CLOSE_STRING = "<0x03>"  # </point>
BEGINNING_OF_ANSWER_STRING = "<0x04>"  # <boa>


class FuyuProcessorKwargs(ProcessingKwargs, total=False):
    _defaults = {
        "text_kwargs": {
            "add_special_tokens": True,
            "padding": False,
            "stride": 0,
            "return_attention_mask": True,
            "return_overflowing_tokens": False,
            "return_special_tokens_mask": False,
            "return_offsets_mapping": False,
            "return_token_type_ids": False,
            "return_length": False,
            "verbose": True,
        },
        "images_kwargs": {},
    }


def full_unpacked_stream_to_tensor(
    all_bi_tokens_to_place: List[int],
    full_unpacked_stream: List["torch.Tensor"],
    fill_value: int,
    batch_size: int,
    new_seq_len: int,
    offset: int,
) -> "torch.Tensor":
    """Takes an unpacked stream of tokens (i.e. a list of tensors, one for each item in the batch) and does
    the required padding to create a single tensor for the batch of shape batch_size x new_seq_len.
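
    Example (illustrative sketch; the values below are arbitrary and only show the padding behaviour):

        >>> stream = [torch.tensor([1, 2, 3]), torch.tensor([4, 5])]
        >>> full_unpacked_stream_to_tensor(
        ...     all_bi_tokens_to_place=[3, 2],
        ...     full_unpacked_stream=stream,
        ...     fill_value=0,
        ...     batch_size=2,
        ...     new_seq_len=4,
        ...     offset=0,
        ... ).tolist()
        [[1, 2, 3, 0], [4, 5, 0, 0]]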
  65. """
  66. assert len(all_bi_tokens_to_place) == batch_size
  67. assert len(full_unpacked_stream) == batch_size
  68. # Create padded tensors for the full batch.
  69. new_padded_tensor = torch.full(
  70. [batch_size, new_seq_len],
  71. fill_value=fill_value,
  72. dtype=full_unpacked_stream[0].dtype,
  73. device=full_unpacked_stream[0].device,
  74. )
  75. # Place each batch entry into the batch tensor.
  76. for bi in range(batch_size):
  77. tokens_to_place = all_bi_tokens_to_place[bi]
  78. new_padded_tensor[bi, :tokens_to_place] = full_unpacked_stream[bi][offset : tokens_to_place + offset]
  79. return new_padded_tensor


def construct_full_unpacked_stream(
    num_real_text_tokens: Union[List[List[int]], "torch.Tensor"],
    input_stream: "torch.Tensor",
    image_tokens: List[List["torch.Tensor"]],
    batch_size: int,
    num_sub_sequences: int,
) -> List["torch.Tensor"]:
    """Takes an input_stream tensor of shape B x S x ?. For each subsequence, adds any required
    padding to account for images and then unpacks the subsequences to create a single sequence per item in the batch.
    Returns a list of tensors, one for each item in the batch."""
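    # Illustrative sketch (not executed; the token ids are arbitrary placeholders): for one batch item whose image
    # placeholder stream is [71011, 71011, 71019] and whose padded text stream is [1, 123, 456, 0, 0] with 3 real
    # text tokens,
    #     construct_full_unpacked_stream(
    #         [[3]], torch.tensor([[[1, 123, 456, 0, 0]]]),
    #         [[torch.tensor([71011, 71011, 71019])]], batch_size=1, num_sub_sequences=1,
    #     )
    # returns [tensor([71011, 71011, 71019, 1, 123, 456])].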
    all_bi_stream = []

    for batch_index in range(batch_size):
        all_si_stream = []

        # First, construct full token stream (including image placeholder tokens) and loss mask for each subsequence
        # and append to lists. We use lists rather than tensors because each subsequence is variable-sized.
        # TODO Remove this logic in a subsequent release since subsequences are not supported.
        image_adjustment = image_tokens[batch_index][0]
        subsequence_stream = torch.cat([image_adjustment, input_stream[batch_index, 0]], dim=0)
        num_real_tokens = image_adjustment.shape[0] + num_real_text_tokens[batch_index][0]
        all_si_stream.append(subsequence_stream[:num_real_tokens])
        all_bi_stream.append(torch.cat(all_si_stream, dim=0))

    return all_bi_stream


def _replace_string_repr_with_token_tags(prompt: str) -> str:
    prompt = prompt.replace(TEXT_REPR_POINT_OPEN, TOKEN_POINT_OPEN_STRING)
    prompt = prompt.replace(TEXT_REPR_POINT_CLOSE, TOKEN_POINT_CLOSE_STRING)
    prompt = prompt.replace(TEXT_REPR_BBOX_OPEN, TOKEN_BBOX_OPEN_STRING)
    prompt = prompt.replace(TEXT_REPR_BBOX_CLOSE, TOKEN_BBOX_CLOSE_STRING)
    return prompt


def _segment_prompt_into_text_token_conversions(prompt: str) -> List:
    """
    Given a string prompt, converts the prompt into a list of TextTokenConversions.
    """
    # Wherever we notice one of the coordinate open/close tag tokens, we split the prompt.
    prompt_text_list: List = []
    regex_pattern = re.compile(
        f"({TOKEN_BBOX_OPEN_STRING}|{TOKEN_BBOX_CLOSE_STRING}|{TOKEN_POINT_OPEN_STRING}|{TOKEN_POINT_CLOSE_STRING})"
    )
    # Split by the regex pattern
    prompt_split = regex_pattern.split(prompt)
    for i, elem in enumerate(prompt_split):
        if len(elem) == 0 or elem in [
            TOKEN_BBOX_OPEN_STRING,
            TOKEN_BBOX_CLOSE_STRING,
            TOKEN_POINT_OPEN_STRING,
            TOKEN_POINT_CLOSE_STRING,
        ]:
            continue
        prompt_text_list.append(
            (elem, i > 1 and prompt_split[i - 1] in [TOKEN_BBOX_OPEN_STRING, TOKEN_POINT_OPEN_STRING])
        )
    return prompt_text_list


def _transform_coordinates_and_tokenize(prompt: str, scale_factor: float, tokenizer) -> List[int]:
    """
    This function transforms the prompt in the following fashion:
    - <box> <point> and </box> </point> to their respective token mappings
    - extract the coordinates from the tag
    - transform the coordinates into the transformed image space
    - return the prompt tokens with the transformed coordinates and new tags

    Bounding boxes and points MUST be in the following format: <box>y1, x1, y2, x2</box> <point>x, y</point> The spaces
    and punctuation added above are NOT optional.
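
    Example (illustrative sketch; the resulting token ids depend on the tokenizer, so the call is not executed here):

        >>> prompt = "Locate the cat. <box>120, 40, 400, 300</box>"  # doctest: +SKIP
        >>> token_ids = _transform_coordinates_and_tokenize(prompt, scale_factor=0.5, tokenizer=tokenizer)  # doctest: +SKIP

    The text outside the tags is tokenized normally, while "120, 40, 400, 300" is halved, rescaled by `scale_factor`,
    and emitted as individual number tokens wrapped between the <0x00> and <0x01> tag tokens.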
  140. """
  141. # Make a namedtuple that stores "text" and "is_bbox"
  142. # We want to do the following: Tokenize the code normally -> when we see a point or box, tokenize using the tokenize_within_tag function
  143. # When point or box close tag, continue tokenizing normally
  144. # First, we replace the point and box tags with their respective tokens
  145. prompt = _replace_string_repr_with_token_tags(prompt)
  146. # Tokenize the prompt
  147. # Convert prompt into a list split
  148. prompt_text_list = _segment_prompt_into_text_token_conversions(prompt)
  149. transformed_prompt_tokens: List[int] = []
  150. for elem in prompt_text_list:
  151. if elem[1]:
  152. # This is a location, we need to tokenize it
  153. within_tag_tokenized = _transform_within_tags(elem[0], scale_factor, tokenizer)
  154. # Surround the text with the open and close tags
  155. transformed_prompt_tokens.extend(within_tag_tokenized)
  156. else:
  157. transformed_prompt_tokens.extend(tokenizer(elem[0], add_special_tokens=False).input_ids)
  158. return transformed_prompt_tokens


def _transform_within_tags(text: str, scale_factor: float, tokenizer) -> List[int]:
    """
    Given the text inside a bounding box or point tag, e.g. "1, 2, 3, 4" from <box>1, 2, 3, 4</box> or "1, 2" from
    <point>1, 2</point>, this function scales the coordinates and converts them into coordinate tokens of 1 2 3 4
    without any commas, wrapped in the corresponding open/close tag tokens.
    """
    # Convert the text into a list of strings.
    num_int_strs = text.split(",")
    if len(num_int_strs) == 2:
        # Two numbers means a point: use the point open/close token ids.
        token_space_open_string = tokenizer.vocab[TOKEN_POINT_OPEN_STRING]
        token_space_close_string = tokenizer.vocab[TOKEN_POINT_CLOSE_STRING]
    else:
        token_space_open_string = tokenizer.vocab[TOKEN_BBOX_OPEN_STRING]
        token_space_close_string = tokenizer.vocab[TOKEN_BBOX_CLOSE_STRING]

    # Strip whitespace and convert each coordinate to a float.
    num_ints = [float(num.strip()) for num in num_int_strs]
    # Scale to the transformed image size.
    if len(num_ints) == 2:
        num_ints_translated = scale_point_to_transformed_image(x=num_ints[0], y=num_ints[1], scale_factor=scale_factor)
    elif len(num_ints) == 4:
        num_ints_translated = scale_bbox_to_transformed_image(
            top=num_ints[0],
            left=num_ints[1],
            bottom=num_ints[2],
            right=num_ints[3],
            scale_factor=scale_factor,
        )
    else:
        raise ValueError(f"Invalid number of ints: {len(num_ints)}")
    # Look up the vocabulary id of each transformed coordinate (the commas are dropped).
    tokens = [tokenizer.vocab[str(num)] for num in num_ints_translated]
    return [token_space_open_string] + tokens + [token_space_close_string]


def _tokenize_prompts_with_image_and_batch(
    tokenizer,
    prompts: List[List[str]],
    scale_factors: Optional[List[List["torch.Tensor"]]],
    max_tokens_to_generate: int,
    max_position_embeddings: int,
    add_BOS: bool,
    add_beginning_of_answer_token: bool,
) -> Tuple["torch.Tensor", "torch.Tensor"]:
    """
    Given a set of prompts and number of tokens to generate:
    - tokenize prompts
    - set the sequence length to be the max of length of prompts plus the number of tokens we would like to generate
    - pad all the sequences to this length so we can convert them into a 3D tensor.
    """
    # If scale factors are provided, transform the coordinates while tokenizing.
    if scale_factors is not None:
        transformed_prompt_tokens = []
        for prompt_seq, scale_factor_seq in zip(prompts, scale_factors):
            transformed_prompt_tokens.append(
                [
                    _transform_coordinates_and_tokenize(prompt, scale_factor.item(), tokenizer)
                    for prompt, scale_factor in zip(prompt_seq, scale_factor_seq)
                ]
            )
    else:
        transformed_prompt_tokens = [[tokenizer.tokenize(prompt) for prompt in prompt_seq] for prompt_seq in prompts]

    prompts_tokens = transformed_prompt_tokens

    if add_BOS:
        bos_token = tokenizer.vocab["<s>"]
    else:
        bos_token = tokenizer.vocab["|ENDOFTEXT|"]
    prompts_tokens = [[[bos_token] + x for x in prompt_seq] for prompt_seq in prompts_tokens]
    if add_beginning_of_answer_token:
        boa = tokenizer.vocab[BEGINNING_OF_ANSWER_STRING]
        # Only add the beginning-of-answer token to the last subsequence since that is what will be completed.
        for token_seq in prompts_tokens:
            token_seq[-1].append(boa)

    # Now we have a list of lists of tokens where each list has a different size. We want to extend this list to:
    #   - incorporate the tokens that need to be generated
    #   - make all the sequences equal length.
    # Get the prompts length.
    prompts_length = [[len(x) for x in prompts_tokens_seq] for prompts_tokens_seq in prompts_tokens]
    # Get the max prompts length.
    max_prompt_len: int = np.max(prompts_length)
    # Number of tokens in each sample of the batch.
    samples_length = min(max_prompt_len + max_tokens_to_generate, max_position_embeddings)
    if max_prompt_len + max_tokens_to_generate > max_position_embeddings:
        logger.warning(
            f"Max subsequence prompt length of {max_prompt_len} + max tokens to generate {max_tokens_to_generate} "
            f"exceeds context length of {max_position_embeddings}. Will generate as many tokens as possible."
        )
    # Now update the list of lists to be of the same size: samples_length.
    for prompt_tokens_seq, prompts_length_seq in zip(prompts_tokens, prompts_length):
        for prompt_tokens, prompt_length in zip(prompt_tokens_seq, prompts_length_seq):
            if len(prompt_tokens) > samples_length:
                raise ValueError("Length of subsequence prompt exceeds sequence length.")
            padding_size = samples_length - prompt_length
            prompt_tokens.extend([tokenizer.vocab["|ENDOFTEXT|"]] * padding_size)

    # Now we are in a structured format, we can convert to tensors.
    prompts_tokens_tensor = torch.tensor(prompts_tokens, dtype=torch.int64)
    prompts_length_tensor = torch.tensor(prompts_length, dtype=torch.int64)

    return prompts_tokens_tensor, prompts_length_tensor


# Simplified assuming self.crop_top = self.padding_top = 0
def original_to_transformed_h_coords(original_coords, scale_h):
    return np.round(original_coords * scale_h).astype(np.int32)


# Simplified assuming self.crop_left = self.padding_left = 0
def original_to_transformed_w_coords(original_coords, scale_w):
    return np.round(original_coords * scale_w).astype(np.int32)


def scale_point_to_transformed_image(x: float, y: float, scale_factor: float) -> List[int]:
    x_scaled = original_to_transformed_w_coords(np.array([x / 2]), scale_factor)[0]
    y_scaled = original_to_transformed_h_coords(np.array([y / 2]), scale_factor)[0]
    return [x_scaled, y_scaled]


def scale_bbox_to_transformed_image(
    top: float, left: float, bottom: float, right: float, scale_factor: float
) -> List[int]:
    top_scaled = original_to_transformed_w_coords(np.array([top / 2]), scale_factor)[0]
    left_scaled = original_to_transformed_h_coords(np.array([left / 2]), scale_factor)[0]
    bottom_scaled = original_to_transformed_w_coords(np.array([bottom / 2]), scale_factor)[0]
    right_scaled = original_to_transformed_h_coords(np.array([right / 2]), scale_factor)[0]
    return [top_scaled, left_scaled, bottom_scaled, right_scaled]
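

# Worked example (illustrative only, arbitrary values): with scale_factor=0.5, the point (x=100, y=200) is first
# halved to (50, 100) and then scaled, so scale_point_to_transformed_image(100, 200, 0.5) returns [25, 50].
# The same halve-then-scale rule is applied to each bounding-box coordinate.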


class FuyuProcessor(ProcessorMixin):
    r"""
    Constructs a Fuyu processor which wraps a Fuyu image processor and a Llama tokenizer into a single processor.

    [`FuyuProcessor`] offers all the functionalities of [`FuyuImageProcessor`] and [`LlamaTokenizerFast`]. See the
    [`~FuyuProcessor.__call__`] and [`~FuyuProcessor.decode`] for more information.

    Args:
        image_processor ([`FuyuImageProcessor`]):
            The image processor is a required input.
        tokenizer ([`LlamaTokenizerFast`]):
            The tokenizer is a required input.
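
    Example (illustrative sketch; assumes the `adept/fuyu-8b` checkpoint can be downloaded, so it is not executed here):

        >>> from transformers import FuyuProcessor  # doctest: +SKIP
        >>> processor = FuyuProcessor.from_pretrained("adept/fuyu-8b")  # doctest: +SKIP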
  283. """
  284. attributes = ["image_processor", "tokenizer"]
  285. valid_kwargs = []
  286. image_processor_class = "FuyuImageProcessor"
  287. tokenizer_class = "AutoTokenizer"
  288. def __init__(self, image_processor, tokenizer, **kwargs):
  289. super().__init__(image_processor=image_processor, tokenizer=tokenizer)
  290. self.image_processor = image_processor
  291. self.tokenizer = tokenizer
  292. self.max_tokens_to_generate = 10
  293. self.max_position_embeddings = 16384 # TODO Can't derive this from model files: where to set it?
  294. self.pad_token_id = 0
  295. self.dummy_image_index = -1
  296. def _left_pad_inputs_with_attention_mask(self, model_inputs: List[Dict], return_attention_mask: bool):
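        """
        Left-pads `input_ids` and `image_patches_indices` to the length of the longest sample in the batch, builds the
        matching `attention_mask`, and concatenates them into batch tensors. `image_patches` are collected as a list
        without padding.
        """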
        max_length_input_ids = max(entry["input_ids"].shape[1] for entry in model_inputs)
        max_length_image_patch_indices = max(entry["image_patches_indices"].shape[1] for entry in model_inputs)

        batched_inputs = {"input_ids": [], "image_patches": [], "image_patches_indices": [], "attention_mask": []}

        for entry in model_inputs:
            for key, tensor in entry.items():
                if key == "input_ids":
                    num_padding_tokens = max_length_input_ids - tensor.shape[1]
                    padded_input_ids = torch.cat(
                        [
                            torch.full((tensor.shape[0], num_padding_tokens), self.pad_token_id, dtype=torch.long),
                            tensor,
                        ],
                        dim=1,
                    )
                    batched_inputs[key].append(padded_input_ids)

                    attention_mask = torch.cat(
                        [torch.zeros(tensor.shape[0], num_padding_tokens, dtype=torch.long), torch.ones_like(tensor)],
                        dim=1,
                    )
                    batched_inputs["attention_mask"].append(attention_mask)

                elif key == "image_patches":
                    # For image_patches, we don't pad but just append them to the list.
                    batched_inputs[key].append(tensor)

                else:  # for image_patches_indices
                    num_padding_indices = max_length_image_patch_indices - tensor.shape[1]
                    padded_indices = torch.cat(
                        [
                            torch.full(
                                (tensor.shape[0], num_padding_indices), self.dummy_image_index, dtype=torch.long
                            ),
                            tensor,
                        ],
                        dim=1,
                    )
                    batched_inputs[key].append(padded_indices)
        batched_keys = ["input_ids", "image_patches_indices"]
        if return_attention_mask:
            batched_keys.append("attention_mask")
        for key in batched_keys:
            batched_inputs[key] = torch.cat(batched_inputs[key], dim=0)

        return batched_inputs

    def get_sample_encoding(
        self,
        prompts,
        scale_factors,
        image_unpadded_heights,
        image_unpadded_widths,
        image_placeholder_id,
        image_newline_id,
        tensor_batch_images,
    ):
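        """
        Builds the model inputs for a single (prompt, image) pair: the image is patchified and interleaved with
        placeholder/newline tokens by the image processor, the prompt is tokenized (transforming any coordinates),
        and both streams are packed into `input_ids`, `image_patches` and `image_patches_indices`.
        """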
        image_present = torch.ones(1, 1, 1)
        model_image_input = self.image_processor.preprocess_with_tokenizer_info(
            image_input=tensor_batch_images,
            image_present=image_present,
            image_unpadded_h=image_unpadded_heights,
            image_unpadded_w=image_unpadded_widths,
            image_placeholder_id=image_placeholder_id,
            image_newline_id=image_newline_id,
            variable_sized=True,
        )
        # FIXME max_tokens_to_generate is embedded into this processor's call.
        prompt_tokens, prompts_length = _tokenize_prompts_with_image_and_batch(
            tokenizer=self.tokenizer,
            prompts=prompts,
            scale_factors=scale_factors,
            max_tokens_to_generate=self.max_tokens_to_generate,
            max_position_embeddings=self.max_position_embeddings,
            add_BOS=True,
            add_beginning_of_answer_token=True,
        )
        image_padded_unpacked_tokens = construct_full_unpacked_stream(
            num_real_text_tokens=prompts_length,
            input_stream=prompt_tokens,
            image_tokens=model_image_input["image_input_ids"],
            batch_size=1,
            num_sub_sequences=self.subsequence_length,
        )
        # Construct inputs for image patch indices.
        unpacked_image_patch_indices_per_batch = construct_full_unpacked_stream(
            num_real_text_tokens=prompts_length,
            input_stream=torch.full_like(prompt_tokens, -1),
            image_tokens=model_image_input["image_patch_indices_per_batch"],
            batch_size=1,
            num_sub_sequences=self.subsequence_length,
        )
        max_prompt_length = max(x.shape[-1] for x in image_padded_unpacked_tokens)
        max_seq_len_batch = min(max_prompt_length + self.max_tokens_to_generate, self.max_position_embeddings)
        tokens_to_place = min(max_seq_len_batch, max(0, image_padded_unpacked_tokens[0].shape[0]))

        # Use same packing logic for the image patch indices.
        image_patch_input_indices = full_unpacked_stream_to_tensor(
            all_bi_tokens_to_place=[tokens_to_place],
            full_unpacked_stream=unpacked_image_patch_indices_per_batch,
            fill_value=-1,
            batch_size=1,
            new_seq_len=max_seq_len_batch,
            offset=0,
        )
        image_patches_tensor = torch.stack([img[0] for img in model_image_input["image_patches"]])
        batch_encoding = {
            "input_ids": image_padded_unpacked_tokens[0].unsqueeze(0),
            "image_patches": image_patches_tensor,
            "image_patches_indices": image_patch_input_indices,
        }
        return batch_encoding

    def __call__(
        self,
        images: ImageInput = None,
        text: Optional[Union[str, List[str], TextInput, PreTokenizedInput]] = None,
        audio=None,
        videos=None,
        **kwargs: Unpack[FuyuProcessorKwargs],
    ) -> "FuyuBatchFeature":
        """
        Main method to prepare one or several sequence(s) and image(s) for the model. This method forwards the `text`
        and `kwargs` arguments to LlamaTokenizerFast's [`~LlamaTokenizerFast.__call__`] if `text` is not `None` to
        encode the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
        FuyuImageProcessor's [`~FuyuImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
        of the above two methods for more information.

        Args:
            images (`PIL.Image.Image`, `List[PIL.Image.Image]`):
                The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
                tensor. Both channels-first and channels-last formats are supported.
            text (`str`, `List[str]`):
                The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
                (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
                `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).

        Returns:
            [`FuyuBatchFeature`]: A [`FuyuBatchFeature`] with the following fields:

            - **input_ids** -- Tensor of token ids to be fed to a model. Returned when `text` is not `None`.
            - **image_patches** -- List of Tensor of image patches. Returned when `images` is not `None`.
            - **image_patches_indices** -- Tensor of indices where patch embeddings have to be inserted by the model.
            - **attention_mask** -- List of indices specifying which tokens should be attended to by the model when
              `return_attention_mask=True`.
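
        Example (illustrative sketch; assumes the `adept/fuyu-8b` checkpoint is available, so the lines are not
        executed here):

            >>> from PIL import Image
            >>> from transformers import FuyuProcessor  # doctest: +SKIP
            >>> processor = FuyuProcessor.from_pretrained("adept/fuyu-8b")  # doctest: +SKIP
            >>> image = Image.new("RGB", (512, 384))
            >>> inputs = processor(images=image, text="Generate a coco-style caption.", return_tensors="pt")  # doctest: +SKIP
            >>> sorted(inputs.keys())  # doctest: +SKIP
            ['attention_mask', 'image_patches', 'image_patches_indices', 'input_ids']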
  431. """
  432. requires_backends(self, ["torch"])
  433. # --- Check input validity ---
  434. if text is None and images is None:
  435. raise ValueError("You have to specify either text or images. Both cannot be None.")
  436. # check if images and text inputs are reversed for BC
  437. images, text = _validate_images_text_input_order(images, text)
  438. output_kwargs = self._merge_kwargs(
  439. FuyuProcessorKwargs,
  440. tokenizer_init_kwargs=self.tokenizer.init_kwargs,
  441. **kwargs,
  442. )
  443. if not output_kwargs["text_kwargs"].setdefault("return_attention_mask", True):
  444. raise ValueError("`return_attention_mask=False` is not supported for this model.")
  445. if text is not None and images is None:
  446. logger.warning("You are processing a text with no associated image. Make sure it is intended.")
  447. self.current_processor = self.tokenizer
  448. text_encoding = self.tokenizer(text, **output_kwargs["text_kwargs"])
  449. return text_encoding
  450. if text is None and images is not None:
  451. logger.warning("You are processing an image with no associated text. Make sure it is intended.")
  452. prompts = [[""]]
  453. if text is not None and images is not None:
  454. if isinstance(text, str):
  455. prompts = [[text]]
  456. elif isinstance(text, list):
  457. prompts = [[text_seq] for text_seq in text]
  458. # --- Preprocess images using self.image_processor ---
  459. # FIXME - We hard code "pt" here because the rest of the processing assumes torch tensors
  460. output_kwargs["images_kwargs"]["return_tensors"] = "pt"
  461. image_encoding = self.image_processor.preprocess(images, **output_kwargs["images_kwargs"])
  462. batch_images = image_encoding["images"]
  463. image_unpadded_heights = image_encoding["image_unpadded_heights"]
  464. image_unpadded_widths = image_encoding["image_unpadded_widths"]
  465. scale_factors = image_encoding["image_scale_factors"]
  466. self.subsequence_length = 1 # Each batch contains only one sequence.
  467. self.batch_size = len(batch_images)
  468. # --- Use self.tokenizer to get the ids of special tokens to insert into image ids ---
  469. image_placeholder_id = self.tokenizer("|SPEAKER|", add_special_tokens=False)["input_ids"][1]
  470. image_newline_id = self.tokenizer("|NEWLINE|", add_special_tokens=False)["input_ids"][1]
  471. tensor_batch_images = torch.stack([img[0] for img in batch_images]).unsqueeze(1)
  472. # --- Use self.image_processor again to obtain the full token ids and batch inputs ---
  473. all_encodings = []
  474. for prompt, scale_factor, image_unpadded_height, image_unpadded_width, tensor_batch_image in zip(
  475. prompts, scale_factors, image_unpadded_heights, image_unpadded_widths, tensor_batch_images
  476. ):
  477. sample_encoding = self.get_sample_encoding(
  478. prompts=[prompt],
  479. scale_factors=[scale_factor],
  480. image_unpadded_heights=torch.tensor([image_unpadded_height]),
  481. image_unpadded_widths=torch.tensor([image_unpadded_width]),
  482. image_placeholder_id=image_placeholder_id,
  483. image_newline_id=image_newline_id,
  484. tensor_batch_images=tensor_batch_image.unsqueeze(0),
  485. )
  486. all_encodings.append(sample_encoding)
  487. batch_encoding = self._left_pad_inputs_with_attention_mask(
  488. model_inputs=all_encodings, return_attention_mask=True
  489. )
  490. return FuyuBatchFeature(data=batch_encoding)

    def post_process_box_coordinates(self, outputs, target_sizes=None):
        """
        Transforms raw coordinates detected by [`FuyuForCausalLM`] to the original images' coordinate space.
        Coordinates will be returned in "box" format, with the following pattern:
        `<box>top, left, bottom, right</box>`

        Point coordinates are handled analogously, in "point" format: `<point>x, y</point>`.

        Args:
            outputs ([`GenerateOutput`]):
                Raw outputs from `generate`.
            target_sizes (`torch.Tensor`, *optional*):
                Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in
                the batch. If set, found coordinates in the output sequence are rescaled to the target sizes. If left
                to None, coordinates will not be rescaled.

        Returns:
            `GenerateOutput`: Same output type returned by `generate`, with output token ids replaced with
            boxed and possibly rescaled coordinates.
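
        Example (illustrative sketch; assumes `model` is a `FuyuForCausalLM` and `inputs` were produced by this
        processor, so the lines are not executed here):

            >>> generated = model.generate(**inputs, max_new_tokens=20)  # doctest: +SKIP
            >>> generated = processor.post_process_box_coordinates(generated)  # doctest: +SKIP
            >>> processor.batch_decode(generated, skip_special_tokens=True)  # doctest: +SKIP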
  507. """
  508. def scale_factor_to_fit(original_size, target_size=None):
  509. height, width = original_size
  510. if target_size is None:
  511. max_height = self.image_processor.size["height"]
  512. max_width = self.image_processor.size["width"]
  513. else:
  514. max_height, max_width = target_size
  515. if width <= max_width and height <= max_height:
  516. return 1.0
  517. return min(max_height / height, max_width / width)
  518. def find_delimiters_pair(tokens, start_token, end_token):
  519. start_id = self.tokenizer.convert_tokens_to_ids(start_token)
  520. end_id = self.tokenizer.convert_tokens_to_ids(end_token)
  521. starting_positions = (tokens == start_id).nonzero(as_tuple=True)[0]
  522. ending_positions = (tokens == end_id).nonzero(as_tuple=True)[0]
  523. if torch.any(starting_positions) and torch.any(ending_positions):
  524. return (starting_positions[0], ending_positions[0])
  525. return (None, None)
  526. def tokens_to_boxes(tokens, original_size):
  527. while (pair := find_delimiters_pair(tokens, TOKEN_BBOX_OPEN_STRING, TOKEN_BBOX_CLOSE_STRING)) != (
  528. None,
  529. None,
  530. ):
  531. start, end = pair
  532. if end != start + 5:
  533. continue
  534. # Retrieve transformed coordinates from tokens
  535. coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end])
  536. # Scale back to original image size and multiply by 2
  537. scale = scale_factor_to_fit(original_size)
  538. top, left, bottom, right = [2 * int(float(c) / scale) for c in coords]
  539. # Replace the IDs so they get detokenized right
  540. replacement = f" {TEXT_REPR_BBOX_OPEN}{top}, {left}, {bottom}, {right}{TEXT_REPR_BBOX_CLOSE}"
  541. replacement = self.tokenizer.tokenize(replacement)[1:]
  542. replacement = self.tokenizer.convert_tokens_to_ids(replacement)
  543. replacement = torch.tensor(replacement).to(tokens)
  544. tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0)
  545. return tokens
  546. def tokens_to_points(tokens, original_size):
  547. while (pair := find_delimiters_pair(tokens, TOKEN_POINT_OPEN_STRING, TOKEN_POINT_CLOSE_STRING)) != (
  548. None,
  549. None,
  550. ):
  551. start, end = pair
  552. if end != start + 3:
  553. continue
  554. # Retrieve transformed coordinates from tokens
  555. coords = self.tokenizer.convert_ids_to_tokens(tokens[start + 1 : end])
  556. # Scale back to original image size and multiply by 2
  557. scale = scale_factor_to_fit(original_size)
  558. x, y = [2 * int(float(c) / scale) for c in coords]
  559. # Replace the IDs so they get detokenized right
  560. replacement = f" {TEXT_REPR_POINT_OPEN}{x}, {y}{TEXT_REPR_POINT_CLOSE}"
  561. replacement = self.tokenizer.tokenize(replacement)[1:]
  562. replacement = self.tokenizer.convert_tokens_to_ids(replacement)
  563. replacement = torch.tensor(replacement).to(tokens)
  564. tokens = torch.cat([tokens[:start], replacement, tokens[end + 1 :]], 0)
  565. return tokens
  566. if target_sizes is None:
  567. target_sizes = ((self.image_processor.size["height"], self.image_processor.size["width"]),) * len(outputs)
  568. elif target_sizes.shape[1] != 2:
  569. raise ValueError("Each element of target_sizes must contain the size (h, w) of each image of the batch")
  570. if len(outputs) != len(target_sizes):
  571. raise ValueError("Make sure that you pass in as many target sizes as output sequences")
  572. results = []
  573. for seq, size in zip(outputs, target_sizes):
  574. seq = tokens_to_boxes(seq, size)
  575. seq = tokens_to_points(seq, size)
  576. results.append(seq)
  577. return results

    def batch_decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
        refer to the docstring of this method for more information.
        """
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """
        This method forwards all its arguments to LlamaTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
        the docstring of this method for more information.
        """
        return self.tokenizer.decode(*args, **kwargs)