visual_question_answering.py

from typing import List, Union

from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import Pipeline, build_pipeline_init_args

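# PIL and torch are soft dependencies: the guarded imports below keep this
# module importable when the vision or torch extras are missing, although the
# pipeline itself needs both at runtime.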
if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_torch_available():
    from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES
    from .pt_utils import KeyDataset

logger = logging.get_logger(__name__)

@add_end_docstrings(build_pipeline_init_args(has_tokenizer=True, has_image_processor=True))
class VisualQuestionAnsweringPipeline(Pipeline):
    """
    Visual Question Answering pipeline using an `AutoModelForVisualQuestionAnswering`. This pipeline is currently
    only available in PyTorch.

    Example:

    ```python
    >>> from transformers import pipeline

    >>> oracle = pipeline(model="dandelin/vilt-b32-finetuned-vqa")
    >>> image_url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png"
    >>> oracle(question="What is she wearing ?", image=image_url)
    [{'score': 0.948, 'answer': 'hat'}, {'score': 0.009, 'answer': 'fedora'}, {'score': 0.003, 'answer': 'clothes'}, {'score': 0.003, 'answer': 'sun hat'}, {'score': 0.002, 'answer': 'nothing'}]

    >>> oracle(question="What is she wearing ?", image=image_url, top_k=1)
    [{'score': 0.948, 'answer': 'hat'}]

    >>> oracle(question="Is this a person ?", image=image_url, top_k=1)
    [{'score': 0.993, 'answer': 'yes'}]

    >>> oracle(question="Is this a man ?", image=image_url, top_k=1)
    [{'score': 0.996, 'answer': 'no'}]
    ```

    Learn more about the basics of using a pipeline in the [pipeline tutorial](../pipeline_tutorial).

    This visual question answering pipeline can currently be loaded from [`pipeline`] using the following task
    identifiers: `"visual-question-answering"`, `"vqa"`.

    The models that this pipeline can use are models that have been fine-tuned on a visual question answering task.
    See the up-to-date list of available models on
    [huggingface.co/models](https://huggingface.co/models?filter=visual-question-answering).
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING_NAMES)

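    # `_sanitize_parameters` routes call-time kwargs to the pipeline stages:
    # `padding`, `truncation` and `timeout` go to `preprocess`, `top_k` goes to
    # `postprocess`, and `_forward` takes no extra parameters, so the middle
    # dict it returns is always empty.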
    def _sanitize_parameters(self, top_k=None, padding=None, truncation=None, timeout=None, **kwargs):
        preprocess_params, postprocess_params = {}, {}
        if padding is not None:
            preprocess_params["padding"] = padding
        if truncation is not None:
            preprocess_params["truncation"] = truncation
        if timeout is not None:
            preprocess_params["timeout"] = timeout
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(
        self,
        image: Union["Image.Image", str, List["Image.Image"], List[str], "KeyDataset"],
        question: Union[str, List[str]] = None,
        **kwargs,
    ):
        r"""
        Answers open-ended questions about images. The pipeline accepts several types of inputs which are detailed
        below:

        - `pipeline(image=image, question=question)`
        - `pipeline({"image": image, "question": question})`
        - `pipeline([{"image": image, "question": question}])`
        - `pipeline([{"image": image, "question": question}, {"image": image, "question": question}])`

        Args:
            image (`str`, `List[str]`, `PIL.Image`, `List[PIL.Image]` or `KeyDataset`):
                The pipeline handles three types of images:

                - A string containing an HTTP link pointing to an image
                - A string containing a local path to an image
                - An image loaded in PIL directly

                The pipeline accepts either a single image or a batch of images. If given a single image, it can be
                broadcast to multiple questions.

                When a dataset is passed, it must be of type `transformers.pipelines.pt_utils.KeyDataset`, as in the
                following example:

                ```python
                >>> from transformers.pipelines.pt_utils import KeyDataset
                >>> from datasets import load_dataset

                >>> dataset = load_dataset("detection-datasets/coco", split="train")
                >>> oracle(image=KeyDataset(dataset, "image"), question="What's in this image?")
                ```
            question (`str`, `List[str]`):
                The question(s) asked. If given a single question, it can be broadcast to multiple images. If
                multiple images and multiple questions are given, every question is broadcast to every image (same
                effect as a Cartesian product of image/question pairs).
            top_k (`int`, *optional*, defaults to 5):
                The number of top labels that will be returned by the pipeline. If the provided number is higher than
                the number of labels available in the model configuration, it will default to the number of labels.
            timeout (`float`, *optional*, defaults to None):
                The maximum time in seconds to wait for fetching images from the web. If None, no timeout is set and
                the call may block forever.

        Return:
            A dictionary or a list of dictionaries containing the result. The dictionaries contain the following keys:

            - **answer** (`str`) -- The answer predicted by the model.
            - **score** (`float`) -- The probability attributed by the model to that answer (absent for generative
              models, which return only the answer string).
        """
        is_dataset = isinstance(image, KeyDataset)
        is_image_batch = isinstance(image, list) and all(isinstance(item, (Image.Image, str)) for item in image)
        is_question_batch = isinstance(question, list) and all(isinstance(item, str) for item in question)

        if isinstance(image, (Image.Image, str)) and isinstance(question, str):
            inputs = {"image": image, "question": question}
        elif (is_image_batch or is_dataset) and isinstance(question, str):
            inputs = [{"image": im, "question": question} for im in image]
        elif isinstance(image, (Image.Image, str)) and is_question_batch:
            inputs = [{"image": image, "question": q} for q in question]
        elif (is_image_batch or is_dataset) and is_question_batch:
            question_image_pairs = []
            for q in question:
                for im in image:
                    question_image_pairs.append({"image": im, "question": q})
            inputs = question_image_pairs
        else:
            # Supports the remaining formats:
            # - {"image": image, "question": question}
            # - [{"image": image, "question": question}]
            # - Generators and datasets
            inputs = image
        results = super().__call__(inputs, **kwargs)
        return results

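    # `preprocess` merges the tokenized question and the extracted image
    # features into a single dict of model inputs.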
    def preprocess(self, inputs, padding=False, truncation=False, timeout=None):
        image = load_image(inputs["image"], timeout=timeout)
        model_inputs = self.tokenizer(
            inputs["question"],
            return_tensors=self.framework,
            padding=padding,
            truncation=truncation,
        )
        image_features = self.image_processor(images=image, return_tensors=self.framework)
        if self.framework == "pt":
            image_features = image_features.to(self.torch_dtype)
        model_inputs.update(image_features)
        return model_inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.model.can_generate():
            # A user-defined `generation_config` passed to the pipeline call takes precedence
            if "generation_config" not in generate_kwargs:
                generate_kwargs["generation_config"] = self.generation_config
            model_outputs = self.model.generate(**model_inputs, **generate_kwargs)
        else:
            model_outputs = self.model(**model_inputs)
        return model_outputs

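    # Postprocessing mirrors the two `_forward` modes: decode generated token
    # ids into answer strings, or treat the logits as multi-label scores
    # (sigmoid rather than softmax) and keep the `top_k` best labels.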
    def postprocess(self, model_outputs, top_k=5):
        if self.model.can_generate():
            return [
                {"answer": self.tokenizer.decode(output_ids, skip_special_tokens=True).strip()}
                for output_ids in model_outputs
            ]
        else:
            if top_k > self.model.config.num_labels:
                top_k = self.model.config.num_labels

            if self.framework == "pt":
                probs = model_outputs.logits.sigmoid()[0]
                scores, ids = probs.topk(top_k)
            else:
                raise ValueError(f"Unsupported framework: {self.framework}")

            scores = scores.tolist()
            ids = ids.tolist()
            return [{"score": score, "answer": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
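

if __name__ == "__main__":
    # Illustrative usage: a minimal sketch (not part of the original module) of
    # the broadcasting rules implemented in `__call__` above. It assumes the
    # public `transformers.pipeline` entry point and network access to the demo
    # checkpoint used in the class docstring.
    from transformers import pipeline

    vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    url = "https://huggingface.co/datasets/Narsil/image_dummy/raw/main/lena.png"

    # One image, one question -> a list of {"score", "answer"} dicts.
    print(vqa(image=url, question="What is she wearing ?", top_k=2))

    # One image, several questions -> the image is broadcast to each question.
    print(vqa(image=url, question=["Is this a person ?", "Is this a man ?"], top_k=1))

    # An image list and a question list -> one result per image/question pair
    # (Cartesian product).
    print(vqa(image=[url, url], question=["Is this a person ?", "Is this a man ?"], top_k=1))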