# image_utils.py
  1. # coding=utf-8
  2. # Copyright 2021 The HuggingFace Inc. team.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. import base64
  16. import os
  17. from io import BytesIO
  18. from typing import TYPE_CHECKING, Dict, Iterable, List, Optional, Tuple, Union
  19. import numpy as np
  20. import requests
  21. from packaging import version
  22. from .utils import (
  23. ExplicitEnum,
  24. is_jax_tensor,
  25. is_numpy_array,
  26. is_tf_tensor,
  27. is_torch_available,
  28. is_torch_tensor,
  29. is_torchvision_available,
  30. is_vision_available,
  31. logging,
  32. requires_backends,
  33. to_numpy,
  34. )
  35. from .utils.constants import ( # noqa: F401
  36. IMAGENET_DEFAULT_MEAN,
  37. IMAGENET_DEFAULT_STD,
  38. IMAGENET_STANDARD_MEAN,
  39. IMAGENET_STANDARD_STD,
  40. OPENAI_CLIP_MEAN,
  41. OPENAI_CLIP_STD,
  42. )
# Optional-backend set-up: each import below only runs when the corresponding
# library is installed, so this module stays importable without PIL/torchvision/torch.
if is_vision_available():
    import PIL.Image
    import PIL.ImageOps

    # Pillow 9.1.0 moved the resampling filters into the `PIL.Image.Resampling` enum;
    # alias both layouts behind one name so the rest of the codebase can use
    # `PILImageResampling.BILINEAR` etc. regardless of the installed Pillow version.
    if version.parse(version.parse(PIL.__version__).base_version) >= version.parse("9.1.0"):
        PILImageResampling = PIL.Image.Resampling
    else:
        PILImageResampling = PIL.Image

if is_torchvision_available():
    from torchvision.transforms import InterpolationMode

    # Maps each PIL resampling filter to its torchvision `InterpolationMode` equivalent.
    pil_torch_interpolation_mapping = {
        PILImageResampling.NEAREST: InterpolationMode.NEAREST,
        PILImageResampling.BOX: InterpolationMode.BOX,
        PILImageResampling.BILINEAR: InterpolationMode.BILINEAR,
        PILImageResampling.HAMMING: InterpolationMode.HAMMING,
        PILImageResampling.BICUBIC: InterpolationMode.BICUBIC,
        PILImageResampling.LANCZOS: InterpolationMode.LANCZOS,
    }

if TYPE_CHECKING:
    # torch is only needed for the type annotations below, never at runtime here.
    if is_torch_available():
        import torch

logger = logging.get_logger(__name__)
  64. ImageInput = Union[
  65. "PIL.Image.Image", np.ndarray, "torch.Tensor", List["PIL.Image.Image"], List[np.ndarray], List["torch.Tensor"]
  66. ] # noqa
  67. VideoInput = Union[
  68. List["PIL.Image.Image"],
  69. "np.ndarray",
  70. "torch.Tensor",
  71. List["np.ndarray"],
  72. List["torch.Tensor"],
  73. List[List["PIL.Image.Image"]],
  74. List[List["np.ndarrray"]],
  75. List[List["torch.Tensor"]],
  76. ] # noqa
class ChannelDimension(ExplicitEnum):
    """Layouts the channel axis of an image array can take."""

    FIRST = "channels_first"  # e.g. (num_channels, height, width)
    LAST = "channels_last"  # e.g. (height, width, num_channels)
class AnnotationFormat(ExplicitEnum):
    """Supported formats for object-detection / segmentation annotations."""

    COCO_DETECTION = "coco_detection"
    COCO_PANOPTIC = "coco_panoptic"


class AnnotionFormat(ExplicitEnum):
    # NOTE(review): misspelled variant of `AnnotationFormat` with identical values —
    # presumably kept for backward compatibility; do not remove without a deprecation cycle.
    COCO_DETECTION = AnnotationFormat.COCO_DETECTION.value
    COCO_PANOPTIC = AnnotationFormat.COCO_PANOPTIC.value


# Shape of a single annotation dict, e.g. {"image_id": ..., "annotations": [...]}.
AnnotationType = Dict[str, Union[int, str, List[Dict]]]
  87. def is_pil_image(img):
  88. return is_vision_available() and isinstance(img, PIL.Image.Image)
class ImageType(ExplicitEnum):
    """Identifies which framework/library an image object comes from."""

    PIL = "pillow"
    TORCH = "torch"
    NUMPY = "numpy"
    TENSORFLOW = "tensorflow"
    JAX = "jax"
  95. def get_image_type(image):
  96. if is_pil_image(image):
  97. return ImageType.PIL
  98. if is_torch_tensor(image):
  99. return ImageType.TORCH
  100. if is_numpy_array(image):
  101. return ImageType.NUMPY
  102. if is_tf_tensor(image):
  103. return ImageType.TENSORFLOW
  104. if is_jax_tensor(image):
  105. return ImageType.JAX
  106. raise ValueError(f"Unrecognised image type {type(image)}")
  107. def is_valid_image(img):
  108. return is_pil_image(img) or is_numpy_array(img) or is_torch_tensor(img) or is_tf_tensor(img) or is_jax_tensor(img)
  109. def valid_images(imgs):
  110. # If we have an list of images, make sure every image is valid
  111. if isinstance(imgs, (list, tuple)):
  112. for img in imgs:
  113. if not valid_images(img):
  114. return False
  115. # If not a list of tuple, we have been given a single image or batched tensor of images
  116. elif not is_valid_image(imgs):
  117. return False
  118. return True
  119. def is_batched(img):
  120. if isinstance(img, (list, tuple)):
  121. return is_valid_image(img[0])
  122. return False
  123. def is_scaled_image(image: np.ndarray) -> bool:
  124. """
  125. Checks to see whether the pixel values have already been rescaled to [0, 1].
  126. """
  127. if image.dtype == np.uint8:
  128. return False
  129. # It's possible the image has pixel values in [0, 255] but is of floating type
  130. return np.min(image) >= 0 and np.max(image) <= 1
  131. def make_list_of_images(images, expected_ndims: int = 3) -> List[ImageInput]:
  132. """
  133. Ensure that the input is a list of images. If the input is a single image, it is converted to a list of length 1.
  134. If the input is a batch of images, it is converted to a list of images.
  135. Args:
  136. images (`ImageInput`):
  137. Image of images to turn into a list of images.
  138. expected_ndims (`int`, *optional*, defaults to 3):
  139. Expected number of dimensions for a single input image. If the input image has a different number of
  140. dimensions, an error is raised.
  141. """
  142. if is_batched(images):
  143. return images
  144. # Either the input is a single image, in which case we create a list of length 1
  145. if isinstance(images, PIL.Image.Image):
  146. # PIL images are never batched
  147. return [images]
  148. if is_valid_image(images):
  149. if images.ndim == expected_ndims + 1:
  150. # Batch of images
  151. images = list(images)
  152. elif images.ndim == expected_ndims:
  153. # Single image
  154. images = [images]
  155. else:
  156. raise ValueError(
  157. f"Invalid image shape. Expected either {expected_ndims + 1} or {expected_ndims} dimensions, but got"
  158. f" {images.ndim} dimensions."
  159. )
  160. return images
  161. raise ValueError(
  162. "Invalid image type. Expected either PIL.Image.Image, numpy.ndarray, torch.Tensor, tf.Tensor or "
  163. f"jax.ndarray, but got {type(images)}."
  164. )
  165. def to_numpy_array(img) -> np.ndarray:
  166. if not is_valid_image(img):
  167. raise ValueError(f"Invalid image type: {type(img)}")
  168. if is_vision_available() and isinstance(img, PIL.Image.Image):
  169. return np.array(img)
  170. return to_numpy(img)
  171. def infer_channel_dimension_format(
  172. image: np.ndarray, num_channels: Optional[Union[int, Tuple[int, ...]]] = None
  173. ) -> ChannelDimension:
  174. """
  175. Infers the channel dimension format of `image`.
  176. Args:
  177. image (`np.ndarray`):
  178. The image to infer the channel dimension of.
  179. num_channels (`int` or `Tuple[int, ...]`, *optional*, defaults to `(1, 3)`):
  180. The number of channels of the image.
  181. Returns:
  182. The channel dimension of the image.
  183. """
  184. num_channels = num_channels if num_channels is not None else (1, 3)
  185. num_channels = (num_channels,) if isinstance(num_channels, int) else num_channels
  186. if image.ndim == 3:
  187. first_dim, last_dim = 0, 2
  188. elif image.ndim == 4:
  189. first_dim, last_dim = 1, 3
  190. else:
  191. raise ValueError(f"Unsupported number of image dimensions: {image.ndim}")
  192. if image.shape[first_dim] in num_channels and image.shape[last_dim] in num_channels:
  193. logger.warning(
  194. f"The channel dimension is ambiguous. Got image shape {image.shape}. Assuming channels are the first dimension."
  195. )
  196. return ChannelDimension.FIRST
  197. elif image.shape[first_dim] in num_channels:
  198. return ChannelDimension.FIRST
  199. elif image.shape[last_dim] in num_channels:
  200. return ChannelDimension.LAST
  201. raise ValueError("Unable to infer channel dimension format")
  202. def get_channel_dimension_axis(
  203. image: np.ndarray, input_data_format: Optional[Union[ChannelDimension, str]] = None
  204. ) -> int:
  205. """
  206. Returns the channel dimension axis of the image.
  207. Args:
  208. image (`np.ndarray`):
  209. The image to get the channel dimension axis of.
  210. input_data_format (`ChannelDimension` or `str`, *optional*):
  211. The channel dimension format of the image. If `None`, will infer the channel dimension from the image.
  212. Returns:
  213. The channel dimension axis of the image.
  214. """
  215. if input_data_format is None:
  216. input_data_format = infer_channel_dimension_format(image)
  217. if input_data_format == ChannelDimension.FIRST:
  218. return image.ndim - 3
  219. elif input_data_format == ChannelDimension.LAST:
  220. return image.ndim - 1
  221. raise ValueError(f"Unsupported data format: {input_data_format}")
  222. def get_image_size(image: np.ndarray, channel_dim: ChannelDimension = None) -> Tuple[int, int]:
  223. """
  224. Returns the (height, width) dimensions of the image.
  225. Args:
  226. image (`np.ndarray`):
  227. The image to get the dimensions of.
  228. channel_dim (`ChannelDimension`, *optional*):
  229. Which dimension the channel dimension is in. If `None`, will infer the channel dimension from the image.
  230. Returns:
  231. A tuple of the image's height and width.
  232. """
  233. if channel_dim is None:
  234. channel_dim = infer_channel_dimension_format(image)
  235. if channel_dim == ChannelDimension.FIRST:
  236. return image.shape[-2], image.shape[-1]
  237. elif channel_dim == ChannelDimension.LAST:
  238. return image.shape[-3], image.shape[-2]
  239. else:
  240. raise ValueError(f"Unsupported data format: {channel_dim}")
  241. def is_valid_annotation_coco_detection(annotation: Dict[str, Union[List, Tuple]]) -> bool:
  242. if (
  243. isinstance(annotation, dict)
  244. and "image_id" in annotation
  245. and "annotations" in annotation
  246. and isinstance(annotation["annotations"], (list, tuple))
  247. and (
  248. # an image can have no annotations
  249. len(annotation["annotations"]) == 0 or isinstance(annotation["annotations"][0], dict)
  250. )
  251. ):
  252. return True
  253. return False
  254. def is_valid_annotation_coco_panoptic(annotation: Dict[str, Union[List, Tuple]]) -> bool:
  255. if (
  256. isinstance(annotation, dict)
  257. and "image_id" in annotation
  258. and "segments_info" in annotation
  259. and "file_name" in annotation
  260. and isinstance(annotation["segments_info"], (list, tuple))
  261. and (
  262. # an image can have no segments
  263. len(annotation["segments_info"]) == 0 or isinstance(annotation["segments_info"][0], dict)
  264. )
  265. ):
  266. return True
  267. return False
  268. def valid_coco_detection_annotations(annotations: Iterable[Dict[str, Union[List, Tuple]]]) -> bool:
  269. return all(is_valid_annotation_coco_detection(ann) for ann in annotations)
  270. def valid_coco_panoptic_annotations(annotations: Iterable[Dict[str, Union[List, Tuple]]]) -> bool:
  271. return all(is_valid_annotation_coco_panoptic(ann) for ann in annotations)
def load_image(image: Union[str, "PIL.Image.Image"], timeout: Optional[float] = None) -> "PIL.Image.Image":
    """
    Loads `image` to a PIL Image.

    Args:
        image (`str` or `PIL.Image.Image`):
            The image to convert to the PIL Image format. A string may be an http(s) URL, a local file path,
            or a base64-encoded payload (optionally wrapped in a `data:image/...` URI).
        timeout (`float`, *optional*):
            The timeout value in seconds for the URL request.

    Returns:
        `PIL.Image.Image`: A PIL Image, EXIF-transposed and converted to RGB.
    """
    requires_backends(load_image, ["vision"])
    if isinstance(image, str):
        if image.startswith("http://") or image.startswith("https://"):
            # We need to actually check for a real protocol, otherwise it's impossible to use a local file
            # like http_huggingface_co.png
            image = PIL.Image.open(BytesIO(requests.get(image, timeout=timeout).content))
        elif os.path.isfile(image):
            image = PIL.Image.open(image)
        else:
            if image.startswith("data:image/"):
                # Strip the data-URI header; the payload after the comma is the base64 data.
                image = image.split(",")[1]

            # Try to load as base64
            try:
                b64 = base64.decodebytes(image.encode())
                image = PIL.Image.open(BytesIO(b64))
            except Exception as e:
                raise ValueError(
                    f"Incorrect image source. Must be a valid URL starting with `http://` or `https://`, a valid path to an image file, or a base64 encoded string. Got {image}. Failed with {e}"
                )
    elif isinstance(image, PIL.Image.Image):
        image = image
    else:
        raise TypeError(
            "Incorrect format used for image. Should be an url linking to an image, a base64 string, a local path, or a PIL image."
        )
    # Apply any EXIF orientation tag so pixel data matches the intended display orientation.
    image = PIL.ImageOps.exif_transpose(image)
    image = image.convert("RGB")
    return image
  311. def validate_preprocess_arguments(
  312. do_rescale: Optional[bool] = None,
  313. rescale_factor: Optional[float] = None,
  314. do_normalize: Optional[bool] = None,
  315. image_mean: Optional[Union[float, List[float]]] = None,
  316. image_std: Optional[Union[float, List[float]]] = None,
  317. do_pad: Optional[bool] = None,
  318. size_divisibility: Optional[int] = None,
  319. do_center_crop: Optional[bool] = None,
  320. crop_size: Optional[Dict[str, int]] = None,
  321. do_resize: Optional[bool] = None,
  322. size: Optional[Dict[str, int]] = None,
  323. resample: Optional["PILImageResampling"] = None,
  324. ):
  325. """
  326. Checks validity of typically used arguments in an `ImageProcessor` `preprocess` method.
  327. Raises `ValueError` if arguments incompatibility is caught.
  328. Many incompatibilities are model-specific. `do_pad` sometimes needs `size_divisor`,
  329. sometimes `size_divisibility`, and sometimes `size`. New models and processors added should follow
  330. existing arguments when possible.
  331. """
  332. if do_rescale and rescale_factor is None:
  333. raise ValueError("`rescale_factor` must be specified if `do_rescale` is `True`.")
  334. if do_pad and size_divisibility is None:
  335. # Here, size_divisor might be passed as the value of size
  336. raise ValueError(
  337. "Depending on the model, `size_divisibility`, `size_divisor`, `pad_size` or `size` must be specified if `do_pad` is `True`."
  338. )
  339. if do_normalize and (image_mean is None or image_std is None):
  340. raise ValueError("`image_mean` and `image_std` must both be specified if `do_normalize` is `True`.")
  341. if do_center_crop and crop_size is None:
  342. raise ValueError("`crop_size` must be specified if `do_center_crop` is `True`.")
  343. if do_resize and (size is None or resample is None):
  344. raise ValueError("`size` and `resample` must be specified if `do_resize` is `True`.")
# In the future we can add a TF implementation here when we have TF models.
class ImageFeatureExtractionMixin:
    """
    Mixin that contain utilities for preparing image features.
    """

    def _ensure_format_supported(self, image):
        # Shared guard used by every public method: only PIL images, NumPy arrays,
        # and torch tensors are accepted.
        if not isinstance(image, (PIL.Image.Image, np.ndarray)) and not is_torch_tensor(image):
            raise ValueError(
                f"Got type {type(image)} which is not supported, only `PIL.Image.Image`, `np.array` and "
                "`torch.Tensor` are."
            )

    def to_pil_image(self, image, rescale=None):
        """
        Converts `image` to a PIL Image. Optionally rescales it and puts the channel dimension back as the last axis if
        needed.

        Args:
            image (`PIL.Image.Image` or `numpy.ndarray` or `torch.Tensor`):
                The image to convert to the PIL Image format.
            rescale (`bool`, *optional*):
                Whether or not to apply the scaling factor (to make pixel values integers between 0 and 255). Will
                default to `True` if the image type is a floating type, `False` otherwise.
        """
        self._ensure_format_supported(image)

        if is_torch_tensor(image):
            image = image.numpy()

        if isinstance(image, np.ndarray):
            if rescale is None:
                # rescale default to the array being of floating type.
                rescale = isinstance(image.flat[0], np.floating)
            # If the channel as been moved to first dim, we put it back at the end.
            if image.ndim == 3 and image.shape[0] in [1, 3]:
                image = image.transpose(1, 2, 0)
            if rescale:
                image = image * 255
            image = image.astype(np.uint8)
            return PIL.Image.fromarray(image)
        # Already a PIL image: return unchanged.
        return image

    def convert_rgb(self, image):
        """
        Converts `PIL.Image.Image` to RGB format.

        Args:
            image (`PIL.Image.Image`):
                The image to convert.
        """
        self._ensure_format_supported(image)
        # Non-PIL inputs (arrays/tensors) are passed through untouched.
        if not isinstance(image, PIL.Image.Image):
            return image

        return image.convert("RGB")

    def rescale(self, image: np.ndarray, scale: Union[float, int]) -> np.ndarray:
        """
        Rescale a numpy image by scale amount
        """
        self._ensure_format_supported(image)
        return image * scale

    def to_numpy_array(self, image, rescale=None, channel_first=True):
        """
        Converts `image` to a numpy array. Optionally rescales it and puts the channel dimension as the first
        dimension.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image to convert to a NumPy array.
            rescale (`bool`, *optional*):
                Whether or not to apply the scaling factor (to make pixel values floats between 0. and 1.). Will
                default to `True` if the image is a PIL Image or an array/tensor of integers, `False` otherwise.
            channel_first (`bool`, *optional*, defaults to `True`):
                Whether or not to permute the dimensions of the image to put the channel dimension first.
        """
        self._ensure_format_supported(image)

        if isinstance(image, PIL.Image.Image):
            image = np.array(image)

        if is_torch_tensor(image):
            image = image.numpy()

        # Integer-valued inputs default to being rescaled into [0, 1].
        rescale = isinstance(image.flat[0], np.integer) if rescale is None else rescale

        if rescale:
            image = self.rescale(image.astype(np.float32), 1 / 255.0)

        if channel_first and image.ndim == 3:
            # (H, W, C) -> (C, H, W)
            image = image.transpose(2, 0, 1)

        return image

    def expand_dims(self, image):
        """
        Expands 2-dimensional `image` to 3 dimensions.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image to expand.
        """
        self._ensure_format_supported(image)

        # Do nothing if PIL image
        if isinstance(image, PIL.Image.Image):
            return image

        if is_torch_tensor(image):
            image = image.unsqueeze(0)
        else:
            image = np.expand_dims(image, axis=0)
        return image

    def normalize(self, image, mean, std, rescale=False):
        """
        Normalizes `image` with `mean` and `std`. Note that this will trigger a conversion of `image` to a NumPy array
        if it's a PIL Image.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image to normalize.
            mean (`List[float]` or `np.ndarray` or `torch.Tensor`):
                The mean (per channel) to use for normalization.
            std (`List[float]` or `np.ndarray` or `torch.Tensor`):
                The standard deviation (per channel) to use for normalization.
            rescale (`bool`, *optional*, defaults to `False`):
                Whether or not to rescale the image to be between 0 and 1. If a PIL image is provided, scaling will
                happen automatically.
        """
        self._ensure_format_supported(image)

        if isinstance(image, PIL.Image.Image):
            image = self.to_numpy_array(image, rescale=True)
        # If the input image is a PIL image, it automatically gets rescaled. If it's another
        # type it may need rescaling.
        elif rescale:
            if isinstance(image, np.ndarray):
                image = self.rescale(image.astype(np.float32), 1 / 255.0)
            elif is_torch_tensor(image):
                image = self.rescale(image.float(), 1 / 255.0)

        # Coerce mean/std into the same backend (NumPy or torch) as the image.
        if isinstance(image, np.ndarray):
            if not isinstance(mean, np.ndarray):
                mean = np.array(mean).astype(image.dtype)
            if not isinstance(std, np.ndarray):
                std = np.array(std).astype(image.dtype)
        elif is_torch_tensor(image):
            import torch

            if not isinstance(mean, torch.Tensor):
                if isinstance(mean, np.ndarray):
                    mean = torch.from_numpy(mean)
                else:
                    mean = torch.tensor(mean)
            if not isinstance(std, torch.Tensor):
                if isinstance(std, np.ndarray):
                    std = torch.from_numpy(std)
                else:
                    std = torch.tensor(std)

        if image.ndim == 3 and image.shape[0] in [1, 3]:
            # Channels-first image: reshape mean/std to broadcast over (C, H, W).
            return (image - mean[:, None, None]) / std[:, None, None]
        else:
            return (image - mean) / std

    def resize(self, image, size, resample=None, default_to_square=True, max_size=None):
        """
        Resizes `image`. Enforces conversion of input to PIL.Image.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image to resize.
            size (`int` or `Tuple[int, int]`):
                The size to use for resizing the image. If `size` is a sequence like (h, w), output size will be
                matched to this.

                If `size` is an int and `default_to_square` is `True`, then image will be resized to (size, size). If
                `size` is an int and `default_to_square` is `False`, then smaller edge of the image will be matched to
                this number. i.e, if height > width, then image will be rescaled to (size * height / width, size).
            resample (`int`, *optional*, defaults to `PILImageResampling.BILINEAR`):
                The filter to user for resampling.
            default_to_square (`bool`, *optional*, defaults to `True`):
                How to convert `size` when it is a single int. If set to `True`, the `size` will be converted to a
                square (`size`,`size`). If set to `False`, will replicate
                [`torchvision.transforms.Resize`](https://pytorch.org/vision/stable/transforms.html#torchvision.transforms.Resize)
                with support for resizing only the smallest edge and providing an optional `max_size`.
            max_size (`int`, *optional*, defaults to `None`):
                The maximum allowed for the longer edge of the resized image: if the longer edge of the image is
                greater than `max_size` after being resized according to `size`, then the image is resized again so
                that the longer edge is equal to `max_size`. As a result, `size` might be overruled, i.e the smaller
                edge may be shorter than `size`. Only used if `default_to_square` is `False`.

        Returns:
            image: A resized `PIL.Image.Image`.
        """
        resample = resample if resample is not None else PILImageResampling.BILINEAR

        self._ensure_format_supported(image)

        if not isinstance(image, PIL.Image.Image):
            image = self.to_pil_image(image)

        if isinstance(size, list):
            size = tuple(size)

        if isinstance(size, int) or len(size) == 1:
            if default_to_square:
                size = (size, size) if isinstance(size, int) else (size[0], size[0])
            else:
                width, height = image.size
                # specified size only for the smallest edge
                short, long = (width, height) if width <= height else (height, width)
                requested_new_short = size if isinstance(size, int) else size[0]

                if short == requested_new_short:
                    # Smallest edge already matches: no resize needed.
                    return image

                new_short, new_long = requested_new_short, int(requested_new_short * long / short)

                if max_size is not None:
                    if max_size <= requested_new_short:
                        raise ValueError(
                            f"max_size = {max_size} must be strictly greater than the requested "
                            f"size for the smaller edge size = {size}"
                        )
                    if new_long > max_size:
                        # Cap the long edge at max_size, shrinking the short edge proportionally.
                        new_short, new_long = int(max_size * new_short / new_long), max_size

                size = (new_short, new_long) if width <= height else (new_long, new_short)

        return image.resize(size, resample=resample)

    def center_crop(self, image, size):
        """
        Crops `image` to the given size using a center crop. Note that if the image is too small to be cropped to the
        size given, it will be padded (so the returned result has the size asked).

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape (n_channels, height, width) or (height, width, n_channels)):
                The image to resize.
            size (`int` or `Tuple[int, int]`):
                The size to which crop the image.

        Returns:
            new_image: A center cropped `PIL.Image.Image` or `np.ndarray` or `torch.Tensor` of shape: (n_channels,
            height, width).
        """
        self._ensure_format_supported(image)

        if not isinstance(size, tuple):
            size = (size, size)

        # PIL Image.size is (width, height) but NumPy array and torch Tensors have (height, width)
        if is_torch_tensor(image) or isinstance(image, np.ndarray):
            if image.ndim == 2:
                image = self.expand_dims(image)
            image_shape = image.shape[1:] if image.shape[0] in [1, 3] else image.shape[:2]
        else:
            image_shape = (image.size[1], image.size[0])

        top = (image_shape[0] - size[0]) // 2
        bottom = top + size[0]  # In case size is odd, (image_shape[0] + size[0]) // 2 won't give the proper result.
        left = (image_shape[1] - size[1]) // 2
        right = left + size[1]  # In case size is odd, (image_shape[1] + size[1]) // 2 won't give the proper result.

        # For PIL Images we have a method to crop directly.
        if isinstance(image, PIL.Image.Image):
            return image.crop((left, top, right, bottom))

        # Check if image is in (n_channels, height, width) or (height, width, n_channels) format
        channel_first = True if image.shape[0] in [1, 3] else False

        # Transpose (height, width, n_channels) format images
        if not channel_first:
            if isinstance(image, np.ndarray):
                image = image.transpose(2, 0, 1)
            if is_torch_tensor(image):
                image = image.permute(2, 0, 1)

        # Check if cropped area is within image boundaries
        if top >= 0 and bottom <= image_shape[0] and left >= 0 and right <= image_shape[1]:
            return image[..., top:bottom, left:right]

        # Otherwise, we may need to pad if the image is too small. Oh joy...
        new_shape = image.shape[:-2] + (max(size[0], image_shape[0]), max(size[1], image_shape[1]))
        if isinstance(image, np.ndarray):
            new_image = np.zeros_like(image, shape=new_shape)
        elif is_torch_tensor(image):
            new_image = image.new_zeros(new_shape)

        # Paste the original image into the center of the zero-padded canvas.
        top_pad = (new_shape[-2] - image_shape[0]) // 2
        bottom_pad = top_pad + image_shape[0]
        left_pad = (new_shape[-1] - image_shape[1]) // 2
        right_pad = left_pad + image_shape[1]
        new_image[..., top_pad:bottom_pad, left_pad:right_pad] = image

        # Shift the crop window into the padded coordinate system.
        top += top_pad
        bottom += top_pad
        left += left_pad
        right += left_pad

        new_image = new_image[
            ..., max(0, top) : min(new_image.shape[-2], bottom), max(0, left) : min(new_image.shape[-1], right)
        ]

        return new_image

    def flip_channel_order(self, image):
        """
        Flips the channel order of `image` from RGB to BGR, or vice versa. Note that this will trigger a conversion of
        `image` to a NumPy array if it's a PIL Image.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image whose color channels to flip. If `np.ndarray` or `torch.Tensor`, the channel dimension should
                be first.
        """
        self._ensure_format_supported(image)

        if isinstance(image, PIL.Image.Image):
            image = self.to_numpy_array(image)

        # Reverse the (channels-first) channel axis.
        return image[::-1, :, :]

    def rotate(self, image, angle, resample=None, expand=0, center=None, translate=None, fillcolor=None):
        """
        Returns a rotated copy of `image`. This method returns a copy of `image`, rotated the given number of degrees
        counter clockwise around its centre.

        Args:
            image (`PIL.Image.Image` or `np.ndarray` or `torch.Tensor`):
                The image to rotate. If `np.ndarray` or `torch.Tensor`, will be converted to `PIL.Image.Image` before
                rotating.

        Returns:
            image: A rotated `PIL.Image.Image`.
        """
        resample = resample if resample is not None else PIL.Image.NEAREST

        self._ensure_format_supported(image)

        if not isinstance(image, PIL.Image.Image):
            image = self.to_pil_image(image)

        return image.rotate(
            angle, resample=resample, expand=expand, center=center, translate=translate, fillcolor=fillcolor
        )
  630. def validate_annotations(
  631. annotation_format: AnnotationFormat,
  632. supported_annotation_formats: Tuple[AnnotationFormat, ...],
  633. annotations: List[Dict],
  634. ) -> None:
  635. if annotation_format not in supported_annotation_formats:
  636. raise ValueError(f"Unsupported annotation format: {format} must be one of {supported_annotation_formats}")
  637. if annotation_format is AnnotationFormat.COCO_DETECTION:
  638. if not valid_coco_detection_annotations(annotations):
  639. raise ValueError(
  640. "Invalid COCO detection annotations. Annotations must a dict (single image) or list of dicts "
  641. "(batch of images) with the following keys: `image_id` and `annotations`, with the latter "
  642. "being a list of annotations in the COCO format."
  643. )
  644. if annotation_format is AnnotationFormat.COCO_PANOPTIC:
  645. if not valid_coco_panoptic_annotations(annotations):
  646. raise ValueError(
  647. "Invalid COCO panoptic annotations. Annotations must a dict (single image) or list of dicts "
  648. "(batch of images) with the following keys: `image_id`, `file_name` and `segments_info`, with "
  649. "the latter being a list of annotations in the COCO format."
  650. )
  651. def validate_kwargs(valid_processor_keys: List[str], captured_kwargs: List[str]):
  652. unused_keys = set(captured_kwargs).difference(set(valid_processor_keys))
  653. if unused_keys:
  654. unused_key_str = ", ".join(unused_keys)
  655. # TODO raise a warning here instead of simply logging?
  656. logger.warning(f"Unused or unrecognized kwargs: {unused_key_str}.")