# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image processor class for SAM."""

import math
from copy import deepcopy
from itertools import product
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import convert_to_rgb, pad, resize, to_channel_dimension_format
from ...image_utils import (
    IMAGENET_DEFAULT_MEAN,
    IMAGENET_DEFAULT_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    get_image_size,
    infer_channel_dimension_format,
    is_scaled_image,
    make_list_of_images,
    to_numpy_array,
    valid_images,
    validate_preprocess_arguments,
)
from ...utils import (
    TensorType,
    filter_out_non_signature_kwargs,
    is_tf_available,
    is_torch_available,
    is_torchvision_available,
    logging,
    requires_backends,
)


if is_torch_available():
    import torch
    import torch.nn.functional as F

if is_torchvision_available():
    from torchvision.ops.boxes import batched_nms

if is_tf_available():
    import tensorflow as tf
    from tensorflow.experimental import numpy as tnp

    from ...tf_utils import flatten, shape_list


logger = logging.get_logger(__name__)


class SamImageProcessor(BaseImageProcessor):
    r"""
    Constructs a SAM image processor.

    Args:
        do_resize (`bool`, *optional*, defaults to `True`):
            Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by the
            `do_resize` parameter in the `preprocess` method.
        size (`dict`, *optional*, defaults to `{"longest_edge": 1024}`):
            Size of the output image after resizing. Resizes the longest edge of the image to match
            `size["longest_edge"]` while maintaining the aspect ratio. Can be overridden by the `size` parameter in the
            `preprocess` method.
        mask_size (`dict`, *optional*, defaults to `{"longest_edge": 256}`):
            Size of the output segmentation map after resizing. Resizes the longest edge of the image to match
            `mask_size["longest_edge"]` while maintaining the aspect ratio. Can be overridden by the `mask_size`
            parameter in the `preprocess` method.
        resample (`PILImageResampling`, *optional*, defaults to `Resampling.BILINEAR`):
            Resampling filter to use if resizing the image. Can be overridden by the `resample` parameter in the
            `preprocess` method.
        do_rescale (`bool`, *optional*, defaults to `True`):
            Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by the
            `do_rescale` parameter in the `preprocess` method.
        rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
            Scale factor to use if rescaling the image. Only has an effect if `do_rescale` is set to `True`. Can be
            overridden by the `rescale_factor` parameter in the `preprocess` method.
        do_normalize (`bool`, *optional*, defaults to `True`):
            Whether to normalize the image. Can be overridden by the `do_normalize` parameter in the `preprocess`
            method.
        image_mean (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_MEAN`):
            Mean to use if normalizing the image. This is a float or list of floats the length of the number of
            channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
        image_std (`float` or `List[float]`, *optional*, defaults to `IMAGENET_DEFAULT_STD`):
            Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
            number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
        do_pad (`bool`, *optional*, defaults to `True`):
            Whether to pad the image to the specified `pad_size`. Can be overridden by the `do_pad` parameter in the
            `preprocess` method.
        pad_size (`dict`, *optional*, defaults to `{"height": 1024, "width": 1024}`):
            Size of the output image after padding. Can be overridden by the `pad_size` parameter in the `preprocess`
            method.
        mask_pad_size (`dict`, *optional*, defaults to `{"height": 256, "width": 256}`):
            Size of the output segmentation map after padding. Can be overridden by the `mask_pad_size` parameter in
            the `preprocess` method.
        do_convert_rgb (`bool`, *optional*, defaults to `True`):
            Whether to convert the image to RGB.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        mask_size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_pad: bool = True,
        pad_size: int = None,
        mask_pad_size: int = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"longest_edge": 1024}
        size = get_size_dict(max_size=size, default_to_square=False) if not isinstance(size, dict) else size

        pad_size = pad_size if pad_size is not None else {"height": 1024, "width": 1024}
        pad_size = get_size_dict(pad_size, default_to_square=True)

        mask_size = mask_size if mask_size is not None else {"longest_edge": 256}
        mask_size = (
            get_size_dict(max_size=mask_size, default_to_square=False)
            if not isinstance(mask_size, dict)
            else mask_size
        )
        mask_pad_size = mask_pad_size if mask_pad_size is not None else {"height": 256, "width": 256}
        mask_pad_size = get_size_dict(mask_pad_size, default_to_square=True)

        self.do_resize = do_resize
        self.size = size
        self.mask_size = mask_size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD
        self.do_pad = do_pad
        self.pad_size = pad_size
        self.mask_pad_size = mask_pad_size
        self.do_convert_rgb = do_convert_rgb
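
    # Usage sketch (illustrative only, not part of the original module). Assuming an RGB
    # PIL image `image`, constructing the processor with its defaults and preprocessing a
    # single image might look like:
    #
    #     processor = SamImageProcessor()
    #     inputs = processor.preprocess(images=image, return_tensors="pt")
    #     # inputs["pixel_values"] -> (1, 3, 1024, 1024): resized so the longest edge is 1024,
    #     # normalized with the ImageNet statistics and zero-padded to 1024x1024.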

    def pad_image(
        self,
        image: np.ndarray,
        pad_size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Pad an image to `(pad_size["height"], pad_size["width"])` with zeros to the right and bottom.

        Args:
            image (`np.ndarray`):
                Image to pad.
            pad_size (`Dict[str, int]`):
                Size of the output image after padding.
            data_format (`str` or `ChannelDimension`, *optional*):
                The data format of the image. Can be either "channels_first" or "channels_last". If `None`, the
                `data_format` of the `image` will be used.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
        """
        output_height, output_width = pad_size["height"], pad_size["width"]
        input_height, input_width = get_image_size(image, channel_dim=input_data_format)

        pad_width = output_width - input_width
        pad_height = output_height - input_height

        padded_image = pad(
            image,
            ((0, pad_height), (0, pad_width)),
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )
        return padded_image

    def _get_preprocess_shape(self, old_shape: Tuple[int, int], longest_edge: int):
        """
        Compute the output size given input size and target long side length.
        """
        oldh, oldw = old_shape
        scale = longest_edge * 1.0 / max(oldh, oldw)
        newh, neww = oldh * scale, oldw * scale
        newh = int(newh + 0.5)
        neww = int(neww + 0.5)
        return (newh, neww)
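
    # Worked example (illustrative): for a 480x640 (height, width) input and longest_edge=1024,
    # scale = 1024 / 640 = 1.6, so the output shape is
    # (int(480 * 1.6 + 0.5), int(640 * 1.6 + 0.5)) = (768, 1024); the aspect ratio is preserved
    # and the longest edge matches the target.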

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        """
        Resize an image so that its longest edge matches `size["longest_edge"]` while preserving the aspect ratio.

        Args:
            image (`np.ndarray`):
                Image to resize.
            size (`Dict[str, int]`):
                Dictionary in the format `{"longest_edge": int}` specifying the size of the output image. The longest
                edge of the image will be resized to the specified size, while the other edge will be resized to
                maintain the aspect ratio.
            resample:
                `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
            data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the output image. If unset, the channel dimension format of the input
                image is used. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.

        Returns:
            `np.ndarray`: The resized image.
        """
        size = get_size_dict(size)
        if "longest_edge" not in size:
            raise ValueError(f"The `size` dictionary must contain the key `longest_edge`. Got {size.keys()}")
        input_size = get_image_size(image, channel_dim=input_data_format)
        output_height, output_width = self._get_preprocess_shape(input_size, size["longest_edge"])
        return resize(
            image,
            size=(output_height, output_width),
            resample=resample,
            data_format=data_format,
            input_data_format=input_data_format,
            **kwargs,
        )

    def _preprocess(
        self,
        image: ImageInput,
        do_resize: bool,
        do_rescale: bool,
        do_normalize: bool,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = None,
        rescale_factor: Optional[float] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[Dict[str, int]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        if do_resize:
            image = self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
        reshaped_input_size = get_image_size(image, channel_dim=input_data_format)

        if do_rescale:
            image = self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)

        if do_normalize:
            image = self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)

        if do_pad:
            image = self.pad_image(image=image, pad_size=pad_size, input_data_format=input_data_format)

        return image, reshaped_input_size

    def _preprocess_image(
        self,
        image: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[Dict[str, int]] = None,
        do_convert_rgb: Optional[bool] = None,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> Tuple[np.ndarray, Tuple[int, int], Tuple[int, int]]:
        image = to_numpy_array(image)

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            image = convert_to_rgb(image)

        # All transformations expect numpy arrays.
        image = to_numpy_array(image)

        if is_scaled_image(image) and do_rescale:
            logger.warning_once(
                "It looks like you are trying to rescale already rescaled images. If the input"
                " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
            )

        if input_data_format is None:
            input_data_format = infer_channel_dimension_format(image)

        original_size = get_image_size(image, channel_dim=input_data_format)

        image, reshaped_input_size = self._preprocess(
            image=image,
            do_resize=do_resize,
            size=size,
            resample=resample,
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_pad=do_pad,
            pad_size=pad_size,
            input_data_format=input_data_format,
        )

        if data_format is not None:
            image = to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format)

        return image, original_size, reshaped_input_size

    def _preprocess_mask(
        self,
        segmentation_map: ImageInput,
        do_resize: Optional[bool] = None,
        mask_size: Dict[str, int] = None,
        do_pad: Optional[bool] = None,
        mask_pad_size: Optional[Dict[str, int]] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ) -> np.ndarray:
        segmentation_map = to_numpy_array(segmentation_map)

        # Add channel dimension if missing - needed for certain transformations
        if segmentation_map.ndim == 2:
            added_channel_dim = True
            segmentation_map = segmentation_map[None, ...]
            input_data_format = ChannelDimension.FIRST
        else:
            added_channel_dim = False
            if input_data_format is None:
                input_data_format = infer_channel_dimension_format(segmentation_map, num_channels=1)

        original_size = get_image_size(segmentation_map, channel_dim=input_data_format)

        segmentation_map, _ = self._preprocess(
            image=segmentation_map,
            do_resize=do_resize,
            size=mask_size,
            resample=PILImageResampling.NEAREST,
            do_rescale=False,
            do_normalize=False,
            do_pad=do_pad,
            pad_size=mask_pad_size,
            input_data_format=input_data_format,
        )

        # Remove extra channel dimension if added for processing
        if added_channel_dim:
            segmentation_map = segmentation_map.squeeze(0)
        segmentation_map = segmentation_map.astype(np.int64)

        return segmentation_map, original_size

    @filter_out_non_signature_kwargs()
    def preprocess(
        self,
        images: ImageInput,
        segmentation_maps: Optional[ImageInput] = None,
        do_resize: Optional[bool] = None,
        size: Optional[Dict[str, int]] = None,
        mask_size: Optional[Dict[str, int]] = None,
        resample: Optional["PILImageResampling"] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[Union[int, float]] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_pad: Optional[bool] = None,
        pad_size: Optional[Dict[str, int]] = None,
        mask_pad_size: Optional[Dict[str, int]] = None,
        do_convert_rgb: Optional[bool] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
    ):
        """
        Preprocess an image or batch of images.

        Args:
            images (`ImageInput`):
                Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
                passing in images with pixel values between 0 and 1, set `do_rescale=False`.
            segmentation_maps (`ImageInput`, *optional*):
                Segmentation map to preprocess.
            do_resize (`bool`, *optional*, defaults to `self.do_resize`):
                Whether to resize the image.
            size (`Dict[str, int]`, *optional*, defaults to `self.size`):
                Controls the size of the image after `resize`. The longest edge of the image is resized to
                `size["longest_edge"]` whilst preserving the aspect ratio.
            mask_size (`Dict[str, int]`, *optional*, defaults to `self.mask_size`):
                Controls the size of the segmentation map after `resize`. The longest edge of the map is resized to
                `mask_size["longest_edge"]` whilst preserving the aspect ratio.
            resample (`PILImageResampling`, *optional*, defaults to `self.resample`):
                `PILImageResampling` filter to use when resizing the image e.g. `PILImageResampling.BILINEAR`.
            do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
                Whether to rescale the image pixel values by the specified rescale factor.
            rescale_factor (`int` or `float`, *optional*, defaults to `self.rescale_factor`):
                Rescale factor to apply to the image pixel values.
            do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
                Whether to normalize the image.
            image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
                Image mean to normalize the image by if `do_normalize` is set to `True`.
            image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
                Image standard deviation to normalize the image by if `do_normalize` is set to `True`.
            do_pad (`bool`, *optional*, defaults to `self.do_pad`):
                Whether to pad the image.
            pad_size (`Dict[str, int]`, *optional*, defaults to `self.pad_size`):
                Controls the size of the padding applied to the image. The image is padded to `pad_size["height"]` and
                `pad_size["width"]` if `do_pad` is set to `True`.
            mask_pad_size (`Dict[str, int]`, *optional*, defaults to `self.mask_pad_size`):
                Controls the size of the padding applied to the segmentation map. The map is padded to
                `mask_pad_size["height"]` and `mask_pad_size["width"]` if `do_pad` is set to `True`.
            do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
                Whether to convert the image to RGB.
            return_tensors (`str` or `TensorType`, *optional*):
                The type of tensors to return. Can be one of:
                - Unset: Return a list of `np.ndarray`.
                - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
                - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
                - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
                - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
            data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
                The channel dimension format for the output image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - Unset: Use the channel dimension format of the input image.
            input_data_format (`ChannelDimension` or `str`, *optional*):
                The channel dimension format for the input image. If unset, the channel dimension format is inferred
                from the input image. Can be one of:
                - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
                - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
                - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
        """
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(max_size=size, default_to_square=False) if not isinstance(size, dict) else size
        mask_size = mask_size if mask_size is not None else self.mask_size
        mask_size = (
            get_size_dict(max_size=mask_size, default_to_square=False)
            if not isinstance(mask_size, dict)
            else mask_size
        )
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_pad = do_pad if do_pad is not None else self.do_pad
        pad_size = pad_size if pad_size is not None else self.pad_size
        pad_size = get_size_dict(pad_size, default_to_square=True)
        mask_pad_size = mask_pad_size if mask_pad_size is not None else self.mask_pad_size
        mask_pad_size = get_size_dict(mask_pad_size, default_to_square=True)
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if segmentation_maps is not None:
            segmentation_maps = make_list_of_images(segmentation_maps, expected_ndims=2)

            if not valid_images(segmentation_maps):
                raise ValueError(
                    "Invalid segmentation map type. Must be of type PIL.Image.Image, numpy.ndarray, "
                    "torch.Tensor, tf.Tensor or jax.ndarray."
                )

        validate_preprocess_arguments(
            do_rescale=do_rescale,
            rescale_factor=rescale_factor,
            do_normalize=do_normalize,
            image_mean=image_mean,
            image_std=image_std,
            do_pad=do_pad,
            size_divisibility=pad_size,  # Here _preprocess needs do_pad and pad_size.
            do_resize=do_resize,
            size=size,
            resample=resample,
        )

        images, original_sizes, reshaped_input_sizes = zip(
            *(
                self._preprocess_image(
                    image=img,
                    do_resize=do_resize,
                    size=size,
                    resample=resample,
                    do_rescale=do_rescale,
                    rescale_factor=rescale_factor,
                    do_normalize=do_normalize,
                    image_mean=image_mean,
                    image_std=image_std,
                    do_pad=do_pad,
                    pad_size=pad_size,
                    do_convert_rgb=do_convert_rgb,
                    data_format=data_format,
                    input_data_format=input_data_format,
                )
                for img in images
            )
        )

        data = {
            "pixel_values": images,
            "original_sizes": original_sizes,
            "reshaped_input_sizes": reshaped_input_sizes,
        }

        if segmentation_maps is not None:
            segmentation_maps, original_mask_sizes = zip(
                *(
                    self._preprocess_mask(
                        segmentation_map=mask,
                        do_resize=do_resize,
                        mask_size=mask_size,
                        do_pad=do_pad,
                        mask_pad_size=mask_pad_size,
                        input_data_format=input_data_format,
                    )
                    for mask in segmentation_maps
                )
            )

            # masks should start out the same size as input images
            assert all(
                original_im_size == original_mask_size
                for original_im_size, original_mask_size in zip(original_sizes, original_mask_sizes)
            ), "Segmentation maps should be the same size as input images."

            data["labels"] = segmentation_maps

        return BatchFeature(data=data, tensor_type=return_tensors)

    def post_process_masks(
        self,
        masks,
        original_sizes,
        reshaped_input_sizes,
        mask_threshold=0.0,
        binarize=True,
        pad_size=None,
        return_tensors="pt",
    ):
        """
        Remove padding and upscale masks to the original image size.

        Args:
            masks (`Union[List[torch.Tensor], List[np.ndarray], List[tf.Tensor]]`):
                Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
            original_sizes (`Union[torch.Tensor, tf.Tensor, List[Tuple[int,int]]]`):
                The original sizes of each image before it was resized to the model's expected input shape, in (height,
                width) format.
            reshaped_input_sizes (`Union[torch.Tensor, tf.Tensor, List[Tuple[int,int]]]`):
                The size of each image as it is fed to the model, in (height, width) format. Used to remove padding.
            mask_threshold (`float`, *optional*, defaults to 0.0):
                The threshold to use for binarizing the masks.
            binarize (`bool`, *optional*, defaults to `True`):
                Whether to binarize the masks.
            pad_size (`int`, *optional*, defaults to `self.pad_size`):
                The target size the images were padded to before being passed to the model. If None, the target size is
                assumed to be the processor's `pad_size`.
            return_tensors (`str`, *optional*, defaults to `"pt"`):
                If `"pt"`, return PyTorch tensors. If `"tf"`, return TensorFlow tensors.

        Returns:
            (`Union[torch.Tensor, tf.Tensor]`): Batched masks in (batch_size, num_channels, height, width) format,
            where (height, width) is given by original_size.
        """
        if return_tensors == "pt":
            return self._post_process_masks_pt(
                masks=masks,
                original_sizes=original_sizes,
                reshaped_input_sizes=reshaped_input_sizes,
                mask_threshold=mask_threshold,
                binarize=binarize,
                pad_size=pad_size,
            )
        elif return_tensors == "tf":
            return self._post_process_masks_tf(
                masks=masks,
                original_sizes=original_sizes,
                reshaped_input_sizes=reshaped_input_sizes,
                mask_threshold=mask_threshold,
                binarize=binarize,
                pad_size=pad_size,
            )
        else:
            raise ValueError("return_tensors must be either 'pt' or 'tf'")
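
    # Usage sketch (illustrative only). Assuming `outputs` come from a SAM model and `inputs`
    # from `preprocess(..., return_tensors="pt")`, the low-resolution mask logits can be mapped
    # back to the original image resolution with:
    #
    #     masks = processor.post_process_masks(
    #         masks=outputs.pred_masks,
    #         original_sizes=inputs["original_sizes"],
    #         reshaped_input_sizes=inputs["reshaped_input_sizes"],
    #     )
    #     # each entry is a binarized tensor at the corresponding original (height, width)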

    def _post_process_masks_pt(
        self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None
    ):
        """
        Remove padding and upscale masks to the original image size.

        Args:
            masks (`Union[List[torch.Tensor], List[np.ndarray]]`):
                Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
            original_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
                The original sizes of each image before it was resized to the model's expected input shape, in (height,
                width) format.
            reshaped_input_sizes (`Union[torch.Tensor, List[Tuple[int,int]]]`):
                The size of each image as it is fed to the model, in (height, width) format. Used to remove padding.
            mask_threshold (`float`, *optional*, defaults to 0.0):
                The threshold to use for binarizing the masks.
            binarize (`bool`, *optional*, defaults to `True`):
                Whether to binarize the masks.
            pad_size (`int`, *optional*, defaults to `self.pad_size`):
                The target size the images were padded to before being passed to the model. If None, the target size is
                assumed to be the processor's `pad_size`.

        Returns:
            (`torch.Tensor`): Batched masks in (batch_size, num_channels, height, width) format, where (height, width)
            is given by original_size.
        """
        requires_backends(self, ["torch"])
        pad_size = self.pad_size if pad_size is None else pad_size
        target_image_size = (pad_size["height"], pad_size["width"])
        if isinstance(original_sizes, (torch.Tensor, np.ndarray)):
            original_sizes = original_sizes.tolist()
        if isinstance(reshaped_input_sizes, (torch.Tensor, np.ndarray)):
            reshaped_input_sizes = reshaped_input_sizes.tolist()
        output_masks = []
        for i, original_size in enumerate(original_sizes):
            if isinstance(masks[i], np.ndarray):
                masks[i] = torch.from_numpy(masks[i])
            elif not isinstance(masks[i], torch.Tensor):
                raise ValueError("Input masks should be a list of `torch.tensors` or a list of `np.ndarray`")
            interpolated_mask = F.interpolate(masks[i], target_image_size, mode="bilinear", align_corners=False)
            interpolated_mask = interpolated_mask[..., : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1]]
            interpolated_mask = F.interpolate(interpolated_mask, original_size, mode="bilinear", align_corners=False)
            if binarize:
                interpolated_mask = interpolated_mask > mask_threshold
            output_masks.append(interpolated_mask)

        return output_masks

    def _post_process_masks_tf(
        self, masks, original_sizes, reshaped_input_sizes, mask_threshold=0.0, binarize=True, pad_size=None
    ):
        """
        Remove padding and upscale masks to the original image size.

        Args:
            masks (`tf.Tensor`):
                Batched masks from the mask_decoder in (batch_size, num_channels, height, width) format.
            original_sizes (`tf.Tensor`):
                The original size of the images before resizing for input to the model, in (height, width) format.
            reshaped_input_sizes (`tf.Tensor`):
                The size of the image input to the model, in (height, width) format. Used to remove padding.
            mask_threshold (`float`, *optional*, defaults to 0.0):
                The threshold to use for binarizing the masks.
            binarize (`bool`, *optional*, defaults to `True`):
                Whether to binarize the masks.
            pad_size (`int`, *optional*, defaults to `self.pad_size`):
                The target size the images were padded to before being passed to the model. If None, the target size is
                assumed to be the processor's `pad_size`.

        Returns:
            (`tf.Tensor`): Batched masks in (batch_size, num_channels, height, width) format, where (height, width) is
            given by original_size.
        """
        requires_backends(self, ["tf"])
        pad_size = self.pad_size if pad_size is None else pad_size
        target_image_size = (pad_size["height"], pad_size["width"])

        output_masks = []
        for i, original_size in enumerate(original_sizes):
            # tf.image expects NHWC, we transpose the NCHW inputs for it
            mask = tf.transpose(masks[i], perm=[0, 2, 3, 1])
            interpolated_mask = tf.image.resize(mask, target_image_size, method="bilinear")
            interpolated_mask = interpolated_mask[:, : reshaped_input_sizes[i][0], : reshaped_input_sizes[i][1], :]
            interpolated_mask = tf.image.resize(interpolated_mask, original_size, method="bilinear")
            if binarize:
                interpolated_mask = interpolated_mask > mask_threshold
            # And then we transpose them back at the end
            output_masks.append(tf.transpose(interpolated_mask, perm=[0, 3, 1, 2]))

        return output_masks

    def post_process_for_mask_generation(
        self, all_masks, all_scores, all_boxes, crops_nms_thresh, return_tensors="pt"
    ):
        """
        Post processes masks that are generated by calling the Non Maximum Suppression algorithm on the predicted
        masks.

        Args:
            all_masks (`Union[List[torch.Tensor], List[tf.Tensor]]`):
                List of all predicted segmentation masks
            all_scores (`Union[List[torch.Tensor], List[tf.Tensor]]`):
                List of all predicted iou scores
            all_boxes (`Union[List[torch.Tensor], List[tf.Tensor]]`):
                List of all bounding boxes of the predicted masks
            crops_nms_thresh (`float`):
                Threshold for NMS (Non Maximum Suppression) algorithm.
            return_tensors (`str`, *optional*, defaults to `pt`):
                If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`.
        """
        if return_tensors == "pt":
            return _postprocess_for_mg(all_masks, all_scores, all_boxes, crops_nms_thresh)
        elif return_tensors == "tf":
            return _postprocess_for_mg_tf(all_masks, all_scores, all_boxes, crops_nms_thresh)

    def generate_crop_boxes(
        self,
        image,
        target_size,
        crop_n_layers: int = 0,
        overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[List[int]] = 1,
        device: Optional["torch.device"] = None,
        input_data_format: Optional[Union[str, ChannelDimension]] = None,
        return_tensors: str = "pt",
    ):
        """
        Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.

        Args:
            image (`np.array`):
                Input original image
            target_size (`int`):
                Target size of the resized image
            crop_n_layers (`int`, *optional*, defaults to 0):
                If >0, mask prediction will be run again on crops of the image. Sets the number of layers to run, where
                each layer has 2**i_layer number of image crops.
            overlap_ratio (`float`, *optional*, defaults to 512/1500):
                Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of
                the image length. Later layers with more crops scale down this overlap.
            points_per_crop (`int`, *optional*, defaults to 32):
                Number of points to sample from each crop.
            crop_n_points_downscale_factor (`List[int]`, *optional*, defaults to 1):
                The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
            device (`torch.device`, *optional*, defaults to None):
                Device to use for the computation. If None, cpu will be used.
            input_data_format (`str` or `ChannelDimension`, *optional*):
                The channel dimension format of the input image. If not provided, it will be inferred.
            return_tensors (`str`, *optional*, defaults to `pt`):
                If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`.
        """
        crop_boxes, points_per_crop, cropped_images, input_labels = _generate_crop_boxes(
            image,
            target_size,
            crop_n_layers,
            overlap_ratio,
            points_per_crop,
            crop_n_points_downscale_factor,
            input_data_format,
        )
        if return_tensors == "pt":
            if device is None:
                device = torch.device("cpu")
            crop_boxes = torch.tensor(crop_boxes, device=device)
            points_per_crop = torch.tensor(points_per_crop, device=device)
            # cropped_images stays as np
            input_labels = torch.tensor(input_labels, device=device)
        elif return_tensors == "tf":
            if device is not None:
                raise ValueError("device is not a supported argument when return_tensors is tf!")
            crop_boxes = tf.convert_to_tensor(crop_boxes)
            points_per_crop = tf.convert_to_tensor(points_per_crop)
            # cropped_images stays as np
            input_labels = tf.convert_to_tensor(input_labels)
        else:
            raise ValueError("return_tensors must be either 'pt' or 'tf'.")

        return crop_boxes, points_per_crop, cropped_images, input_labels
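
    # Usage sketch (illustrative only). For automatic mask generation, the crop boxes and the
    # per-crop point prompts are typically generated once per image, e.g.:
    #
    #     crop_boxes, points_per_crop, cropped_images, input_labels = processor.generate_crop_boxes(
    #         image=np.array(image), target_size=1024, crop_n_layers=1, return_tensors="pt"
    #     )
    #     # with crop_n_layers=1 this yields the full-image box plus four overlapping quadrant
    #     # crops (5 boxes in (x0, y0, x1, y1) order), each with its own grid of prompt points.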

    def filter_masks(
        self,
        masks,
        iou_scores,
        original_size,
        cropped_box_image,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
        return_tensors="pt",
    ):
        """
        Filters the predicted masks by selecting only the ones that meet several criteria. The first criterion is that
        the iou scores need to be greater than `pred_iou_thresh`. The second criterion is that the stability score
        needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to bounding
        boxes and pads the predicted masks if necessary.

        Args:
            masks (`Union[torch.Tensor, tf.Tensor]`):
                Input masks.
            iou_scores (`Union[torch.Tensor, tf.Tensor]`):
                List of IoU scores.
            original_size (`Tuple[int,int]`):
                Size of the original image.
            cropped_box_image (`np.array`):
                The cropped image.
            pred_iou_thresh (`float`, *optional*, defaults to 0.88):
                The threshold for the iou scores.
            stability_score_thresh (`float`, *optional*, defaults to 0.95):
                The threshold for the stability score.
            mask_threshold (`float`, *optional*, defaults to 0):
                The threshold for the predicted masks.
            stability_score_offset (`float`, *optional*, defaults to 1):
                The offset for the stability score used in the `_compute_stability_score` method.
            return_tensors (`str`, *optional*, defaults to `pt`):
                If `pt`, returns `torch.Tensor`. If `tf`, returns `tf.Tensor`.
        """
        if return_tensors == "pt":
            return self._filter_masks_pt(
                masks=masks,
                iou_scores=iou_scores,
                original_size=original_size,
                cropped_box_image=cropped_box_image,
                pred_iou_thresh=pred_iou_thresh,
                stability_score_thresh=stability_score_thresh,
                mask_threshold=mask_threshold,
                stability_score_offset=stability_score_offset,
            )
        elif return_tensors == "tf":
            return self._filter_masks_tf(
                masks=masks,
                iou_scores=iou_scores,
                original_size=original_size,
                cropped_box_image=cropped_box_image,
                pred_iou_thresh=pred_iou_thresh,
                stability_score_thresh=stability_score_thresh,
                mask_threshold=mask_threshold,
                stability_score_offset=stability_score_offset,
            )

    def _filter_masks_pt(
        self,
        masks,
        iou_scores,
        original_size,
        cropped_box_image,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        """
        Filters the predicted masks by selecting only the ones that meet several criteria. The first criterion is that
        the iou scores need to be greater than `pred_iou_thresh`. The second criterion is that the stability score
        needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to bounding
        boxes and pads the predicted masks if necessary.

        Args:
            masks (`torch.Tensor`):
                Input masks.
            iou_scores (`torch.Tensor`):
                List of IoU scores.
            original_size (`Tuple[int,int]`):
                Size of the original image.
            cropped_box_image (`np.array`):
                The cropped image.
            pred_iou_thresh (`float`, *optional*, defaults to 0.88):
                The threshold for the iou scores.
            stability_score_thresh (`float`, *optional*, defaults to 0.95):
                The threshold for the stability score.
            mask_threshold (`float`, *optional*, defaults to 0):
                The threshold for the predicted masks.
            stability_score_offset (`float`, *optional*, defaults to 1):
                The offset for the stability score used in the `_compute_stability_score` method.
        """
        requires_backends(self, ["torch"])
        original_height, original_width = original_size
        iou_scores = iou_scores.flatten(0, 1)
        masks = masks.flatten(0, 1)

        if masks.shape[0] != iou_scores.shape[0]:
            raise ValueError("masks and iou_scores must have the same batch size.")

        if masks.device != iou_scores.device:
            iou_scores = iou_scores.to(masks.device)

        batch_size = masks.shape[0]

        keep_mask = torch.ones(batch_size, dtype=torch.bool, device=masks.device)

        if pred_iou_thresh > 0.0:
            keep_mask = keep_mask & (iou_scores > pred_iou_thresh)

        # compute stability score
        if stability_score_thresh > 0.0:
            stability_scores = _compute_stability_score_pt(masks, mask_threshold, stability_score_offset)
            keep_mask = keep_mask & (stability_scores > stability_score_thresh)

        scores = iou_scores[keep_mask]
        masks = masks[keep_mask]

        # binarize masks
        masks = masks > mask_threshold
        converted_boxes = _batched_mask_to_box(masks)

        keep_mask = ~_is_box_near_crop_edge(
            converted_boxes, cropped_box_image, [0, 0, original_width, original_height]
        )

        scores = scores[keep_mask]
        masks = masks[keep_mask]
        converted_boxes = converted_boxes[keep_mask]

        masks = _pad_masks(masks, cropped_box_image, original_height, original_width)
        # conversion to rle is necessary to run non-maximum suppression
        masks = _mask_to_rle_pytorch(masks)

        return masks, scores, converted_boxes

    def _filter_masks_tf(
        self,
        masks,
        iou_scores,
        original_size,
        cropped_box_image,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        """
        Filters the predicted masks by selecting only the ones that meet several criteria. The first criterion is that
        the iou scores need to be greater than `pred_iou_thresh`. The second criterion is that the stability score
        needs to be greater than `stability_score_thresh`. The method also converts the predicted masks to bounding
        boxes and pads the predicted masks if necessary.

        Args:
            masks (`tf.Tensor`):
                Input masks.
            iou_scores (`tf.Tensor`):
                List of IoU scores.
            original_size (`Tuple[int,int]`):
                Size of the original image.
            cropped_box_image (`np.array`):
                The cropped image.
            pred_iou_thresh (`float`, *optional*, defaults to 0.88):
                The threshold for the iou scores.
            stability_score_thresh (`float`, *optional*, defaults to 0.95):
                The threshold for the stability score.
            mask_threshold (`float`, *optional*, defaults to 0):
                The threshold for the predicted masks.
            stability_score_offset (`float`, *optional*, defaults to 1):
                The offset for the stability score used in the `_compute_stability_score` method.
        """
        requires_backends(self, ["tf"])
        original_height, original_width = original_size
        iou_scores = tf.reshape(iou_scores, [iou_scores.shape[0] * iou_scores.shape[1], iou_scores.shape[2:]])
        masks = tf.reshape(masks, [masks.shape[0] * masks.shape[1], masks.shape[2:]])

        if masks.shape[0] != iou_scores.shape[0]:
            raise ValueError("masks and iou_scores must have the same batch size.")

        batch_size = masks.shape[0]

        keep_mask = tf.ones(batch_size, dtype=tf.bool)

        if pred_iou_thresh > 0.0:
            keep_mask = keep_mask & (iou_scores > pred_iou_thresh)

        # compute stability score
        if stability_score_thresh > 0.0:
            stability_scores = _compute_stability_score_tf(masks, mask_threshold, stability_score_offset)
            keep_mask = keep_mask & (stability_scores > stability_score_thresh)

        scores = iou_scores[keep_mask]
        masks = masks[keep_mask]

        # binarize masks
        masks = masks > mask_threshold
        converted_boxes = _batched_mask_to_box_tf(masks)

        keep_mask = ~_is_box_near_crop_edge_tf(
            converted_boxes, cropped_box_image, [0, 0, original_width, original_height]
        )

        scores = scores[keep_mask]
        masks = masks[keep_mask]
        converted_boxes = converted_boxes[keep_mask]

        masks = _pad_masks_tf(masks, cropped_box_image, original_height, original_width)
        # conversion to rle is necessary to run non-maximum suppression
        masks = _mask_to_rle_tf(masks)

        return masks, scores, converted_boxes


def _compute_stability_score_pt(masks: "torch.Tensor", mask_threshold: float, stability_score_offset: int):
    # One mask is always contained inside the other.
    # Save memory by preventing unnecessary cast to torch.int64
    intersections = (
        (masks > (mask_threshold + stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
    )
    unions = (masks > (mask_threshold - stability_score_offset)).sum(-1, dtype=torch.int16).sum(-1, dtype=torch.int32)
    stability_scores = intersections / unions
    return stability_scores


def _compute_stability_score_tf(masks: "tf.Tensor", mask_threshold: float, stability_score_offset: int):
    # Torch does Py3-style division but TF does floor division with ints. We cast to float32 in TF to make sure
    # we get the right division results.
    intersections = tf.count_nonzero(
        masks > (mask_threshold + stability_score_offset), axis=[-1, -2], dtype=tf.float32
    )
    unions = tf.count_nonzero(masks > (mask_threshold - stability_score_offset), axis=[-1, -2], dtype=tf.float32)
    stability_scores = intersections / unions
    return stability_scores
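
# Note (editorial, not from the original file): the stability score is the IoU between the mask
# binarized at a high cutoff (mask_threshold + offset) and at a low cutoff (mask_threshold - offset);
# masks whose area barely changes when the cutoff is perturbed score close to 1.0 and are kept as
# "stable" by the filtering methods above.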


def _build_point_grid(n_per_side: int) -> np.ndarray:
    """Generates a 2D grid of points evenly spaced in [0,1]x[0,1]."""
    offset = 1 / (2 * n_per_side)
    points_one_side = np.linspace(offset, 1 - offset, n_per_side)
    points_x = np.tile(points_one_side[None, :], (n_per_side, 1))
    points_y = np.tile(points_one_side[:, None], (1, n_per_side))
    points = np.stack([points_x, points_y], axis=-1).reshape(-1, 2)
    return points
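
# Worked example (illustrative): _build_point_grid(2) uses offset = 1 / 4 = 0.25, so the per-axis
# coordinates are [0.25, 0.75] and the returned (4, 2) array of (x, y) points is
# [[0.25, 0.25], [0.75, 0.25], [0.25, 0.75], [0.75, 0.75]].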


def _normalize_coordinates(
    target_size: int, coords: np.ndarray, original_size: Tuple[int, int], is_bounding_box=False
) -> np.ndarray:
    """
    Expects a numpy array of length 2 in the final dimension. Requires the original image size in (height, width)
    format.
    """
    old_height, old_width = original_size

    scale = target_size * 1.0 / max(old_height, old_width)
    new_height, new_width = old_height * scale, old_width * scale
    new_width = int(new_width + 0.5)
    new_height = int(new_height + 0.5)

    coords = deepcopy(coords).astype(float)

    if is_bounding_box:
        coords = coords.reshape(-1, 2, 2)

    coords[..., 0] = coords[..., 0] * (new_width / old_width)
    coords[..., 1] = coords[..., 1] * (new_height / old_height)

    if is_bounding_box:
        coords = coords.reshape(-1, 4)

    return coords


def _generate_crop_boxes(
    image,
    target_size: int,  # Is it tuple here?
    crop_n_layers: int = 0,
    overlap_ratio: float = 512 / 1500,
    points_per_crop: Optional[int] = 32,
    crop_n_points_downscale_factor: Optional[List[int]] = 1,
    input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> Tuple[List[List[int]], List[int]]:
    """
    Generates a list of crop boxes of different sizes. Each layer has (2**i)**2 boxes for the ith layer.

    Args:
        image (Union[`numpy.ndarray`, `PIL.Image`, `torch.Tensor`]):
            Image to generate crops for.
        target_size (`int`):
            Size of the smallest crop.
        crop_n_layers (`int`, *optional*):
            If `crops_n_layers>0`, mask prediction will be run again on crops of the image. Sets the number of layers
            to run, where each layer has 2**i_layer number of image crops.
        overlap_ratio (`int`, *optional*):
            Sets the degree to which crops overlap. In the first crop layer, crops will overlap by this fraction of the
            image length. Later layers with more crops scale down this overlap.
        points_per_crop (`int`, *optional*):
            Number of points to sample per crop.
        crop_n_points_downscale_factor (`int`, *optional*):
            The number of points-per-side sampled in layer n is scaled down by crop_n_points_downscale_factor**n.
        input_data_format (`str` or `ChannelDimension`, *optional*):
            The channel dimension format of the input image. If not provided, it will be inferred.
    """
    if isinstance(image, list):
        raise ValueError("Only one image is allowed for crop generation.")
    image = to_numpy_array(image)
    original_size = get_image_size(image, input_data_format)

    points_grid = []
    for i in range(crop_n_layers + 1):
        n_points = int(points_per_crop / (crop_n_points_downscale_factor**i))
        points_grid.append(_build_point_grid(n_points))

    crop_boxes, layer_idxs = _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size)

    cropped_images, point_grid_per_crop = _generate_crop_images(
        crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format
    )
    crop_boxes = np.array(crop_boxes)
    crop_boxes = crop_boxes.astype(np.float32)
    points_per_crop = np.array([point_grid_per_crop])
    points_per_crop = np.transpose(points_per_crop, axes=(0, 2, 1, 3))

    input_labels = np.ones_like(points_per_crop[:, :, :, 0], dtype=np.int64)

    return crop_boxes, points_per_crop, cropped_images, input_labels


def _generate_per_layer_crops(crop_n_layers, overlap_ratio, original_size):
    """
    Generates 2 ** (layer idx + 1) crops per side for each of the crop_n_layers. Crops are in the XYXY format: the
    XYXY format consists of the following required indices:
        - X0: X coordinate of the top left of the bounding box
        - Y0: Y coordinate of the top left of the bounding box
        - X1: X coordinate of the bottom right of the bounding box
        - Y1: Y coordinate of the bottom right of the bounding box
    """
    crop_boxes, layer_idxs = [], []
    im_height, im_width = original_size
    short_side = min(im_height, im_width)

    # Original image
    crop_boxes.append([0, 0, im_width, im_height])
    layer_idxs.append(0)
    for i_layer in range(crop_n_layers):
        n_crops_per_side = 2 ** (i_layer + 1)
        overlap = int(overlap_ratio * short_side * (2 / n_crops_per_side))

        crop_width = int(math.ceil((overlap * (n_crops_per_side - 1) + im_width) / n_crops_per_side))
        crop_height = int(math.ceil((overlap * (n_crops_per_side - 1) + im_height) / n_crops_per_side))

        crop_box_x0 = [int((crop_width - overlap) * i) for i in range(n_crops_per_side)]
        crop_box_y0 = [int((crop_height - overlap) * i) for i in range(n_crops_per_side)]

        for left, top in product(crop_box_x0, crop_box_y0):
            box = [left, top, min(left + crop_width, im_width), min(top + crop_height, im_height)]
            crop_boxes.append(box)
            layer_idxs.append(i_layer + 1)

    return crop_boxes, layer_idxs
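
# Worked example (illustrative): for a 1024x1024 image with crop_n_layers=1 and the default
# overlap_ratio of 512/1500, layer 1 uses 2 crops per side with overlap = int(0.3413 * 1024) = 349
# and crop_width = crop_height = ceil((349 + 1024) / 2) = 687, giving the full-image box plus the
# four boxes [0, 0, 687, 687], [0, 338, 687, 1024], [338, 0, 1024, 687] and [338, 338, 1024, 1024].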


def _generate_crop_images(
    crop_boxes, image, points_grid, layer_idxs, target_size, original_size, input_data_format=None
):
    """
    Takes as an input bounding boxes that are used to crop the image. Based on the crops, the corresponding points are
    also passed.
    """
    cropped_images = []
    total_points_per_crop = []
    for i, crop_box in enumerate(crop_boxes):
        left, top, right, bottom = crop_box

        channel_dim = infer_channel_dimension_format(image, input_data_format)
        if channel_dim == ChannelDimension.LAST:
            cropped_im = image[top:bottom, left:right, :]
        else:
            cropped_im = image[:, top:bottom, left:right]

        cropped_images.append(cropped_im)

        cropped_im_size = get_image_size(cropped_im, channel_dim)
        points_scale = np.array(cropped_im_size)[None, ::-1]

        points = points_grid[layer_idxs[i]] * points_scale
        normalized_points = _normalize_coordinates(target_size, points, original_size)
        total_points_per_crop.append(normalized_points)

    return cropped_images, total_points_per_crop
def _pad_masks(masks, crop_box: List[int], orig_height: int, orig_width: int):
    left, top, right, bottom = crop_box
    if left == 0 and top == 0 and right == orig_width and bottom == orig_height:
        return masks
    # Coordinate transform masks
    pad_x, pad_y = orig_width - (right - left), orig_height - (bottom - top)
    pad = (left, pad_x - left, top, pad_y - top)
    return torch.nn.functional.pad(masks, pad, value=0)

def _pad_masks_tf(masks, crop_box: List[int], orig_height: int, orig_width: int):
    left, top, right, bottom = crop_box
    if left == 0 and top == 0 and right == orig_width and bottom == orig_height:
        return masks
    # Coordinate transform masks
    pad_x, pad_y = orig_width - (right - left), orig_height - (bottom - top)
    # tf.pad expects a (rank, 2) paddings spec rather than torch's flat (left, right, top, bottom) tuple, so only
    # the last two (height, width) dimensions are padded here.
    paddings = [[0, 0]] * (len(masks.shape) - 2) + [[top, pad_y - top], [left, pad_x - left]]
    return tf.pad(masks, paddings, constant_values=0)

def _is_box_near_crop_edge(boxes, crop_box, orig_box, atol=20.0):
    """Filter masks at the edge of a crop, but not at the edge of the original image."""
    crop_box_torch = torch.as_tensor(crop_box, dtype=torch.float, device=boxes.device)
    orig_box_torch = torch.as_tensor(orig_box, dtype=torch.float, device=boxes.device)

    left, top, _, _ = crop_box
    offset = torch.tensor([[left, top, left, top]], device=boxes.device)
    # Check if boxes has a channel dimension
    if len(boxes.shape) == 3:
        offset = offset.unsqueeze(1)
    boxes = (boxes + offset).float()

    near_crop_edge = torch.isclose(boxes, crop_box_torch[None, :], atol=atol, rtol=0)
    near_image_edge = torch.isclose(boxes, orig_box_torch[None, :], atol=atol, rtol=0)
    near_crop_edge = torch.logical_and(near_crop_edge, ~near_image_edge)
    return torch.any(near_crop_edge, dim=1)

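# Illustrative sketch (toy values, not part of the original module): boxes are given in the crop's local frame and
# shifted back into the original image frame by the crop offset. A box flush against an interior crop edge is flagged
# for removal (True), while a box whose edge coincides with the original image border is kept (False).
def _example_box_near_crop_edge():
    crop_box = [298, 198, 800, 600]  # XYXY crop inside an 800x600 image
    orig_box = [0, 0, 800, 600]
    boxes = torch.tensor(
        [
            [0.0, 60.0, 100.0, 160.0],    # maps to left=298, i.e. the crop edge only -> True
            [400.0, 60.0, 502.0, 160.0],  # maps to right=800, which is also the image edge -> False
        ]
    )
    return _is_box_near_crop_edge(boxes, crop_box, orig_box)  # tensor([True, False])
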
def _is_box_near_crop_edge_tf(boxes, crop_box, orig_box, atol=20.0):
    """Filter masks at the edge of a crop, but not at the edge of the original image."""
    crop_box_tf = tf.convert_to_tensor(crop_box, dtype=tf.float32)
    orig_box_tf = tf.convert_to_tensor(orig_box, dtype=tf.float32)

    left, top, _, _ = crop_box
    offset = tf.convert_to_tensor([[left, top, left, top]], dtype=tf.float32)
    # Check if boxes has a channel dimension
    if len(boxes.shape) == 3:
        offset = tf.expand_dims(offset, 1)
    # TensorFlow does not promote dtypes on addition, so cast the boxes to float32 before applying the offset
    boxes = tf.cast(boxes, tf.float32) + offset

    near_crop_edge = tnp.isclose(boxes, crop_box_tf[None, :], atol=atol, rtol=0)
    near_image_edge = tnp.isclose(boxes, orig_box_tf[None, :], atol=atol, rtol=0)
    near_crop_edge = tf.math.logical_and(near_crop_edge, ~near_image_edge)
    return tf.reduce_any(near_crop_edge, axis=1)

def _batched_mask_to_box(masks: "torch.Tensor"):
    """
    Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format which
    corresponds to the following required indices:
        - LEFT: left hand side of the bounding box
        - TOP: top of the bounding box
        - RIGHT: right of the bounding box
        - BOTTOM: bottom of the bounding box

    Returns [0, 0, 0, 0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output
    shape is channel_1 x channel_2 x ... x 4.

    Args:
        - masks (`torch.Tensor` of shape `(batch, nb_mask, height, width)`)
    """
    # torch.max below raises an error on empty inputs, just skip in this case
    if torch.numel(masks) == 0:
        return torch.zeros(*masks.shape[:-2], 4, device=masks.device)

    # Normalize shape to Cxheightxwidth
    shape = masks.shape
    height, width = shape[-2:]

    # Get top and bottom edges
    in_height, _ = torch.max(masks, dim=-1)
    in_height_coords = in_height * torch.arange(height, device=in_height.device)[None, :]
    bottom_edges, _ = torch.max(in_height_coords, dim=-1)
    in_height_coords = in_height_coords + height * (~in_height)
    top_edges, _ = torch.min(in_height_coords, dim=-1)

    # Get left and right edges
    in_width, _ = torch.max(masks, dim=-2)
    in_width_coords = in_width * torch.arange(width, device=in_width.device)[None, :]
    right_edges, _ = torch.max(in_width_coords, dim=-1)
    in_width_coords = in_width_coords + width * (~in_width)
    left_edges, _ = torch.min(in_width_coords, dim=-1)

    # If the mask is empty the right edge will be to the left of the left edge.
    # Replace these boxes with [0, 0, 0, 0]
    empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
    out = torch.stack([left_edges, top_edges, right_edges, bottom_edges], dim=-1)
    out = out * (~empty_filter).unsqueeze(-1)

    # Return to original shape
    out = out.reshape(*shape[:-2], 4)
    return out

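# Illustrative sketch (toy values, not part of the original module): a 4x5 boolean mask whose filled region spans
# rows 1-2 and columns 1-3 yields the XYXY box [1, 1, 3, 2]; an all-zero mask yields [0, 0, 0, 0].
def _example_batched_mask_to_box():
    masks = torch.zeros(1, 2, 4, 5, dtype=torch.bool)
    masks[0, 0, 1:3, 1:4] = True
    return _batched_mask_to_box(masks)  # tensor([[[1, 1, 3, 2], [0, 0, 0, 0]]])
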
def _batched_mask_to_box_tf(masks: "tf.Tensor"):
    """
    Computes the bounding boxes around the given input masks. The bounding boxes are in the XYXY format which
    corresponds to the following required indices:
        - LEFT: left hand side of the bounding box
        - TOP: top of the bounding box
        - RIGHT: right of the bounding box
        - BOTTOM: bottom of the bounding box

    Returns [0, 0, 0, 0] for an empty mask. For input shape channel_1 x channel_2 x ... x height x width, the output
    shape is channel_1 x channel_2 x ... x 4.

    Args:
        - masks (`tf.Tensor` of shape `(batch, nb_mask, height, width)`)
    """
    if tf.size(masks) == 0:
        return tf.zeros([*masks.shape[:-2], 4])

    # Normalize shape to Cxheightxwidth
    shape = shape_list(masks)
    height, width = shape[-2:]
    masks = tf.cast(masks, tf.bool)

    # Get top and bottom edges
    in_height = tf.reduce_any(masks, axis=-1)
    in_height_coords = tf.cast(in_height, tf.int32) * tf.range(height)[None, :]
    bottom_edges = tf.reduce_max(in_height_coords, axis=-1)
    in_height_coords = in_height_coords + height * tf.cast(~in_height, tf.int32)
    top_edges = tf.reduce_min(in_height_coords, axis=-1)

    # Get left and right edges
    in_width = tf.reduce_any(masks, axis=-2)
    in_width_coords = tf.cast(in_width, tf.int32) * tf.range(width)[None, :]
    right_edges = tf.reduce_max(in_width_coords, axis=-1)
    in_width_coords = in_width_coords + width * tf.cast(~in_width, tf.int32)
    left_edges = tf.reduce_min(in_width_coords, axis=-1)

    # If the mask is empty the right edge will be to the left of the left edge.
    # Replace these boxes with [0, 0, 0, 0]
    empty_filter = (right_edges < left_edges) | (bottom_edges < top_edges)
    out = tf.stack([left_edges, top_edges, right_edges, bottom_edges], axis=-1)
    out = out * tf.cast(tf.expand_dims(~empty_filter, -1), out.dtype)

    # Return to original shape
    out = tf.reshape(out, [*shape[:-2], 4])
    return out

def _mask_to_rle_pytorch(input_mask: "torch.Tensor"):
    """
    Encodes masks to an uncompressed run-length encoding (RLE), in the format expected by pycoco tools.
    """
    # Put in fortran order and flatten height and width
    batch_size, height, width = input_mask.shape
    input_mask = input_mask.permute(0, 2, 1).flatten(1)

    # Compute change indices
    diff = input_mask[:, 1:] ^ input_mask[:, :-1]
    change_indices = diff.nonzero()

    # Encode run length
    out = []
    for i in range(batch_size):
        cur_idxs = change_indices[change_indices[:, 0] == i, 1] + 1
        btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
        counts = [] if input_mask[i, 0] == 0 else [0]
        counts += [cur_idxs[0].item()] + btw_idxs.tolist() + [height * width - cur_idxs[-1].item()]
        out.append({"size": [height, width], "counts": counts})
    return out

def _mask_to_rle_tf(input_mask: "tf.Tensor"):
    """
    Encodes masks to an uncompressed run-length encoding (RLE), in the format expected by pycoco tools.
    """
    # Put in fortran order and flatten height and width
    batch_size, height, width = input_mask.shape
    input_mask = flatten(tf.transpose(input_mask, perm=(0, 2, 1)), 1)

    # Compute change indices
    diff = input_mask[:, 1:] ^ input_mask[:, :-1]
    change_indices = tf.where(diff)

    # Encode run length
    out = []
    for i in range(batch_size):
        # tf tensors do not support numpy-style boolean indexing, so select the change indices of sample i explicitly
        cur_idxs = tf.boolean_mask(change_indices[:, 1], change_indices[:, 0] == i) + 1
        btw_idxs = cur_idxs[1:] - cur_idxs[:-1]
        counts = [] if input_mask[i, 0] == 0 else [0]
        counts += [int(cur_idxs[0])] + btw_idxs.numpy().tolist() + [height * width - int(cur_idxs[-1])]
        out.append({"size": [height, width], "counts": counts})
    return out

def _rle_to_mask(rle: Dict[str, Any]) -> np.ndarray:
    """Compute a binary mask from an uncompressed RLE."""
    height, width = rle["size"]
    mask = np.empty(height * width, dtype=bool)
    idx = 0
    parity = False
    for count in rle["counts"]:
        mask[idx : idx + count] = parity
        idx += count
        parity = not parity
    mask = mask.reshape(width, height)
    return mask.transpose()  # Reshape to original shape

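# Illustrative sketch (toy values, not part of the original module): round-trips a small boolean mask through the
# uncompressed RLE produced above. Counts are column-major (Fortran order) runs, starting with the number of 0s, so
# the mask below encodes to {"size": [2, 3], "counts": [2, 3, 1]}.
def _example_rle_roundtrip():
    mask = torch.tensor([[[0, 1, 1], [0, 1, 0]]], dtype=torch.bool)  # shape (batch=1, height=2, width=3)
    rle = _mask_to_rle_pytorch(mask)[0]
    return bool((_rle_to_mask(rle) == mask[0].numpy()).all())  # True
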
def _postprocess_for_mg(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):
    """
    Perform NMS (Non Maximum Suppression) on the outputs.

    Args:
        rle_masks (`List[Dict[str, Any]]`):
            Binary masks in the RLE format.
        iou_scores (`torch.Tensor` of shape (nb_masks, 1)):
            iou_scores predicted by the model.
        mask_boxes (`torch.Tensor`):
            The bounding boxes corresponding to segmentation masks.
        amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7):
            NMS threshold.
    """
    keep_by_nms = batched_nms(
        boxes=mask_boxes.float(),
        scores=iou_scores,
        idxs=torch.zeros(mask_boxes.shape[0]),
        iou_threshold=amg_crops_nms_thresh,
    )

    iou_scores = iou_scores[keep_by_nms]
    rle_masks = [rle_masks[i] for i in keep_by_nms]
    mask_boxes = mask_boxes[keep_by_nms]
    masks = [_rle_to_mask(rle) for rle in rle_masks]

    return masks, iou_scores, rle_masks, mask_boxes

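# Illustrative usage sketch (toy values, not part of the original module): two heavily overlapping boxes with
# different scores. At the default threshold of 0.7 the lower-scoring duplicate is suppressed, and only the first
# RLE mask is decoded back into a binary numpy mask.
def _example_postprocess_for_mg():
    rle_masks = [
        {"size": [4, 4], "counts": [0, 8, 8]},  # mask covering the two left columns
        {"size": [4, 4], "counts": [0, 7, 9]},  # almost the same region, slightly smaller
    ]
    iou_scores = torch.tensor([0.9, 0.5])
    mask_boxes = torch.tensor([[0.0, 0.0, 2.0, 4.0], [0.0, 0.0, 2.0, 3.0]])
    return _postprocess_for_mg(rle_masks, iou_scores, mask_boxes)
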
def _postprocess_for_mg_tf(rle_masks, iou_scores, mask_boxes, amg_crops_nms_thresh=0.7):
    """
    Perform NMS (Non Maximum Suppression) on the outputs.

    Args:
        rle_masks (`List[Dict[str, Any]]`):
            Binary masks in the RLE format.
        iou_scores (`tf.Tensor` of shape (nb_masks, 1)):
            iou_scores predicted by the model.
        mask_boxes (`tf.Tensor`):
            The bounding boxes corresponding to segmentation masks.
        amg_crops_nms_thresh (`float`, *optional*, defaults to 0.7):
            NMS threshold.
    """
    # Single-class NMS: since every box shares the same (dummy) class, tf.image.non_max_suppression is the
    # TensorFlow counterpart of the torchvision batched_nms call used in the PyTorch path. IoU is unchanged by
    # consistently reading XYXY boxes as (y1, x1, y2, x2), so no coordinate reordering is needed here.
    keep_by_nms = tf.image.non_max_suppression(
        boxes=tf.cast(mask_boxes, tf.float32),
        scores=tf.reshape(tf.cast(iou_scores, tf.float32), [-1]),
        max_output_size=tf.shape(mask_boxes)[0],
        iou_threshold=amg_crops_nms_thresh,
    )

    iou_scores = tf.gather(iou_scores, keep_by_nms)
    rle_masks = [rle_masks[int(i)] for i in keep_by_nms]
    mask_boxes = tf.gather(mask_boxes, keep_by_nms)
    masks = [_rle_to_mask(rle) for rle in rle_masks]

    return masks, iou_scores, rle_masks, mask_boxes