modeling_superpoint.py
# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch SuperPoint model."""

from dataclasses import dataclass
from typing import Optional, Tuple, Union

import torch
from torch import nn

from transformers import PreTrainedModel
from transformers.modeling_outputs import (
    BaseModelOutputWithNoAttention,
)
from transformers.models.superpoint.configuration_superpoint import SuperPointConfig

from ...pytorch_utils import is_torch_greater_or_equal_than_1_13
from ...utils import (
    ModelOutput,
    add_start_docstrings,
    add_start_docstrings_to_model_forward,
    logging,
)

logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "SuperPointConfig"
_CHECKPOINT_FOR_DOC = "magic-leap-community/superpoint"

def remove_keypoints_from_borders(
    keypoints: torch.Tensor, scores: torch.Tensor, border: int, height: int, width: int
) -> Tuple[torch.Tensor, torch.Tensor]:
    """Removes keypoints (and their associated scores) that are too close to the border"""
    mask_h = (keypoints[:, 0] >= border) & (keypoints[:, 0] < (height - border))
    mask_w = (keypoints[:, 1] >= border) & (keypoints[:, 1] < (width - border))
    mask = mask_h & mask_w
    return keypoints[mask], scores[mask]


def top_k_keypoints(keypoints: torch.Tensor, scores: torch.Tensor, k: int) -> Tuple[torch.Tensor, torch.Tensor]:
    """Keeps the k keypoints with highest score"""
    if k >= len(keypoints):
        return keypoints, scores
    scores, indices = torch.topk(scores, k, dim=0)
    return keypoints[indices], scores

def simple_nms(scores: torch.Tensor, nms_radius: int) -> torch.Tensor:
    """Applies non-maximum suppression on scores"""
    if nms_radius < 0:
        raise ValueError("Expected non-negative values for nms_radius")

    def max_pool(x):
        return nn.functional.max_pool2d(x, kernel_size=nms_radius * 2 + 1, stride=1, padding=nms_radius)

    zeros = torch.zeros_like(scores)
    max_mask = scores == max_pool(scores)
    for _ in range(2):
        supp_mask = max_pool(max_mask.float()) > 0
        supp_scores = torch.where(supp_mask, zeros, scores)
        new_max_mask = supp_scores == max_pool(supp_scores)
        max_mask = max_mask | (new_max_mask & (~supp_mask))
    return torch.where(max_mask, scores, zeros)

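# A minimal sketch of what simple_nms does (illustrative only): every position that is
# the maximum of its (2 * nms_radius + 1)-square neighborhood keeps its score and all
# other positions are zeroed; the two refinement iterations re-admit maxima that were
# suppressed only by a neighbor that was itself suppressed. For a (batch, height, width)
# score map, as used below:
#     scores = torch.rand(1, 24, 24)
#     kept = simple_nms(scores, nms_radius=4)  # same shape, mostly zeros
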
@dataclass
class SuperPointKeypointDescriptionOutput(ModelOutput):
    """
    Base class for outputs of image point description models. Due to the nature of keypoint detection, the number of
    keypoints is not fixed and can vary from image to image, which makes batching non-trivial. In the batch of images,
    the maximum number of keypoints is set as the dimension of the keypoints, scores and descriptors tensors. The mask
    tensor is used to indicate which values in the keypoints, scores and descriptors tensors are keypoint information
    and which are padding.

    Args:
        loss (`torch.FloatTensor` of shape `(1,)`, *optional*):
            Loss computed during training.
        keypoints (`torch.FloatTensor` of shape `(batch_size, num_keypoints, 2)`):
            Absolute (x, y) pixel coordinates of predicted keypoints in a given image.
        scores (`torch.FloatTensor` of shape `(batch_size, num_keypoints)`):
            Scores of predicted keypoints.
        descriptors (`torch.FloatTensor` of shape `(batch_size, num_keypoints, descriptor_size)`):
            Descriptors of predicted keypoints.
        mask (`torch.BoolTensor` of shape `(batch_size, num_keypoints)`):
            Mask indicating which values in keypoints, scores and descriptors are keypoint information.
        hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or
            when `config.output_hidden_states=True`):
            Tuple of `torch.FloatTensor` (one for the output of each stage) of shape `(batch_size, num_channels,
            height, width)`. Hidden-states (also called feature maps) of the model at the output of each stage.
    """

    loss: Optional[torch.FloatTensor] = None
    keypoints: Optional[torch.FloatTensor] = None
    scores: Optional[torch.FloatTensor] = None
    descriptors: Optional[torch.FloatTensor] = None
    mask: Optional[torch.BoolTensor] = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None

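# Padding illustration (hypothetical numbers): with two images yielding 3 and 5
# keypoints respectively, the tensors above are padded to num_keypoints = 5 and the
# mask rows read [1, 1, 1, 0, 0] and [1, 1, 1, 1, 1].
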
class SuperPointConvBlock(nn.Module):
    def __init__(
        self, config: SuperPointConfig, in_channels: int, out_channels: int, add_pooling: bool = False
    ) -> None:
        super().__init__()
        self.conv_a = nn.Conv2d(
            in_channels,
            out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
        )
        self.conv_b = nn.Conv2d(
            out_channels,
            out_channels,
            kernel_size=3,
            stride=1,
            padding=1,
        )
        self.relu = nn.ReLU(inplace=True)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2) if add_pooling else None

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.relu(self.conv_a(hidden_states))
        hidden_states = self.relu(self.conv_b(hidden_states))
        if self.pool is not None:
            hidden_states = self.pool(hidden_states)
        return hidden_states

class SuperPointEncoder(nn.Module):
    """
    SuperPoint encoder module. It is made of 4 convolutional blocks (each a pair of 3x3 convolutions) with ReLU
    activation and max pooling, progressively reducing the spatial resolution of the image.
    """

    def __init__(self, config: SuperPointConfig) -> None:
        super().__init__()
        # SuperPoint uses 1 channel images
        self.input_dim = 1

        conv_blocks = []
        conv_blocks.append(
            SuperPointConvBlock(config, self.input_dim, config.encoder_hidden_sizes[0], add_pooling=True)
        )
        for i in range(1, len(config.encoder_hidden_sizes) - 1):
            conv_blocks.append(
                SuperPointConvBlock(
                    config, config.encoder_hidden_sizes[i - 1], config.encoder_hidden_sizes[i], add_pooling=True
                )
            )
        conv_blocks.append(
            SuperPointConvBlock(
                config, config.encoder_hidden_sizes[-2], config.encoder_hidden_sizes[-1], add_pooling=False
            )
        )
        self.conv_blocks = nn.ModuleList(conv_blocks)

    def forward(
        self,
        input,
        output_hidden_states: Optional[bool] = False,
        return_dict: Optional[bool] = True,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        all_hidden_states = () if output_hidden_states else None

        for conv_block in self.conv_blocks:
            input = conv_block(input)
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (input,)
        output = input
        if not return_dict:
            return tuple(v for v in [output, all_hidden_states] if v is not None)
        return BaseModelOutputWithNoAttention(
            last_hidden_state=output,
            hidden_states=all_hidden_states,
        )

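# Shape note: only the first three conv blocks pool, so a (batch_size, 1, height, width)
# input is encoded to (batch_size, encoder_hidden_sizes[-1], height / 8, width / 8),
# e.g. a 1x1x480x640 image maps to a 1x128x60x80 feature map with the default config.
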
class SuperPointInterestPointDecoder(nn.Module):
    """
    The SuperPointInterestPointDecoder uses the output of the SuperPointEncoder to compute the keypoints and their
    scores. The scores are first computed by a convolutional layer, then a softmax is applied to get a probability
    distribution over the 65 possible keypoint classes. The keypoints are then extracted from the scores by
    thresholding and non-maximum suppression. Post-processing is then applied to remove keypoints too close to the
    image borders as well as to keep only the k keypoints with highest score.
    """

    def __init__(self, config: SuperPointConfig) -> None:
        super().__init__()
        self.keypoint_threshold = config.keypoint_threshold
        self.max_keypoints = config.max_keypoints
        self.nms_radius = config.nms_radius
        self.border_removal_distance = config.border_removal_distance

        self.relu = nn.ReLU(inplace=True)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv_score_a = nn.Conv2d(
            config.encoder_hidden_sizes[-1],
            config.decoder_hidden_size,
            kernel_size=3,
            stride=1,
            padding=1,
        )
        self.conv_score_b = nn.Conv2d(
            config.decoder_hidden_size, config.keypoint_decoder_dim, kernel_size=1, stride=1, padding=0
        )

    def forward(self, encoded: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        scores = self._get_pixel_scores(encoded)
        keypoints, scores = self._extract_keypoints(scores)
        return keypoints, scores

    def _get_pixel_scores(self, encoded: torch.Tensor) -> torch.Tensor:
        """Based on the encoder output, compute the scores for each pixel of the image"""
        scores = self.relu(self.conv_score_a(encoded))
        scores = self.conv_score_b(scores)
        # Softmax over the 65 classes, then drop the "no interest point" dustbin channel,
        # keeping one score per pixel of each 8x8 cell
        scores = nn.functional.softmax(scores, 1)[:, :-1]
        batch_size, _, height, width = scores.shape
        # Depth-to-space: unfold the 64 channels back into 8x8 pixel blocks
        scores = scores.permute(0, 2, 3, 1).reshape(batch_size, height, width, 8, 8)
        scores = scores.permute(0, 1, 3, 2, 4).reshape(batch_size, height * 8, width * 8)
        scores = simple_nms(scores, self.nms_radius)
        return scores

    def _extract_keypoints(self, scores: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Based on their scores, extract the pixels that represent the keypoints that will be used for descriptor computation"""
        # scores is already at full image resolution here
        _, height, width = scores.shape

        # Threshold keypoints by score value
        keypoints = torch.nonzero(scores[0] > self.keypoint_threshold)
        scores = scores[0][tuple(keypoints.t())]

        # Discard keypoints near the image borders
        keypoints, scores = remove_keypoints_from_borders(
            keypoints, scores, self.border_removal_distance, height, width
        )

        # Keep the k keypoints with highest score
        if self.max_keypoints >= 0:
            keypoints, scores = top_k_keypoints(keypoints, scores, self.max_keypoints)

        # Convert (y, x) to (x, y)
        keypoints = torch.flip(keypoints, [1]).float()

        return keypoints, scores

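# Shape trace for the decoder above (assuming the default config): a
# (batch_size, 128, height / 8, width / 8) feature map becomes a
# (batch_size, 65, height / 8, width / 8) logit map; after the softmax, dropping the
# dustbin channel and the depth-to-space reshuffle, scores are (batch_size, height, width),
# from which keypoints are read off as (x, y) pixel indices.
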
class SuperPointDescriptorDecoder(nn.Module):
    """
    The SuperPointDescriptorDecoder uses the outputs of both the SuperPointEncoder and the
    SuperPointInterestPointDecoder to compute the descriptors at the keypoints locations.

    The descriptors are first computed by a convolutional layer, then normalized to have a norm of 1. The descriptors
    are then interpolated at the keypoints locations.
    """

    def __init__(self, config: SuperPointConfig) -> None:
        super().__init__()
        self.relu = nn.ReLU(inplace=True)
        self.pool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv_descriptor_a = nn.Conv2d(
            config.encoder_hidden_sizes[-1],
            config.decoder_hidden_size,
            kernel_size=3,
            stride=1,
            padding=1,
        )
        self.conv_descriptor_b = nn.Conv2d(
            config.decoder_hidden_size,
            config.descriptor_decoder_dim,
            kernel_size=1,
            stride=1,
            padding=0,
        )

    def forward(self, encoded: torch.Tensor, keypoints: torch.Tensor) -> torch.Tensor:
        """Based on the encoder output and the keypoints, compute the descriptors for each keypoint"""
        descriptors = self.conv_descriptor_b(self.relu(self.conv_descriptor_a(encoded)))
        descriptors = nn.functional.normalize(descriptors, p=2, dim=1)

        descriptors = self._sample_descriptors(keypoints[None], descriptors[0][None], 8)[0]

        # [descriptor_dim, num_keypoints] -> [num_keypoints, descriptor_dim]
        descriptors = torch.transpose(descriptors, 0, 1)

        return descriptors

    @staticmethod
    def _sample_descriptors(keypoints, descriptors, scale: int = 8) -> torch.Tensor:
        """Interpolate descriptors at keypoint locations"""
        batch_size, num_channels, height, width = descriptors.shape
        # Map keypoint pixel coordinates to the (-1, 1) range expected by grid_sample
        keypoints = keypoints - scale / 2 + 0.5
        divisor = torch.tensor([[(width * scale - scale / 2 - 0.5), (height * scale - scale / 2 - 0.5)]])
        divisor = divisor.to(keypoints)
        keypoints /= divisor
        keypoints = keypoints * 2 - 1  # normalize to (-1, 1)
        kwargs = {"align_corners": True} if is_torch_greater_or_equal_than_1_13 else {}
        # [batch_size, num_keypoints, 2] -> [batch_size, 1, num_keypoints, 2]
        keypoints = keypoints.view(batch_size, 1, -1, 2)
        descriptors = nn.functional.grid_sample(descriptors, keypoints, mode="bilinear", **kwargs)
        # [batch_size, num_channels, 1, num_keypoints] -> [batch_size, num_channels, num_keypoints]
        descriptors = descriptors.reshape(batch_size, num_channels, -1)
        descriptors = nn.functional.normalize(descriptors, p=2, dim=1)
        return descriptors

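# Worked example of the normalization above: with scale = 8, a keypoint at
# x = scale / 2 - 0.5 = 3.5 (the center of the first 8x8 cell) maps to -1, and one at
# x = width * scale - 1 (the last pixel column) maps to +1, matching the coordinate
# range grid_sample uses with align_corners=True.
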
class SuperPointPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = SuperPointConfig
    base_model_prefix = "superpoint"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = False

    def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
        """Initialize the weights"""
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            # Slightly different from the TF version which uses truncated_normal for initialization
            # cf https://github.com/pytorch/pytorch/pull/5617
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def extract_one_channel_pixel_values(self, pixel_values: torch.FloatTensor) -> torch.FloatTensor:
        """
        Assuming pixel_values has shape (batch_size, 3, height, width), and that all channel values are the same,
        extract the first channel value to get a tensor of shape (batch_size, 1, height, width) for SuperPoint. This is
        a workaround for the issue discussed in:
        https://github.com/huggingface/transformers/pull/25786#issuecomment-1730176446

        Args:
            pixel_values: torch.FloatTensor of shape (batch_size, 3, height, width)

        Returns:
            pixel_values: torch.FloatTensor of shape (batch_size, 1, height, width)
        """
        return pixel_values[:, 0, :, :][:, None, :, :]

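# Note: the slicing in extract_one_channel_pixel_values above is equivalent to
# pixel_values[:, 0:1, :, :], which also keeps the singleton channel dimension.
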
SUPERPOINT_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`SuperPointConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

SUPERPOINT_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`SuperPointImageProcessor`]. See
            [`SuperPointImageProcessor.__call__`] for details.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""

@add_start_docstrings(
    "SuperPoint model outputting keypoints and descriptors.",
    SUPERPOINT_START_DOCSTRING,
)
class SuperPointForKeypointDetection(SuperPointPreTrainedModel):
    """
    SuperPoint model. It consists of a SuperPointEncoder, a SuperPointInterestPointDecoder and a
    SuperPointDescriptorDecoder. SuperPoint was proposed in `SuperPoint: Self-Supervised Interest Point Detection and
    Description <https://arxiv.org/abs/1712.07629>`__ by Daniel DeTone, Tomasz Malisiewicz, and Andrew Rabinovich. It
    is a fully convolutional neural network that extracts keypoints and descriptors from an image. It is trained in a
    self-supervised manner, using a combination of a photometric loss and a loss based on the homographic adaptation of
    keypoints. It is made of a convolutional encoder and two decoders: one for keypoints and one for descriptors.
    """

    def __init__(self, config: SuperPointConfig) -> None:
        super().__init__(config)

        self.config = config

        self.encoder = SuperPointEncoder(config)
        self.keypoint_decoder = SuperPointInterestPointDecoder(config)
        self.descriptor_decoder = SuperPointDescriptorDecoder(config)

        self.post_init()
    @add_start_docstrings_to_model_forward(SUPERPOINT_INPUTS_DOCSTRING)
    def forward(
        self,
        pixel_values: torch.FloatTensor,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, SuperPointKeypointDescriptionOutput]:
        """
        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, SuperPointForKeypointDetection
        >>> import torch
        >>> from PIL import Image
        >>> import requests

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint")
        >>> model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint")

        >>> inputs = processor(image, return_tensors="pt")
        >>> outputs = model(**inputs)
        ```"""
        loss = None
        if labels is not None:
            raise ValueError("SuperPoint does not support training for now.")

        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        pixel_values = self.extract_one_channel_pixel_values(pixel_values)

        batch_size = pixel_values.shape[0]

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )

        last_hidden_state = encoder_outputs[0]

        # Decode keypoints, scores and descriptors one image at a time, since each image
        # can yield a different number of keypoints
        list_keypoints_scores = [
            self.keypoint_decoder(last_hidden_state[None, ...]) for last_hidden_state in last_hidden_state
        ]

        list_keypoints = [keypoints_scores[0] for keypoints_scores in list_keypoints_scores]
        list_scores = [keypoints_scores[1] for keypoints_scores in list_keypoints_scores]

        list_descriptors = [
            self.descriptor_decoder(last_hidden_state[None, ...], keypoints[None, ...])
            for last_hidden_state, keypoints in zip(last_hidden_state, list_keypoints)
        ]

        # Pad the per-image results to the maximum number of keypoints in the batch,
        # recording the valid entries in the mask
        maximum_num_keypoints = max(keypoints.shape[0] for keypoints in list_keypoints)

        keypoints = torch.zeros((batch_size, maximum_num_keypoints, 2), device=pixel_values.device)
        scores = torch.zeros((batch_size, maximum_num_keypoints), device=pixel_values.device)
        descriptors = torch.zeros(
            (batch_size, maximum_num_keypoints, self.config.descriptor_decoder_dim),
            device=pixel_values.device,
        )
        mask = torch.zeros((batch_size, maximum_num_keypoints), device=pixel_values.device, dtype=torch.int)

        for i, (_keypoints, _scores, _descriptors) in enumerate(zip(list_keypoints, list_scores, list_descriptors)):
            keypoints[i, : _keypoints.shape[0]] = _keypoints
            scores[i, : _scores.shape[0]] = _scores
            descriptors[i, : _descriptors.shape[0]] = _descriptors
            mask[i, : _scores.shape[0]] = 1

        hidden_states = encoder_outputs[1] if output_hidden_states else None
        if not return_dict:
            return tuple(v for v in [loss, keypoints, scores, descriptors, mask, hidden_states] if v is not None)
        return SuperPointKeypointDescriptionOutput(
            loss=loss,
            keypoints=keypoints,
            scores=scores,
            descriptors=descriptors,
            mask=mask,
            hidden_states=hidden_states,
        )
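

# A minimal smoke-test sketch for unpacking the padded batch output (illustrative only;
# assumes network access to download the checkpoint and the test image, and variable
# names here are not part of the library API):
if __name__ == "__main__":
    import requests
    from PIL import Image

    from transformers import AutoImageProcessor

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    processor = AutoImageProcessor.from_pretrained("magic-leap-community/superpoint")
    model = SuperPointForKeypointDetection.from_pretrained("magic-leap-community/superpoint")
    with torch.no_grad():
        outputs = model(**processor(image, return_tensors="pt"))
    for i in range(outputs.keypoints.shape[0]):
        valid = outputs.mask[i].bool()  # keep only real keypoints, drop padding
        image_keypoints = outputs.keypoints[i][valid]  # (num_keypoints_i, 2), (x, y)
        image_descriptors = outputs.descriptors[i][valid]  # (num_keypoints_i, descriptor_decoder_dim)
        print(image_keypoints.shape, image_descriptors.shape)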