# pooling.py — pooling layer modules (MaxPool, MaxUnpool, AvgPool variants).
from typing import List, Optional

from torch import Tensor

from .module import Module
from .utils import _single, _pair, _triple
from .. import functional as F
from ..common_types import (_size_any_t, _size_1_t, _size_2_t, _size_3_t,
                            _ratio_3_t, _ratio_2_t, _size_any_opt_t,
                            _size_2_opt_t, _size_3_opt_t)
  8. __all__ = ['MaxPool1d', 'MaxPool2d', 'MaxPool3d', 'MaxUnpool1d', 'MaxUnpool2d', 'MaxUnpool3d',
  9. 'AvgPool1d', 'AvgPool2d', 'AvgPool3d', 'FractionalMaxPool2d', 'FractionalMaxPool3d', 'LPPool1d',
  10. 'LPPool2d', 'LPPool3d', 'AdaptiveMaxPool1d', 'AdaptiveMaxPool2d', 'AdaptiveMaxPool3d',
  11. 'AdaptiveAvgPool1d', 'AdaptiveAvgPool2d', 'AdaptiveAvgPool3d']
  12. class _MaxPoolNd(Module):
  13. __constants__ = ['kernel_size', 'stride', 'padding', 'dilation',
  14. 'return_indices', 'ceil_mode']
  15. return_indices: bool
  16. ceil_mode: bool
  17. def __init__(self, kernel_size: _size_any_t, stride: Optional[_size_any_t] = None,
  18. padding: _size_any_t = 0, dilation: _size_any_t = 1,
  19. return_indices: bool = False, ceil_mode: bool = False) -> None:
  20. super().__init__()
  21. self.kernel_size = kernel_size
  22. self.stride = stride if (stride is not None) else kernel_size
  23. self.padding = padding
  24. self.dilation = dilation
  25. self.return_indices = return_indices
  26. self.ceil_mode = ceil_mode
  27. def extra_repr(self) -> str:
  28. return 'kernel_size={kernel_size}, stride={stride}, padding={padding}' \
  29. ', dilation={dilation}, ceil_mode={ceil_mode}'.format(**self.__dict__)
  30. class MaxPool1d(_MaxPoolNd):
  31. r"""Applies a 1D max pooling over an input signal composed of several input planes.
  32. In the simplest case, the output value of the layer with input size :math:`(N, C, L)`
  33. and output :math:`(N, C, L_{out})` can be precisely described as:
  34. .. math::
  35. out(N_i, C_j, k) = \max_{m=0, \ldots, \text{kernel\_size} - 1}
  36. input(N_i, C_j, stride \times k + m)
  37. If :attr:`padding` is non-zero, then the input is implicitly padded with negative infinity on both sides
  38. for :attr:`padding` number of points. :attr:`dilation` is the stride between the elements within the
  39. sliding window. This `link`_ has a nice visualization of the pooling parameters.
  40. Note:
  41. When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
  42. or the input. Sliding windows that would start in the right padded region are ignored.
  43. Args:
  44. kernel_size: The size of the sliding window, must be > 0.
  45. stride: The stride of the sliding window, must be > 0. Default value is :attr:`kernel_size`.
  46. padding: Implicit negative infinity padding to be added on both sides, must be >= 0 and <= kernel_size / 2.
  47. dilation: The stride between elements within a sliding window, must be > 0.
  48. return_indices: If ``True``, will return the argmax along with the max values.
  49. Useful for :class:`torch.nn.MaxUnpool1d` later
  50. ceil_mode: If ``True``, will use `ceil` instead of `floor` to compute the output shape. This
  51. ensures that every element in the input tensor is covered by a sliding window.
  52. Shape:
  53. - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
  54. - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
  55. .. math::
  56. L_{out} = \left\lfloor \frac{L_{in} + 2 \times \text{padding} - \text{dilation}
  57. \times (\text{kernel\_size} - 1) - 1}{\text{stride}} + 1\right\rfloor
  58. Examples::
  59. >>> # pool of size=3, stride=2
  60. >>> m = nn.MaxPool1d(3, stride=2)
  61. >>> input = torch.randn(20, 16, 50)
  62. >>> output = m(input)
  63. .. _link:
  64. https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
  65. """
  66. kernel_size: _size_1_t
  67. stride: _size_1_t
  68. padding: _size_1_t
  69. dilation: _size_1_t
  70. def forward(self, input: Tensor):
  71. return F.max_pool1d(input, self.kernel_size, self.stride,
  72. self.padding, self.dilation, ceil_mode=self.ceil_mode,
  73. return_indices=self.return_indices)
  74. class MaxPool2d(_MaxPoolNd):
  75. r"""Applies a 2D max pooling over an input signal composed of several input planes.
  76. In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
  77. output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)`
  78. can be precisely described as:
  79. .. math::
  80. \begin{aligned}
  81. out(N_i, C_j, h, w) ={} & \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\
  82. & \text{input}(N_i, C_j, \text{stride[0]} \times h + m,
  83. \text{stride[1]} \times w + n)
  84. \end{aligned}
  85. If :attr:`padding` is non-zero, then the input is implicitly padded with negative infinity on both sides
  86. for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points.
  87. It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
  88. Note:
  89. When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
  90. or the input. Sliding windows that would start in the right padded region are ignored.
  91. The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
  92. - a single ``int`` -- in which case the same value is used for the height and width dimension
  93. - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
  94. and the second `int` for the width dimension
  95. Args:
  96. kernel_size: the size of the window to take a max over
  97. stride: the stride of the window. Default value is :attr:`kernel_size`
  98. padding: Implicit negative infinity padding to be added on both sides
  99. dilation: a parameter that controls the stride of elements in the window
  100. return_indices: if ``True``, will return the max indices along with the outputs.
  101. Useful for :class:`torch.nn.MaxUnpool2d` later
  102. ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
  103. Shape:
  104. - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`
  105. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
  106. .. math::
  107. H_{out} = \left\lfloor\frac{H_{in} + 2 * \text{padding[0]} - \text{dilation[0]}
  108. \times (\text{kernel\_size[0]} - 1) - 1}{\text{stride[0]}} + 1\right\rfloor
  109. .. math::
  110. W_{out} = \left\lfloor\frac{W_{in} + 2 * \text{padding[1]} - \text{dilation[1]}
  111. \times (\text{kernel\_size[1]} - 1) - 1}{\text{stride[1]}} + 1\right\rfloor
  112. Examples::
  113. >>> # pool of square window of size=3, stride=2
  114. >>> m = nn.MaxPool2d(3, stride=2)
  115. >>> # pool of non-square window
  116. >>> m = nn.MaxPool2d((3, 2), stride=(2, 1))
  117. >>> input = torch.randn(20, 16, 50, 32)
  118. >>> output = m(input)
  119. .. _link:
  120. https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
  121. """
  122. kernel_size: _size_2_t
  123. stride: _size_2_t
  124. padding: _size_2_t
  125. dilation: _size_2_t
  126. def forward(self, input: Tensor):
  127. return F.max_pool2d(input, self.kernel_size, self.stride,
  128. self.padding, self.dilation, ceil_mode=self.ceil_mode,
  129. return_indices=self.return_indices)
  130. class MaxPool3d(_MaxPoolNd):
  131. r"""Applies a 3D max pooling over an input signal composed of several input planes.
  132. In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`,
  133. output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)`
  134. can be precisely described as:
  135. .. math::
  136. \begin{aligned}
  137. \text{out}(N_i, C_j, d, h, w) ={} & \max_{k=0, \ldots, kD-1} \max_{m=0, \ldots, kH-1} \max_{n=0, \ldots, kW-1} \\
  138. & \text{input}(N_i, C_j, \text{stride[0]} \times d + k,
  139. \text{stride[1]} \times h + m, \text{stride[2]} \times w + n)
  140. \end{aligned}
  141. If :attr:`padding` is non-zero, then the input is implicitly padded with negative infinity on both sides
  142. for :attr:`padding` number of points. :attr:`dilation` controls the spacing between the kernel points.
  143. It is harder to describe, but this `link`_ has a nice visualization of what :attr:`dilation` does.
  144. Note:
  145. When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
  146. or the input. Sliding windows that would start in the right padded region are ignored.
  147. The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding`, :attr:`dilation` can either be:
  148. - a single ``int`` -- in which case the same value is used for the depth, height and width dimension
  149. - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
  150. the second `int` for the height dimension and the third `int` for the width dimension
  151. Args:
  152. kernel_size: the size of the window to take a max over
  153. stride: the stride of the window. Default value is :attr:`kernel_size`
  154. padding: Implicit negative infinity padding to be added on all three sides
  155. dilation: a parameter that controls the stride of elements in the window
  156. return_indices: if ``True``, will return the max indices along with the outputs.
  157. Useful for :class:`torch.nn.MaxUnpool3d` later
  158. ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
  159. Shape:
  160. - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
  161. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where
  162. .. math::
  163. D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] - \text{dilation}[0] \times
  164. (\text{kernel\_size}[0] - 1) - 1}{\text{stride}[0]} + 1\right\rfloor
  165. .. math::
  166. H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] - \text{dilation}[1] \times
  167. (\text{kernel\_size}[1] - 1) - 1}{\text{stride}[1]} + 1\right\rfloor
  168. .. math::
  169. W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] - \text{dilation}[2] \times
  170. (\text{kernel\_size}[2] - 1) - 1}{\text{stride}[2]} + 1\right\rfloor
  171. Examples::
  172. >>> # pool of square window of size=3, stride=2
  173. >>> m = nn.MaxPool3d(3, stride=2)
  174. >>> # pool of non-square window
  175. >>> m = nn.MaxPool3d((3, 2, 2), stride=(2, 1, 2))
  176. >>> input = torch.randn(20, 16, 50, 44, 31)
  177. >>> output = m(input)
  178. .. _link:
  179. https://github.com/vdumoulin/conv_arithmetic/blob/master/README.md
  180. """ # noqa: E501
  181. kernel_size: _size_3_t
  182. stride: _size_3_t
  183. padding: _size_3_t
  184. dilation: _size_3_t
  185. def forward(self, input: Tensor):
  186. return F.max_pool3d(input, self.kernel_size, self.stride,
  187. self.padding, self.dilation, ceil_mode=self.ceil_mode,
  188. return_indices=self.return_indices)
  189. class _MaxUnpoolNd(Module):
  190. def extra_repr(self) -> str:
  191. return f'kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}'
  192. class MaxUnpool1d(_MaxUnpoolNd):
  193. r"""Computes a partial inverse of :class:`MaxPool1d`.
  194. :class:`MaxPool1d` is not fully invertible, since the non-maximal values are lost.
  195. :class:`MaxUnpool1d` takes in as input the output of :class:`MaxPool1d`
  196. including the indices of the maximal values and computes a partial inverse
  197. in which all non-maximal values are set to zero.
  198. Note:
  199. This operation may behave nondeterministically when the input indices has repeat values.
  200. See https://github.com/pytorch/pytorch/issues/80827 and :doc:`/notes/randomness` for more information.
  201. .. note:: :class:`MaxPool1d` can map several input sizes to the same output
  202. sizes. Hence, the inversion process can get ambiguous.
  203. To accommodate this, you can provide the needed output size
  204. as an additional argument :attr:`output_size` in the forward call.
  205. See the Inputs and Example below.
  206. Args:
  207. kernel_size (int or tuple): Size of the max pooling window.
  208. stride (int or tuple): Stride of the max pooling window.
  209. It is set to :attr:`kernel_size` by default.
  210. padding (int or tuple): Padding that was added to the input
  211. Inputs:
  212. - `input`: the input Tensor to invert
  213. - `indices`: the indices given out by :class:`~torch.nn.MaxPool1d`
  214. - `output_size` (optional): the targeted output size
  215. Shape:
  216. - Input: :math:`(N, C, H_{in})` or :math:`(C, H_{in})`.
  217. - Output: :math:`(N, C, H_{out})` or :math:`(C, H_{out})`, where
  218. .. math::
  219. H_{out} = (H_{in} - 1) \times \text{stride}[0] - 2 \times \text{padding}[0] + \text{kernel\_size}[0]
  220. or as given by :attr:`output_size` in the call operator
  221. Example::
  222. >>> # xdoctest: +IGNORE_WANT("do other tests modify the global state?")
  223. >>> pool = nn.MaxPool1d(2, stride=2, return_indices=True)
  224. >>> unpool = nn.MaxUnpool1d(2, stride=2)
  225. >>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8]]])
  226. >>> output, indices = pool(input)
  227. >>> unpool(output, indices)
  228. tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8.]]])
  229. >>> # Example showcasing the use of output_size
  230. >>> input = torch.tensor([[[1., 2, 3, 4, 5, 6, 7, 8, 9]]])
  231. >>> output, indices = pool(input)
  232. >>> unpool(output, indices, output_size=input.size())
  233. tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8., 0.]]])
  234. >>> unpool(output, indices)
  235. tensor([[[ 0., 2., 0., 4., 0., 6., 0., 8.]]])
  236. """
  237. kernel_size: _size_1_t
  238. stride: _size_1_t
  239. padding: _size_1_t
  240. def __init__(self, kernel_size: _size_1_t, stride: Optional[_size_1_t] = None, padding: _size_1_t = 0) -> None:
  241. super().__init__()
  242. self.kernel_size = _single(kernel_size)
  243. self.stride = _single(stride if (stride is not None) else kernel_size)
  244. self.padding = _single(padding)
  245. def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
  246. return F.max_unpool1d(input, indices, self.kernel_size, self.stride,
  247. self.padding, output_size)
  248. class MaxUnpool2d(_MaxUnpoolNd):
  249. r"""Computes a partial inverse of :class:`MaxPool2d`.
  250. :class:`MaxPool2d` is not fully invertible, since the non-maximal values are lost.
  251. :class:`MaxUnpool2d` takes in as input the output of :class:`MaxPool2d`
  252. including the indices of the maximal values and computes a partial inverse
  253. in which all non-maximal values are set to zero.
  254. Note:
  255. This operation may behave nondeterministically when the input indices has repeat values.
  256. See https://github.com/pytorch/pytorch/issues/80827 and :doc:`/notes/randomness` for more information.
  257. .. note:: :class:`MaxPool2d` can map several input sizes to the same output
  258. sizes. Hence, the inversion process can get ambiguous.
  259. To accommodate this, you can provide the needed output size
  260. as an additional argument :attr:`output_size` in the forward call.
  261. See the Inputs and Example below.
  262. Args:
  263. kernel_size (int or tuple): Size of the max pooling window.
  264. stride (int or tuple): Stride of the max pooling window.
  265. It is set to :attr:`kernel_size` by default.
  266. padding (int or tuple): Padding that was added to the input
  267. Inputs:
  268. - `input`: the input Tensor to invert
  269. - `indices`: the indices given out by :class:`~torch.nn.MaxPool2d`
  270. - `output_size` (optional): the targeted output size
  271. Shape:
  272. - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
  273. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
  274. .. math::
  275. H_{out} = (H_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]}
  276. .. math::
  277. W_{out} = (W_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]}
  278. or as given by :attr:`output_size` in the call operator
  279. Example::
  280. >>> pool = nn.MaxPool2d(2, stride=2, return_indices=True)
  281. >>> unpool = nn.MaxUnpool2d(2, stride=2)
  282. >>> input = torch.tensor([[[[ 1., 2., 3., 4.],
  283. [ 5., 6., 7., 8.],
  284. [ 9., 10., 11., 12.],
  285. [13., 14., 15., 16.]]]])
  286. >>> output, indices = pool(input)
  287. >>> unpool(output, indices)
  288. tensor([[[[ 0., 0., 0., 0.],
  289. [ 0., 6., 0., 8.],
  290. [ 0., 0., 0., 0.],
  291. [ 0., 14., 0., 16.]]]])
  292. >>> # Now using output_size to resolve an ambiguous size for the inverse
  293. >>> input = torch.tensor([[[[ 1., 2., 3., 4., 5.],
  294. [ 6., 7., 8., 9., 10.],
  295. [11., 12., 13., 14., 15.],
  296. [16., 17., 18., 19., 20.]]]])
  297. >>> output, indices = pool(input)
  298. >>> # This call will not work without specifying output_size
  299. >>> unpool(output, indices, output_size=input.size())
  300. tensor([[[[ 0., 0., 0., 0., 0.],
  301. [ 0., 7., 0., 9., 0.],
  302. [ 0., 0., 0., 0., 0.],
  303. [ 0., 17., 0., 19., 0.]]]])
  304. """
  305. kernel_size: _size_2_t
  306. stride: _size_2_t
  307. padding: _size_2_t
  308. def __init__(self, kernel_size: _size_2_t, stride: Optional[_size_2_t] = None, padding: _size_2_t = 0) -> None:
  309. super().__init__()
  310. self.kernel_size = _pair(kernel_size)
  311. self.stride = _pair(stride if (stride is not None) else kernel_size)
  312. self.padding = _pair(padding)
  313. def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
  314. return F.max_unpool2d(input, indices, self.kernel_size, self.stride,
  315. self.padding, output_size)
  316. class MaxUnpool3d(_MaxUnpoolNd):
  317. r"""Computes a partial inverse of :class:`MaxPool3d`.
  318. :class:`MaxPool3d` is not fully invertible, since the non-maximal values are lost.
  319. :class:`MaxUnpool3d` takes in as input the output of :class:`MaxPool3d`
  320. including the indices of the maximal values and computes a partial inverse
  321. in which all non-maximal values are set to zero.
  322. Note:
  323. This operation may behave nondeterministically when the input indices has repeat values.
  324. See https://github.com/pytorch/pytorch/issues/80827 and :doc:`/notes/randomness` for more information.
  325. .. note:: :class:`MaxPool3d` can map several input sizes to the same output
  326. sizes. Hence, the inversion process can get ambiguous.
  327. To accommodate this, you can provide the needed output size
  328. as an additional argument :attr:`output_size` in the forward call.
  329. See the Inputs section below.
  330. Args:
  331. kernel_size (int or tuple): Size of the max pooling window.
  332. stride (int or tuple): Stride of the max pooling window.
  333. It is set to :attr:`kernel_size` by default.
  334. padding (int or tuple): Padding that was added to the input
  335. Inputs:
  336. - `input`: the input Tensor to invert
  337. - `indices`: the indices given out by :class:`~torch.nn.MaxPool3d`
  338. - `output_size` (optional): the targeted output size
  339. Shape:
  340. - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
  341. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`, where
  342. .. math::
  343. D_{out} = (D_{in} - 1) \times \text{stride[0]} - 2 \times \text{padding[0]} + \text{kernel\_size[0]}
  344. .. math::
  345. H_{out} = (H_{in} - 1) \times \text{stride[1]} - 2 \times \text{padding[1]} + \text{kernel\_size[1]}
  346. .. math::
  347. W_{out} = (W_{in} - 1) \times \text{stride[2]} - 2 \times \text{padding[2]} + \text{kernel\_size[2]}
  348. or as given by :attr:`output_size` in the call operator
  349. Example::
  350. >>> # pool of square window of size=3, stride=2
  351. >>> pool = nn.MaxPool3d(3, stride=2, return_indices=True)
  352. >>> unpool = nn.MaxUnpool3d(3, stride=2)
  353. >>> output, indices = pool(torch.randn(20, 16, 51, 33, 15))
  354. >>> unpooled_output = unpool(output, indices)
  355. >>> unpooled_output.size()
  356. torch.Size([20, 16, 51, 33, 15])
  357. """
  358. kernel_size: _size_3_t
  359. stride: _size_3_t
  360. padding: _size_3_t
  361. def __init__(self, kernel_size: _size_3_t, stride: Optional[_size_3_t] = None, padding: _size_3_t = 0) -> None:
  362. super().__init__()
  363. self.kernel_size = _triple(kernel_size)
  364. self.stride = _triple(stride if (stride is not None) else kernel_size)
  365. self.padding = _triple(padding)
  366. def forward(self, input: Tensor, indices: Tensor, output_size: Optional[List[int]] = None) -> Tensor:
  367. return F.max_unpool3d(input, indices, self.kernel_size, self.stride,
  368. self.padding, output_size)
  369. class _AvgPoolNd(Module):
  370. __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad']
  371. def extra_repr(self) -> str:
  372. return f'kernel_size={self.kernel_size}, stride={self.stride}, padding={self.padding}'
  373. class AvgPool1d(_AvgPoolNd):
  374. r"""Applies a 1D average pooling over an input signal composed of several input planes.
  375. In the simplest case, the output value of the layer with input size :math:`(N, C, L)`,
  376. output :math:`(N, C, L_{out})` and :attr:`kernel_size` :math:`k`
  377. can be precisely described as:
  378. .. math::
  379. \text{out}(N_i, C_j, l) = \frac{1}{k} \sum_{m=0}^{k-1}
  380. \text{input}(N_i, C_j, \text{stride} \times l + m)
  381. If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
  382. for :attr:`padding` number of points.
  383. Note:
  384. When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
  385. or the input. Sliding windows that would start in the right padded region are ignored.
  386. The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can each be
  387. an ``int`` or a one-element tuple.
  388. Args:
  389. kernel_size: the size of the window
  390. stride: the stride of the window. Default value is :attr:`kernel_size`
  391. padding: implicit zero padding to be added on both sides
  392. ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
  393. count_include_pad: when True, will include the zero-padding in the averaging calculation
  394. Shape:
  395. - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
  396. - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
  397. .. math::
  398. L_{out} = \left\lfloor \frac{L_{in} +
  399. 2 \times \text{padding} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor
  400. Per the note above, if ``ceil_mode`` is True and :math:`(L_{out} - 1) \times \text{stride} \geq L_{in}
  401. + \text{padding}`, we skip the last window as it would start in the right padded region, resulting in
  402. :math:`L_{out}` being reduced by one.
  403. Examples::
  404. >>> # pool with window of size=3, stride=2
  405. >>> m = nn.AvgPool1d(3, stride=2)
  406. >>> m(torch.tensor([[[1., 2, 3, 4, 5, 6, 7]]]))
  407. tensor([[[2., 4., 6.]]])
  408. """
  409. kernel_size: _size_1_t
  410. stride: _size_1_t
  411. padding: _size_1_t
  412. ceil_mode: bool
  413. count_include_pad: bool
  414. def __init__(self, kernel_size: _size_1_t, stride: _size_1_t = None, padding: _size_1_t = 0, ceil_mode: bool = False,
  415. count_include_pad: bool = True) -> None:
  416. super().__init__()
  417. self.kernel_size = _single(kernel_size)
  418. self.stride = _single(stride if stride is not None else kernel_size)
  419. self.padding = _single(padding)
  420. self.ceil_mode = ceil_mode
  421. self.count_include_pad = count_include_pad
  422. def forward(self, input: Tensor) -> Tensor:
  423. return F.avg_pool1d(
  424. input, self.kernel_size, self.stride, self.padding, self.ceil_mode,
  425. self.count_include_pad)
  426. class AvgPool2d(_AvgPoolNd):
  427. r"""Applies a 2D average pooling over an input signal composed of several input planes.
  428. In the simplest case, the output value of the layer with input size :math:`(N, C, H, W)`,
  429. output :math:`(N, C, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kH, kW)`
  430. can be precisely described as:
  431. .. math::
  432. out(N_i, C_j, h, w) = \frac{1}{kH * kW} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1}
  433. input(N_i, C_j, stride[0] \times h + m, stride[1] \times w + n)
  434. If :attr:`padding` is non-zero, then the input is implicitly zero-padded on both sides
  435. for :attr:`padding` number of points.
  436. Note:
  437. When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
  438. or the input. Sliding windows that would start in the right padded region are ignored.
  439. The parameters :attr:`kernel_size`, :attr:`stride`, :attr:`padding` can either be:
  440. - a single ``int`` -- in which case the same value is used for the height and width dimension
  441. - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
  442. and the second `int` for the width dimension
  443. Args:
  444. kernel_size: the size of the window
  445. stride: the stride of the window. Default value is :attr:`kernel_size`
  446. padding: implicit zero padding to be added on both sides
  447. ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
  448. count_include_pad: when True, will include the zero-padding in the averaging calculation
  449. divisor_override: if specified, it will be used as divisor, otherwise size of the pooling region will be used.
  450. Shape:
  451. - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
  452. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
  453. .. math::
  454. H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[0] -
  455. \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
  456. .. math::
  457. W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[1] -
  458. \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
  459. Per the note above, if ``ceil_mode`` is True and :math:`(H_{out} - 1)\times \text{stride}[0]\geq H_{in}
  460. + \text{padding}[0]`, we skip the last window as it would start in the bottom padded region,
  461. resulting in :math:`H_{out}` being reduced by one.
  462. The same applies for :math:`W_{out}`.
  463. Examples::
  464. >>> # pool of square window of size=3, stride=2
  465. >>> m = nn.AvgPool2d(3, stride=2)
  466. >>> # pool of non-square window
  467. >>> m = nn.AvgPool2d((3, 2), stride=(2, 1))
  468. >>> input = torch.randn(20, 16, 50, 32)
  469. >>> output = m(input)
  470. """
  471. __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']
  472. kernel_size: _size_2_t
  473. stride: _size_2_t
  474. padding: _size_2_t
  475. ceil_mode: bool
  476. count_include_pad: bool
  477. def __init__(self, kernel_size: _size_2_t, stride: Optional[_size_2_t] = None, padding: _size_2_t = 0,
  478. ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> None:
  479. super().__init__()
  480. self.kernel_size = kernel_size
  481. self.stride = stride if (stride is not None) else kernel_size
  482. self.padding = padding
  483. self.ceil_mode = ceil_mode
  484. self.count_include_pad = count_include_pad
  485. self.divisor_override = divisor_override
  486. def forward(self, input: Tensor) -> Tensor:
  487. return F.avg_pool2d(input, self.kernel_size, self.stride,
  488. self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override)
  489. class AvgPool3d(_AvgPoolNd):
  490. r"""Applies a 3D average pooling over an input signal composed of several input planes.
  491. In the simplest case, the output value of the layer with input size :math:`(N, C, D, H, W)`,
  492. output :math:`(N, C, D_{out}, H_{out}, W_{out})` and :attr:`kernel_size` :math:`(kD, kH, kW)`
  493. can be precisely described as:
  494. .. math::
  495. \begin{aligned}
  496. \text{out}(N_i, C_j, d, h, w) ={} & \sum_{k=0}^{kD-1} \sum_{m=0}^{kH-1} \sum_{n=0}^{kW-1} \\
  497. & \frac{\text{input}(N_i, C_j, \text{stride}[0] \times d + k,
  498. \text{stride}[1] \times h + m, \text{stride}[2] \times w + n)}
  499. {kD \times kH \times kW}
  500. \end{aligned}
  501. If :attr:`padding` is non-zero, then the input is implicitly zero-padded on all three sides
  502. for :attr:`padding` number of points.
  503. Note:
  504. When ceil_mode=True, sliding windows are allowed to go off-bounds if they start within the left padding
  505. or the input. Sliding windows that would start in the right padded region are ignored.
  506. The parameters :attr:`kernel_size`, :attr:`stride` can either be:
  507. - a single ``int`` -- in which case the same value is used for the depth, height and width dimension
  508. - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
  509. the second `int` for the height dimension and the third `int` for the width dimension
  510. Args:
  511. kernel_size: the size of the window
  512. stride: the stride of the window. Default value is :attr:`kernel_size`
  513. padding: implicit zero padding to be added on all three sides
  514. ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
  515. count_include_pad: when True, will include the zero-padding in the averaging calculation
  516. divisor_override: if specified, it will be used as divisor, otherwise :attr:`kernel_size` will be used
  517. Shape:
  518. - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
  519. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
  520. :math:`(C, D_{out}, H_{out}, W_{out})`, where
  521. .. math::
  522. D_{out} = \left\lfloor\frac{D_{in} + 2 \times \text{padding}[0] -
  523. \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
  524. .. math::
  525. H_{out} = \left\lfloor\frac{H_{in} + 2 \times \text{padding}[1] -
  526. \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
  527. .. math::
  528. W_{out} = \left\lfloor\frac{W_{in} + 2 \times \text{padding}[2] -
  529. \text{kernel\_size}[2]}{\text{stride}[2]} + 1\right\rfloor
  530. Per the note above, if ``ceil_mode`` is True and :math:`(D_{out} - 1)\times \text{stride}[0]\geq D_{in}
  531. + \text{padding}[0]`, we skip the last window as it would start in the padded region,
  532. resulting in :math:`D_{out}` being reduced by one.
  533. The same applies for :math:`W_{out}` and :math:`H_{out}`.
  534. Examples::
  535. >>> # pool of square window of size=3, stride=2
  536. >>> m = nn.AvgPool3d(3, stride=2)
  537. >>> # pool of non-square window
  538. >>> m = nn.AvgPool3d((3, 2, 2), stride=(2, 1, 2))
  539. >>> input = torch.randn(20, 16, 50, 44, 31)
  540. >>> output = m(input)
  541. """
  542. __constants__ = ['kernel_size', 'stride', 'padding', 'ceil_mode', 'count_include_pad', 'divisor_override']
  543. kernel_size: _size_3_t
  544. stride: _size_3_t
  545. padding: _size_3_t
  546. ceil_mode: bool
  547. count_include_pad: bool
  548. def __init__(self, kernel_size: _size_3_t, stride: Optional[_size_3_t] = None, padding: _size_3_t = 0,
  549. ceil_mode: bool = False, count_include_pad: bool = True, divisor_override: Optional[int] = None) -> None:
  550. super().__init__()
  551. self.kernel_size = kernel_size
  552. self.stride = stride if (stride is not None) else kernel_size
  553. self.padding = padding
  554. self.ceil_mode = ceil_mode
  555. self.count_include_pad = count_include_pad
  556. self.divisor_override = divisor_override
  557. def forward(self, input: Tensor) -> Tensor:
  558. return F.avg_pool3d(input, self.kernel_size, self.stride,
  559. self.padding, self.ceil_mode, self.count_include_pad, self.divisor_override)
  560. def __setstate__(self, d):
  561. super().__setstate__(d)
  562. self.__dict__.setdefault('padding', 0)
  563. self.__dict__.setdefault('ceil_mode', False)
  564. self.__dict__.setdefault('count_include_pad', True)
  565. class FractionalMaxPool2d(Module):
  566. r"""Applies a 2D fractional max pooling over an input signal composed of several input planes.
  567. Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
  568. The max-pooling operation is applied in :math:`kH \times kW` regions by a stochastic
  569. step size determined by the target output size.
  570. The number of output features is equal to the number of input planes.
  571. .. note:: Exactly one of ``output_size`` or ``output_ratio`` must be defined.
  572. Args:
  573. kernel_size: the size of the window to take a max over.
  574. Can be a single number k (for a square kernel of k x k) or a tuple `(kh, kw)`
  575. output_size: the target output size of the image of the form `oH x oW`.
  576. Can be a tuple `(oH, oW)` or a single number oH for a square image `oH x oH`.
  577. Note that we must have :math:`kH + oH - 1 <= H_{in}` and :math:`kW + oW - 1 <= W_{in}`
  578. output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
  579. This has to be a number or tuple in the range (0, 1).
  580. Note that we must have :math:`kH + (output\_ratio\_H * H_{in}) - 1 <= H_{in}`
  581. and :math:`kW + (output\_ratio\_W * W_{in}) - 1 <= W_{in}`
  582. return_indices: if ``True``, will return the indices along with the outputs.
  583. Useful to pass to :meth:`nn.MaxUnpool2d`. Default: ``False``
  584. Shape:
  585. - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
  586. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
  587. :math:`(H_{out}, W_{out})=\text{output\_size}` or
  588. :math:`(H_{out}, W_{out})=\text{output\_ratio} \times (H_{in}, W_{in})`.
  589. Examples:
  590. >>> # pool of square window of size=3, and target output size 13x12
  591. >>> m = nn.FractionalMaxPool2d(3, output_size=(13, 12))
  592. >>> # pool of square window and target output size being half of input image size
  593. >>> m = nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5))
  594. >>> input = torch.randn(20, 16, 50, 32)
  595. >>> output = m(input)
  596. .. _Fractional MaxPooling:
  597. https://arxiv.org/abs/1412.6071
  598. """
  599. __constants__ = ['kernel_size', 'return_indices', 'output_size',
  600. 'output_ratio']
  601. kernel_size: _size_2_t
  602. return_indices: bool
  603. output_size: _size_2_t
  604. output_ratio: _ratio_2_t
  605. def __init__(self, kernel_size: _size_2_t, output_size: Optional[_size_2_t] = None,
  606. output_ratio: Optional[_ratio_2_t] = None,
  607. return_indices: bool = False, _random_samples=None) -> None:
  608. super().__init__()
  609. self.kernel_size = _pair(kernel_size)
  610. self.return_indices = return_indices
  611. self.register_buffer('_random_samples', _random_samples)
  612. self.output_size = _pair(output_size) if output_size is not None else None
  613. self.output_ratio = _pair(output_ratio) if output_ratio is not None else None
  614. if output_size is None and output_ratio is None:
  615. raise ValueError("FractionalMaxPool2d requires specifying either "
  616. "an output size, or a pooling ratio")
  617. if output_size is not None and output_ratio is not None:
  618. raise ValueError("only one of output_size and output_ratio may be specified")
  619. if self.output_ratio is not None:
  620. if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1):
  621. raise ValueError(f"output_ratio must be between 0 and 1 (got {output_ratio})")
  622. def forward(self, input: Tensor):
  623. return F.fractional_max_pool2d(
  624. input, self.kernel_size, self.output_size, self.output_ratio,
  625. self.return_indices,
  626. _random_samples=self._random_samples)
  627. class FractionalMaxPool3d(Module):
  628. r"""Applies a 3D fractional max pooling over an input signal composed of several input planes.
  629. Fractional MaxPooling is described in detail in the paper `Fractional MaxPooling`_ by Ben Graham
  630. The max-pooling operation is applied in :math:`kT \times kH \times kW` regions by a stochastic
  631. step size determined by the target output size.
  632. The number of output features is equal to the number of input planes.
  633. .. note:: Exactly one of ``output_size`` or ``output_ratio`` must be defined.
  634. Args:
  635. kernel_size: the size of the window to take a max over.
  636. Can be a single number k (for a square kernel of k x k x k) or a tuple `(kt x kh x kw)`
  637. output_size: the target output size of the image of the form `oT x oH x oW`.
  638. Can be a tuple `(oT, oH, oW)` or a single number oH for a square image `oH x oH x oH`
  639. output_ratio: If one wants to have an output size as a ratio of the input size, this option can be given.
  640. This has to be a number or tuple in the range (0, 1)
  641. return_indices: if ``True``, will return the indices along with the outputs.
  642. Useful to pass to :meth:`nn.MaxUnpool3d`. Default: ``False``
  643. Shape:
  644. - Input: :math:`(N, C, T_{in}, H_{in}, W_{in})` or :math:`(C, T_{in}, H_{in}, W_{in})`.
  645. - Output: :math:`(N, C, T_{out}, H_{out}, W_{out})` or :math:`(C, T_{out}, H_{out}, W_{out})`, where
  646. :math:`(T_{out}, H_{out}, W_{out})=\text{output\_size}` or
  647. :math:`(T_{out}, H_{out}, W_{out})=\text{output\_ratio} \times (T_{in}, H_{in}, W_{in})`
  648. Examples:
  649. >>> # pool of cubic window of size=3, and target output size 13x12x11
  650. >>> m = nn.FractionalMaxPool3d(3, output_size=(13, 12, 11))
  651. >>> # pool of cubic window and target output size being half of input size
  652. >>> m = nn.FractionalMaxPool3d(3, output_ratio=(0.5, 0.5, 0.5))
  653. >>> input = torch.randn(20, 16, 50, 32, 16)
  654. >>> output = m(input)
  655. .. _Fractional MaxPooling:
  656. https://arxiv.org/abs/1412.6071
  657. """
  658. __constants__ = ['kernel_size', 'return_indices', 'output_size',
  659. 'output_ratio']
  660. kernel_size: _size_3_t
  661. return_indices: bool
  662. output_size: _size_3_t
  663. output_ratio: _ratio_3_t
  664. def __init__(self, kernel_size: _size_3_t, output_size: Optional[_size_3_t] = None,
  665. output_ratio: Optional[_ratio_3_t] = None,
  666. return_indices: bool = False, _random_samples=None) -> None:
  667. super().__init__()
  668. self.kernel_size = _triple(kernel_size)
  669. self.return_indices = return_indices
  670. self.register_buffer('_random_samples', _random_samples)
  671. self.output_size = _triple(output_size) if output_size is not None else None
  672. self.output_ratio = _triple(output_ratio) if output_ratio is not None else None
  673. if output_size is None and output_ratio is None:
  674. raise ValueError("FractionalMaxPool3d requires specifying either "
  675. "an output size, or a pooling ratio")
  676. if output_size is not None and output_ratio is not None:
  677. raise ValueError("only one of output_size and output_ratio may be specified")
  678. if self.output_ratio is not None:
  679. if not (0 < self.output_ratio[0] < 1 and 0 < self.output_ratio[1] < 1 and 0 < self.output_ratio[2] < 1):
  680. raise ValueError(f"output_ratio must be between 0 and 1 (got {output_ratio})")
  681. def forward(self, input: Tensor):
  682. return F.fractional_max_pool3d(
  683. input, self.kernel_size, self.output_size, self.output_ratio,
  684. self.return_indices,
  685. _random_samples=self._random_samples)
  686. class _LPPoolNd(Module):
  687. __constants__ = ['norm_type', 'kernel_size', 'stride', 'ceil_mode']
  688. norm_type: float
  689. ceil_mode: bool
  690. def __init__(self, norm_type: float, kernel_size: _size_any_t, stride: Optional[_size_any_t] = None,
  691. ceil_mode: bool = False) -> None:
  692. super().__init__()
  693. self.norm_type = norm_type
  694. self.kernel_size = kernel_size
  695. self.stride = stride
  696. self.ceil_mode = ceil_mode
  697. def extra_repr(self) -> str:
  698. return 'norm_type={norm_type}, kernel_size={kernel_size}, stride={stride}, ' \
  699. 'ceil_mode={ceil_mode}'.format(**self.__dict__)
  700. class LPPool1d(_LPPoolNd):
  701. r"""Applies a 1D power-average pooling over an input signal composed of several input planes.
  702. On each window, the function computed is:
  703. .. math::
  704. f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
  705. - At p = :math:`\infty`, one gets Max Pooling
  706. - At p = 1, one gets Sum Pooling (which is proportional to Average Pooling)
  707. .. note:: If the sum to the power of `p` is zero, the gradient of this function is
  708. not defined. This implementation will set the gradient to zero in this case.
  709. Args:
  710. kernel_size: a single int, the size of the window
  711. stride: a single int, the stride of the window. Default value is :attr:`kernel_size`
  712. ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
  713. Shape:
  714. - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
  715. - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
  716. .. math::
  717. L_{out} = \left\lfloor\frac{L_{in} - \text{kernel\_size}}{\text{stride}} + 1\right\rfloor
  718. Examples::
  719. >>> # power-2 pool of window of length 3, with stride 2.
  720. >>> m = nn.LPPool1d(2, 3, stride=2)
  721. >>> input = torch.randn(20, 16, 50)
  722. >>> output = m(input)
  723. """
  724. kernel_size: _size_1_t
  725. stride: _size_1_t
  726. def forward(self, input: Tensor) -> Tensor:
  727. return F.lp_pool1d(input, float(self.norm_type), self.kernel_size,
  728. self.stride, self.ceil_mode)
  729. class LPPool2d(_LPPoolNd):
  730. r"""Applies a 2D power-average pooling over an input signal composed of several input planes.
  731. On each window, the function computed is:
  732. .. math::
  733. f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
  734. - At p = :math:`\infty`, one gets Max Pooling
  735. - At p = 1, one gets Sum Pooling (which is proportional to average pooling)
  736. The parameters :attr:`kernel_size`, :attr:`stride` can either be:
  737. - a single ``int`` -- in which case the same value is used for the height and width dimension
  738. - a ``tuple`` of two ints -- in which case, the first `int` is used for the height dimension,
  739. and the second `int` for the width dimension
  740. .. note:: If the sum to the power of `p` is zero, the gradient of this function is
  741. not defined. This implementation will set the gradient to zero in this case.
  742. Args:
  743. kernel_size: the size of the window
  744. stride: the stride of the window. Default value is :attr:`kernel_size`
  745. ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
  746. Shape:
  747. - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
  748. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
  749. .. math::
  750. H_{out} = \left\lfloor\frac{H_{in} - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
  751. .. math::
  752. W_{out} = \left\lfloor\frac{W_{in} - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
  753. Examples::
  754. >>> # power-2 pool of square window of size=3, stride=2
  755. >>> m = nn.LPPool2d(2, 3, stride=2)
  756. >>> # pool of non-square window of power 1.2
  757. >>> m = nn.LPPool2d(1.2, (3, 2), stride=(2, 1))
  758. >>> input = torch.randn(20, 16, 50, 32)
  759. >>> output = m(input)
  760. """
  761. kernel_size: _size_2_t
  762. stride: _size_2_t
  763. def forward(self, input: Tensor) -> Tensor:
  764. return F.lp_pool2d(input, float(self.norm_type), self.kernel_size,
  765. self.stride, self.ceil_mode)
  766. class LPPool3d(_LPPoolNd):
  767. r"""Applies a 3D power-average pooling over an input signal composed of several input planes.
  768. On each window, the function computed is:
  769. .. math::
  770. f(X) = \sqrt[p]{\sum_{x \in X} x^{p}}
  771. - At p = :math:`\infty`, one gets Max Pooling
  772. - At p = 1, one gets Sum Pooling (which is proportional to average pooling)
  773. The parameters :attr:`kernel_size`, :attr:`stride` can either be:
  774. - a single ``int`` -- in which case the same value is used for the height, width and depth dimension
  775. - a ``tuple`` of three ints -- in which case, the first `int` is used for the depth dimension,
  776. the second `int` for the height dimension and the third `int` for the width dimension
  777. .. note:: If the sum to the power of `p` is zero, the gradient of this function is
  778. not defined. This implementation will set the gradient to zero in this case.
  779. Args:
  780. kernel_size: the size of the window
  781. stride: the stride of the window. Default value is :attr:`kernel_size`
  782. ceil_mode: when True, will use `ceil` instead of `floor` to compute the output shape
  783. Shape:
  784. - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
  785. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
  786. :math:`(C, D_{out}, H_{out}, W_{out})`, where
  787. .. math::
  788. D_{out} = \left\lfloor\frac{D_{in} - \text{kernel\_size}[0]}{\text{stride}[0]} + 1\right\rfloor
  789. .. math::
  790. H_{out} = \left\lfloor\frac{H_{in} - \text{kernel\_size}[1]}{\text{stride}[1]} + 1\right\rfloor
  791. .. math::
  792. W_{out} = \left\lfloor\frac{W_{in} - \text{kernel\_size}[2]}{\text{stride}[2]} + 1\right\rfloor
  793. Examples::
  794. >>> # power-2 pool of square window of size=3, stride=2
  795. >>> m = nn.LPPool3d(2, 3, stride=2)
  796. >>> # pool of non-square window of power 1.2
  797. >>> m = nn.LPPool3d(1.2, (3, 2, 2), stride=(2, 1, 2))
  798. >>> input = torch.randn(20, 16, 50, 44, 31)
  799. >>> output = m(input)
  800. """
  801. kernel_size: _size_3_t
  802. stride: _size_3_t
  803. def forward(self, input: Tensor) -> Tensor:
  804. return F.lp_pool3d(input, float(self.norm_type), self.kernel_size,
  805. self.stride, self.ceil_mode)
  806. class _AdaptiveMaxPoolNd(Module):
  807. __constants__ = ['output_size', 'return_indices']
  808. return_indices: bool
  809. def __init__(self, output_size: _size_any_opt_t, return_indices: bool = False) -> None:
  810. super().__init__()
  811. self.output_size = output_size
  812. self.return_indices = return_indices
  813. def extra_repr(self) -> str:
  814. return f'output_size={self.output_size}'
  815. # FIXME (by @ssnl): Improve adaptive pooling docs: specify what the input and
  816. # output shapes are, and how the operation computes output.
  817. class AdaptiveMaxPool1d(_AdaptiveMaxPoolNd):
  818. r"""Applies a 1D adaptive max pooling over an input signal composed of several input planes.
  819. The output size is :math:`L_{out}`, for any input size.
  820. The number of output features is equal to the number of input planes.
  821. Args:
  822. output_size: the target output size :math:`L_{out}`.
  823. return_indices: if ``True``, will return the indices along with the outputs.
  824. Useful to pass to nn.MaxUnpool1d. Default: ``False``
  825. Shape:
  826. - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
  827. - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
  828. :math:`L_{out}=\text{output\_size}`.
  829. Examples:
  830. >>> # target output size of 5
  831. >>> m = nn.AdaptiveMaxPool1d(5)
  832. >>> input = torch.randn(1, 64, 8)
  833. >>> output = m(input)
  834. """
  835. output_size: _size_1_t
  836. def forward(self, input: Tensor):
  837. return F.adaptive_max_pool1d(input, self.output_size, self.return_indices)
  838. class AdaptiveMaxPool2d(_AdaptiveMaxPoolNd):
  839. r"""Applies a 2D adaptive max pooling over an input signal composed of several input planes.
  840. The output is of size :math:`H_{out} \times W_{out}`, for any input size.
  841. The number of output features is equal to the number of input planes.
  842. Args:
  843. output_size: the target output size of the image of the form :math:`H_{out} \times W_{out}`.
  844. Can be a tuple :math:`(H_{out}, W_{out})` or a single :math:`H_{out}` for a
  845. square image :math:`H_{out} \times H_{out}`. :math:`H_{out}` and :math:`W_{out}`
  846. can be either a ``int``, or ``None`` which means the size will be the same as that
  847. of the input.
  848. return_indices: if ``True``, will return the indices along with the outputs.
  849. Useful to pass to nn.MaxUnpool2d. Default: ``False``
  850. Shape:
  851. - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
  852. - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where
  853. :math:`(H_{out}, W_{out})=\text{output\_size}`.
  854. Examples:
  855. >>> # target output size of 5x7
  856. >>> m = nn.AdaptiveMaxPool2d((5, 7))
  857. >>> input = torch.randn(1, 64, 8, 9)
  858. >>> output = m(input)
  859. >>> # target output size of 7x7 (square)
  860. >>> m = nn.AdaptiveMaxPool2d(7)
  861. >>> input = torch.randn(1, 64, 10, 9)
  862. >>> output = m(input)
  863. >>> # target output size of 10x7
  864. >>> m = nn.AdaptiveMaxPool2d((None, 7))
  865. >>> input = torch.randn(1, 64, 10, 9)
  866. >>> output = m(input)
  867. """
  868. output_size: _size_2_opt_t
  869. def forward(self, input: Tensor):
  870. return F.adaptive_max_pool2d(input, self.output_size, self.return_indices)
  871. class AdaptiveMaxPool3d(_AdaptiveMaxPoolNd):
  872. r"""Applies a 3D adaptive max pooling over an input signal composed of several input planes.
  873. The output is of size :math:`D_{out} \times H_{out} \times W_{out}`, for any input size.
  874. The number of output features is equal to the number of input planes.
  875. Args:
  876. output_size: the target output size of the image of the form :math:`D_{out} \times H_{out} \times W_{out}`.
  877. Can be a tuple :math:`(D_{out}, H_{out}, W_{out})` or a single
  878. :math:`D_{out}` for a cube :math:`D_{out} \times D_{out} \times D_{out}`.
  879. :math:`D_{out}`, :math:`H_{out}` and :math:`W_{out}` can be either a
  880. ``int``, or ``None`` which means the size will be the same as that of the input.
  881. return_indices: if ``True``, will return the indices along with the outputs.
  882. Useful to pass to nn.MaxUnpool3d. Default: ``False``
  883. Shape:
  884. - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
  885. - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
  886. where :math:`(D_{out}, H_{out}, W_{out})=\text{output\_size}`.
  887. Examples:
  888. >>> # target output size of 5x7x9
  889. >>> m = nn.AdaptiveMaxPool3d((5, 7, 9))
  890. >>> input = torch.randn(1, 64, 8, 9, 10)
  891. >>> output = m(input)
  892. >>> # target output size of 7x7x7 (cube)
  893. >>> m = nn.AdaptiveMaxPool3d(7)
  894. >>> input = torch.randn(1, 64, 10, 9, 8)
  895. >>> output = m(input)
  896. >>> # target output size of 7x9x8
  897. >>> m = nn.AdaptiveMaxPool3d((7, None, None))
  898. >>> input = torch.randn(1, 64, 10, 9, 8)
  899. >>> output = m(input)
  900. """
  901. output_size: _size_3_opt_t
  902. def forward(self, input: Tensor):
  903. return F.adaptive_max_pool3d(input, self.output_size, self.return_indices)
  904. class _AdaptiveAvgPoolNd(Module):
  905. __constants__ = ['output_size']
  906. def __init__(self, output_size: _size_any_opt_t) -> None:
  907. super().__init__()
  908. self.output_size = output_size
  909. def extra_repr(self) -> str:
  910. return f'output_size={self.output_size}'
  911. class AdaptiveAvgPool1d(_AdaptiveAvgPoolNd):
  912. r"""Applies a 1D adaptive average pooling over an input signal composed of several input planes.
  913. The output size is :math:`L_{out}`, for any input size.
  914. The number of output features is equal to the number of input planes.
  915. Args:
  916. output_size: the target output size :math:`L_{out}`.
  917. Shape:
  918. - Input: :math:`(N, C, L_{in})` or :math:`(C, L_{in})`.
  919. - Output: :math:`(N, C, L_{out})` or :math:`(C, L_{out})`, where
  920. :math:`L_{out}=\text{output\_size}`.
  921. Examples:
  922. >>> # target output size of 5
  923. >>> m = nn.AdaptiveAvgPool1d(5)
  924. >>> input = torch.randn(1, 64, 8)
  925. >>> output = m(input)
  926. """
  927. output_size: _size_1_t
  928. def forward(self, input: Tensor) -> Tensor:
  929. return F.adaptive_avg_pool1d(input, self.output_size)
  930. class AdaptiveAvgPool2d(_AdaptiveAvgPoolNd):
  931. r"""Applies a 2D adaptive average pooling over an input signal composed of several input planes.
  932. The output is of size H x W, for any input size.
  933. The number of output features is equal to the number of input planes.
  934. Args:
  935. output_size: the target output size of the image of the form H x W.
  936. Can be a tuple (H, W) or a single H for a square image H x H.
  937. H and W can be either a ``int``, or ``None`` which means the size will
  938. be the same as that of the input.
  939. Shape:
  940. - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
  941. - Output: :math:`(N, C, S_{0}, S_{1})` or :math:`(C, S_{0}, S_{1})`, where
  942. :math:`S=\text{output\_size}`.
  943. Examples:
  944. >>> # target output size of 5x7
  945. >>> m = nn.AdaptiveAvgPool2d((5, 7))
  946. >>> input = torch.randn(1, 64, 8, 9)
  947. >>> output = m(input)
  948. >>> # target output size of 7x7 (square)
  949. >>> m = nn.AdaptiveAvgPool2d(7)
  950. >>> input = torch.randn(1, 64, 10, 9)
  951. >>> output = m(input)
  952. >>> # target output size of 10x7
  953. >>> m = nn.AdaptiveAvgPool2d((None, 7))
  954. >>> input = torch.randn(1, 64, 10, 9)
  955. >>> output = m(input)
  956. """
  957. output_size: _size_2_opt_t
  958. def forward(self, input: Tensor) -> Tensor:
  959. return F.adaptive_avg_pool2d(input, self.output_size)
  960. class AdaptiveAvgPool3d(_AdaptiveAvgPoolNd):
  961. r"""Applies a 3D adaptive average pooling over an input signal composed of several input planes.
  962. The output is of size D x H x W, for any input size.
  963. The number of output features is equal to the number of input planes.
  964. Args:
  965. output_size: the target output size of the form D x H x W.
  966. Can be a tuple (D, H, W) or a single number D for a cube D x D x D.
  967. D, H and W can be either a ``int``, or ``None`` which means the size will
  968. be the same as that of the input.
  969. Shape:
  970. - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
  971. - Output: :math:`(N, C, S_{0}, S_{1}, S_{2})` or :math:`(C, S_{0}, S_{1}, S_{2})`,
  972. where :math:`S=\text{output\_size}`.
  973. Examples:
  974. >>> # target output size of 5x7x9
  975. >>> m = nn.AdaptiveAvgPool3d((5, 7, 9))
  976. >>> input = torch.randn(1, 64, 8, 9, 10)
  977. >>> output = m(input)
  978. >>> # target output size of 7x7x7 (cube)
  979. >>> m = nn.AdaptiveAvgPool3d(7)
  980. >>> input = torch.randn(1, 64, 10, 9, 8)
  981. >>> output = m(input)
  982. >>> # target output size of 7x9x8
  983. >>> m = nn.AdaptiveAvgPool3d((7, None, None))
  984. >>> input = torch.randn(1, 64, 10, 9, 8)
  985. >>> output = m(input)
  986. """
  987. output_size: _size_3_opt_t
  988. def forward(self, input: Tensor) -> Tensor:
  989. return F.adaptive_avg_pool3d(input, self.output_size)