# mypy: allow-untyped-defs
from .module import Module
from .utils import _pair, _quadruple, _ntuple
from .. import functional as F

from torch import Tensor
from ..common_types import _size_2_t, _size_4_t, _size_6_t
from typing import Sequence, Tuple


# TODO: grad_output size asserts in THNN

__all__ = ['CircularPad1d', 'CircularPad2d', 'CircularPad3d', 'ConstantPad1d', 'ConstantPad2d',
           'ConstantPad3d', 'ReflectionPad1d', 'ReflectionPad2d', 'ReflectionPad3d',
           'ReplicationPad1d', 'ReplicationPad2d', 'ReplicationPad3d', 'ZeroPad1d', 'ZeroPad2d',
           'ZeroPad3d']


class _CircularPadNd(Module):
    __constants__ = ['padding']
    padding: Sequence[int]

    def _check_input_dim(self, input):
        raise NotImplementedError

    def forward(self, input: Tensor) -> Tensor:
        self._check_input_dim(input)
        return F.pad(input, self.padding, 'circular')

    def extra_repr(self) -> str:
        return f'{self.padding}'


class CircularPad1d(_CircularPadNd):
    r"""Pads the input tensor using circular padding of the input boundary.

    Tensor values at the beginning of the dimension are used to pad the end,
    and values at the end are used to pad the beginning. If negative padding is
    applied then the ends of the tensor get removed.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If `int`, uses the same
            padding in all boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
        >>> m = nn.CircularPad1d(2)
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
        >>> input
        tensor([[[0., 1., 2., 3.],
                 [4., 5., 6., 7.]]])
        >>> m(input)
        tensor([[[2., 3., 0., 1., 2., 3., 0., 1.],
                 [6., 7., 4., 5., 6., 7., 4., 5.]]])
        >>> # using different paddings for different sides
        >>> m = nn.CircularPad1d((3, 1))
        >>> m(input)
        tensor([[[1., 2., 3., 0., 1., 2., 3., 0.],
                 [5., 6., 7., 4., 5., 6., 7., 4.]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__()
        self.padding = _pair(padding)

    def _check_input_dim(self, input):
        if input.dim() != 2 and input.dim() != 3:
            raise ValueError(
                f"expected 2D or 3D input (got {input.dim()}D input)"
            )
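
# Editorial sketch (not part of the upstream module): for a 1d input, circular
# padding is equivalent to concatenating wrapped edge slices, which is a handy
# way to sanity-check the module's output.
#
#     >>> import torch
#     >>> x = torch.arange(8.).reshape(1, 2, 4)
#     >>> m = CircularPad1d(2)
#     >>> torch.equal(m(x), torch.cat([x[..., -2:], x, x[..., :2]], dim=-1))
#     True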


class CircularPad2d(_CircularPadNd):
    r"""Pads the input tensor using circular padding of the input boundary.

    Tensor values at the beginning of the dimension are used to pad the end,
    and values at the end are used to pad the beginning. If negative padding is
    applied then the ends of the tensor get removed.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If `int`, uses the same
            padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.CircularPad2d(2)
        >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
        >>> input
        tensor([[[[0., 1., 2.],
                  [3., 4., 5.],
                  [6., 7., 8.]]]])
        >>> m(input)
        tensor([[[[4., 5., 3., 4., 5., 3., 4.],
                  [7., 8., 6., 7., 8., 6., 7.],
                  [1., 2., 0., 1., 2., 0., 1.],
                  [4., 5., 3., 4., 5., 3., 4.],
                  [7., 8., 6., 7., 8., 6., 7.],
                  [1., 2., 0., 1., 2., 0., 1.],
                  [4., 5., 3., 4., 5., 3., 4.]]]])
        >>> # using different paddings for different sides
        >>> m = nn.CircularPad2d((1, 1, 2, 0))
        >>> m(input)
        tensor([[[[5., 3., 4., 5., 3.],
                  [8., 6., 7., 8., 6.],
                  [2., 0., 1., 2., 0.],
                  [5., 3., 4., 5., 3.],
                  [8., 6., 7., 8., 6.]]]])
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        super().__init__()
        self.padding = _quadruple(padding)

    def _check_input_dim(self, input):
        if input.dim() != 3 and input.dim() != 4:
            raise ValueError(
                f"expected 3D or 4D input (got {input.dim()}D input)"
            )


class CircularPad3d(_CircularPadNd):
    r"""Pads the input tensor using circular padding of the input boundary.

    Tensor values at the beginning of the dimension are used to pad the end,
    and values at the end are used to pad the beginning. If negative padding is
    applied then the ends of the tensor get removed.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If `int`, uses the same
            padding in all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
          where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.CircularPad3d(3)
        >>> input = torch.randn(16, 3, 8, 320, 480)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.CircularPad3d((3, 3, 6, 6, 1, 1))
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__()
        self.padding = _ntuple(6)(padding)

    def _check_input_dim(self, input):
        if input.dim() != 4 and input.dim() != 5:
            raise ValueError(
                f"expected 4D or 5D input (got {input.dim()}D input)"
            )
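
# Editorial worked example (not in the upstream source): applying the docstring
# shape formulas above to the second example, an input of shape
# (16, 3, 8, 320, 480) with padding (3, 3, 6, 6, 1, 1) gives
#
#     D_out = 8 + 1 + 1     = 10
#     H_out = 320 + 6 + 6   = 332
#     W_out = 480 + 3 + 3   = 486
#
# i.e. an output of shape (16, 3, 10, 332, 486).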


class _ConstantPadNd(Module):
    __constants__ = ['padding', 'value']
    value: float
    padding: Sequence[int]

    def __init__(self, value: float) -> None:
        super().__init__()
        self.value = value

    def forward(self, input: Tensor) -> Tensor:
        return F.pad(input, self.padding, 'constant', self.value)

    def extra_repr(self) -> str:
        return f'padding={self.padding}, value={self.value}'


class ConstantPad1d(_ConstantPadNd):
    r"""Pads the input tensor boundaries with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If `int`, uses the same
            padding in both boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ConstantPad1d(2, 3.5)
        >>> input = torch.randn(1, 2, 4)
        >>> input
        tensor([[[-1.0491, -0.7152, -0.0749,  0.8530],
                 [-1.3287,  1.8966,  0.1466, -0.2771]]])
        >>> m(input)
        tensor([[[ 3.5000,  3.5000, -1.0491, -0.7152, -0.0749,  0.8530,  3.5000,
                   3.5000],
                 [ 3.5000,  3.5000, -1.3287,  1.8966,  0.1466, -0.2771,  3.5000,
                   3.5000]]])
        >>> m = nn.ConstantPad1d(2, 3.5)
        >>> input = torch.randn(1, 2, 3)
        >>> input
        tensor([[[ 1.6616,  1.4523, -1.1255],
                 [-3.6372,  0.1182, -1.8652]]])
        >>> m(input)
        tensor([[[ 3.5000,  3.5000,  1.6616,  1.4523, -1.1255,  3.5000,  3.5000],
                 [ 3.5000,  3.5000, -3.6372,  0.1182, -1.8652,  3.5000,  3.5000]]])
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad1d((3, 1), 3.5)
        >>> m(input)
        tensor([[[ 3.5000,  3.5000,  3.5000,  1.6616,  1.4523, -1.1255,  3.5000],
                 [ 3.5000,  3.5000,  3.5000, -3.6372,  0.1182, -1.8652,  3.5000]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t, value: float):
        super().__init__(value)
        self.padding = _pair(padding)
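
# Editorial sketch (not part of the upstream module): the module form is a thin
# wrapper over the functional call in _ConstantPadNd.forward, so the two are
# interchangeable.
#
#     >>> import torch
#     >>> x = torch.ones(1, 2, 3)
#     >>> m = ConstantPad1d((3, 1), 3.5)
#     >>> torch.equal(m(x), F.pad(x, (3, 1), 'constant', 3.5))
#     True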


class ConstantPad2d(_ConstantPadNd):
    r"""Pads the input tensor boundaries with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If `int`, uses the same
            padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ConstantPad2d(2, 3.5)
        >>> input = torch.randn(1, 2, 2)
        >>> input
        tensor([[[ 1.6585,  0.4320],
                 [-0.8701, -0.4649]]])
        >>> m(input)
        tensor([[[ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  1.6585,  0.4320,  3.5000,  3.5000],
                 [ 3.5000,  3.5000, -0.8701, -0.4649,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000,  3.5000]]])
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad2d((3, 0, 2, 1), 3.5)
        >>> m(input)
        tensor([[[ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000],
                 [ 3.5000,  3.5000,  3.5000,  1.6585,  0.4320],
                 [ 3.5000,  3.5000,  3.5000, -0.8701, -0.4649],
                 [ 3.5000,  3.5000,  3.5000,  3.5000,  3.5000]]])
    """

    __constants__ = ['padding', 'value']
    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t, value: float) -> None:
        super().__init__(value)
        self.padding = _quadruple(padding)


class ConstantPad3d(_ConstantPadNd):
    r"""Pads the input tensor boundaries with a constant value.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If `int`, uses the same
            padding in all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ConstantPad3d(3, 3.5)
        >>> input = torch.randn(16, 3, 10, 20, 30)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ConstantPad3d((3, 3, 6, 6, 0, 1), 3.5)
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t, value: float) -> None:
        super().__init__(value)
        self.padding = _ntuple(6)(padding)


class _ReflectionPadNd(Module):
    __constants__ = ['padding']
    padding: Sequence[int]

    def forward(self, input: Tensor) -> Tensor:
        return F.pad(input, self.padding, 'reflect')

    def extra_repr(self) -> str:
        return f'{self.padding}'


class ReflectionPad1d(_ReflectionPadNd):
    r"""Pads the input tensor using the reflection of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If `int`, uses the same
            padding in all boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReflectionPad1d(2)
        >>> # xdoctest: +IGNORE_WANT("other tests seem to modify printing styles")
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
        >>> input
        tensor([[[0., 1., 2., 3.],
                 [4., 5., 6., 7.]]])
        >>> m(input)
        tensor([[[2., 1., 0., 1., 2., 3., 2., 1.],
                 [6., 5., 4., 5., 6., 7., 6., 5.]]])
        >>> # using different paddings for different sides
        >>> m = nn.ReflectionPad1d((3, 1))
        >>> m(input)
        tensor([[[3., 2., 1., 0., 1., 2., 3., 2.],
                 [7., 6., 5., 4., 5., 6., 7., 6.]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__()
        self.padding = _pair(padding)
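
# Editorial sketch (not part of the upstream module): reflection mirrors around
# the edge element without repeating it, and each pad amount must stay strictly
# smaller than the padded dimension (see the note in ReflectionPad2d below).
#
#     >>> import torch
#     >>> x = torch.arange(4.).reshape(1, 1, 4)
#     >>> ReflectionPad1d(3)(x)    # 3 < 4, valid
#     tensor([[[3., 2., 1., 0., 1., 2., 3., 2., 1., 0.]]])
#     >>> ReflectionPad1d(4)(x)    # 4 >= 4, rejected with a runtime error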


class ReflectionPad2d(_ReflectionPadNd):
    r"""Pads the input tensor using the reflection of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If `int`, uses the same
            padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)
            Note that padding size should be less than the corresponding input dimension.

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
        >>> m = nn.ReflectionPad2d(2)
        >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
        >>> input
        tensor([[[[0., 1., 2.],
                  [3., 4., 5.],
                  [6., 7., 8.]]]])
        >>> m(input)
        tensor([[[[8., 7., 6., 7., 8., 7., 6.],
                  [5., 4., 3., 4., 5., 4., 3.],
                  [2., 1., 0., 1., 2., 1., 0.],
                  [5., 4., 3., 4., 5., 4., 3.],
                  [8., 7., 6., 7., 8., 7., 6.],
                  [5., 4., 3., 4., 5., 4., 3.],
                  [2., 1., 0., 1., 2., 1., 0.]]]])
        >>> # using different paddings for different sides
        >>> m = nn.ReflectionPad2d((1, 1, 2, 0))
        >>> m(input)
        tensor([[[[7., 6., 7., 8., 7.],
                  [4., 3., 4., 5., 4.],
                  [1., 0., 1., 2., 1.],
                  [4., 3., 4., 5., 4.],
                  [7., 6., 7., 8., 7.]]]])
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        super().__init__()
        self.padding = _quadruple(padding)


class ReflectionPad3d(_ReflectionPadNd):
    r"""Pads the input tensor using the reflection of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If `int`, uses the same
            padding in all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
          where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
        >>> m = nn.ReflectionPad3d(1)
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 1, 2, 2, 2)
        >>> m(input)
        tensor([[[[[7., 6., 7., 6.],
                   [5., 4., 5., 4.],
                   [7., 6., 7., 6.],
                   [5., 4., 5., 4.]],
                  [[3., 2., 3., 2.],
                   [1., 0., 1., 0.],
                   [3., 2., 3., 2.],
                   [1., 0., 1., 0.]],
                  [[7., 6., 7., 6.],
                   [5., 4., 5., 4.],
                   [7., 6., 7., 6.],
                   [5., 4., 5., 4.]],
                  [[3., 2., 3., 2.],
                   [1., 0., 1., 0.],
                   [3., 2., 3., 2.],
                   [1., 0., 1., 0.]]]]])
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__()
        self.padding = _ntuple(6)(padding)


class _ReplicationPadNd(Module):
    __constants__ = ['padding']
    padding: Sequence[int]

    def forward(self, input: Tensor) -> Tensor:
        return F.pad(input, self.padding, 'replicate')

    def extra_repr(self) -> str:
        return f'{self.padding}'


class ReplicationPad1d(_ReplicationPadNd):
    r"""Pads the input tensor using replication of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If `int`, uses the same
            padding in all boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("not sure why xdoctest is choking on this")
        >>> m = nn.ReplicationPad1d(2)
        >>> input = torch.arange(8, dtype=torch.float).reshape(1, 2, 4)
        >>> input
        tensor([[[0., 1., 2., 3.],
                 [4., 5., 6., 7.]]])
        >>> m(input)
        tensor([[[0., 0., 0., 1., 2., 3., 3., 3.],
                 [4., 4., 4., 5., 6., 7., 7., 7.]]])
        >>> # using different paddings for different sides
        >>> m = nn.ReplicationPad1d((3, 1))
        >>> m(input)
        tensor([[[0., 0., 0., 0., 1., 2., 3., 3.],
                 [4., 4., 4., 4., 5., 6., 7., 7.]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__()
        self.padding = _pair(padding)
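
# Editorial sketch (not part of the upstream module): replication repeats the
# edge element itself, whereas reflection (above) mirrors around it; compare
# the two modes on the same input.
#
#     >>> import torch
#     >>> x = torch.arange(4.).reshape(1, 1, 4)
#     >>> ReplicationPad1d(2)(x)
#     tensor([[[0., 0., 0., 1., 2., 3., 3., 3.]]])
#     >>> ReflectionPad1d(2)(x)
#     tensor([[[2., 1., 0., 1., 2., 3., 2., 1.]]])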


class ReplicationPad2d(_ReplicationPadNd):
    r"""Pads the input tensor using replication of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If `int`, uses the same
            padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ReplicationPad2d(2)
        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> input = torch.arange(9, dtype=torch.float).reshape(1, 1, 3, 3)
        >>> input
        tensor([[[[0., 1., 2.],
                  [3., 4., 5.],
                  [6., 7., 8.]]]])
        >>> m(input)
        tensor([[[[0., 0., 0., 1., 2., 2., 2.],
                  [0., 0., 0., 1., 2., 2., 2.],
                  [0., 0., 0., 1., 2., 2., 2.],
                  [3., 3., 3., 4., 5., 5., 5.],
                  [6., 6., 6., 7., 8., 8., 8.],
                  [6., 6., 6., 7., 8., 8., 8.],
                  [6., 6., 6., 7., 8., 8., 8.]]]])
        >>> # using different paddings for different sides
        >>> m = nn.ReplicationPad2d((1, 1, 2, 0))
        >>> m(input)
        tensor([[[[0., 0., 1., 2., 2.],
                  [0., 0., 1., 2., 2.],
                  [0., 0., 1., 2., 2.],
                  [3., 3., 4., 5., 5.],
                  [6., 6., 7., 8., 8.]]]])
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        super().__init__()
        self.padding = _quadruple(padding)


class ReplicationPad3d(_ReplicationPadNd):
    r"""Pads the input tensor using replication of the input boundary.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If `int`, uses the same
            padding in all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or :math:`(C, D_{out}, H_{out}, W_{out})`,
          where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ReplicationPad3d(3)
        >>> input = torch.randn(16, 3, 8, 320, 480)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ReplicationPad3d((3, 3, 6, 6, 1, 1))
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__()
        self.padding = _ntuple(6)(padding)


class ZeroPad1d(ConstantPad1d):
    r"""Pads the input tensor boundaries with zero.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If `int`, uses the same
            padding in both boundaries. If a 2-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`)

    Shape:
        - Input: :math:`(C, W_{in})` or :math:`(N, C, W_{in})`.
        - Output: :math:`(C, W_{out})` or :math:`(N, C, W_{out})`, where

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ZeroPad1d(2)
        >>> input = torch.randn(1, 2, 4)
        >>> input
        tensor([[[-1.0491, -0.7152, -0.0749,  0.8530],
                 [-1.3287,  1.8966,  0.1466, -0.2771]]])
        >>> m(input)
        tensor([[[ 0.0000,  0.0000, -1.0491, -0.7152, -0.0749,  0.8530,  0.0000,
                   0.0000],
                 [ 0.0000,  0.0000, -1.3287,  1.8966,  0.1466, -0.2771,  0.0000,
                   0.0000]]])
        >>> m = nn.ZeroPad1d(2)
        >>> input = torch.randn(1, 2, 3)
        >>> input
        tensor([[[ 1.6616,  1.4523, -1.1255],
                 [-3.6372,  0.1182, -1.8652]]])
        >>> m(input)
        tensor([[[ 0.0000,  0.0000,  1.6616,  1.4523, -1.1255,  0.0000,  0.0000],
                 [ 0.0000,  0.0000, -3.6372,  0.1182, -1.8652,  0.0000,  0.0000]]])
        >>> # using different paddings for different sides
        >>> m = nn.ZeroPad1d((3, 1))
        >>> m(input)
        tensor([[[ 0.0000,  0.0000,  0.0000,  1.6616,  1.4523, -1.1255,  0.0000],
                 [ 0.0000,  0.0000,  0.0000, -3.6372,  0.1182, -1.8652,  0.0000]]])
    """

    padding: Tuple[int, int]

    def __init__(self, padding: _size_2_t) -> None:
        super().__init__(padding, 0.)

    def extra_repr(self) -> str:
        return f'{self.padding}'
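
# Editorial sketch (not part of the upstream module): ZeroPad1d simply pins the
# ConstantPad1d fill value to zero, so the two produce identical results when
# the constant is 0.
#
#     >>> import torch
#     >>> x = torch.randn(1, 2, 4)
#     >>> torch.equal(ZeroPad1d(2)(x), ConstantPad1d(2, 0.)(x))
#     True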


class ZeroPad2d(ConstantPad2d):
    r"""Pads the input tensor boundaries with zero.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If `int`, uses the same
            padding in all boundaries. If a 4-`tuple`, uses (:math:`\text{padding\_left}`,
            :math:`\text{padding\_right}`, :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`)

    Shape:
        - Input: :math:`(N, C, H_{in}, W_{in})` or :math:`(C, H_{in}, W_{in})`.
        - Output: :math:`(N, C, H_{out}, W_{out})` or :math:`(C, H_{out}, W_{out})`, where

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> # xdoctest: +IGNORE_WANT("non-deterministic")
        >>> m = nn.ZeroPad2d(2)
        >>> input = torch.randn(1, 1, 3, 3)
        >>> input
        tensor([[[[-0.1678, -0.4418,  1.9466],
                  [ 0.9604, -0.4219, -0.5241],
                  [-0.9162, -0.5436, -0.6446]]]])
        >>> m(input)
        tensor([[[[ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
                  [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
                  [ 0.0000,  0.0000, -0.1678, -0.4418,  1.9466,  0.0000,  0.0000],
                  [ 0.0000,  0.0000,  0.9604, -0.4219, -0.5241,  0.0000,  0.0000],
                  [ 0.0000,  0.0000, -0.9162, -0.5436, -0.6446,  0.0000,  0.0000],
                  [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
                  [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000,  0.0000]]]])
        >>> # using different paddings for different sides
        >>> m = nn.ZeroPad2d((1, 1, 2, 0))
        >>> m(input)
        tensor([[[[ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
                  [ 0.0000,  0.0000,  0.0000,  0.0000,  0.0000],
                  [ 0.0000, -0.1678, -0.4418,  1.9466,  0.0000],
                  [ 0.0000,  0.9604, -0.4219, -0.5241,  0.0000],
                  [ 0.0000, -0.9162, -0.5436, -0.6446,  0.0000]]]])
    """

    padding: Tuple[int, int, int, int]

    def __init__(self, padding: _size_4_t) -> None:
        super().__init__(padding, 0.)

    def extra_repr(self) -> str:
        return f'{self.padding}'


class ZeroPad3d(ConstantPad3d):
    r"""Pads the input tensor boundaries with zero.

    For `N`-dimensional padding, use :func:`torch.nn.functional.pad()`.

    Args:
        padding (int, tuple): the size of the padding. If `int`, uses the same
            padding in all boundaries. If a 6-`tuple`, uses
            (:math:`\text{padding\_left}`, :math:`\text{padding\_right}`,
            :math:`\text{padding\_top}`, :math:`\text{padding\_bottom}`,
            :math:`\text{padding\_front}`, :math:`\text{padding\_back}`)

    Shape:
        - Input: :math:`(N, C, D_{in}, H_{in}, W_{in})` or :math:`(C, D_{in}, H_{in}, W_{in})`.
        - Output: :math:`(N, C, D_{out}, H_{out}, W_{out})` or
          :math:`(C, D_{out}, H_{out}, W_{out})`, where

          :math:`D_{out} = D_{in} + \text{padding\_front} + \text{padding\_back}`

          :math:`H_{out} = H_{in} + \text{padding\_top} + \text{padding\_bottom}`

          :math:`W_{out} = W_{in} + \text{padding\_left} + \text{padding\_right}`

    Examples::

        >>> m = nn.ZeroPad3d(3)
        >>> input = torch.randn(16, 3, 10, 20, 30)
        >>> output = m(input)
        >>> # using different paddings for different sides
        >>> m = nn.ZeroPad3d((3, 3, 6, 6, 0, 1))
        >>> output = m(input)
    """

    padding: Tuple[int, int, int, int, int, int]

    def __init__(self, padding: _size_6_t) -> None:
        super().__init__(padding, 0.)

    def extra_repr(self) -> str:
        return f'{self.padding}'