# functional.pyi
from typing import (
    Any,
    Callable,
    Dict,
    List,
    Literal,
    Optional,
    Sequence,
    Tuple,
    Union,
    overload,
)

from torch import Tensor
from torch.types import _dtype, _int, _size

from .common_types import (
    _ratio_any_t,
    _size_1_t,
    _size_2_opt_t,
    _size_2_t,
    _size_3_opt_t,
    _size_3_t,
    _size_any_t,
)
  24. # 'TypedDict' is a new accepted type that represents a dictionary with a fixed set of allowed keys.
  25. # It is standards-track but not in `typing` yet. We leave this hear to be uncommented once the feature
  26. # is wide-spread.
  27. # from mypy_extensions import TypedDict
  28. # GRID_SAMPLE_INTERPOLATION_MODES = TypedDict('GRID_SAMPLE_INTERPOLATION_MODES', {'bilinear': int, 'nearest': int})
  29. # GRID_SAMPLE_PADDING_MODES = TypedDict('GRID_SAMPLE_PADDING_MODES', {'zeros': int, 'border': int, 'reflection': int})
  30. GRID_SAMPLE_INTERPOLATION_MODES = Dict[str, int]
  31. GRID_SAMPLE_PADDING_MODES = Dict[str, int]
  32. # These stubs were generated by running stubgen (`stubgen --parse-only functional.py`), followed by manual cleaning.
  33. #
  34. # The 'BroadcastingList{1,2,3}' types were replaced by `_size` or _output_ratio, as appropriate.
  35. # This was necessary since the JIT uses BroadcastingList* types but static checking with mypy etc requires a `Sequence`
  36. # type. There is no way to express the expected lengths of these lists in the current Python typing system.
  37. #
  38. # Functions created via `_add_docstr` in `functional.py` where merely typed as `Any` by `stubgen`, so those were
  39. # deleted from the stub and replaced by generated declarations. See `gen_pyi` for the implementation of the code
  40. # generation logic for those functions. In the future, it might be worth looking into using the mypy plugin system
  41. # to encode the type semantics of `_add_docstr`, should that system ever become widespread.
  42. def fractional_max_pool2d_with_indices(
  43. input: Tensor,
  44. kernel_size: _size,
  45. output_size: Optional[_size] = ...,
  46. output_ratio: Optional[_ratio_any_t] = ...,
  47. return_indices: bool = ...,
  48. _random_samples: Optional[Tensor] = ...,
  49. ) -> Tuple[Tensor, Tensor]: ...
  50. def fractional_max_pool3d_with_indices(
  51. input: Tensor,
  52. kernel_size: _size,
  53. output_size: Optional[_size] = ...,
  54. output_ratio: Optional[_ratio_any_t] = ...,
  55. return_indices: bool = ...,
  56. _random_samples: Optional[Tensor] = ...,
  57. ) -> Tuple[Tensor, Tensor]: ...
  58. def max_pool1d_with_indices(
  59. input: Tensor,
  60. kernel_size: _size,
  61. stride: Optional[_size] = ...,
  62. padding: _size = ...,
  63. dilation: _size = ...,
  64. ceil_mode: bool = ...,
  65. return_indices: bool = ...,
  66. ) -> Tuple[Tensor, Tensor]: ...
  67. def max_pool2d_with_indices(
  68. input: Tensor,
  69. kernel_size: _size,
  70. stride: Optional[_size] = ...,
  71. padding: _size = ...,
  72. dilation: _size = ...,
  73. ceil_mode: bool = ...,
  74. return_indices: bool = ...,
  75. ) -> Tuple[Tensor, Tensor]: ...
  76. def max_pool3d_with_indices(
  77. input: Tensor,
  78. kernel_size: _size,
  79. stride: Optional[_size] = ...,
  80. padding: _size = ...,
  81. dilation: _size = ...,
  82. ceil_mode: bool = ...,
  83. return_indices: bool = ...,
  84. ) -> Tuple[Tensor, Tensor]: ...
  85. def max_unpool1d(
  86. input: Tensor,
  87. indices: Tensor,
  88. kernel_size: _size,
  89. stride: Optional[_size] = ...,
  90. padding: _size = ...,
  91. output_size: Optional[_size] = ...,
  92. ) -> Tensor: ...
  93. def max_unpool2d(
  94. input: Tensor,
  95. indices: Tensor,
  96. kernel_size: _size,
  97. stride: Optional[_size] = ...,
  98. padding: _size = ...,
  99. output_size: Optional[_size] = ...,
  100. ) -> Tensor: ...
  101. def max_unpool3d(
  102. input: Tensor,
  103. indices: Tensor,
  104. kernel_size: _size,
  105. stride: Optional[_size] = ...,
  106. padding: _size = ...,
  107. output_size: Optional[_size] = ...,
  108. ) -> Tensor: ...
  109. def lp_pool1d(
  110. input: Tensor,
  111. norm_type: float,
  112. kernel_size: _size_1_t,
  113. stride: Union[Optional[_size], Optional[int]] = ...,
  114. ceil_mode: bool = ...,
  115. ) -> Tensor: ...
  116. def lp_pool2d(
  117. input: Tensor,
  118. norm_type: float,
  119. kernel_size: _size_2_t,
  120. stride: Union[Optional[_size], Optional[int]] = ...,
  121. ceil_mode: bool = ...,
  122. ) -> Tensor: ...
  123. def lp_pool3d(
  124. input: Tensor,
  125. norm_type: float,
  126. kernel_size: _size_3_t,
  127. stride: Union[Optional[_size], Optional[int]] = ...,
  128. ceil_mode: bool = ...,
  129. ) -> Tensor: ...
  130. def adaptive_max_pool1d_with_indices(
  131. input: Tensor,
  132. output_size: _size,
  133. return_indices: bool = ...,
  134. ) -> Tuple[Tensor, Tensor]: ...
  135. def adaptive_max_pool2d_with_indices(
  136. input: Tensor,
  137. output_size: _size_2_opt_t,
  138. return_indices: bool = ...,
  139. ) -> Tuple[Tensor, Tensor]: ...
  140. def adaptive_max_pool3d_with_indices(
  141. input: Tensor,
  142. output_size: _size_3_opt_t,
  143. return_indices: bool = ...,
  144. ) -> Tuple[Tensor, Tensor]: ...
  145. def adaptive_avg_pool2d(input: Tensor, output_size: _size_2_opt_t) -> Tensor: ...
  146. def adaptive_avg_pool3d(input: Tensor, output_size: _size_3_opt_t) -> Tensor: ...
  147. def dropout(
  148. input: Tensor,
  149. p: float = ...,
  150. training: bool = ...,
  151. inplace: bool = ...,
  152. ) -> Tensor: ...
  153. def alpha_dropout(
  154. input: Tensor,
  155. p: float = ...,
  156. training: bool = ...,
  157. inplace: bool = ...,
  158. ) -> Tensor: ...
  159. def dropout1d(
  160. input: Tensor,
  161. p: float = ...,
  162. training: bool = ...,
  163. inplace: bool = ...,
  164. ) -> Tensor: ...
  165. def dropout2d(
  166. input: Tensor,
  167. p: float = ...,
  168. training: bool = ...,
  169. inplace: bool = ...,
  170. ) -> Tensor: ...
  171. def dropout3d(
  172. input: Tensor,
  173. p: float = ...,
  174. training: bool = ...,
  175. inplace: bool = ...,
  176. ) -> Tensor: ...
  177. def feature_alpha_dropout(
  178. input: Tensor,
  179. p: float = ...,
  180. training: bool = ...,
  181. inplace: bool = ...,
  182. ) -> Tensor: ...
  183. def threshold(
  184. input: Tensor,
  185. threshold: float,
  186. value: float,
  187. inplace: bool = ...,
  188. ) -> Tensor: ...
  189. def relu(input: Tensor, inplace: bool = ...) -> Tensor: ...
  190. def glu(input: Tensor, dim: int = ...) -> Tensor: ...
  191. def hardtanh(
  192. input: Tensor,
  193. min_val: float = ...,
  194. max_val: float = ...,
  195. inplace: bool = ...,
  196. ) -> Tensor: ...
  197. def relu6(input: Tensor, inplace: bool = ...) -> Tensor: ...
  198. def elu(input: Tensor, alpha: float = ..., inplace: bool = ...) -> Tensor: ...
  199. def selu(input: Tensor, inplace: bool = ...) -> Tensor: ...
  200. def celu(input: Tensor, alpha: float = ..., inplace: bool = ...) -> Tensor: ...
  201. def leaky_relu(
  202. input: Tensor,
  203. negative_slope: float = ...,
  204. inplace: bool = ...,
  205. ) -> Tensor: ...
  206. def rrelu(
  207. input: Tensor,
  208. lower: float = ...,
  209. upper: float = ...,
  210. training: bool = ...,
  211. inplace: bool = ...,
  212. ) -> Tensor: ...
  213. def tanhshrink(input: Any): ...
  214. def softsign(input: Any): ...
  215. def softmin(
  216. input: Tensor,
  217. dim: Optional[int] = ...,
  218. _stacklevel: int = ...,
  219. dtype: Optional[_dtype] = ...,
  220. ) -> Tensor: ...
  221. def softmax(
  222. input: Tensor,
  223. dim: Optional[int] = ...,
  224. _stacklevel: int = ...,
  225. dtype: Optional[_dtype] = ...,
  226. ) -> Tensor: ...
  227. def gumbel_softmax(
  228. logits: Tensor,
  229. tau: float = ...,
  230. hard: bool = ...,
  231. eps: float = ...,
  232. dim: int = ...,
  233. ) -> Tensor: ...
  234. def log_softmax(
  235. input: Tensor,
  236. dim: Optional[int] = ...,
  237. _stacklevel: int = ...,
  238. dtype: Optional[_dtype] = ...,
  239. ) -> Tensor: ...
  240. def tanh(input: Any): ...
  241. def sigmoid(input: Any) -> Tensor: ...
  242. def hardsigmoid(input: Tensor, inplace: bool = False) -> Tensor: ...
  243. def silu(input: Tensor, inplace: bool = False) -> Tensor: ...
  244. def mish(input: Tensor, inplace: bool = False) -> Tensor: ...
  245. def hardswish(input: Tensor, inplace: bool = False) -> Tensor: ...
  246. def embedding(
  247. input: Tensor,
  248. weight: Tensor,
  249. padding_idx: Optional[int] = ...,
  250. max_norm: Optional[float] = ...,
  251. norm_type: float = ...,
  252. scale_grad_by_freq: bool = ...,
  253. sparse: bool = ...,
  254. ) -> Tensor: ...
  255. def embedding_bag(
  256. input: Tensor,
  257. weight: Tensor,
  258. offsets: Optional[Tensor] = ...,
  259. max_norm: Optional[float] = ...,
  260. norm_type: float = ...,
  261. scale_grad_by_freq: bool = ...,
  262. mode: str = ...,
  263. sparse: bool = ...,
  264. per_sample_weights: Optional[Tensor] = ...,
  265. include_last_offset: bool = ...,
  266. padding_idx: Optional[int] = ...,
  267. ) -> Tensor: ...
  268. def batch_norm(
  269. input: Tensor,
  270. running_mean: Optional[Tensor],
  271. running_var: Optional[Tensor],
  272. weight: Optional[Tensor] = ...,
  273. bias: Optional[Tensor] = ...,
  274. training: bool = ...,
  275. momentum: float = ...,
  276. eps: float = ...,
  277. ) -> Tensor: ...
  278. def instance_norm(
  279. input: Tensor,
  280. running_mean: Optional[Tensor] = ...,
  281. running_var: Optional[Tensor] = ...,
  282. weight: Optional[Tensor] = ...,
  283. bias: Optional[Tensor] = ...,
  284. use_input_stats: bool = ...,
  285. momentum: float = ...,
  286. eps: float = ...,
  287. ) -> Tensor: ...
  288. def layer_norm(
  289. input: Tensor,
  290. normalized_shape: Sequence[int],
  291. weight: Optional[Tensor] = ...,
  292. bias: Optional[Tensor] = ...,
  293. eps: float = ...,
  294. ) -> Tensor: ...
  295. def rms_norm(
  296. input: Tensor,
  297. normalized_shape: Sequence[int],
  298. weight: Optional[Tensor] = ...,
  299. eps: Optional[float] = ...,
  300. ) -> Tensor: ...
  301. def group_norm(
  302. input: Tensor,
  303. num_groups: int,
  304. weight: Optional[Tensor] = ...,
  305. bias: Optional[Tensor] = ...,
  306. eps: float = ...,
  307. ) -> Tensor: ...
  308. def local_response_norm(
  309. input: Tensor,
  310. size: int,
  311. alpha: float = ...,
  312. beta: float = ...,
  313. k: float = ...,
  314. ) -> Tensor: ...
  315. def ctc_loss(
  316. log_probs: Tensor,
  317. targets: Tensor,
  318. input_lengths: Tensor,
  319. target_lengths: Tensor,
  320. blank: int = ...,
  321. reduction: str = ...,
  322. zero_infinity: bool = ...,
  323. ) -> Tensor: ...
  324. def nll_loss(
  325. input: Tensor,
  326. target: Tensor,
  327. weight: Optional[Tensor] = ...,
  328. size_average: Optional[bool] = ...,
  329. ignore_index: int = ...,
  330. reduce: Optional[bool] = ...,
  331. reduction: str = ...,
  332. ) -> Tensor: ...
  333. def poisson_nll_loss(
  334. input: Tensor,
  335. target: Tensor,
  336. log_input: bool = ...,
  337. full: bool = ...,
  338. size_average: Optional[bool] = ...,
  339. eps: float = ...,
  340. reduce: Optional[bool] = ...,
  341. reduction: str = ...,
  342. ) -> Tensor: ...
  343. def gaussian_nll_loss(
  344. input: Tensor,
  345. target: Tensor,
  346. var: Tensor,
  347. full: Optional[bool] = ...,
  348. eps: Optional[float] = ...,
  349. reduction: Optional[str] = ...,
  350. ) -> Tensor: ...
  351. def kl_div(
  352. input: Tensor,
  353. target: Tensor,
  354. size_average: Optional[bool] = ...,
  355. reduce: Optional[bool] = ...,
  356. reduction: str = ...,
  357. log_target: bool = ...,
  358. ) -> Tensor: ...
  359. def cross_entropy(
  360. input: Tensor,
  361. target: Tensor,
  362. weight: Optional[Tensor] = ...,
  363. size_average: Optional[bool] = ...,
  364. ignore_index: int = ...,
  365. reduce: Optional[bool] = ...,
  366. reduction: str = ...,
  367. label_smoothing: float = ...,
  368. ) -> Tensor: ...
  369. def binary_cross_entropy(
  370. input: Tensor,
  371. target: Tensor,
  372. weight: Optional[Tensor] = ...,
  373. size_average: Optional[bool] = ...,
  374. reduce: Optional[bool] = ...,
  375. reduction: str = ...,
  376. ) -> Tensor: ...
  377. def binary_cross_entropy_with_logits(
  378. input: Tensor,
  379. target: Tensor,
  380. weight: Optional[Tensor] = ...,
  381. size_average: Optional[bool] = ...,
  382. reduce: Optional[bool] = ...,
  383. reduction: str = ...,
  384. pos_weight: Optional[Tensor] = ...,
  385. ) -> Tensor: ...
  386. def smooth_l1_loss(
  387. input: Tensor,
  388. target: Tensor,
  389. size_average: Optional[bool] = ...,
  390. reduce: Optional[bool] = ...,
  391. reduction: str = ...,
  392. beta: float = ...,
  393. ) -> Tensor: ...
  394. def huber_loss(
  395. input: Tensor,
  396. target: Tensor,
  397. reduction: str = ...,
  398. delta: float = ...,
  399. ) -> Tensor: ...
  400. def l1_loss(
  401. input: Tensor,
  402. target: Tensor,
  403. size_average: Optional[bool] = ...,
  404. reduce: Optional[bool] = ...,
  405. reduction: str = ...,
  406. ) -> Tensor: ...
  407. def mse_loss(
  408. input: Tensor,
  409. target: Tensor,
  410. size_average: Optional[bool] = ...,
  411. reduce: Optional[bool] = ...,
  412. reduction: str = ...,
  413. ) -> Tensor: ...
  414. def margin_ranking_loss(
  415. input1: Tensor,
  416. input2: Tensor,
  417. target: Tensor,
  418. margin: float = ...,
  419. size_average: Optional[bool] = ...,
  420. reduce: Optional[bool] = ...,
  421. reduction: str = ...,
  422. ) -> Tensor: ...
  423. def hinge_embedding_loss(
  424. input: Tensor,
  425. target: Tensor,
  426. margin: float = ...,
  427. size_average: Optional[bool] = ...,
  428. reduce: Optional[bool] = ...,
  429. reduction: str = ...,
  430. ) -> Tensor: ...
  431. def multilabel_margin_loss(
  432. input: Tensor,
  433. target: Tensor,
  434. size_average: Optional[bool] = ...,
  435. reduce: Optional[bool] = ...,
  436. reduction: str = ...,
  437. ) -> Tensor: ...
  438. def soft_margin_loss(
  439. input: Tensor,
  440. target: Tensor,
  441. size_average: Optional[bool] = ...,
  442. reduce: Optional[bool] = ...,
  443. reduction: str = ...,
  444. ) -> Tensor: ...
  445. def multilabel_soft_margin_loss(
  446. input: Tensor,
  447. target: Tensor,
  448. weight: Optional[Tensor] = ...,
  449. size_average: Optional[bool] = ...,
  450. reduce: Optional[bool] = ...,
  451. reduction: str = ...,
  452. ) -> Tensor: ...
  453. def cosine_embedding_loss(
  454. input1: Tensor,
  455. input2: Tensor,
  456. target: Tensor,
  457. margin: float = ...,
  458. size_average: Optional[bool] = ...,
  459. reduce: Optional[bool] = ...,
  460. reduction: str = ...,
  461. ) -> Tensor: ...
  462. def multi_margin_loss(
  463. input: Tensor,
  464. target: Tensor,
  465. p: int = ...,
  466. margin: float = ...,
  467. weight: Optional[Tensor] = ...,
  468. size_average: Optional[bool] = ...,
  469. reduce: Optional[bool] = ...,
  470. reduction: str = ...,
  471. ) -> Tensor: ...
  472. def upsample(
  473. input: Any,
  474. size: Optional[Any] = ...,
  475. scale_factor: Optional[Any] = ...,
  476. mode: str = ...,
  477. align_corners: Optional[Any] = ...,
  478. ): ...
  479. def interpolate(
  480. input: Any,
  481. size: Optional[Any] = ...,
  482. scale_factor: Optional[Any] = ...,
  483. mode: str = ...,
  484. align_corners: Optional[Any] = ...,
  485. recompute_scale_factor: Optional[Any] = ...,
  486. antialias: bool = ...,
  487. ): ...
  488. def upsample_nearest(
  489. input: Any,
  490. size: Optional[Any] = ...,
  491. scale_factor: Optional[Any] = ...,
  492. ): ...
  493. def upsample_bilinear(
  494. input: Any,
  495. size: Optional[Any] = ...,
  496. scale_factor: Optional[Any] = ...,
  497. ): ...
  498. def grid_sample(
  499. input: Tensor,
  500. grid: Tensor,
  501. mode: str = ...,
  502. padding_mode: str = ...,
  503. align_corners: Optional[Any] = ...,
  504. ) -> Tensor: ...
  505. def affine_grid(
  506. theta: Tensor,
  507. size: List[int],
  508. align_corners: Optional[Any] = ...,
  509. ) -> Tensor: ...
  510. def triplet_margin_loss(
  511. anchor: Tensor,
  512. positive: Tensor,
  513. negative: Tensor,
  514. margin: float = ...,
  515. p: float = ...,
  516. eps: float = ...,
  517. swap: bool = ...,
  518. size_average: Optional[bool] = ...,
  519. reduce: Optional[bool] = ...,
  520. reduction: str = ...,
  521. ) -> Tensor: ...
  522. def triplet_margin_with_distance_loss(
  523. anchor: Tensor,
  524. positive: Tensor,
  525. negative: Tensor,
  526. *,
  527. distance_function: Optional[Callable[[Tensor, Tensor], Tensor]] = ...,
  528. margin: float = ...,
  529. swap: bool = ...,
  530. reduction: str = ...,
  531. ) -> Tensor: ...
  532. def normalize(
  533. input: Tensor,
  534. p: float = ...,
  535. dim: int = ...,
  536. eps: float = ...,
  537. out: Optional[Tensor] = ...,
  538. ) -> Tensor: ...
  539. def assert_int_or_pair(
  540. arg: Any,
  541. arg_name: Any,
  542. message: Any,
  543. ) -> None: ...
  544. def unfold(
  545. input: Tensor,
  546. kernel_size: _size_any_t,
  547. dilation: _size_any_t = ...,
  548. padding: _size_any_t = ...,
  549. stride: _size_any_t = ...,
  550. ) -> Tensor: ...
  551. def fold(
  552. input: Tensor,
  553. output_size: _size_any_t,
  554. kernel_size: _size_any_t,
  555. dilation: _size_any_t = ...,
  556. padding: _size_any_t = ...,
  557. stride: _size_any_t = ...,
  558. ) -> Tensor: ...
  559. def _canonical_mask(
  560. mask: Optional[Tensor],
  561. mask_name: str,
  562. other_type: Optional[_dtype],
  563. other_name: str,
  564. target_type: _dtype,
  565. check_other: bool = True,
  566. ) -> Optional[Tensor]: ...
  567. def _none_or_dtype(input: Optional[Tensor]) -> Optional[_dtype]: ...
  568. def multi_head_attention_forward(
  569. query: Tensor,
  570. key: Tensor,
  571. value: Tensor,
  572. embed_dim_to_check: int,
  573. num_heads: int,
  574. in_proj_weight: Optional[Tensor],
  575. in_proj_bias: Optional[Tensor],
  576. bias_k: Optional[Tensor],
  577. bias_v: Optional[Tensor],
  578. add_zero_attn: bool,
  579. dropout_p: float,
  580. out_proj_weight: Tensor,
  581. out_proj_bias: Optional[Tensor],
  582. training: bool = True,
  583. key_padding_mask: Optional[Tensor] = None,
  584. need_weights: bool = True,
  585. attn_mask: Optional[Tensor] = None,
  586. use_separate_proj_weight: bool = False,
  587. q_proj_weight: Optional[Tensor] = None,
  588. k_proj_weight: Optional[Tensor] = None,
  589. v_proj_weight: Optional[Tensor] = None,
  590. static_k: Optional[Tensor] = None,
  591. static_v: Optional[Tensor] = None,
  592. average_attn_weights: bool = True,
  593. is_causal: bool = False,
  594. ) -> Tuple[Tensor, Optional[Tensor]]: ...
  595. from .. import conv1d as conv1d
  596. from .. import conv2d as conv2d
  597. from .. import conv3d as conv3d
  598. from .. import conv_transpose1d as conv_transpose1d
  599. from .. import conv_transpose2d as conv_transpose2d
  600. from .. import conv_transpose3d as conv_transpose3d
  601. from .. import conv_tbc as conv_tbc
  602. from .. import avg_pool1d as avg_pool1d
  603. from .. import adaptive_avg_pool1d as adaptive_avg_pool1d
  604. from .. import relu_ as relu_
  605. from .. import selu_ as selu_
  606. from .. import celu_ as celu_
  607. from .. import prelu as prelu
  608. from .. import rrelu_ as rrelu_
  609. from .. import hardshrink as hardshrink
  610. from .. import bilinear as bilinear
  611. from .. import pixel_shuffle as pixel_shuffle
  612. from .. import pixel_unshuffle as pixel_unshuffle
  613. from .. import channel_shuffle as channel_shuffle
  614. from .. import native_channel_shuffle as native_channel_shuffle
  615. from .. import pairwise_distance as pairwise_distance
  616. from .. import pdist as pdist
  617. from .. import cosine_similarity as cosine_similarity
  618. from .._C._nn import avg_pool2d as avg_pool2d
  619. from .._C._nn import avg_pool3d as avg_pool3d
  620. from .._C._nn import hardtanh_ as hardtanh_
  621. from .._C._nn import elu_ as elu_
  622. from .._C._nn import leaky_relu_ as leaky_relu_
  623. from .._C._nn import gelu as gelu
  624. from .._C._nn import softplus as softplus
  625. from .._C._nn import softshrink as softshrink
  626. from .._C._nn import linear as linear
  627. from .._C._nn import pad as pad
  628. from .._C._nn import one_hot as one_hot
  629. from .._C._nn import scaled_dot_product_attention as scaled_dot_product_attention
  630. from .._C._nn import log_sigmoid
  631. logsigmoid = log_sigmoid
  632. @overload
  633. def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[False] = False) -> Tensor: ...
  634. @overload
  635. def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
  636. @overload
  637. def adaptive_max_pool1d(input: Tensor, output_size: Union[_int, _size], *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
  638. @overload
  639. def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[False] = False) -> Tensor: ...
  640. @overload
  641. def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
  642. @overload
  643. def adaptive_max_pool2d(input: Tensor, output_size: Union[_int, _size], *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
  644. @overload
  645. def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[False] = False) -> Tensor: ...
  646. @overload
  647. def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size], return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
  648. @overload
  649. def adaptive_max_pool3d(input: Tensor, output_size: Union[_int, _size], *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
  650. @overload
  651. def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, return_indices: Literal[False] = False, _random_samples: Optional[Tensor] = None) -> Tensor: ...
  652. @overload
  653. def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]], output_ratio: Optional[_ratio_any_t], return_indices: Literal[True], /, _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
  654. @overload
  655. def fractional_max_pool2d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, *, return_indices: Literal[True], _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
  656. @overload
  657. def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, return_indices: Literal[False] = False, _random_samples: Optional[Tensor] = None) -> Tensor: ...
  658. @overload
  659. def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]], output_ratio: Optional[_ratio_any_t], return_indices: Literal[True], /, _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
  660. @overload
  661. def fractional_max_pool3d(input: Tensor, kernel_size: Union[_int, _size], output_size: Optional[Union[_int, _size]] = None, output_ratio: Optional[_ratio_any_t] = None, *, return_indices: Literal[True], _random_samples: Optional[Tensor] = None) -> Tuple[Tensor, Tensor]: ...
  662. @overload
  663. def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, return_indices: Literal[False] = False) -> Tensor: ...
  664. @overload
  665. def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]], padding: Union[_int, _size], dilation: Union[_int, _size], ceil_mode: bool, return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
  666. @overload
  667. def max_pool1d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
  668. @overload
  669. def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, return_indices: Literal[False] = False) -> Tensor: ...
  670. @overload
  671. def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]], padding: Union[_int, _size], dilation: Union[_int, _size], ceil_mode: bool, return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
  672. @overload
  673. def max_pool2d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...
  674. @overload
  675. def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, return_indices: Literal[False] = False) -> Tensor: ...
  676. @overload
  677. def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]], padding: Union[_int, _size], dilation: Union[_int, _size], ceil_mode: bool, return_indices: Literal[True], /) -> Tuple[Tensor, Tensor]: ...
  678. @overload
  679. def max_pool3d(input: Tensor, kernel_size: Union[_int, _size], stride: Optional[Union[_int, _size]] = None, padding: Union[_int, _size] = 0, dilation: Union[_int, _size] = 1, ceil_mode: bool = False, *, return_indices: Literal[True]) -> Tuple[Tensor, Tensor]: ...