AdaptivePooling.h

#pragma once

#include <ATen/core/Tensor.h>
#include <ATen/native/DispatchStub.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/irange.h>
#include <cmath>

namespace at::native {
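// Each kernel below is a DispatchStub: backend implementations register via
// REGISTER_DISPATCH, and call sites dispatch on device type, e.g.
//   adaptive_avg_pool2d_kernel(kCPU, output, input, output_size);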
// 2D adaptive average pooling. The backward kernel takes only grad_output;
// the pooling geometry is recovered from grad_input's shape.
using adaptive_avg_pooling2d_fn = void(*)(Tensor& output, const Tensor& input, IntArrayRef output_size);
using adaptive_avg_pooling2d_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output);
DECLARE_DISPATCH(adaptive_avg_pooling2d_fn, adaptive_avg_pool2d_kernel);
DECLARE_DISPATCH(adaptive_avg_pooling2d_backward_fn, adaptive_avg_pool2d_backward_kernel);

// 2D adaptive max pooling also produces the indices of the selected maxima,
// which the backward kernel consumes to route gradients.
using adaptive_max_pooling2d_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input, IntArrayRef output_size);
using adaptive_max_pooling2d_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
DECLARE_DISPATCH(adaptive_max_pooling2d_fn, adaptive_max_pool2d_kernel);
DECLARE_DISPATCH(adaptive_max_pooling2d_backward_fn, adaptive_max_pool2d_backward_kernel);

// The 3D variants mirror the 2D signatures.
using adaptive_avg_pooling3d_fn = void(*)(Tensor& output, const Tensor& input, IntArrayRef output_size);
using adaptive_avg_pooling3d_backward_fn = void(*)(Tensor& grad_input, const Tensor& grad_output);
DECLARE_DISPATCH(adaptive_avg_pooling3d_fn, adaptive_avg_pool3d_kernel);
DECLARE_DISPATCH(adaptive_avg_pooling3d_backward_fn, adaptive_avg_pool3d_backward_kernel);

using adaptive_max_pooling3d_fn = void(*)(const Tensor& output, const Tensor& indices, const Tensor& input, IntArrayRef output_size);
using adaptive_max_pooling3d_backward_fn = void(*)(const Tensor& grad_input, const Tensor& grad_output, const Tensor& indices);
DECLARE_DISPATCH(adaptive_max_pooling3d_fn, adaptive_max_pool3d_kernel);
DECLARE_DISPATCH(adaptive_max_pooling3d_backward_fn, adaptive_max_pool3d_backward_kernel);
// First input index of the pooling window for output index `a`, when an
// input dimension of size `c` is adaptively pooled down to size `b`:
// floor(a * c / b), split into two terms to keep intermediates small.
inline int64_t start_index(int64_t a, int64_t b, int64_t c) {
  return (a / b) * c + ((a % b) * c) / b;
}
// One past the last input index of the window for output index `a`:
// ceil((a + 1) * c / b), written as 1 + ((a + 1) * c - 1) / b.
inline int64_t end_index(int64_t a, int64_t b, int64_t c) {
  return 1 + ((a + 1) * c - 1) / b;
}
// Checks that every non-batch dimension of grad_output is non-empty before
// the backward kernels index into it.
inline void adaptive_pool_empty_output_check(const Tensor& gradOutput_, const char* arg_name) {
  int64_t ndim = gradOutput_.ndimension();
  for (const auto i : c10::irange(1, ndim)) {
    TORCH_CHECK(gradOutput_.size(i) > 0,
        arg_name, "(): Expected grad_output to have non-zero size for non-batch dimensions, "
        "but grad_output has sizes ", gradOutput_.sizes(), " with dimension ", i,
        " being empty");
  }
}
} // namespace at::native
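
For illustration, here is a minimal standalone sketch (not part of the header) of how start_index/end_index partition an input dimension; the file and output format are hypothetical. Pooling an input of size 10 down to 3 outputs yields the overlapping windows [0, 4), [3, 7), and [6, 10), which together cover the whole input.

#include <cstdint>
#include <cstdio>

// Local copies of the header's index helpers.
inline int64_t start_index(int64_t a, int64_t b, int64_t c) {
  return (a / b) * c + ((a % b) * c) / b;  // floor(a * c / b)
}
inline int64_t end_index(int64_t a, int64_t b, int64_t c) {
  return 1 + ((a + 1) * c - 1) / b;        // ceil((a + 1) * c / b)
}

int main() {
  const int64_t in_size = 10;  // size of the pooled input dimension
  const int64_t out_size = 3;  // requested adaptive output size
  for (int64_t a = 0; a < out_size; ++a) {
    // Output element `a` reduces over input[start, end).
    std::printf("output[%lld] <- input[%lld, %lld)\n",
                static_cast<long long>(a),
                static_cast<long long>(start_index(a, out_size, in_size)),
                static_cast<long long>(end_index(a, out_size, in_size)));
  }
  return 0;
}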