CachedTensorUtils.h

#pragma once

#include <ATen/ATen.h>

namespace at::caching {

// Some systems (just cudagraphs currently) will persist a static tensor output
// whose TensorImpl does not change across iterations. For these tensors,
// caching dtype conversions is invalid. Additionally, there will be an extra
// reference count on these cached tensors that would prevent buffer inplacing
// and other checks on tensor uniqueness. If we are not using these systems,
// the enabled flag will be false and we will avoid the hash lookup.

TORCH_API bool is_cached_tensor(const at::Tensor& t);
TORCH_API void add_cached_tensor(const at::Tensor& t);
TORCH_API void remove_cached_tensor(const at::Tensor& t);
TORCH_API void set_cached_tensors_enabled(bool enable);
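
// A minimal usage sketch, not part of the upstream header: how a system such
// as cudagraphs might drive the registry above. The two function names below
// are hypothetical; only the at::caching declarations they call are real.
inline void example_persist_static_output(const at::Tensor& static_output) {
  // Enable the registry; while disabled, is_cached_tensor() returns early
  // and skips the hash lookup entirely.
  set_cached_tensors_enabled(true);
  // Record the output whose TensorImpl is expected to stay stable across
  // iterations; identity-based caching is only valid under that assumption.
  add_cached_tensor(static_output);
}

inline void example_release_static_output(const at::Tensor& static_output) {
  // Unregister once the persisted output is freed, so stale entries do not
  // defeat uniqueness checks on unrelated tensors.
  remove_cached_tensor(static_output);
}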

// For gradient buffer stealing we will adjust the use count of tensors
// which are persisted by cudagraphs, just as we need to adjust the reference
// count of tensors with hooks.
TORCH_API size_t adjusted_use_count(const at::Tensor& t);
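
// A minimal sketch of a caller, assuming adjusted_use_count() discounts the
// extra reference held by the caching system (per the comment above). The
// helper name is hypothetical.
inline bool example_is_uniquely_held(const at::Tensor& t) {
  // After discounting the registry's reference, a count of 1 means the
  // caller's handle is the only remaining owner, so buffer stealing would
  // be safe by this criterion.
  return adjusted_use_count(t) == 1;
}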

} // namespace at::caching