NestedTensorImpl.h

#pragma once
#include <ATen/MemoryOverlap.h>
#include <ATen/Tensor.h>
#include <c10/core/DispatchKey.h>
#include <c10/core/DispatchKeySet.h>
#include <c10/core/MemoryFormat.h>
#include <c10/core/TensorImpl.h>
#include <c10/util/ArrayRef.h>
#include <c10/util/Exception.h>
#include <c10/util/Metaprogramming.h>
#include <c10/util/irange.h>

namespace at::native {
struct NestedTensorImpl;
inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt);
int64_t get_numel_from_nested_size_tensor(const at::Tensor& tensor);
at::Tensor construct_nested_strides(const at::Tensor& nested_size);
at::Tensor construct_offsets(const at::Tensor& nested_size);

struct TORCH_API NestedTensorImpl : public c10::TensorImpl {
  explicit NestedTensorImpl(
      Storage storage,
      c10::DispatchKeySet key_set,
      const caffe2::TypeMeta data_type,
      at::Tensor nested_sizes,
      at::Tensor nested_strides,
      at::Tensor storage_offsets);

  explicit NestedTensorImpl(
      const at::Tensor& buffer,
      at::Tensor nested_sizes,
      at::Tensor nested_strides,
      at::Tensor storage_offsets);

  // Assumes contiguity: `nested_strides` and `offsets`
  // can be inferred from `nested_sizes`.
  explicit NestedTensorImpl(
      const at::Tensor& buffer,
      const at::Tensor& nested_sizes);

  // This constructor is used for creating view tensors from nested tensors.
  explicit NestedTensorImpl(
      c10::TensorImpl::ImplType impl_type,
      const at::Tensor& base_tensor,
      at::Tensor nested_sizes,
      at::Tensor nested_strides,
      at::Tensor storage_offsets);
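
  // Illustrative sketch (not part of this header): the (buffer, nested_sizes)
  // constructor is the contiguous-case entry point. For example, wrapping two
  // tensors of shapes (2, 3) and (4, 1) in a flat buffer of numel
  // 2*3 + 4*1 = 10 could look roughly like the following (factory calls and
  // direct make_intrusive construction shown for illustration only):
  //
  //   at::Tensor buffer = at::arange(10, at::kFloat);  // flat storage
  //   at::Tensor nested_sizes =                        // per-component shapes
  //       at::tensor({2, 3, 4, 1}, at::kLong).reshape({2, 2});
  //   auto nt_impl = c10::make_intrusive<at::native::NestedTensorImpl>(
  //       buffer, nested_sizes);
  //
  // nested_strides and storage_offsets are then inferred under the assumption
  // that each component is contiguous and packed back to back.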

  // TODO: don't expose private implementation details like this; in
  // particular, resizing this tensor will mess up our dim() and
  // callers cannot fix it.
  const Tensor& get_nested_sizes() const {
    return nested_sizes_;
  }
  // TODO: don't expose private implementation details like this
  const Tensor& get_nested_strides() const {
    return nested_strides_;
  }
  const Tensor& get_storage_offsets() const {
    return storage_offsets_;
  }

  // Returns nullopt if the ith dimension is irregular. The ith dimension
  // of a NestedTensor is regular if the unbound tensors match in
  // size at the (i-1)th dimension.
  std::optional<int64_t> opt_size(int64_t d) const;

  int64_t size(int64_t d) const {
    std::optional<int64_t> optional_size = this->opt_size(d);
    TORCH_CHECK(
        optional_size.has_value(),
        "Given dimension ",
        d,
        " is irregular and does not have a size.");
    return *optional_size;
  }
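
  // Illustration (not part of this header), assuming a NestedTensor built
  // from two tensors of shapes (2, 3) and (2, 5):
  //   opt_size(0) == 2        // number of nested tensors
  //   opt_size(1) == 2        // regular: both components have size 2 here
  //   opt_size(2) == nullopt  // irregular: 3 vs. 5
  //   size(2)                 // throws, since dimension 2 has no single size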

  /**
   * Return a view of the nested tensor as a 1 dimensional contiguous tensor.
   *
   * The buffer tensor created by this function shares the same storage_impl
   * as the original nested tensor, and therefore can be seen as a view.
   *
   * @return A newly constructed view tensor
   */
  at::Tensor get_buffer() const {
    TORCH_CHECK(
        nested_tensor_impl_is_contiguous(this),
        "NestedTensor must be contiguous to get buffer.");
    return get_unsafe_storage_as_tensor();
  }

  /**
   * If possible, use get_buffer() instead. This function returns the storage
   * as a tensor directly, which is not safe to use in general. When using
   * this function, the caller must account for nested_sizes, nested_strides
   * and storage_offsets.
   *
   * @return A newly constructed view tensor
   */
  at::Tensor get_unsafe_storage_as_tensor() const {
    auto buffer_key_set_ = generate_buffer_key_set();
    const auto buffer_size = get_buffer_size();
    auto buffer_tensor_impl = c10::make_intrusive<TensorImpl>(
        c10::TensorImpl::VIEW, Storage(storage_), buffer_key_set_, data_type_);
    buffer_tensor_impl->set_sizes_contiguous(
        c10::makeArrayRef(static_cast<int64_t>(buffer_size)));
    return Tensor(buffer_tensor_impl);
  }

  size_t get_buffer_size() const {
    return storage_.nbytes() / data_type_.itemsize();
  }
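
  // Illustrative contrast (not part of this header): for a contiguous
  // NestedTensorImpl* nt whose buffer holds 10 floats,
  //   nt->get_buffer()                   // checked: 1-D view of numel 10
  //   nt->get_unsafe_storage_as_tensor() // unchecked: same 1-D view, but the
  //                                      // caller must honor nested_sizes,
  //                                      // nested_strides and storage_offsets
  // Both share storage with the NestedTensor; only get_buffer() verifies
  // contiguity via nested_tensor_impl_is_contiguous() first.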

 protected:
  const char* tensorimpl_type_name() const override;

  // TODO: numel_custom and is_contiguous_custom can be profitably overridden
  // with real implementations
  int64_t numel_custom() const override;
  c10::SymInt sym_numel_custom() const override;
  bool is_contiguous_custom(MemoryFormat) const override;
  int64_t size_custom(int64_t d) const override {
    return this->size(d);
  }
  c10::SymInt sym_size_custom(int64_t d) const override {
    return c10::SymInt{this->size(d)};
  }
  IntArrayRef sizes_custom() const override;
  c10::SymIntArrayRef sym_sizes_custom() const override;
  IntArrayRef strides_custom() const override;
  c10::SymIntArrayRef sym_strides_custom() const override;
  // this one is real
  int64_t dim_custom() const override;

  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
      const c10::VariableVersion& version_counter,
      bool allow_tensor_metadata_change) const override;

  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
      c10::VariableVersion&& version_counter,
      bool allow_tensor_metadata_change) const override;

  void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override {
    copy_tensor_metadata(
        /*src_impl=*/impl.get(),
        /*dest_impl=*/this,
        /*version_counter=*/version_counter(),
        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
  }

 private:
  // Must be called after any changes to our dim() to sync the state
  // to TensorImpl.
  void refresh_dim();

  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const at::Tensor nested_sizes_, nested_strides_;
  // The starting positions of the underlying tensors in the contiguous
  // buffer, i.e. the buffer memory offsets of the underlying tensors.
  // The reason to keep this metadata is that, without a strong enough
  // constraint, it cannot be derived from `nested_sizes_` and
  // `nested_strides_`:
  // 1. when the buffer has blanks, e.g. [tensor1, blank, tensor2];
  //    this can happen e.g. after slicing a nested tensor
  // 2. when multiple tensors share the same memory
  // 3. when the nesting ordering is changed, e.g. [tensor1, tensor3, tensor2]
  // A strong enough constraint would be:
  // 1. every underlying tensor is contiguous in memory
  //    && the nesting is in ascending order
  // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members)
  const at::Tensor storage_offsets_;
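
  // Illustration (not part of this header): for three contiguous components
  // with numels 4, 6 and 2 packed back to back, storage_offsets_ holds
  // [0, 4, 10]. After slicing away the middle component, the remaining
  // NestedTensor can keep offsets [0, 10] over the same buffer (case 1
  // above), which is why the offsets cannot, in general, be recomputed from
  // sizes and strides alone.
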
  // NOTE: -1 here means the size is missing
  // Optional to allow it to be computed lazily from nested.
  // TODO: maybe we can remove this metadata since
  // we can compute it from `nested_sizes_`
  mutable std::optional<std::vector<int64_t>> opt_sizes_;

  template <typename VariableVersion>
  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach_core(
      VariableVersion&& version_counter,
      bool allow_tensor_metadata_change) const;

  /**
   * Generates a non-nested key_set from a nested tensor.
   *
   * For many nested tensor kernel implementations a buffer tensor
   * is generated and redispatched to a non-nested kernel. This function
   * generates the key set used by that buffer tensor.
   *
   * @return Appropriate key set for non-nested tensor
   */
  inline c10::DispatchKeySet generate_buffer_key_set() const {
    auto buffer_key_set = this->key_set();
    const bool Autograd = buffer_key_set.has_any(c10::autograd_dispatch_keyset);
    // Remove nested tensor specific keys
    buffer_key_set = buffer_key_set -
        c10::DispatchKeySet{
            c10::DispatchKey::NestedTensor,
            c10::DispatchKey::AutogradNestedTensor};
    // Add dense tensor specific keys
    buffer_key_set =
        buffer_key_set | c10::DispatchKeySet{c10::DispatchKey::Dense};
    buffer_key_set = Autograd
        ? c10::DispatchKeySet{c10::DispatchKey::Autograd} | buffer_key_set
        : buffer_key_set;
    return buffer_key_set;
  }
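
  // Illustration (not part of this header): the transformation above drops
  // the NestedTensor/AutogradNestedTensor keys, adds Dense, and re-adds
  // Autograd only if the nested tensor itself carried an autograd key, while
  // leaving backend bits (e.g. CPU or CUDA) untouched. The buffer therefore
  // dispatches like an ordinary dense tensor on the same device.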
};

inline NestedTensorImpl* get_nested_tensor_impl_or_null(
    const at::Tensor& tensor) {
  if (tensor.is_nested()) {
    return static_cast<NestedTensorImpl*>(tensor.unsafeGetTensorImpl());
  }
  return nullptr;
}

inline NestedTensorImpl* get_nested_tensor_impl(const at::Tensor& tensor) {
  TORCH_CHECK(
      tensor.is_nested(), "get_nested_tensor_impl requires a NestedTensor.");
  return static_cast<NestedTensorImpl*>(tensor.unsafeGetTensorImpl());
}
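
// Illustrative usage (not part of this header), given an at::Tensor `t` that
// may or may not be nested:
//   if (auto* impl = at::native::get_nested_tensor_impl_or_null(t)) {
//     // `t` is nested; nested metadata is available via impl, e.g.:
//     const at::Tensor& sizes = impl->get_nested_sizes();
//   }
// get_nested_tensor_impl(t) would instead TORCH_CHECK-fail on a non-nested
// `t`.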

inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt) {
  int64_t ntensors = nt->size(0);
  if (ntensors == 0) {
    return true;
  }
  const Tensor &sizemat = nt->get_nested_sizes(),
               &stridemat = nt->get_nested_strides();
  const int64_t* offsets_ptr =
      nt->get_storage_offsets().const_data_ptr<int64_t>();
  int64_t orig_dim = sizemat.size(1);
  // nesting scalars
  if (orig_dim == 0) {
    // each scalar must be contiguous:
    // reject if there is blank memory between underlying scalars
    for (int64_t i = 0; i < ntensors; i++) {
      if (offsets_ptr[i] != i) {
        return false;
      }
    }
  }
  // nesting tensors
  else {
    // reject if any underlying tensor is non-contiguous
    const int64_t *sizemat_ptr = sizemat.const_data_ptr<int64_t>(),
                  *stridemat_ptr = stridemat.const_data_ptr<int64_t>();
    for (int64_t i = 0; i < ntensors; i++) {
      if (stridemat_ptr[orig_dim - 1] != 1) {
        return false;
      }
      int64_t product = sizemat_ptr[orig_dim - 1];
      for (int64_t j = orig_dim - 2; j >= 0; j--) {
        if (stridemat_ptr[j] != product) {
          return false;
        }
        product *= sizemat_ptr[j];
      }
      sizemat_ptr += orig_dim;
      stridemat_ptr += orig_dim;
    }
    // reject if there is blank memory between underlying tensors
    if (offsets_ptr[0] != 0) {
      return false;
    }
    sizemat_ptr = sizemat.const_data_ptr<int64_t>();
    stridemat_ptr = stridemat.const_data_ptr<int64_t>();
    for (int64_t i = 1; i < ntensors; i++) {
      if (offsets_ptr[i] !=
          offsets_ptr[i - 1] + *sizemat_ptr * *stridemat_ptr) {
        return false;
      }
      sizemat_ptr += orig_dim;
      stridemat_ptr += orig_dim;
    }
  }
  // everything is fine
  return true;
}
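
// Illustration (not part of this header): with two components of shape
// (2, 3), row-major strides (3, 1), and storage offsets [0, 6], every check
// above passes. If the second offset were 8 instead (a two-element gap after
// the first component), the expected-offset check
// (previous offset + size[0] * stride[0] = 0 + 2 * 3 = 6) would fail and the
// NestedTensor would be reported as non-contiguous.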

inline const at::Tensor& get_nested_sizes(const at::Tensor& tensor) {
  return get_nested_tensor_impl(tensor)->get_nested_sizes();
}

} // namespace at::native