SparseTensorImpl.h
#pragma once

#include <ATen/Tensor.h>
#include <c10/core/TensorImpl.h>
#include <c10/core/impl/TorchDispatchModeTLS.h>
#include <c10/util/Exception.h>
#include <c10/util/irange.h>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/empty.h>
#include <ATen/ops/resize.h>
#endif

namespace at {

struct TORCH_API SparseTensorImpl : public TensorImpl {
  // Stored in COO format, indices + values.

  // INVARIANTS:
  // sparse_dim: range [0, len(shape)]; sparse_dim + dense_dim = len(shape)
  // dense_dim : range [0, len(shape)]; sparse_dim + dense_dim = len(shape)
  // _indices.shape: dimensionality: 2,  shape: (sparse_dim, nnz)
  // _values.shape:  dimensionality: 1 + dense_dim.  shape: (nnz,
  // shape[sparse_dim:])
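  //
  // Concrete illustration of these invariants (shapes chosen arbitrarily for
  // exposition): a COO tensor of shape (2, 3, 4) with sparse_dim = 2,
  // dense_dim = 1 and nnz = 5 would satisfy
  //   _indices.shape == (2, 5)   // one row of indices per sparse dimension
  //   _values.shape  == (5, 4)   // one dense slice of shape (4,) per nonzero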
  int64_t sparse_dim_ = 0; // number of sparse dimensions
  int64_t dense_dim_ = 0; // number of dense dimensions

  Tensor indices_; // always a LongTensor
  Tensor values_;

  // A sparse tensor is 'coalesced' if every index occurs at most once in
  // the indices tensor, and the indices are in sorted order.  (This means
  // that it is very easy to convert a coalesced tensor to CSR format: you
  // need only compute CSR format indices.)
  //
  // Most math operations can only be performed on coalesced sparse tensors,
  // because many algorithms proceed by merging two sorted lists (of indices).
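  //
  // Small illustration (values chosen arbitrarily) for a 1-D sparse tensor of
  // size (5,):
  //   uncoalesced: _indices = [[1, 3, 1]], _values = [7, 2, 5]
  //                (index 1 appears twice)
  //   coalesced:   _indices = [[1, 3]],    _values = [12, 2]
  //                (duplicates summed, indices in sorted order)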
  bool coalesced_ = false;

  // compute_numel with integer multiplication overflow check, see gh-57542
  void refresh_numel() {
    TensorImpl::safe_refresh_numel();
  }

 public:
  // Public for now...
  explicit SparseTensorImpl(at::DispatchKeySet, const caffe2::TypeMeta);

  void release_resources() override;

  int64_t nnz() const {
    return values_.size(0);
  }

  c10::SymInt sym_nnz() const {
    return values_.sym_size(0);
  }
  int64_t sparse_dim() const {
    return sparse_dim_;
  }
  int64_t dense_dim() const {
    return dense_dim_;
  }
  bool coalesced() const {
    return coalesced_;
  }
  Tensor indices() const {
    return indices_;
  }
  Tensor values() const {
    return values_;
  }

  void set_size(int64_t dim, int64_t new_size) override;
  void set_stride(int64_t dim, int64_t new_stride) override;
  void set_storage_offset(int64_t storage_offset) override;

#ifdef DEBUG
  bool has_storage() const override;
#endif

  // WARNING: This function does NOT preserve invariants of
  // sparse_dim/dense_dim with respect to indices and values
  void raw_resize_(int64_t sparse_dim, int64_t dense_dim, IntArrayRef size) {
    TORCH_CHECK(
        allow_tensor_metadata_change(),
        "raw_resize_ ",
        err_msg_tensor_metadata_change_not_allowed);
    TORCH_CHECK(
        !has_symbolic_sizes_strides_,
        "raw_resize_ called on tensor with symbolic shape")
    set_sizes_and_strides(size, std::vector<int64_t>(size.size()));
    sparse_dim_ = sparse_dim;
    dense_dim_ = dense_dim;
    refresh_numel();
  }

  // NOTE: This function preserves invariants of sparse_dim/dense_dim with
  // respect to indices and values.
  //
  // NOTE: This function supports the following cases:
  // 1. When we keep the number of dense dimensions unchanged, and NOT shrinking
  //    the size of any of the dense dimensions.
  // 2. When we keep the number of sparse dimensions unchanged, and NOT
  //    shrinking the size of any of the sparse dimensions.
  // 3. When the sparse tensor has zero nnz, in which case we are free to change
  //    the shapes of both its sparse and dense dimensions.
  //
  // This function DOESN'T support (and will throw an error) the following
  // cases:
  // 1. When we attempt to change the number of sparse dimensions on a non-empty
  //    sparse tensor (such an operation will invalidate the indices stored).
  // 2. When we attempt to change the number of dense dimensions on a non-empty
  //    sparse tensor (such an operation will behave differently from an
  //    equivalent dense tensor's resize method, and for API consistency we
  //    don't support it).
  // 3. When we attempt to shrink the size of any of the dense dimensions on a
  //    non-empty sparse tensor (such an operation will behave differently from
  //    an equivalent dense tensor's resize method, and for API consistency we
  //    don't support it).
  // 4. When we attempt to shrink the size of any of the sparse dimensions on a
  //    non-empty sparse tensor (this could make some of the stored indices
  //    out-of-bound and thus unsafe).
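  //
  // Illustrative sketch of these rules through the public ATen API (shown for
  // exposition only; `t` is assumed to be a non-empty COO tensor of shape
  // (4, 3) with sparse_dim = 1 and dense_dim = 1):
  //
  //   t.sparse_resize_({6, 3}, 1, 1);     // OK: grows a sparse dimension
  //   t.sparse_resize_({4, 5}, 1, 1);     // OK: grows a dense dimension
  //   t.sparse_resize_({2, 3}, 1, 1);     // error: shrinks a sparse dimension
  //   t.sparse_resize_({4, 3, 1}, 2, 1);  // error: changes sparse_dim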
  template <typename T>
  void _resize_(int64_t sparse_dim, int64_t dense_dim, ArrayRef<T> size) {
    TORCH_CHECK(
        allow_tensor_metadata_change(),
        "resize_ ",
        err_msg_tensor_metadata_change_not_allowed);
    TORCH_CHECK(
        !has_symbolic_sizes_strides_,
        "resize_ called on tensor with symbolic shape")
    TORCH_CHECK(
        sparse_dim + dense_dim == static_cast<int64_t>(size.size()),
        "number of dimensions must be sparse_dim (",
        sparse_dim,
        ") + dense_dim (",
        dense_dim,
        "), but got ",
        size.size());
    if (nnz() > 0) {
      auto alt_options_msg =
          "You could try the following options:\n\
1. If you need an empty sparse tensor of this size, call `x = torch.sparse_coo_tensor(size)`.\n\
2. If you need to resize this tensor, you have the following options:\n\
    1. For both sparse and dense dimensions, keep the number of them constant and the size of them non-shrinking, and then try the same call again.\n\
    2. Or, create a new sparse tensor with the correct indices and values from this sparse tensor.";

      TORCH_CHECK(
          sparse_dim == sparse_dim_,
          "changing the number of sparse dimensions (from ",
          sparse_dim_,
          " to ",
          sparse_dim,
          ") on a non-empty sparse tensor is not supported.\n",
          alt_options_msg);

      TORCH_CHECK(
          dense_dim == dense_dim_,
          "changing the number of dense dimensions (from ",
          dense_dim_,
          " to ",
          dense_dim,
          ") on a non-empty sparse tensor is not supported.\n",
          alt_options_msg);

      bool shrinking_sparse_dims = false;
      bool shrinking_dense_dim = false;
      auto sparse_size_original = generic_sizes<T>().slice(0, sparse_dim);
      auto sparse_size_new = size.slice(0, sparse_dim);
      for (const auto i : c10::irange(sparse_dim)) {
        if (sparse_size_new[i] < sparse_size_original[i]) {
          shrinking_sparse_dims = true;
          break;
        }
      }
      auto dense_size_original = generic_sizes<T>().slice(sparse_dim);
      auto dense_size_new = size.slice(sparse_dim);
      for (const auto i : c10::irange(dense_dim)) {
        if (dense_size_new[i] < dense_size_original[i]) {
          shrinking_dense_dim = true;
          break;
        }
      }

      TORCH_CHECK(
          !shrinking_sparse_dims,
          "shrinking the size of sparse dimensions (from ",
          sparse_size_original,
          " to ",
          sparse_size_new,
          ") on a non-empty sparse tensor is not supported.\n",
          alt_options_msg);

      TORCH_CHECK(
          !shrinking_dense_dim,
          "shrinking the size of dense dimensions (from ",
          dense_size_original,
          " to ",
          dense_size_new,
          ") on a non-empty sparse tensor is not supported.\n",
          alt_options_msg);
    }

    auto sizes_and_strides = generic_sizes<T>();
    const bool size_equals_sizes = std::equal(
        size.begin(),
        size.end(),
        sizes_and_strides.begin(),
        sizes_and_strides.end());
    if ((!size_equals_sizes) || (sparse_dim != sparse_dim_) ||
        (dense_dim != dense_dim_)) {
      auto nnz = at::symint::sizes<T>(values())[0];
      std::vector<T> values_size = {nnz};
      auto dense_size = size.slice(sparse_dim);
      values_size.insert(
          values_size.end(), dense_size.begin(), dense_size.end());
      at::symint::resize_<T>(values_, values_size);
      at::symint::resize_<T>(indices_, {T(sparse_dim), nnz});
    }

    if (!size_equals_sizes) {
      set_sizes_and_strides(size, std::vector<T>(size.size()));
    }
    sparse_dim_ = sparse_dim;
    dense_dim_ = dense_dim;
    refresh_numel();
  }

  void resize_(int64_t sparse_dim, int64_t dense_dim, ArrayRef<int64_t> size) {
    return _resize_(sparse_dim, dense_dim, size);
  }

  void resize_(
      int64_t sparse_dim,
      int64_t dense_dim,
      ArrayRef<c10::SymInt> size) {
    return _resize_(sparse_dim, dense_dim, size);
  }

  // NOTE: this function will resize the sparse tensor and also set `indices`
  // and `values` to empty.
  void resize_and_clear_(
      int64_t sparse_dim,
      int64_t dense_dim,
      IntArrayRef size) {
    TORCH_CHECK(
        allow_tensor_metadata_change(),
        "resize_and_clear_ ",
        err_msg_tensor_metadata_change_not_allowed);
    TORCH_CHECK(
        !has_symbolic_sizes_strides_,
        "resize_and_clear_ called on tensor with symbolic shape")
    TORCH_CHECK(
        sparse_dim + dense_dim == static_cast<int64_t>(size.size()),
        "number of dimensions must be sparse_dim (",
        sparse_dim,
        ") + dense_dim (",
        dense_dim,
        "), but got ",
        size.size());

    set_sizes_and_strides(size, std::vector<int64_t>(size.size()));
    sparse_dim_ = sparse_dim;
    dense_dim_ = dense_dim;

    auto empty_indices = at::empty({sparse_dim, 0}, indices().options());
    std::vector<int64_t> values_size = {0};
    auto dense_size = sizes().slice(sparse_dim);
    values_size.insert(values_size.end(), dense_size.begin(), dense_size.end());
    auto empty_values = at::empty(values_size, values().options());
    set_indices_and_values_unsafe(empty_indices, empty_values);
    refresh_numel();
  }
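  //
  // Illustrative sketch via the public ATen counterpart (for exposition only;
  // `t` is assumed to be an arbitrary COO tensor):
  //
  //   t.sparse_resize_and_clear_({10, 10}, /*sparse_dim=*/2, /*dense_dim=*/0);
  //   // afterwards: t.sizes() == (10, 10), t._nnz() == 0,
  //   //             t._indices().sizes() == (2, 0), t._values().sizes() == (0,)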
  void set_coalesced(bool coalesced) {
    TORCH_CHECK(
        allow_tensor_metadata_change(),
        "set_coalesced ",
        err_msg_tensor_metadata_change_not_allowed);
    coalesced_ = coalesced;
  }

  // NOTE: this function is only used internally and not exposed to Python
  // frontend
  void set_nnz_and_narrow(int64_t new_nnz) {
    TORCH_CHECK(
        allow_tensor_metadata_change(),
        "set_nnz_and_narrow ",
        err_msg_tensor_metadata_change_not_allowed);
    AT_ASSERT(new_nnz <= nnz());
    indices_ = indices_.narrow(1, 0, new_nnz);
    values_ = values_.narrow(0, 0, new_nnz);
    if (new_nnz < 2) {
      coalesced_ = true;
    }
  }

  // Takes indices and values and directly puts them into the sparse tensor, no
  // copy.  NOTE: this function is unsafe because it doesn't check whether any
  // indices are out of boundaries of `sizes`, so it should ONLY be used where
  // we know that the indices are guaranteed to be within bounds.
  // This used to be called THSTensor_(_move)
  // NB: This used to be able to avoid a refcount bump, but I was too lazy to
  // make it happen
  void set_indices_and_values_unsafe(
      const Tensor& indices,
      const Tensor& values);
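  //
  // Rough sketch of the contract (illustrative only; at::_sparse_coo_tensor_unsafe
  // is the analogous unchecked construction at the ATen level and likewise
  // assumes the caller guarantees in-bounds indices):
  //
  //   auto indices = at::tensor({int64_t{0}, int64_t{2}}).reshape({1, 2});
  //   auto values  = at::tensor({1.0, 2.0});
  //   // caller guarantees every entry of `indices` is < 4
  //   auto t = at::_sparse_coo_tensor_unsafe(indices, values, {4});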
  template <typename VariableVersion>
  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach_core(
      VariableVersion&& version_counter,
      bool allow_tensor_metadata_change) const {
    const auto mode_stack_len = c10::impl::TorchDispatchModeTLS::stack_len();
    c10::impl::PyInterpreter&& interpreter = nullptr;
    if (mode_stack_len > 0 &&
        !c10::impl::tls_is_dispatch_key_excluded(DispatchKey::Python)) {
      const auto& cur_torch_dispatch_mode_state =
          c10::impl::TorchDispatchModeTLS::get_stack_at(mode_stack_len - 1);
      interpreter = cur_torch_dispatch_mode_state->pyinterpreter();
    } else if (
        key_set_.has(DispatchKey::Python) &&
        !c10::impl::tls_is_dispatch_key_excluded(DispatchKey::Python)) {
      interpreter = pyobj_slot_.load_pyobj_interpreter();
    } else {
      // otherwise just copy the SparseTensorImpl and not the PyObject.
      auto impl = c10::make_intrusive<SparseTensorImpl>(key_set(), dtype());
      copy_tensor_metadata(
          /*src_sparse_impl=*/this,
          /*dest_sparse_impl=*/impl.get(),
          /*version_counter=*/version_counter,
          /*allow_tensor_metadata_change=*/allow_tensor_metadata_change);
      impl->refresh_numel();
      return impl;
    }
    auto r = interpreter->detach(this);
    r->set_version_counter(std::forward<VariableVersion>(version_counter));
    r->set_allow_tensor_metadata_change(allow_tensor_metadata_change);
    return r;
  }

  /**
   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
   *
   * For usage of `version_counter` and `allow_tensor_metadata_change`,
   * see NOTE [ TensorImpl Shallow-Copying ].
   */
  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
      const c10::VariableVersion& version_counter,
      bool allow_tensor_metadata_change) const override {
    return shallow_copy_and_detach_core(
        version_counter, allow_tensor_metadata_change);
  }

  /**
   * Return a TensorImpl that is a shallow-copy of this TensorImpl.
   *
   * For usage of `version_counter` and `allow_tensor_metadata_change`,
   * see NOTE [ TensorImpl Shallow-Copying ].
   */
  c10::intrusive_ptr<TensorImpl> shallow_copy_and_detach(
      c10::VariableVersion&& version_counter,
      bool allow_tensor_metadata_change) const override {
    return shallow_copy_and_detach_core(
        std::move(version_counter), allow_tensor_metadata_change);
  }

  /**
   * Shallow-copies data from another TensorImpl into this TensorImpl.
   *
   * For why this function doesn't check this TensorImpl's
   * `allow_tensor_metadata_change_`, see NOTE [ TensorImpl Shallow-Copying ].
   */
  void shallow_copy_from(const c10::intrusive_ptr<TensorImpl>& impl) override {
    AT_ASSERT(has_compatible_shallow_copy_type(impl->key_set()));
    auto sparse_impl = static_cast<const SparseTensorImpl*>(impl.get());
    copy_tensor_metadata(
        /*src_sparse_impl=*/sparse_impl,
        /*dest_sparse_impl=*/this,
        /*version_counter=*/version_counter(),
        /*allow_tensor_metadata_change=*/allow_tensor_metadata_change());
    refresh_numel();
  }

 private:
  explicit SparseTensorImpl(
      at::DispatchKeySet,
      const caffe2::TypeMeta,
      at::Tensor indices,
      at::Tensor values);

  /**
   * Copy the tensor metadata fields (e.g. sizes / strides / storage pointer /
   * storage_offset) from one TensorImpl to another TensorImpl.
   *
   * For usage of `version_counter` and `allow_tensor_metadata_change`, see NOTE
   * [ TensorImpl Shallow-Copying ].
   */
  static void copy_tensor_metadata(
      const SparseTensorImpl* src_sparse_impl,
      SparseTensorImpl* dest_sparse_impl,
      c10::VariableVersion version_counter,
      bool allow_tensor_metadata_change) {
    TensorImpl::copy_tensor_metadata(
        src_sparse_impl,
        dest_sparse_impl,
        std::move(version_counter),
        allow_tensor_metadata_change);

    // Sparse-specific fields
    dest_sparse_impl->sparse_dim_ = src_sparse_impl->sparse_dim();
    dest_sparse_impl->dense_dim_ = src_sparse_impl->dense_dim();
    dest_sparse_impl->indices_ = src_sparse_impl->indices();
    dest_sparse_impl->values_ = src_sparse_impl->values();
    dest_sparse_impl->coalesced_ = src_sparse_impl->coalesced();
  }

  const char* tensorimpl_type_name() const override;
};

} // namespace at