DistributionTemplates.h

#pragma once

#include <ATen/core/Tensor.h>
#include <ATen/Dispatch.h>
#include <ATen/Dispatch_v2.h>
#include <ATen/Generator.h>
#include <ATen/ExpandUtils.h>
#include <ATen/Tensor.h>
#include <ATen/MemoryOverlap.h>
#include <ATen/NamedTensorUtils.h>
#include <ATen/native/Resize.h>
#include <ATen/native/TensorIterator.h>
#include <c10/util/Optional.h>
#include <limits>
#include <cmath>

#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/empty_like.h>
#include <ATen/ops/empty.h>
#include <ATen/ops/full.h>
#include <ATen/ops/view_as_real.h>
#endif

namespace at::native::templates {
// ==================================================== Random ========================================================

// The purpose of `update_from` and `update_to` is to find the closest valid int64_t number that can be used as actual `from`.
// The current implementation of `random_` uses uint64_t arithmetic and casts the result to the target dtype (scalar_t).
// This casting can result in generating numbers that happen to be greater than or equal to the `to` value. For instance:
//
// auto actual = torch::empty({3, 3}, torch::half);
// actual.random_(0, 65504);
//
// If random's uint64_t arithmetic produces 65503 as a random value, casting it to torch::half rounds it up to 65504,
// which violates the requirement that the random value must be less than `to`. To resolve this issue, `update_from` and
// `update_to` move `from` to the right and `to` to the left to the next closest values that won't fall outside [from, to)
// after casting to the target dtype. For `to` = 65504 the shift is (1 << (log2(to) - 11 + 1)) = 32, so `to` becomes 65472,
// which is the previous representable value for the torch::half dtype.
template<typename scalar_t>
int64_t update_from(int64_t from) {
  static_assert(
    std::is_floating_point<scalar_t>::value ||
    std::is_same<scalar_t, at::Half>::value ||
    std::is_same<scalar_t, at::BFloat16>::value, "scalar_t must be floating-point type");
  const auto from_plus_1 = static_cast<int64_t>(static_cast<scalar_t>(from + 1));
  if (from_plus_1 < from) {
    int64_t from_ = std::abs(from + 1);
    int n = 0;
    while (from_ >>= 1) ++n;
    // NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult)
    from = from_plus_1 + (1LL << (n - std::numeric_limits<scalar_t>::digits + 1));
  }
  return from;
}

template<typename scalar_t>
int64_t update_to(int64_t to) {
  static_assert(
    std::is_floating_point<scalar_t>::value ||
    std::is_same<scalar_t, at::Half>::value ||
    std::is_same<scalar_t, at::BFloat16>::value, "scalar_t must be floating-point type");
  const auto to_minus_1 = static_cast<int64_t>(static_cast<scalar_t>(to - 1));
  if (to_minus_1 >= to) {
    int64_t to_ = std::abs(to - 1);
    int n = 0;
    while (to_ >>= 1) ++n;
    // NOLINTNEXTLINE(clang-analyzer-core.UndefinedBinaryOperatorResult)
    to = to_minus_1 - (1LL << (n - std::numeric_limits<scalar_t>::digits + 1));
  }
  return to;
}
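
// Worked example of the arithmetic above (added for clarity; it mirrors the
// comment at the top of this section, so the numbers are grounded there):
//   update_to<at::Half>(65504):
//     to_minus_1 = int64_t(at::Half(65503)) == 65504 (65503 rounds up), so the branch is taken;
//     65503 has its highest set bit at position 15, so n == 15;
//     std::numeric_limits<at::Half>::digits == 11, step = 1LL << (15 - 11 + 1) == 32;
//     result: 65504 - 32 == 65472, the previous value representable in half.
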
// Return early to avoid invoking the kernel on empty tensors.
// See https://github.com/pytorch/pytorch/issues/103418 for more details
#define CHECK_EMPTY_AND_RETURN(tensor) \
  if (tensor.numel() == 0) {           \
    return tensor;                     \
  }

template<template<typename> class random_kernel, typename RNG>
at::Tensor& random_impl(at::Tensor& self, std::optional<Generator> generator) {
  CHECK_EMPTY_AND_RETURN(self);
  auto iter = at::TensorIterator::borrowing_nullary_op(self);
  random_kernel<RNG>()(iter, generator);
  return self;
}

#define CHECK_OUT_OF_BOUNDS(var, name, min, max, dtype) \
  TORCH_CHECK(var >= min && var <= max, name, " is out of bounds for ", dtype);

#define WARN_OUT_OF_BOUNDS(var, name, digits, dtype) \
  if (var < -(1LL << digits) || var > (1LL << digits)) { \
    TORCH_WARN(name, " is out of bounds [-(2^", digits, "), 2^", digits, "]. ", \
      "Due to precision limitations ", dtype, " can support discrete uniform distribution only within this range. ", \
      "This warning will become an error in the 1.7 release, please fix the code in advance"); \
  }
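
// Illustration of the digits-based bound (added for clarity, not upstream text):
// std::numeric_limits<float>::digits == 24, so a float can represent every integer
// in [-2^24, 2^24] exactly but starts skipping integers beyond that. Requesting
// random_ bounds outside this range on a float tensor therefore triggers the
// warning above, since some integers in the requested range could never be drawn.
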
static void check_from_to_in_range(int64_t from, int64_t to_inc, caffe2::TypeMeta dtype) {
  const auto scalar_type = typeMetaToScalarType(dtype);
  if (isFloatingType(scalar_type)) {
    AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, scalar_type, "check_random_fp_bounds", [&] {
      const auto min = static_cast<double>(std::numeric_limits<scalar_t>::lowest());
      const auto max = static_cast<double>(std::numeric_limits<scalar_t>::max());
      CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
      CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", min, max, dtype);
      constexpr auto digits = std::numeric_limits<scalar_t>::digits;
      WARN_OUT_OF_BOUNDS(from, "from", digits, dtype);
      WARN_OUT_OF_BOUNDS(to_inc, "to - 1", digits, dtype);
    });
  } else if (scalar_type == kUInt64) {
    // When you do a comparison between int64_t and uint64_t, the usual
    // arithmetic conversions say that the int64_t value is promoted to
    // unsigned. But this conversion wraps around: if I had -1 as my int64_t,
    // then it will promote to 0xFFFFFFFFFFFFFFFF in uint64_t. This is never
    // the right thing to do.
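    // A minimal illustration (added for clarity, not upstream text):
    //   int64_t i = -1;
    //   uint64_t u = 0;
    //   (i < u) evaluates to false, because i converts to 0xFFFFFFFFFFFFFFFF,
    //   even though mathematically -1 < 0.
    // Hence the bounds below are expressed entirely in int64_t terms.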
    CHECK_OUT_OF_BOUNDS(from, "from", 0, INT64_MAX, dtype);
    CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", 0, INT64_MAX, dtype);
  } else if (isIntegralType(scalar_type, /*includeBool=*/true)) {
    AT_DISPATCH_V2(scalar_type, "check_random_integral_bounds", AT_WRAP([&]() {
      const auto min = static_cast<int64_t>(std::numeric_limits<scalar_t>::lowest());
      const auto max = static_cast<int64_t>(std::numeric_limits<scalar_t>::max());
      CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
      CHECK_OUT_OF_BOUNDS(to_inc, "to - 1", min, max, dtype);
    }), AT_EXPAND(AT_INTEGRAL_TYPES), kUInt16, kUInt32, kBool);
  } else {
    TORCH_CHECK(false, "check_random_bounds handles only integral, floating-point and boolean types");
  }
}

template<template<typename> class random_from_to_kernel, typename RNG>
at::Tensor& random_from_to_impl(at::Tensor& self, int64_t from, std::optional<int64_t> to_opt, std::optional<Generator> generator) {
  uint64_t range = 0;
  auto iter = at::TensorIterator::borrowing_nullary_op(self);
  if (to_opt.has_value()) {
    // [from, to)
    int64_t to = *to_opt;
    TORCH_CHECK(from < to, "random_ expects 'from' to be less than 'to', but got from=", from, " >= to=", to);
    if (isFloatingType(iter.dtype())) {
      AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "random_update_from_to", [&] {
        from = update_from<scalar_t>(from);
        to = update_to<scalar_t>(to);
        TORCH_CHECK(from < to, "random_ expects 'from' casted to dtype to be less than 'to' casted to dtype, but got from=", from, " >= to=", to);
      });
    }
    check_from_to_in_range(from, to - 1, self.dtype());
    CHECK_EMPTY_AND_RETURN(self);
    range = static_cast<uint64_t>(to) - static_cast<uint64_t>(from);
    random_from_to_kernel<RNG>()(iter, range, from, generator);
  } else if (from != std::numeric_limits<int64_t>::lowest()) {
    // [from, std::numeric_limits<int64_t>::max()]
    int64_t to_inc = 0;
    if (isFloatingType(iter.dtype())) {
      AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "random_from_to_range_calc", [&] {
        constexpr int64_t scalar_t_max = static_cast<int64_t>(1) << std::numeric_limits<scalar_t>::digits;
        to_inc = scalar_t_max > std::numeric_limits<int64_t>::max() ? std::numeric_limits<int64_t>::max() : static_cast<int64_t>(scalar_t_max);
        from = update_from<scalar_t>(from);
        TORCH_CHECK(from < to_inc, "random_ expects 'from' casted to dtype to be less than 'to_inc' casted to dtype, but got from=", from, " >= to_inc=", to_inc);
      });
    } else if (isIntegralType(iter.dtype(), /*includeBool=*/true)) {
      AT_DISPATCH_V2(self.scalar_type(), "random_from_to_range_calc", AT_WRAP([&] {
        if constexpr (std::is_same_v<scalar_t, bool>) {
          to_inc = static_cast<int64_t>(true);
        } else {
          to_inc = static_cast<int64_t>(std::numeric_limits<scalar_t>::max());
        }
      }), AT_EXPAND(AT_INTEGRAL_TYPES_V2), kBool);
    } else {
      TORCH_CHECK(false, "random_from_to_impl handles only integral, floating-point and boolean types");
    }
    check_from_to_in_range(from, to_inc, self.dtype());
    CHECK_EMPTY_AND_RETURN(self);
    range = static_cast<uint64_t>(to_inc) - static_cast<uint64_t>(from) + 1;
    random_from_to_kernel<RNG>()(iter, range, from, generator);
  } else {
    // [std::numeric_limits<int64_t>::lowest(), std::numeric_limits<int64_t>::max()]
    // range = 2^64
    CHECK_EMPTY_AND_RETURN(self);
    random_from_to_kernel<RNG>()(iter, generator);
  }
  return self;
}
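
// A usage sketch of the three branches above (illustrative, not upstream text):
//
//   auto t = torch::empty({3}, torch::kLong);
//   t.random_(0, 10);           // 'to' given: samples from [0, 10) (branch one)
//   t.random_(5, std::nullopt); // no 'to': samples from [5, dtype max] (branch two)
//   t.random_(std::numeric_limits<int64_t>::lowest(), std::nullopt);
//                               // from == int64_t lowest: full 2^64 range (branch three)
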
// ==================================================== Normal ========================================================

#define CHECK_NORMAL_TENSOR_STD(std) \
  do { \
    TORCH_CHECK( \
      !std.is_complex(), \
      "normal expects standard deviation to be non-complex"); \
    TORCH_CHECK( \
      std.numel() == 0 || std.is_meta() || std.min().ge(0).item<bool>(), \
      "normal expects all elements of std >= 0.0"); \
  } while (0)

#define CHECK_NORMAL_STD(std) \
  TORCH_CHECK(std >= 0.0, "normal expects std >= 0.0, but found std ", std);

template<template<typename> class normal_kernel, typename RNG>
Tensor& normal_impl_(Tensor& self, double mean, double std, std::optional<Generator> gen) {
  CHECK_NORMAL_STD(std);
  CHECK_EMPTY_AND_RETURN(self);
  if (self.is_complex()) {
    auto float_tensor = at::view_as_real(self);
    // variance for normal distribution of the real and imaginary values
    // is half of the input variance
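    // (worked arithmetic, added for clarity: each component gets std/sqrt(2), so
    // Var(re) + Var(im) = std^2/2 + std^2/2 = std^2, the requested total variance)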
    normal_kernel<RNG>()(float_tensor, mean, std/(std::sqrt(2)), gen);
  } else {
    normal_kernel<RNG>()(self, mean, std, gen);
  }
  return self;
}

template<template<typename> class normal_kernel, typename RNG>
Tensor& normal_out_impl(Tensor& output, const Tensor& mean, double std, std::optional<Generator> gen) {
  CHECK_NORMAL_STD(std);
  auto std_tensor = at::empty_like(output, MemoryFormat::Contiguous);
  auto shape = at::infer_size(mean.sizes(), std_tensor.sizes());
  at::native::resize_output(output, shape);
  normal_impl_<normal_kernel, RNG>(output, 0, std, gen);
  output.add_(mean);
  return output;
}

template<template<typename> class normal_kernel, typename RNG>
Tensor& normal_out_impl(Tensor& output, double mean, const Tensor& std, std::optional<Generator> gen) {
  CHECK_NORMAL_TENSOR_STD(std);
  auto mean_tensor = at::full({}, mean, output.options());
  auto shape = at::infer_size(mean_tensor.sizes(), std.sizes());
  at::native::resize_output(output, shape);
  normal_impl_<normal_kernel, RNG>(output, 0, 1, gen);
  // CUDA NB: addcmul_out copies the tensor to be added into the output.
  // The previous function here was addcmul_out(output, mean_tensor, output, std, 1);
  // The third argument is not a constant reference and hence the samples in output are overwritten.
  // Consequently, the computation performed is mean_tensor + mean_tensor * std instead of mean_tensor + output * std
  output.mul_(std).add_(mean_tensor);
  return output;
}

template<template<typename> class normal_kernel, typename RNG>
Tensor& normal_out_impl(Tensor& output, const Tensor& mean, const Tensor& std, std::optional<Generator> gen) {
  CHECK_NORMAL_TENSOR_STD(std);
  auto shape = at::infer_size(mean.sizes(), std.sizes());
  at::native::resize_output(output, shape);
  normal_impl_<normal_kernel, RNG>(output, 0, 1, gen);
  // CUDA NB: addcmul_out copies the tensor to be added into the output.
  // The previous function here was addcmul_out(output, mean, output, std, 1);
  // The third argument is not a constant reference and hence the samples in output are overwritten.
  // Consequently, the computation performed is mean + mean * std instead of mean + output * std
  output.mul_(std).add_(mean);
  return output;
}

template<template<typename> class normal_kernel, typename RNG>
Tensor normal_impl(const Tensor& mean, double std, std::optional<Generator> gen) {
  CHECK_NORMAL_STD(std);
  Tensor ret = at::empty_like(mean, MemoryFormat::Contiguous);
  normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
  return ret;
}

template<template<typename> class normal_kernel, typename RNG>
Tensor normal_impl(double mean, const Tensor& std, std::optional<Generator> gen) {
  CHECK_NORMAL_TENSOR_STD(std);
  Tensor ret = at::empty_like(std, MemoryFormat::Contiguous);
  normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
  return ret;
}

template<template<typename> class normal_kernel, typename RNG>
Tensor normal_impl(const Tensor& mean, const Tensor& std, std::optional<Generator> gen) {
  CHECK_NORMAL_TENSOR_STD(std);
  auto shape = at::infer_size(mean.sizes(), std.sizes());
  Tensor ret = at::empty(shape, mean.options(), MemoryFormat::Contiguous);
  normal_out_impl<normal_kernel, RNG>(ret, mean, std, gen);
  return ret;
}

// ==================================================== Uniform =======================================================

template<template<typename> class uniform_kernel, typename RNG>
at::Tensor& uniform_impl_(at::Tensor& self, double from, double to, std::optional<Generator> generator) {
  if (self.is_complex()) {
    CHECK_EMPTY_AND_RETURN(self);
    auto float_tensor = at::view_as_real(self);
    uniform_impl_<uniform_kernel, RNG>(float_tensor, from, to, generator);
  } else {
    AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, self.scalar_type(), "check_uniform_bounds", [&] {
      const auto dtype = self.dtype();
      const auto min = static_cast<double>(std::numeric_limits<scalar_t>::lowest());
      const auto max = static_cast<double>(std::numeric_limits<scalar_t>::max());
      CHECK_OUT_OF_BOUNDS(from, "from", min, max, dtype);
      CHECK_OUT_OF_BOUNDS(to, "to", min, max, dtype);
      TORCH_CHECK(from <= to, "uniform_ expects to return a [from, to) range, but found from=", from, " > to=", to);
      TORCH_CHECK((to - from) <= std::numeric_limits<scalar_t>::max(),
            "uniform_ expects to-from <= std::numeric_limits<", toString(self.scalar_type()),
            ">::max(), but found to=", to, " and from=", from,
            " which results in to-from exceeding the limit");
      from = std::min(std::max(from, min), max);
      to = std::max(std::min(to, max), min);
    });
    CHECK_EMPTY_AND_RETURN(self);
    auto iter = at::TensorIterator::borrowing_nullary_op(self);
    uniform_kernel<RNG>()(iter, from, to, generator);
  }
  return self;
}
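
// Usage sketch (illustrative, not upstream text): for a complex tensor, uniform_
// fills the real and imaginary parts independently via the view_as_real recursion
// above, e.g.
//   auto c = torch::empty({2, 2}, torch::kComplexFloat);
//   c.uniform_(0.0, 1.0);  // both real and imaginary components land in [0, 1)
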
// ================================================== LogNormal =======================================================

template<template<typename> class log_normal_kernel, typename RNG>
at::Tensor& log_normal_impl_(at::Tensor& self, double mean, double std, std::optional<Generator> gen) {
  TORCH_CHECK(std > 0.0, "log_normal_ expects std > 0.0, but found std=", std);
  CHECK_EMPTY_AND_RETURN(self);
  auto iter = TensorIterator::borrowing_nullary_op(self);
  log_normal_kernel<RNG>()(iter, mean, std, gen);
  return self;
}

// =================================================== Geometric ======================================================

template<template<typename> class geometric_kernel, typename RNG>
Tensor& geometric_impl_(Tensor& self, double p, std::optional<Generator> gen) {
  TORCH_CHECK(0 < p && p < 1, "geometric_ expects p to be in (0, 1), but got p=", p);
  CHECK_EMPTY_AND_RETURN(self);
  auto iter = TensorIterator::borrowing_nullary_op(self);
  geometric_kernel<RNG>()(iter, p, gen);
  return self;
}

// ================================================== Exponential =====================================================

template<template<typename> class exponential_kernel, typename RNG>
Tensor& exponential_impl_(Tensor& self, double lambda, std::optional<Generator> gen) {
  TORCH_CHECK(lambda > 0.0, "exponential_ expects lambda > 0.0, but found lambda=", lambda);
  CHECK_EMPTY_AND_RETURN(self);
  auto iter = TensorIterator::borrowing_nullary_op(self);
  exponential_kernel<RNG>()(iter, lambda, gen);
  return self;
}

// ==================================================== Cauchy ========================================================

template<template<typename> class cauchy_kernel, typename RNG>
Tensor& cauchy_impl_(Tensor& self, double median, double sigma, std::optional<Generator> gen) {
  // TODO: instead of the variable name 'sigma', use 'gamma' or 'scale';
  // the variance (sigma squared) is undefined for the Cauchy distribution
  TORCH_CHECK(sigma > 0.0, "cauchy_ expects sigma > 0.0, but found sigma=", sigma);
  TORCH_CHECK(at::isFloatingType(self.scalar_type()), "Cauchy distribution is a continuous probability distribution. dtype must be a floating point but you specified ", self.dtype());
  CHECK_EMPTY_AND_RETURN(self);
  auto iter = TensorIterator::borrowing_nullary_op(self);
  cauchy_kernel<RNG>()(iter, median, sigma, gen);
  return self;
}

// ==================================================== Bernoulli =====================================================

template<template<typename> class bernoulli_tensor_kernel, typename RNG>
Tensor& bernoulli_impl_(Tensor& self, const Tensor& p_, std::optional<Generator> gen) {
  CHECK_EMPTY_AND_RETURN(self);
  NoNamesGuard guard;
  at::assert_no_internal_overlap(self);
  bernoulli_tensor_kernel<RNG>()(self, p_, gen);
  return self;
}

template<template<typename> class bernoulli_scalar_kernel, typename RNG>
Tensor& bernoulli_impl_(Tensor& self, double p, std::optional<Generator> gen) {
  TORCH_CHECK(0 <= p && p <= 1, "bernoulli_ expects p to be in [0, 1], but got p=", p);
  CHECK_EMPTY_AND_RETURN(self);
  at::assert_no_internal_overlap(self);
  bernoulli_scalar_kernel<RNG>()(self, p, gen);
  return self;
}

template<template<typename> class bernoulli_tensor_kernel, typename RNG>
Tensor& bernoulli_out_impl(Tensor& result, const Tensor& self, std::optional<Generator> gen) {
  // result.resize_as_(self) requires self to have the same dtype as result, so we
  // use resize_ instead.
  // TODO: Fix resize_as_. See pytorch/pytorch#11665.
  result.resize_(self.sizes());
  bernoulli_impl_<bernoulli_tensor_kernel, RNG>(result, self, gen);
  namedinference::propagate_names(result, self);
  return result;
}

#undef CHECK_OUT_OF_BOUNDS
#undef WARN_OUT_OF_BOUNDS

} // namespace at::native::templates