_tensor_str.py

# mypy: allow-untyped-defs
import contextlib
import dataclasses
import math
import textwrap
from typing import Any, Dict, Optional

import torch
from torch import inf


@dataclasses.dataclass
class __PrinterOptions:
    precision: int = 4
    threshold: float = 1000
    edgeitems: int = 3
    linewidth: int = 80
    sci_mode: Optional[bool] = None


PRINT_OPTS = __PrinterOptions()


# We could use **kwargs, but this will give better docs
def set_printoptions(
    precision=None,
    threshold=None,
    edgeitems=None,
    linewidth=None,
    profile=None,
    sci_mode=None,
):
    r"""Set options for printing. Items shamelessly taken from NumPy.

    Args:
        precision: Number of digits of precision for floating point output
            (default = 4).
        threshold: Total number of array elements which trigger summarization
            rather than full `repr` (default = 1000).
        edgeitems: Number of array items in summary at beginning and end of
            each dimension (default = 3).
        linewidth: The number of characters per line for the purpose of
            inserting line breaks (default = 80). Thresholded matrices will
            ignore this parameter.
        profile: Sane defaults for pretty printing. Can override with any of
            the above options. (any one of `default`, `short`, `full`)
        sci_mode: Enable (True) or disable (False) scientific notation. If
            None (default) is specified, the value is chosen automatically by
            `torch._tensor_str._Formatter`.

    Example::

        >>> # Limit the precision of elements
        >>> torch.set_printoptions(precision=2)
        >>> torch.tensor([1.12345])
        tensor([1.12])
        >>> # Limit the number of elements shown
        >>> torch.set_printoptions(threshold=5)
        >>> torch.arange(10)
        tensor([0, 1, 2, ..., 7, 8, 9])
        >>> # Restore defaults
        >>> torch.set_printoptions(profile='default')
        >>> torch.tensor([1.12345])
        tensor([1.1235])
        >>> torch.arange(10)
        tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    """
    if profile is not None:
        if profile == "default":
            PRINT_OPTS.precision = 4
            PRINT_OPTS.threshold = 1000
            PRINT_OPTS.edgeitems = 3
            PRINT_OPTS.linewidth = 80
        elif profile == "short":
            PRINT_OPTS.precision = 2
            PRINT_OPTS.threshold = 1000
            PRINT_OPTS.edgeitems = 2
            PRINT_OPTS.linewidth = 80
        elif profile == "full":
            PRINT_OPTS.precision = 4
            PRINT_OPTS.threshold = inf
            PRINT_OPTS.edgeitems = 3
            PRINT_OPTS.linewidth = 80

    if precision is not None:
        PRINT_OPTS.precision = precision
    if threshold is not None:
        PRINT_OPTS.threshold = threshold
    if edgeitems is not None:
        PRINT_OPTS.edgeitems = edgeitems
    if linewidth is not None:
        PRINT_OPTS.linewidth = linewidth
    PRINT_OPTS.sci_mode = sci_mode


def get_printoptions() -> Dict[str, Any]:
    r"""Gets the current options for printing, as a dictionary that
    can be passed as ``**kwargs`` to set_printoptions().
    """
    return dataclasses.asdict(PRINT_OPTS)


@contextlib.contextmanager
def printoptions(**kwargs):
    r"""Context manager that temporarily changes the print options. Accepted
    arguments are the same as for :func:`set_printoptions`."""
    old_kwargs = get_printoptions()
    set_printoptions(**kwargs)
    try:
        yield
    finally:
        set_printoptions(**old_kwargs)
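
# Illustrative usage of the two helpers above (an editorial sketch, not part of
# the upstream module): `printoptions` saves the dict from `get_printoptions`,
# applies the overrides, and restores the saved options on exit, so temporary
# formatting changes cannot leak out of the `with` block.
#
#     from torch._tensor_str import get_printoptions, printoptions
#     with printoptions(precision=2, sci_mode=False):
#         print(torch.tensor([3.14159]))      # tensor([3.14])
#     get_printoptions()["precision"]         # back to 4, assuming the defaults beforehand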


def tensor_totype(t):
    dtype = torch.float if t.is_mps else torch.double
    return t.to(dtype=dtype)


class _Formatter:
    def __init__(self, tensor):
        self.floating_dtype = tensor.dtype.is_floating_point
        self.int_mode = True
        self.sci_mode = False
        self.max_width = 1

        with torch.no_grad():
            tensor_view = tensor.reshape(-1)

        if not self.floating_dtype:
            for value in tensor_view:
                value_str = f"{value}"
                self.max_width = max(self.max_width, len(value_str))
        else:
            nonzero_finite_vals = torch.masked_select(
                tensor_view, torch.isfinite(tensor_view) & tensor_view.ne(0)
            )

            if nonzero_finite_vals.numel() == 0:
                # no valid number, do nothing
                return

            # Convert to double for easy calculation. HalfTensor overflows with 1e8, and there's no div() on CPU.
            nonzero_finite_abs = tensor_totype(nonzero_finite_vals.abs())
            nonzero_finite_min = tensor_totype(nonzero_finite_abs.min())
            nonzero_finite_max = tensor_totype(nonzero_finite_abs.max())

            for value in nonzero_finite_vals:
                if value != torch.ceil(value):
                    self.int_mode = False
                    break

            if self.int_mode:
                # in int_mode for floats, all numbers are integers, and we append a decimal to nonfinites
                # to indicate that the tensor is of floating type. add 1 to the len to account for this.
                if (
                    nonzero_finite_max / nonzero_finite_min > 1000.0
                    or nonzero_finite_max > 1.0e8
                ):
                    self.sci_mode = True
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))
                else:
                    for value in nonzero_finite_vals:
                        value_str = f"{value:.0f}"
                        self.max_width = max(self.max_width, len(value_str) + 1)
            else:
                # Check if scientific representation should be used.
                if (
                    nonzero_finite_max / nonzero_finite_min > 1000.0
                    or nonzero_finite_max > 1.0e8
                    or nonzero_finite_min < 1.0e-4
                ):
                    self.sci_mode = True
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}e}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))
                else:
                    for value in nonzero_finite_vals:
                        value_str = f"{{:.{PRINT_OPTS.precision}f}}".format(value)
                        self.max_width = max(self.max_width, len(value_str))

        if PRINT_OPTS.sci_mode is not None:
            self.sci_mode = PRINT_OPTS.sci_mode

    def width(self):
        return self.max_width

    def format(self, value):
        if self.floating_dtype:
            if self.sci_mode:
                ret = f"{{:{self.max_width}.{PRINT_OPTS.precision}e}}".format(value)
            elif self.int_mode:
                ret = f"{value:.0f}"
                if not (math.isinf(value) or math.isnan(value)):
                    ret += "."
            else:
                ret = f"{{:.{PRINT_OPTS.precision}f}}".format(value)
        else:
            ret = f"{value}"
        return (self.max_width - len(ret)) * " " + ret
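
# A small sketch of how _Formatter decides on scientific notation (illustrative
# only): the constructor scans the nonzero finite values and enables sci_mode
# when the dynamic range is large (max/min > 1000, max > 1e8, or min < 1e-4),
# unless PRINT_OPTS.sci_mode overrides the choice.
#
#     fmt = _Formatter(torch.tensor([0.5, 2.0]))
#     fmt.sci_mode          # False: narrow dynamic range, fixed-point output
#     fmt = _Formatter(torch.tensor([1e-6, 1.0]))
#     fmt.sci_mode          # True: min < 1e-4
#     fmt.format(1e-6)      # fixed-width string such as '1.0000e-06'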


def _scalar_str(self, formatter1, formatter2=None):
    if formatter2 is not None:
        real_str = _scalar_str(self.real, formatter1)
        imag_str = (_scalar_str(self.imag, formatter2) + "j").lstrip()
        # handles negative numbers, +0.0, -0.0
        if imag_str[0] == "+" or imag_str[0] == "-":
            return real_str + imag_str
        else:
            return real_str + "+" + imag_str
    else:
        return formatter1.format(self.item())


def _vector_str(self, indent, summarize, formatter1, formatter2=None):
    # length includes spaces and comma between elements
    element_length = formatter1.width() + 2
    if formatter2 is not None:
        # width for imag_formatter + an extra j for complex
        element_length += formatter2.width() + 1

    elements_per_line = max(
        1, int(math.floor((PRINT_OPTS.linewidth - indent) / (element_length)))
    )

    def _val_formatter(val, formatter1=formatter1, formatter2=formatter2):
        if formatter2 is not None:
            real_str = formatter1.format(val.real)
            imag_str = (formatter2.format(val.imag) + "j").lstrip()
            # handles negative numbers, +0.0, -0.0
            if imag_str[0] == "+" or imag_str[0] == "-":
                return real_str + imag_str
            else:
                return real_str + "+" + imag_str
        else:
            return formatter1.format(val)

    if summarize and not PRINT_OPTS.edgeitems:
        # Deal with edge case that negative zero is zero
        data = ["..."]
    elif summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
        data = (
            [_val_formatter(val) for val in self[: PRINT_OPTS.edgeitems].tolist()]
            + [" ..."]
            + [_val_formatter(val) for val in self[-PRINT_OPTS.edgeitems :].tolist()]
        )
    else:
        data = [_val_formatter(val) for val in self.tolist()]

    data_lines = [
        data[i : i + elements_per_line] for i in range(0, len(data), elements_per_line)
    ]
    lines = [", ".join(line) for line in data_lines]
    return "[" + ("," + "\n" + " " * (indent + 1)).join(lines) + "]"


# formatter2 is only used for printing complex tensors.
# For complex tensors, formatter1 and formatter2 are the formatters for tensor.real
# and tensor.imag respectively
def _tensor_str_with_formatter(self, indent, summarize, formatter1, formatter2=None):
    dim = self.dim()

    if dim == 0:
        return _scalar_str(self, formatter1, formatter2)

    if dim == 1:
        return _vector_str(self, indent, summarize, formatter1, formatter2)

    if summarize and self.size(0) > 2 * PRINT_OPTS.edgeitems:
        slices = (
            [
                _tensor_str_with_formatter(
                    self[i], indent + 1, summarize, formatter1, formatter2
                )
                for i in range(0, PRINT_OPTS.edgeitems)
            ]
            + ["..."]
            + [
                _tensor_str_with_formatter(
                    self[i], indent + 1, summarize, formatter1, formatter2
                )
                for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))
            ]
        )
    else:
        slices = [
            _tensor_str_with_formatter(
                self[i], indent + 1, summarize, formatter1, formatter2
            )
            for i in range(0, self.size(0))
        ]

    tensor_str = ("," + "\n" * (dim - 1) + " " * (indent + 1)).join(slices)
    return "[" + tensor_str + "]"


def _tensor_str(self, indent):
    if self.numel() == 0:
        return "[]"

    if self.has_names():
        # There are two main codepaths (possibly more) that tensor printing goes through:
        # - tensor data can fit comfortably on screen
        # - tensor data needs to be summarized
        # Some of the codepaths don't fully support named tensors, so we send in
        # an unnamed tensor to the formatting code as a workaround.
        self = self.rename(None)

    summarize = self.numel() > PRINT_OPTS.threshold

    if self._is_zerotensor():
        self = self.clone()

    # handle the negative bit
    if self.is_neg():
        self = self.resolve_neg()

    if self.dtype in [
        torch.float16,
        torch.bfloat16,
        torch.float8_e5m2,
        torch.float8_e5m2fnuz,
        torch.float8_e4m3fn,
        torch.float8_e4m3fnuz,
    ]:
        self = self.float()

    if self.dtype is torch.complex32:
        self = self.cfloat()

    if self.dtype.is_complex:
        # handle the conjugate bit
        self = self.resolve_conj()
        real_formatter = _Formatter(
            get_summarized_data(self.real) if summarize else self.real
        )
        imag_formatter = _Formatter(
            get_summarized_data(self.imag) if summarize else self.imag
        )
        return _tensor_str_with_formatter(
            self, indent, summarize, real_formatter, imag_formatter
        )
    else:
        formatter = _Formatter(get_summarized_data(self) if summarize else self)
        return _tensor_str_with_formatter(self, indent, summarize, formatter)


def _add_suffixes(tensor_str, suffixes, indent, force_newline):
    tensor_strs = [tensor_str]
    last_line_len = len(tensor_str) - tensor_str.rfind("\n") + 1
    for suffix in suffixes:
        suffix_len = len(suffix)
        if force_newline or last_line_len + suffix_len + 2 > PRINT_OPTS.linewidth:
            tensor_strs.append(",\n" + " " * indent + suffix)
            last_line_len = indent + suffix_len
            force_newline = False
        else:
            tensor_strs.append(", " + suffix)
            last_line_len += suffix_len + 2
    tensor_strs.append(")")
    return "".join(tensor_strs)


def get_summarized_data(self):
    dim = self.dim()
    if dim == 0:
        return self
    if dim == 1:
        if self.size(0) > 2 * PRINT_OPTS.edgeitems:
            return torch.cat(
                (self[: PRINT_OPTS.edgeitems], self[-PRINT_OPTS.edgeitems :])
            )
        else:
            return self
    if not PRINT_OPTS.edgeitems:
        return self.new_empty([0] * self.dim())
    elif self.size(0) > 2 * PRINT_OPTS.edgeitems:
        start = [self[i] for i in range(0, PRINT_OPTS.edgeitems)]
        end = [self[i] for i in range(len(self) - PRINT_OPTS.edgeitems, len(self))]
        return torch.stack([get_summarized_data(x) for x in (start + end)])
    else:
        return torch.stack([get_summarized_data(x) for x in self])
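
# Illustrative behaviour of get_summarized_data: for a long 1-D tensor only the
# first and last PRINT_OPTS.edgeitems entries are kept, and higher-dimensional
# tensors are summarized recursively along dim 0 before formatting.
#
#     get_summarized_data(torch.arange(100))
#     # -> tensor([ 0,  1,  2, 97, 98, 99]) with the default edgeitems=3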


def _str_intern(inp, *, tensor_contents=None):
    if torch._C._functorch.is_functorch_wrapped_tensor(inp):
        return _functorch_wrapper_str_intern(inp, tensor_contents=tensor_contents)
    is_plain_tensor = type(inp) is torch.Tensor or type(inp) is torch.nn.Parameter
    if inp.is_nested:
        prefix = "nested_tensor("
    elif is_plain_tensor:
        prefix = "tensor("
    else:
        prefix = f"{type(inp).__name__}("
    indent = len(prefix)
    suffixes = []
    custom_contents_provided = tensor_contents is not None
    if custom_contents_provided:
        tensor_str = tensor_contents

    # This is used to extract the primal value and thus disable the forward AD
    # within this function.
    # TODO(albanD) This needs to be updated when more than one level is supported
    self, tangent = torch.autograd.forward_ad.unpack_dual(inp)

    # Note [Print tensor device]:
    # The general logic here is that we only print the device when it doesn't
    # match the device specified in the default tensor type.
    # Currently torch.set_default_tensor_type() only supports CPU/CUDA, thus
    # torch._C._get_default_device() only returns either cpu or cuda.
    # In other cases, we don't have a way to set them as default yet,
    # and we should always print out device for them.
    if (
        self.device.type != torch._C._get_default_device()
        or (
            self.device.type == "cuda"
            and torch.cuda.current_device() != self.device.index
        )
        or (self.device.type == "mps")
    ):
        suffixes.append("device='" + str(self.device) + "'")

    # Tensor printing performs tensor operations like slicing and indexing to put the data
    # into a representable format. On ipu/xla/lazy/mtia tensors these operations trigger
    # compilations, so we copy the tensor to the CPU before printing to avoid them.
    if self.device.type in ["xla", "lazy", "ipu", "mtia"]:
        self = self.to("cpu")

    # TODO: add an API to map real -> complex dtypes
    _default_complex_dtype = (
        torch.cdouble if torch.get_default_dtype() == torch.double else torch.cfloat
    )
    has_default_dtype = self.dtype in (
        torch.get_default_dtype(),
        _default_complex_dtype,
        torch.int64,
        torch.bool,
    )
    if self.is_sparse:
        suffixes.append("size=" + str(tuple(self.shape)))
        from torch._subclasses.fake_tensor import FakeTensor

        is_meta = self.is_meta or isinstance(self, FakeTensor)
        if not is_meta:
            suffixes.append("nnz=" + str(self._nnz()))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        if not custom_contents_provided:
            indices_prefix = "indices=tensor("
            indices = self._indices().detach()
            if is_meta:
                indices_str = "..."
            else:
                indices_str = _tensor_str(indices, indent + len(indices_prefix))
            if is_meta or indices.numel() == 0:
                indices_str += ", size=" + str(tuple(indices.shape))
            values_prefix = "values=tensor("
            values = self._values().detach()
            if is_meta:
                values_str = "..."
            else:
                values_str = _tensor_str(values, indent + len(values_prefix))
            if is_meta or values.numel() == 0:
                values_str += ", size=" + str(tuple(values.shape))
            tensor_str = (
                indices_prefix
                + indices_str
                + "),\n"
                + " " * indent
                + values_prefix
                + values_str
                + ")"
            )
    elif self.layout in {
        torch.sparse_csr,
        torch.sparse_csc,
        torch.sparse_bsr,
        torch.sparse_bsc,
    }:
        from torch._subclasses.fake_tensor import FakeTensor

        suffixes.append("size=" + str(tuple(self.shape)))
        is_meta = self.is_meta or isinstance(self, FakeTensor)
        if not is_meta:
            suffixes.append("nnz=" + str(self._nnz()))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        if not custom_contents_provided:
            compressed_indices_method, plain_indices_method = {
                torch.sparse_csr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
                torch.sparse_csc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
                torch.sparse_bsr: (torch.Tensor.crow_indices, torch.Tensor.col_indices),
                torch.sparse_bsc: (torch.Tensor.ccol_indices, torch.Tensor.row_indices),
            }[self.layout]
            if self.layout in {torch.sparse_csr, torch.sparse_bsr}:
                cdimname, pdimname = "row", "column"
            else:
                cdimname, pdimname = "column", "row"
            compressed_indices_prefix = f"c{cdimname[:3]}_indices=tensor("
            compressed_indices = compressed_indices_method(self).detach()
            if is_meta:
                compressed_indices_str = "..."
            else:
                compressed_indices_str = _tensor_str(
                    compressed_indices, indent + len(compressed_indices_prefix)
                )
            if compressed_indices.numel() == 0 or is_meta:
                compressed_indices_str += ", size=" + str(
                    tuple(compressed_indices.shape)
                )
            plain_indices_prefix = f"{pdimname[:3]}_indices=tensor("
            plain_indices = plain_indices_method(self).detach()
            if is_meta:
                plain_indices_str = "..."
            else:
                plain_indices_str = _tensor_str(
                    plain_indices, indent + len(plain_indices_prefix)
                )
            if plain_indices.numel() == 0 or is_meta:
                plain_indices_str += ", size=" + str(tuple(plain_indices.shape))
            values_prefix = "values=tensor("
            values = self.values().detach()
            if is_meta:
                values_str = "..."
            else:
                values_str = _tensor_str(values, indent + len(values_prefix))
            if values.numel() == 0 or is_meta:
                values_str += ", size=" + str(tuple(values.shape))
            tensor_str = (
                compressed_indices_prefix
                + compressed_indices_str
                + "),\n"
                + " " * indent
                + plain_indices_prefix
                + plain_indices_str
                + "),\n"
                + " " * indent
                + values_prefix
                + values_str
                + ")"
            )
    elif self.is_quantized:
        suffixes.append("size=" + str(tuple(self.shape)))
        if not has_default_dtype:
            suffixes.append("dtype=" + str(self.dtype))
        suffixes.append("quantization_scheme=" + str(self.qscheme()))
        if (
            self.qscheme() == torch.per_tensor_affine
            or self.qscheme() == torch.per_tensor_symmetric
        ):
            suffixes.append("scale=" + str(self.q_scale()))
            suffixes.append("zero_point=" + str(self.q_zero_point()))
        elif (
            self.qscheme() == torch.per_channel_affine
            or self.qscheme() == torch.per_channel_symmetric
            or self.qscheme() == torch.per_channel_affine_float_qparams
        ):
            suffixes.append("scale=" + str(self.q_per_channel_scales()))
            suffixes.append("zero_point=" + str(self.q_per_channel_zero_points()))
            suffixes.append("axis=" + str(self.q_per_channel_axis()))
        if not custom_contents_provided:
            tensor_str = _tensor_str(self.dequantize(), indent)
    elif self.is_nested:
        if not custom_contents_provided:

            def indented_str(s, indent):
                return "\n".join(f" {line}" for line in s.split("\n"))

            strs = ",\n".join(
                indented_str(str(t), indent + 1)
                for t in torch.ops.aten.unbind.int(self, 0)
            )
            tensor_str = f"[\n{strs}\n]"
    elif torch._is_functional_tensor(self):
        prefix = "_to_functional_tensor("
        tensor_str = repr(torch._from_functional_tensor(self))
    else:
        # Circular import problem, so we import it here
        from torch._subclasses.fake_tensor import FakeTensor

        if self.is_meta or isinstance(self, FakeTensor):
            suffixes.append("size=" + str(tuple(self.shape)))
            if self.dtype != torch.get_default_dtype():
                suffixes.append("dtype=" + str(self.dtype))
            # TODO: This implies that ellipses is valid syntax for allocating
            # a meta tensor or FakeTensor, which it could be, but it isn't right now
            if not custom_contents_provided:
                tensor_str = "..."
        else:
            if self.numel() == 0 and not self.is_sparse:
                # Explicitly print the shape if it is not (0,), to match NumPy behavior
                if self.dim() != 1:
                    suffixes.append("size=" + str(tuple(self.shape)))

                # In an empty tensor, there are no elements to infer if the dtype
                # should be int64, so it must be shown explicitly.
                if self.dtype != torch.get_default_dtype():
                    suffixes.append("dtype=" + str(self.dtype))

                if not custom_contents_provided:
                    tensor_str = "[]"
            else:
                if not PRINT_OPTS.edgeitems:
                    suffixes.append("size=" + str(tuple(self.shape)))

                if not has_default_dtype:
                    suffixes.append("dtype=" + str(self.dtype))

                if not custom_contents_provided:
                    if self.layout != torch.strided:
                        tensor_str = _tensor_str(self.to_dense(), indent)
                    else:
                        tensor_str = _tensor_str(self, indent)

    if self.layout != torch.strided:
        suffixes.append("layout=" + str(self.layout))

    # Use inp here to get the original grad_fn and not the one generated by the forward grad
    # unpacking.
    grad_fn_name = None
    try:
        grad_fn = inp.grad_fn
    except RuntimeError:
        # Accessing the grad_fn calls rebasing logic which would cause an error
        # if that tensor is a view created in no-grad mode and modified in-place in
        # no-grad mode. See: https://github.com/pytorch/pytorch/issues/99968
        grad_fn_name = "Invalid"

    if grad_fn_name is None and grad_fn is not None:  # type: ignore[possibly-undefined]
        grad_fn_name = type(grad_fn).__name__
        if grad_fn_name == "CppFunction":
            grad_fn_name = grad_fn.name().rsplit("::", 1)[-1]

    if grad_fn_name is not None:
        suffixes.append(f"grad_fn=<{grad_fn_name}>")
    elif inp.requires_grad:
        suffixes.append("requires_grad=True")

    if self.has_names():
        suffixes.append(f"names={self.names}")

    if tangent is not None:
        suffixes.append(f"tangent={tangent}")

    string_repr = _add_suffixes(
        prefix + tensor_str, suffixes, indent, force_newline=self.is_sparse  # type: ignore[possibly-undefined]
    )

    # Check if this instance is flagged as a parameter and change the repr accordingly.
    # Unfortunately, this function has to be aware of this detail.
    # NB: This is currently skipped for plain tensor parameters to maintain BC. In the future,
    # this should be done for those as well to produce a valid repr.
    if isinstance(self, torch.nn.Parameter) and not is_plain_tensor:
        string_repr = f"Parameter({string_repr})"

    return string_repr


def _functorch_wrapper_str_intern(tensor, *, tensor_contents=None):
    level = torch._C._functorch.maybe_get_level(tensor)
    assert level != -1

    if torch._C._functorch.is_functionaltensor(tensor):
        # Since we're unwrapping the FunctionalTensorWrapper, we need to make sure
        # that it's up to date first
        torch._sync(tensor)

    value = torch._C._functorch.get_unwrapped(tensor)
    value_repr = repr(value)

    indented_value_repr = textwrap.indent(value_repr, " " * 4)
    if torch._C._functorch.is_batchedtensor(tensor):
        bdim = torch._C._functorch.maybe_get_bdim(tensor)
        assert bdim != -1
        return (
            f"BatchedTensor(lvl={level}, bdim={bdim}, value=\n"
            f"{indented_value_repr}\n"
            f")"
        )
    if torch._C._functorch.is_gradtrackingtensor(tensor):
        return (
            f"GradTrackingTensor(lvl={level}, value=\n" f"{indented_value_repr}\n" f")"
        )
    if torch._C._functorch.is_functionaltensor(tensor):
        return f"FunctionalTensor(lvl={level}, value=\\\n{value_repr})"

    raise ValueError("We don't know how to print this, please file us an issue")


def _str(self, *, tensor_contents=None):
    with torch.no_grad(), torch.utils._python_dispatch._disable_current_modes():
        guard = torch._C._DisableFuncTorch()
        return _str_intern(self, tensor_contents=tensor_contents)
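
# How the pieces fit together (an editorial note, not part of the upstream
# file): Tensor.__repr__ routes through _str -> _str_intern, which picks a
# prefix ("tensor(", "nested_tensor(", or the subclass name), renders the data
# with _tensor_str, and appends suffixes such as device, dtype, grad_fn or
# requires_grad via _add_suffixes. For example, with the default float32 dtype:
#
#     x = torch.ones(2, 2, dtype=torch.float64, requires_grad=True)
#     print(x)
#     # tensor([[1., 1.],
#     #         [1., 1.]], dtype=torch.float64, requires_grad=True)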