# parametrize.py
# mypy: allow-untyped-defs
import torch
from torch.__future__ import get_swap_module_params_on_conversion
from torch.nn.modules.container import ModuleList, ModuleDict, Module
from torch.nn.parameter import Parameter
from torch.utils._python_dispatch import is_traceable_wrapper_subclass
from torch import Tensor

import collections
import copyreg
from copy import deepcopy
from contextlib import contextmanager
from typing import Union, Optional, Dict, Tuple, Sequence

__all__ = ['cached', 'ParametrizationList', 'register_parametrization', 'is_parametrized', 'remove_parametrizations',
           'type_before_parametrizations', 'transfer_parametrizations_and_params']

_cache_enabled = 0
_cache: Dict[Tuple[int, str], Optional[Tensor]] = {}


@contextmanager
def cached():
    r"""Context manager that enables the caching system within parametrizations registered with :func:`register_parametrization`.

    The value of the parametrized objects is computed and cached the first time
    they are required when this context manager is active. The cached values are
    discarded when leaving the context manager.

    This is useful when using a parametrized parameter more than once in the forward pass.
    An example of this is when parametrizing the recurrent kernel of an RNN or when
    sharing weights.

    The simplest way to activate the cache is by wrapping the forward pass of the neural network

    .. code-block:: python

        import torch.nn.utils.parametrize as P
        ...
        with P.cached():
            output = model(inputs)

    in training and evaluation. One may also wrap the parts of the modules that use
    the parametrized tensors several times. For example, the loop of an RNN with a
    parametrized recurrent kernel:

    .. code-block:: python

        with P.cached():
            for x in xs:
                out_rnn = self.rnn_cell(x, out_rnn)
    """
    global _cache
    global _cache_enabled
    _cache_enabled += 1
    try:
        yield
    finally:
        _cache_enabled -= 1
        if not _cache_enabled:
            _cache = {}


def _register_parameter_or_buffer(module, name, X):
    if isinstance(X, Parameter):
        module.register_parameter(name, X)
    else:
        module.register_buffer(name, X)


def _maybe_set(dest: Tensor, src: Tensor) -> None:
    should_swap = get_swap_module_params_on_conversion() or is_traceable_wrapper_subclass(dest)
    if should_swap:
        if isinstance(dest, Parameter) and not isinstance(src, Parameter):
            src = Parameter(src, requires_grad=dest.requires_grad)
        torch.utils.swap_tensors(dest, src)
    else:
        dest.set_(src)  # type: ignore[call-overload]


class ParametrizationList(ModuleList):
    r"""A sequential container that holds and manages the original parameters or buffers of a parametrized :class:`torch.nn.Module`.

    It is the type of ``module.parametrizations[tensor_name]`` when ``module[tensor_name]``
    has been parametrized with :func:`register_parametrization`.

    If the first registered parametrization has a ``right_inverse`` that returns one tensor or
    does not have a ``right_inverse`` (in which case we assume that ``right_inverse`` is the identity),
    it will hold the tensor under the name ``original``.
    If it has a ``right_inverse`` that returns more than one tensor, these will be registered as
    ``original0``, ``original1``, ...

    .. warning::
        This class is used internally by :func:`register_parametrization`. It is documented
        here for completeness. It shall not be instantiated by the user.

    Args:
        modules (sequence): sequence of modules representing the parametrizations
        original (Parameter or Tensor): parameter or buffer that is parametrized
        unsafe (bool): a boolean flag that denotes whether the parametrization
            may change the dtype and shape of the tensor. Default: `False`
            Warning: the parametrization is not checked for consistency upon registration.
            Enable this flag at your own risk.
    """

    original: Tensor
    unsafe: bool

    def __init__(
        self, modules: Sequence[Module], original: Union[Tensor, Parameter], unsafe: bool = False
    ) -> None:
        # We require this because we need to treat differently the first parametrization
        # This should never throw, unless this class is used from the outside
        if len(modules) == 0:
            raise ValueError("ParametrizationList requires one or more modules.")

        super().__init__(modules)
        self.unsafe = unsafe

        # In plain words:
        # module.weight must keep its dtype and shape.
        # Furthermore, if there is no right_inverse or the right_inverse returns a tensor,
        # this should be of the same dtype as the original tensor
        #
        # We check that the following invariants hold:
        #    X = module.weight
        #    Y = param.right_inverse(X)
        #    assert isinstance(Y, Tensor) or
        #           (isinstance(Y, collections.abc.Sequence) and all(isinstance(t, Tensor) for t in Y))
        #    Z = param(Y) if isinstance(Y, Tensor) else param(*Y)
        #    # Consistency checks
        #    assert X.dtype == Z.dtype and X.shape == Z.shape
        #    # If it has one input, this allows using set_ to move data to/from the original
        #    # tensor without changing its id (which is what the optimizer uses to track parameters)
        #    if isinstance(Y, Tensor)
        #        assert X.dtype == Y.dtype
        # Below we use original = X, new = Y

        original_shape = original.shape
        original_dtype = original.dtype

        # Compute new
        with torch.no_grad():
            new = original
            for module in reversed(self):  # type: ignore[call-overload]
                if hasattr(module, "right_inverse"):
                    try:
                        new = module.right_inverse(new)
                    except NotImplementedError:
                        pass
                # else, or if it throws, we assume that right_inverse is the identity

        if not isinstance(new, Tensor) and not isinstance(new, collections.abc.Sequence):
            raise ValueError("'right_inverse' must return a Tensor or a Sequence of tensors (list, tuple...). "
                             f"Got {type(new).__name__}")

        # Set the number of original tensors
        self.is_tensor = isinstance(new, Tensor)
        self.ntensors = 1 if self.is_tensor else len(new)

        # Register the tensor(s)
        if self.is_tensor:
            if original.dtype != new.dtype:
                raise ValueError(
                    "When `right_inverse` outputs one tensor, it may not change the dtype.\n"
                    f"original.dtype: {original.dtype}\n"
                    f"right_inverse(original).dtype: {new.dtype}"
                )
            # Write the new value into the original tensor in-place so that the user
            # does not need to re-register the parameter manually in the optimizer
            with torch.no_grad():
                _maybe_set(original, new)
            _register_parameter_or_buffer(self, "original", original)
        else:
            for i, originali in enumerate(new):
                if not isinstance(originali, Tensor):
                    raise ValueError("'right_inverse' must return a Tensor or a Sequence of tensors "
                                     "(list, tuple...). "
                                     f"Got element {i} of the sequence with type {type(originali).__name__}.")

                # If the original tensor was a Parameter that required grad, we expect the user to
                # add the new parameters to the optimizer after registering the parametrization
                # (this is documented)
                if isinstance(original, Parameter):
                    originali = Parameter(originali, original.requires_grad)
                originali.requires_grad_(original.requires_grad)
                _register_parameter_or_buffer(self, f"original{i}", originali)

        if not self.unsafe:
            # Consistency checks:
            # Since f : A -> B, right_inverse : B -> A, Z and original should live in B
            # Z = forward(right_inverse(original))
            Z = self()
            if not isinstance(Z, Tensor):
                raise ValueError(
                    f"A parametrization must return a tensor. Got {type(Z).__name__}."
                )
            if Z.dtype != original_dtype:
                raise ValueError(
                    "Registering a parametrization may not change the dtype of the tensor, unless `unsafe` flag is enabled.\n"
                    f"unparametrized dtype: {original_dtype}\n"
                    f"parametrized dtype: {Z.dtype}"
                )
            if Z.shape != original_shape:
                raise ValueError(
                    "Registering a parametrization may not change the shape of the tensor, unless `unsafe` flag is enabled.\n"
                    f"unparametrized shape: {original_shape}\n"
                    f"parametrized shape: {Z.shape}"
                )

    def right_inverse(self, value: Tensor) -> None:
        r"""Call the ``right_inverse`` methods of the parametrizations in the inverse registration order.

        Then, it stores the result in ``self.original`` if ``right_inverse`` outputs one tensor
        or in ``self.original0``, ``self.original1``, ... if it outputs several.

        Args:
            value (Tensor): Value with which to initialize the tensor
        """
        # All the exceptions in this function should almost never throw.
        # They could throw if, for example, right_inverse function returns a different
        # dtype when given a different input, which should most likely be caused by a
        # bug in the user's code
        with torch.no_grad():
            # See https://github.com/pytorch/pytorch/issues/53103
            for module in reversed(self):  # type: ignore[call-overload]
                if hasattr(module, "right_inverse"):
                    value = module.right_inverse(value)
                else:
                    raise RuntimeError(f"parametrization {type(module).__name__} does not implement "
                                       "right_inverse.")
            if self.is_tensor:
                # These exceptions should only throw when a right_inverse function does not
                # return the same dtype for every input, which should most likely be caused by a bug
                if not isinstance(value, Tensor):
                    raise ValueError(
                        f"`right_inverse` should return a tensor. Got {type(value).__name__}"
                    )
                if value.dtype != self.original.dtype:
                    raise ValueError(
                        f"The tensor returned by `right_inverse` has dtype {value.dtype} "
                        f"while `original` has dtype {self.original.dtype}"
                    )
                # We know that the result is going to have the same dtype
                _maybe_set(self.original, value)
            else:
                if not isinstance(value, collections.abc.Sequence):
                    raise ValueError(
                        "'right_inverse' must return a sequence of tensors. "
                        f"Got {type(value).__name__}."
                    )
                if len(value) != self.ntensors:
                    raise ValueError(
                        "'right_inverse' must return a sequence of tensors of length "
                        f"{self.ntensors}. Got a sequence of length {len(value)}."
                    )
                for i, tensor in enumerate(value):
                    original_i = getattr(self, f"original{i}")
                    if not isinstance(tensor, Tensor):
                        raise ValueError(
                            f"`right_inverse` must return a sequence of tensors. "
                            f"Got element {i} of type {type(tensor).__name__}"
                        )
                    if original_i.dtype != tensor.dtype:
                        raise ValueError(
                            f"Tensor {i} returned by `right_inverse` has dtype {tensor.dtype} "
                            f"while `original{i}` has dtype {original_i.dtype}"
                        )
                    _maybe_set(original_i, tensor)

    def forward(self) -> Tensor:
        if torch.jit.is_scripting():
            raise RuntimeError('Parametrization is not working with scripting.')
        # Unpack the originals for the first parametrization
        if self.is_tensor:
            x = self[0](self.original)
        else:
            originals = (getattr(self, f"original{i}") for i in range(self.ntensors))
            x = self[0](*originals)
        # It's not possible to call self[1:] here, so we have to be a bit more cryptic
        # Also we want to skip all non-integer keys
        curr_idx = 1
        while hasattr(self, str(curr_idx)):
            x = self[curr_idx](x)
            curr_idx += 1
        return x


def _inject_new_class(module: Module) -> None:
    r"""Set up a module to be parametrized.

    This works by substituting the class of the module by a class
    that extends it to be able to inject a property

    Args:
        module (nn.Module): module into which to inject the property
    """
    cls = module.__class__

    def default_deepcopy(self, memo):
        # Just emulate a standard deepcopy procedure when __deepcopy__ doesn't exist in the current class.
        obj = memo.get(id(self), None)
        if obj is not None:
            return obj
        replica = self.__new__(self.__class__)
        memo[id(self)] = replica
        replica.__dict__ = deepcopy(self.__dict__, memo)
        # Also save all slots if they exist.
        slots_to_save = copyreg._slotnames(self.__class__)  # type: ignore[attr-defined]
        for slot in slots_to_save:
            if hasattr(self, slot):
                setattr(replica, slot, deepcopy(getattr(self, slot), memo))
        return replica

    def getstate(self):
        raise RuntimeError(
            "Serialization of parametrized modules is only "
            "supported through state_dict(). See:\n"
            "https://pytorch.org/tutorials/beginner/saving_loading_models.html"
            "#saving-loading-a-general-checkpoint-for-inference-and-or-resuming-training"
        )

    dct = {"__getstate__": getstate}
    # We don't allow serialization of parametrized modules but should still allow deepcopying.
    # Default 'deepcopy' function invokes __deepcopy__ method instead of __getstate__ when it exists.
    if not hasattr(cls, "__deepcopy__"):
        dct["__deepcopy__"] = default_deepcopy  # type: ignore[assignment]

    param_cls = type(
        f"Parametrized{cls.__name__}",
        (cls,),
        dct,
    )

    module.__class__ = param_cls
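
# For illustration only (not part of the original module): after _inject_new_class runs on an
# nn.Linear instance, the instance's class becomes a dynamically created subclass whose only
# base is the original class, e.g.
#
#   >>> m = torch.nn.Linear(3, 3)
#   >>> _inject_new_class(m)
#   >>> type(m).__name__
#   'ParametrizedLinear'
#   >>> type(m).__bases__
#   (<class 'torch.nn.modules.linear.Linear'>,)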


def _inject_property(module: Module, tensor_name: str) -> None:
    r"""Injects a property into module[tensor_name].

    It assumes that the class in the module has already been modified from its
    original one using _inject_new_class and that the tensor under :attr:`tensor_name`
    has already been moved out

    Args:
        module (nn.Module): module into which to inject the property
        tensor_name (str): name of the property to create
    """
    # We check the precondition.
    # This should never fire if register_parametrization is correctly implemented
    assert not hasattr(module, tensor_name)

    @torch.jit.unused
    def get_cached_parametrization(parametrization) -> Tensor:
        global _cache
        key = (id(module), tensor_name)
        tensor = _cache.get(key)
        if tensor is None:
            tensor = parametrization()
            _cache[key] = tensor
        return tensor

    def get_parametrized(self) -> Tensor:
        if torch.jit.is_scripting():
            raise RuntimeError('Parametrization is not working with scripting.')
        parametrization = self.parametrizations[tensor_name]
        if _cache_enabled:
            if torch.jit.is_scripting():
                # Scripting
                raise RuntimeError('Caching is not implemented for scripting. '
                                   'Either disable caching or avoid scripting.')
            elif torch._C._get_tracing_state() is not None:
                # Tracing
                raise RuntimeError('Cannot trace a model while caching parametrizations.')
            else:
                return get_cached_parametrization(parametrization)
        else:
            # If caching is not active, this function just evaluates the parametrization
            return parametrization()

    def set_original(self, value: Tensor) -> None:
        if torch.jit.is_scripting():
            raise RuntimeError('Parametrization is not working with scripting.')
        self.parametrizations[tensor_name].right_inverse(value)

    setattr(module.__class__, tensor_name, property(get_parametrized, set_original))
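
# For illustration only (not part of the original module): once the property is injected,
# attribute access and assignment on the parametrized name round-trip through the
# parametrizations, e.g. (assuming a parametrization was registered on "weight"):
#
#   >>> m.weight          # calls get_parametrized -> m.parametrizations["weight"]()
#   >>> m.weight = value  # calls set_original -> m.parametrizations["weight"].right_inverse(value)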


def register_parametrization(
    module: Module, tensor_name: str, parametrization: Module, *, unsafe: bool = False,
) -> Module:
    r"""Register a parametrization to a tensor in a module.

    Assume that ``tensor_name="weight"`` for simplicity. When accessing ``module.weight``,
    the module will return the parametrized version ``parametrization(module.weight)``.
    If the original tensor requires a gradient, the backward pass will differentiate
    through :attr:`parametrization`, and the optimizer will update the tensor accordingly.

    The first time that a module registers a parametrization, this function will add an attribute
    ``parametrizations`` to the module of type :class:`~ParametrizationList`.

    The list of parametrizations on the tensor ``weight`` will be accessible under
    ``module.parametrizations.weight``.

    The original tensor will be accessible under
    ``module.parametrizations.weight.original``.

    Parametrizations may be concatenated by registering several parametrizations
    on the same attribute.

    The training mode of a registered parametrization is updated on registration
    to match the training mode of the host module.

    Parametrized parameters and buffers have an inbuilt caching system that can be activated
    using the context manager :func:`cached`.

    A :attr:`parametrization` may optionally implement a method with signature

    .. code-block:: python

        def right_inverse(self, X: Tensor) -> Union[Tensor, Sequence[Tensor]]

    This method is called on the unparametrized tensor when the first parametrization
    is registered to compute the initial value of the original tensor.
    If this method is not implemented, the original tensor will be just the unparametrized tensor.

    If all the parametrizations registered on a tensor implement `right_inverse` it is possible
    to initialize a parametrized tensor by assigning to it, as shown in the example below.

    It is possible for the first parametrization to depend on several inputs.
    This may be implemented returning a tuple of tensors from ``right_inverse``
    (see the example implementation of a ``RankOne`` parametrization below).

    In this case, the unconstrained tensors are also located under ``module.parametrizations.weight``
    with names ``original0``, ``original1``, ...

    .. note::

        If unsafe=False (default) both the forward and right_inverse methods will be called
        once to perform a number of consistency checks.
        If unsafe=True, then right_inverse will be called if the tensor is not parametrized,
        and nothing will be called otherwise.

    .. note::

        In most situations, ``right_inverse`` will be a function such that
        ``forward(right_inverse(X)) == X`` (see
        `right inverse <https://en.wikipedia.org/wiki/Inverse_function#Right_inverses>`_).
        Sometimes, when the parametrization is not surjective, it may be reasonable
        to relax this.

    .. warning::

        If a parametrization depends on several inputs, :func:`~register_parametrization`
        will register a number of new parameters. If such a parametrization is registered
        after the optimizer is created, these new parameters will need to be added manually
        to the optimizer. See :meth:`torch.optim.Optimizer.add_param_group`.

    Args:
        module (nn.Module): module on which to register the parametrization
        tensor_name (str): name of the parameter or buffer on which to register
            the parametrization
        parametrization (nn.Module): the parametrization to register

    Keyword args:
        unsafe (bool): a boolean flag that denotes whether the parametrization
            may change the dtype and shape of the tensor. Default: `False`
            Warning: the parametrization is not checked for consistency upon registration.
            Enable this flag at your own risk.

    Raises:
        ValueError: if the module does not have a parameter or a buffer named :attr:`tensor_name`

    Examples:
        >>> # xdoctest: +REQUIRES(env:TORCH_DOCTEST_LAPACK)
        >>> import torch
        >>> import torch.nn as nn
        >>> import torch.nn.utils.parametrize as P
        >>>
        >>> class Symmetric(nn.Module):
        >>>     def forward(self, X):
        >>>         return X.triu() + X.triu(1).T  # Return a symmetric matrix
        >>>
        >>>     def right_inverse(self, A):
        >>>         return A.triu()
        >>>
        >>> m = nn.Linear(5, 5)
        >>> P.register_parametrization(m, "weight", Symmetric())
        >>> print(torch.allclose(m.weight, m.weight.T))  # m.weight is now symmetric
        True
        >>> A = torch.rand(5, 5)
        >>> A = A + A.T  # A is now symmetric
        >>> m.weight = A  # Initialize the weight to be the symmetric matrix A
        >>> print(torch.allclose(m.weight, A))
        True

        >>> class RankOne(nn.Module):
        >>>     def forward(self, x, y):
        >>>         # Form a rank 1 matrix multiplying two vectors
        >>>         return x.unsqueeze(-1) @ y.unsqueeze(-2)
        >>>
        >>>     def right_inverse(self, Z):
        >>>         # Project Z onto the rank 1 matrices
        >>>         U, S, Vh = torch.linalg.svd(Z, full_matrices=False)
        >>>         # Return rescaled singular vectors
        >>>         s0_sqrt = S[0].sqrt().unsqueeze(-1)
        >>>         return U[..., :, 0] * s0_sqrt, Vh[..., 0, :] * s0_sqrt
        >>>
        >>> linear_rank_one = P.register_parametrization(nn.Linear(4, 4), "weight", RankOne())
        >>> print(torch.linalg.matrix_rank(linear_rank_one.weight).item())
        1
    """
    parametrization.train(module.training)
    if is_parametrized(module, tensor_name):
        # Correctness checks.
        # If A is the space of tensors with shape and dtype equal to module.weight
        # we check that parametrization.forward and parametrization.right_inverse are
        # functions from A to A
        if not unsafe:
            Y = getattr(module, tensor_name)
            X = parametrization(Y)
            if not isinstance(X, Tensor):
                raise ValueError(
                    f"A parametrization must return a tensor. Got {type(X).__name__}."
                )
            if X.dtype != Y.dtype:
                raise ValueError(
                    "Registering a parametrization may not change the dtype of the tensor, unless the `unsafe` flag is enabled.\n"
                    f"module.{tensor_name}.dtype: {Y.dtype}\n"
                    f"parametrization(module.{tensor_name}).dtype: {X.dtype}"
                )
            if X.shape != Y.shape:
                raise ValueError(
                    "Registering a parametrization may not change the shape of the tensor, unless the `unsafe` flag is enabled.\n"
                    f"module.{tensor_name}.shape: {Y.shape}\n"
                    f"parametrization(module.{tensor_name}).shape: {X.shape}"
                )
            if hasattr(parametrization, "right_inverse"):
                try:
                    Z = parametrization.right_inverse(X)  # type: ignore[operator]
                except NotImplementedError:
                    pass
                else:
                    if not isinstance(Z, Tensor):
                        raise ValueError(
                            f"parametrization.right_inverse must return a tensor. Got: {type(Z).__name__}"
                        )
                    if Z.dtype != Y.dtype:
                        raise ValueError(
                            "The tensor returned by parametrization.right_inverse must have the same dtype "
                            f"as module.{tensor_name}, unless the `unsafe` flag is enabled.\n"
                            f"module.{tensor_name}.dtype: {Y.dtype}\n"
                            f"returned dtype: {Z.dtype}"
                        )
                    if Z.shape != Y.shape:
                        raise ValueError(
                            "The tensor returned by parametrization.right_inverse must have the same shape "
                            f"as module.{tensor_name}, unless the `unsafe` flag is enabled.\n"
                            f"module.{tensor_name}.shape: {Y.shape}\n"
                            f"returned shape: {Z.shape}"
                        )
            # else right_inverse is assumed to be the identity

        # add the new parametrization to the parametrization list
        assert isinstance(module.parametrizations, ModuleDict)  # Make mypy happy
        module.parametrizations[tensor_name].append(parametrization)
        # If unsafe was True in previous parametrization, keep it enabled
        module.parametrizations[tensor_name].unsafe |= unsafe  # type: ignore[index, union-attr]
    elif tensor_name in module._buffers or tensor_name in module._parameters:
        # Set the parametrization mechanism
        # Fetch the original buffer or parameter
        original = getattr(module, tensor_name)
        # We create this early to check for possible errors
        parametrizations = ParametrizationList([parametrization], original, unsafe=unsafe)
        # Delete the previous parameter or buffer
        delattr(module, tensor_name)
        # If this is the first parametrization registered on the module,
        # we prepare the module to inject the property
        if not is_parametrized(module):
            # Change the class
            _inject_new_class(module)
            # Inject a ``ModuleDict`` into the instance under module.parametrizations
            module.parametrizations = ModuleDict()
        # Add a property into the class
        _inject_property(module, tensor_name)
        # Add a ParametrizationList
        assert isinstance(module.parametrizations, ModuleDict)  # Make mypy happy
        module.parametrizations[tensor_name] = parametrizations
    else:
        raise ValueError(
            f"Module '{module}' does not have a parameter, a buffer, or a "
            f"parametrized element with name '{tensor_name}'"
        )
    return module


def is_parametrized(module: Module, tensor_name: Optional[str] = None) -> bool:
    r"""Determine if a module has a parametrization.

    Args:
        module (nn.Module): module to query
        tensor_name (str, optional): name of the parameter in the module
            Default: ``None``
    Returns:
        ``True`` if :attr:`module` has a parametrization for the parameter named :attr:`tensor_name`,
        or if it has any parametrization when :attr:`tensor_name` is ``None``;
        otherwise ``False``
    """
    parametrizations = getattr(module, "parametrizations", None)
    if parametrizations is None or not isinstance(parametrizations, ModuleDict):
        return False
    if tensor_name is None:
        # Check that there is at least one parametrized buffer or Parameter
        return len(parametrizations) > 0
    else:
        return tensor_name in parametrizations
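
# For illustration only (not part of the original module): a minimal sketch of querying
# parametrization state, assuming ``import torch.nn as nn`` and the ``Symmetric``
# parametrization from the register_parametrization docstring above.
#
#   >>> m = nn.Linear(5, 5)
#   >>> is_parametrized(m)            # no parametrization registered yet
#   False
#   >>> m = register_parametrization(m, "weight", Symmetric())
#   >>> is_parametrized(m)            # the module has at least one parametrization
#   True
#   >>> is_parametrized(m, "weight")  # and "weight" in particular is parametrized
#   True
#   >>> is_parametrized(m, "bias")
#   False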


def remove_parametrizations(
    module: Module, tensor_name: str, leave_parametrized: bool = True
) -> Module:
    r"""Remove the parametrizations on a tensor in a module.

    - If ``leave_parametrized=True``, ``module[tensor_name]`` will be set to
      its current output. In this case, the parametrization shall not change the ``dtype``
      of the tensor.
    - If ``leave_parametrized=False``, ``module[tensor_name]`` will be set to
      the unparametrized tensor in ``module.parametrizations[tensor_name].original``.
      This is only possible when the parametrization depends on just one tensor.

    Args:
        module (nn.Module): module from which to remove the parametrization
        tensor_name (str): name of the parametrization to be removed
        leave_parametrized (bool, optional): leave the attribute :attr:`tensor_name` parametrized.
            Default: ``True``

    Returns:
        Module: module

    Raises:
        ValueError: if ``module[tensor_name]`` is not parametrized
        ValueError: if ``leave_parametrized=False`` and the parametrization depends on several tensors
    """
    if not is_parametrized(module, tensor_name):
        raise ValueError(f"Module {module} does not have a parametrization on {tensor_name}")

    # Fetch the original tensor
    assert isinstance(module.parametrizations, ModuleDict)  # Make mypy happy
    parametrizations = module.parametrizations[tensor_name]
    if parametrizations.is_tensor:
        original = parametrizations.original
        if leave_parametrized:
            with torch.no_grad():
                t = getattr(module, tensor_name)
            # We know they have the same dtype because we have checked this when registering the
            # parametrizations. As such, we can use set_
            # We do this so that the parameter does not change its id()
            # This way the user does not need to update the optimizer
            with torch.no_grad():
                if type(original) is torch.Tensor:
                    _maybe_set(original, t)
                else:
                    try:
                        _maybe_set(original, t)
                    except RuntimeError as e:
                        # TODO: Fix this for tensor subclasses that are parameters:
                        # RuntimeError: set_storage is not allowed on a Tensor created from .data or .detach().
                        raise RuntimeError("Calling remove_parametrizations() with leave_parametrized=True "
                                           "for a parameter that is an instance of a tensor subclass requires "
                                           "set_() to be implemented correctly for the tensor subclass. "
                                           "Alternatively, one can opt into the swap_tensors path. "
                                           "Either set leave_parametrized=False or provide a working implementation "
                                           "for set_() in the tensor subclass or set "
                                           "torch.__future__.set_swap_module_params_on_conversion(True).") from e
    else:
        if leave_parametrized:
            # We cannot use no_grad because we need to know whether one or more
            # original tensors required grad
            t = getattr(module, tensor_name)
            # We'll have to trust the user to add it to the optimizer
            original = Parameter(t) if t.requires_grad else t
        else:
            raise ValueError("Cannot leave unparametrized (`leave_parametrized=False`) a tensor "
                             "that is parametrized in terms of a sequence of tensors.")

    # Delete the property that manages the parametrization
    delattr(module.__class__, tensor_name)
    # Delete the ParametrizationList
    del module.parametrizations[tensor_name]

    # Restore the parameter / buffer into the main class
    _register_parameter_or_buffer(module, tensor_name, original)

    # Roll back the parametrized class if no other buffer or parameter
    # is currently parametrized in this class
    if not is_parametrized(module):
        delattr(module, "parametrizations")
        # Restore class
        orig_cls = module.__class__.__bases__[0]
        module.__class__ = orig_cls
    return module
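
# For illustration only (not part of the original module): a minimal sketch of removing a
# parametrization, assuming ``import torch.nn as nn`` and the ``Symmetric`` parametrization
# from the register_parametrization docstring above.
#
#   >>> m = nn.Linear(5, 5)
#   >>> m = register_parametrization(m, "weight", Symmetric())
#   >>> m = remove_parametrizations(m, "weight", leave_parametrized=True)
#   >>> is_parametrized(m)                     # m is a plain nn.Linear again
#   False
#   >>> torch.allclose(m.weight, m.weight.T)   # weight keeps the last parametrized (symmetric) value
#   True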


def type_before_parametrizations(module: Module) -> type:
    r"""Return the module type before parametrizations were applied, or the module's type if it is not parametrized.

    Args:
        module (nn.Module): module to get type of
    """
    if is_parametrized(module):
        return module.__class__.__bases__[0]
    else:
        return type(module)
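
# For illustration only (not part of the original module): the reported type ignores the
# injected Parametrized* subclass, e.g. assuming the ``Symmetric`` parametrization from the
# register_parametrization docstring above.
#
#   >>> m = nn.Linear(5, 5)
#   >>> m = register_parametrization(m, "weight", Symmetric())
#   >>> type(m).__name__
#   'ParametrizedLinear'
#   >>> type_before_parametrizations(m) is nn.Linear
#   True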


def transfer_parametrizations_and_params(
    from_module: Module, to_module: Module, tensor_name: Optional[str] = None
) -> Module:
    r"""Transfer parametrizations and the parameters they parametrize from :attr:`from_module` to :attr:`to_module`.

    If :attr:`tensor_name` is specified, only transfers the specified parameter, otherwise
    transfers all parametrized parameters. If those parameters do not exist in to_module, they will be created.
    Does nothing if from_module is not parametrized.

    Args:
        from_module (nn.Module): module to transfer from
        to_module (nn.Module): module to transfer to
        tensor_name (str, optional): parameter to transfer

    Returns:
        Module: to_module
    """
    if is_parametrized(from_module):
        assert isinstance(from_module.parametrizations, ModuleDict)  # for mypy

        # get list of all params or the single param to transfer
        parameters_to_transfer: Union[list, ModuleDict] = (
            from_module.parametrizations if tensor_name is None else [tensor_name]
        )

        assert hasattr(parameters_to_transfer, "__iter__")  # for mypy
        for parameter_name in parameters_to_transfer:

            # initialize the to-be-transferred param in to_module if it doesn't exist already
            if not hasattr(to_module, parameter_name):
                setattr(
                    to_module,
                    parameter_name,
                    Parameter(getattr(from_module, parameter_name)),
                )

            # apply the param's parametrizations to to_module
            for param_func in from_module.parametrizations[parameter_name]:
                register_parametrization(to_module, parameter_name, param_func)
            assert isinstance(to_module.parametrizations, ModuleDict)  # for mypy

            # make values match, original values can be stored in either original or
            # original0, original1..., need to check both cases
            if hasattr(from_module.parametrizations[parameter_name], "original"):
                to_module.parametrizations[parameter_name].original = \
                    from_module.parametrizations[parameter_name].original
            else:
                num = 0
                orig_num = "original" + str(num)
                # loop through each original# until all values have been set
                while hasattr(from_module.parametrizations[parameter_name], orig_num):
                    setattr(
                        to_module.parametrizations[parameter_name],
                        orig_num,
                        getattr(from_module.parametrizations[parameter_name], orig_num),
                    )
                    num = num + 1
                    orig_num = "original" + str(num)

    return to_module
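
# For illustration only (not part of the original module): a minimal sketch of transferring a
# parametrization between two modules, assuming ``import torch.nn as nn`` and the ``Symmetric``
# parametrization from the register_parametrization docstring above.
#
#   >>> src = nn.Linear(5, 5)
#   >>> dst = nn.Linear(5, 5)
#   >>> src = register_parametrization(src, "weight", Symmetric())
#   >>> dst = transfer_parametrizations_and_params(src, dst)
#   >>> is_parametrized(dst, "weight")
#   True
#   >>> torch.allclose(src.weight, dst.weight)  # dst now produces the same parametrized weight
#   True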