prune.py 56 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380
  1. # mypy: allow-untyped-defs
  2. r"""Pruning methods."""
  3. import numbers
  4. from abc import ABC, abstractmethod
  5. from collections.abc import Iterable
  6. from typing import Tuple
  7. import torch
class BasePruningMethod(ABC):
    r"""Abstract base class for creation of new pruning techniques.

    Provides a skeleton for customization requiring the overriding of methods
    such as :meth:`compute_mask` and :meth:`apply`.
    """

    # Name of the parameter within the owning module that this method prunes.
    # Set externally in apply(); methods below assume it has been set.
    _tensor_name: str

    def __call__(self, module, inputs):
        r"""Multiply the mask into original tensor and store the result.

        Multiplies the mask (stored in ``module[name + '_mask']``)
        into the original tensor (stored in ``module[name + '_orig']``)
        and stores the result into ``module[name]`` by using :meth:`apply_mask`.

        This is the forward pre-hook body: it re-materializes the pruned
        tensor before every forward pass.

        Args:
            module (nn.Module): module containing the tensor to prune
            inputs: not used.
        """
        setattr(module, self._tensor_name, self.apply_mask(module))

    @abstractmethod
    def compute_mask(self, t, default_mask):
        r"""Compute and returns a mask for the input tensor ``t``.

        Starting from a base ``default_mask`` (which should be a mask of ones
        if the tensor has not been pruned yet), generate a random mask to
        apply on top of the ``default_mask`` according to the specific pruning
        method recipe.

        Args:
            t (torch.Tensor): tensor representing the importance scores of the
                parameter to prune.
            default_mask (torch.Tensor): Base mask from previous pruning
                iterations, that need to be respected after the new mask is
                applied. Same dims as ``t``.

        Returns:
            mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t``
        """
        pass

    def apply_mask(self, module):
        r"""Simply handles the multiplication between the parameter being pruned and the generated mask.

        Fetches the mask and the original tensor from the module
        and returns the pruned version of the tensor.

        Args:
            module (nn.Module): module containing the tensor to prune

        Returns:
            pruned_tensor (torch.Tensor): pruned version of the input tensor
        """
        # to carry out the multiplication, the mask needs to have been computed,
        # so the pruning method must know what tensor it's operating on
        assert self._tensor_name is not None, f"Module {module} has to be pruned"  # this gets set in apply()
        mask = getattr(module, self._tensor_name + "_mask")
        orig = getattr(module, self._tensor_name + "_orig")
        # cast the mask to the parameter dtype so the product keeps orig's dtype
        pruned_tensor = mask.to(dtype=orig.dtype) * orig
        return pruned_tensor

    @classmethod
    def apply(cls, module, name, *args, importance_scores=None, **kwargs):
        r"""Add pruning on the fly and reparametrization of a tensor.

        Adds the forward pre-hook that enables pruning on the fly and
        the reparametrization of a tensor in terms of the original tensor
        and the pruning mask.

        Args:
            module (nn.Module): module containing the tensor to prune
            name (str): parameter name within ``module`` on which pruning
                will act.
            args: arguments passed on to a subclass of
                :class:`BasePruningMethod`
            importance_scores (torch.Tensor): tensor of importance scores (of
                same shape as module parameter) used to compute mask for pruning.
                The values in this tensor indicate the importance of the
                corresponding elements in the parameter being pruned.
                If unspecified or None, the parameter will be used in its place.
            kwargs: keyword arguments passed on to a subclass of a
                :class:`BasePruningMethod`
        """

        def _get_composite_method(cls, module, name, *args, **kwargs):
            # Check if a pruning method has already been applied to
            # `module[name]`. If so, store that in `old_method`.
            old_method = None
            found = 0
            # there should technically be only 1 hook with hook.name == name
            # assert this using `found`
            hooks_to_remove = []
            for k, hook in module._forward_pre_hooks.items():
                # if it exists, take existing thing, remove hook, then
                # go through normal thing
                if isinstance(hook, BasePruningMethod) and hook._tensor_name == name:
                    old_method = hook
                    hooks_to_remove.append(k)
                    found += 1
            assert (
                found <= 1
            ), f"Avoid adding multiple pruning hooks to the\
                same tensor {name} of module {module}. Use a PruningContainer."

            for k in hooks_to_remove:
                del module._forward_pre_hooks[k]

            # Apply the new pruning method, either from scratch or on top of
            # the previous one.
            method = cls(*args, **kwargs)  # new pruning
            # Have the pruning method remember what tensor it's been applied to
            method._tensor_name = name

            # combine `methods` with `old_method`, if `old_method` exists
            if old_method is not None:  # meaning that there was a hook
                # if the hook is already a pruning container, just add the
                # new pruning method to the container
                if isinstance(old_method, PruningContainer):
                    old_method.add_pruning_method(method)
                    method = old_method  # rename old_method --> method

                # if the hook is simply a single pruning method, create a
                # container, add the old pruning method and the new one
                elif isinstance(old_method, BasePruningMethod):
                    container = PruningContainer(old_method)
                    # Have the pruning method remember the name of its tensor
                    # setattr(container, '_tensor_name', name)
                    container.add_pruning_method(method)
                    method = container  # rename container --> method
            return method

        method = _get_composite_method(cls, module, name, *args, **kwargs)
        # at this point we have no forward_pre_hooks but we could have an
        # active reparametrization of the tensor if another pruning method
        # had been applied (in which case `method` would be a PruningContainer
        # and not a simple pruning method).

        # Pruning is to be applied to the module's tensor named `name`,
        # starting from the state it is found in prior to this iteration of
        # pruning. The pruning mask is calculated based on importances scores.

        orig = getattr(module, name)
        if importance_scores is not None:
            assert (
                importance_scores.shape == orig.shape
            ), f"importance_scores should have the same shape as parameter {name} of {module}"
        else:
            importance_scores = orig

        # If this is the first time pruning is applied, take care of moving
        # the original tensor to a new parameter called name + '_orig' and
        # and deleting the original parameter
        if not isinstance(method, PruningContainer):
            # copy `module[name]` to `module[name + '_orig']`
            module.register_parameter(name + "_orig", orig)
            # temporarily delete `module[name]`
            del module._parameters[name]
            default_mask = torch.ones_like(orig)  # temp
        # If this is not the first time pruning is applied, all of the above
        # has been done before in a previous pruning iteration, so we're good
        # to go
        else:
            default_mask = (
                getattr(module, name + "_mask")
                .detach()
                .clone(memory_format=torch.contiguous_format)
            )

        # Use try/except because if anything goes wrong with the mask
        # computation etc., you'd want to roll back.
        try:
            # get the final mask, computed according to the specific method
            mask = method.compute_mask(importance_scores, default_mask=default_mask)
            # reparameterize by saving mask to `module[name + '_mask']`...
            module.register_buffer(name + "_mask", mask)
            # ... and the new pruned tensor to `module[name]`
            setattr(module, name, method.apply_mask(module))
            # associate the pruning method to the module via a hook to
            # compute the function before every forward() (compile by run)
            module.register_forward_pre_hook(method)
        except Exception as e:
            # Roll back the reparametrization: restore `name` as a plain
            # parameter and drop the `_orig` copy registered above.
            if not isinstance(method, PruningContainer):
                orig = getattr(module, name + "_orig")
                module.register_parameter(name, orig)
                del module._parameters[name + "_orig"]
            raise e

        return method

    def prune(self, t, default_mask=None, importance_scores=None):
        r"""Compute and returns a pruned version of input tensor ``t``.

        According to the pruning rule specified in :meth:`compute_mask`.

        Args:
            t (torch.Tensor): tensor to prune (of same dimensions as
                ``default_mask``).
            importance_scores (torch.Tensor): tensor of importance scores (of
                same shape as ``t``) used to compute mask for pruning ``t``.
                The values in this tensor indicate the importance of the
                corresponding elements in the ``t`` that is being pruned.
                If unspecified or None, the tensor ``t`` will be used in its place.
            default_mask (torch.Tensor, optional): mask from previous pruning
                iteration, if any. To be considered when determining what
                portion of the tensor that pruning should act on. If None,
                default to a mask of ones.

        Returns:
            pruned version of tensor ``t``.
        """
        if importance_scores is not None:
            assert (
                importance_scores.shape == t.shape
            ), "importance_scores should have the same shape as tensor t"
        else:
            importance_scores = t
        default_mask = default_mask if default_mask is not None else torch.ones_like(t)
        return t * self.compute_mask(importance_scores, default_mask=default_mask)

    def remove(self, module):
        r"""Remove the pruning reparameterization from a module.

        The pruned parameter named ``name`` remains permanently pruned,
        and the parameter named ``name+'_orig'`` is removed from the parameter list.
        Similarly, the buffer named ``name+'_mask'`` is removed from the buffers.

        Note:
            Pruning itself is NOT undone or reversed!
        """
        # before removing pruning from a tensor, it has to have been applied
        assert (
            self._tensor_name is not None
        ), f"Module {module} has to be pruned before pruning can be removed"  # this gets set in apply()

        # to update module[name] to latest trained weights
        weight = self.apply_mask(module)  # masked weights

        # delete and reset
        if hasattr(module, self._tensor_name):
            delattr(module, self._tensor_name)
        orig = module._parameters[self._tensor_name + "_orig"]
        # permanently bake the mask into the surviving parameter
        orig.data = weight.data
        del module._parameters[self._tensor_name + "_orig"]
        del module._buffers[self._tensor_name + "_mask"]
        setattr(module, self._tensor_name, orig)
  219. class PruningContainer(BasePruningMethod):
  220. """Container holding a sequence of pruning methods for iterative pruning.
  221. Keeps track of the order in which pruning methods are applied and handles
  222. combining successive pruning calls.
  223. Accepts as argument an instance of a BasePruningMethod or an iterable of
  224. them.
  225. """
  226. def __init__(self, *args):
  227. self._pruning_methods: Tuple[BasePruningMethod, ...] = tuple()
  228. if not isinstance(args, Iterable): # only 1 item
  229. self._tensor_name = args._tensor_name
  230. self.add_pruning_method(args)
  231. elif len(args) == 1: # only 1 item in a tuple
  232. self._tensor_name = args[0]._tensor_name
  233. self.add_pruning_method(args[0])
  234. else: # manual construction from list or other iterable (or no args)
  235. for method in args:
  236. self.add_pruning_method(method)
  237. def add_pruning_method(self, method):
  238. r"""Add a child pruning ``method`` to the container.
  239. Args:
  240. method (subclass of BasePruningMethod): child pruning method
  241. to be added to the container.
  242. """
  243. # check that we're adding a pruning method to the container
  244. if not isinstance(method, BasePruningMethod) and method is not None:
  245. raise TypeError(
  246. f"{type(method)} is not a BasePruningMethod subclass"
  247. )
  248. elif method is not None and self._tensor_name != method._tensor_name:
  249. raise ValueError(
  250. "Can only add pruning methods acting on "
  251. f"the parameter named '{self._tensor_name}' to PruningContainer {self}."
  252. + f" Found '{method._tensor_name}'"
  253. )
  254. # if all checks passed, add to _pruning_methods tuple
  255. self._pruning_methods += (method,) # type: ignore[operator]
  256. def __len__(self):
  257. return len(self._pruning_methods)
  258. def __iter__(self):
  259. return iter(self._pruning_methods)
  260. def __getitem__(self, idx):
  261. return self._pruning_methods[idx]
  262. def compute_mask(self, t, default_mask):
  263. r"""Apply the latest ``method`` by computing the new partial masks and returning its combination with the ``default_mask``.
  264. The new partial mask should be computed on the entries or channels
  265. that were not zeroed out by the ``default_mask``.
  266. Which portions of the tensor ``t`` the new mask will be calculated from
  267. depends on the ``PRUNING_TYPE`` (handled by the type handler):
  268. * for 'unstructured', the mask will be computed from the raveled
  269. list of nonmasked entries;
  270. * for 'structured', the mask will be computed from the nonmasked
  271. channels in the tensor;
  272. * for 'global', the mask will be computed across all entries.
  273. Args:
  274. t (torch.Tensor): tensor representing the parameter to prune
  275. (of same dimensions as ``default_mask``).
  276. default_mask (torch.Tensor): mask from previous pruning iteration.
  277. Returns:
  278. mask (torch.Tensor): new mask that combines the effects
  279. of the ``default_mask`` and the new mask from the current
  280. pruning ``method`` (of same dimensions as ``default_mask`` and
  281. ``t``).
  282. """
  283. def _combine_masks(method, t, mask):
  284. r"""Combine the masks from all pruning methods and returns a new mask.
  285. Args:
  286. method (a BasePruningMethod subclass): pruning method
  287. currently being applied.
  288. t (torch.Tensor): tensor representing the parameter to prune
  289. (of same dimensions as mask).
  290. mask (torch.Tensor): mask from previous pruning iteration
  291. Returns:
  292. new_mask (torch.Tensor): new mask that combines the effects
  293. of the old mask and the new mask from the current
  294. pruning method (of same dimensions as mask and t).
  295. """
  296. new_mask = mask # start off from existing mask
  297. new_mask = new_mask.to(dtype=t.dtype)
  298. # compute a slice of t onto which the new pruning method will operate
  299. if method.PRUNING_TYPE == "unstructured":
  300. # prune entries of t where the mask is 1
  301. slc = mask == 1
  302. # for struct pruning, exclude channels that have already been
  303. # entirely pruned
  304. elif method.PRUNING_TYPE == "structured":
  305. if not hasattr(method, "dim"):
  306. raise AttributeError(
  307. "Pruning methods of PRUNING_TYPE "
  308. '"structured" need to have the attribute `dim` defined.'
  309. )
  310. # find the channels to keep by removing the ones that have been
  311. # zeroed out already (i.e. where sum(entries) == 0)
  312. n_dims = t.dim() # "is this a 2D tensor? 3D? ..."
  313. dim = method.dim
  314. # convert negative indexing
  315. if dim < 0:
  316. dim = n_dims + dim
  317. # if dim is still negative after subtracting it from n_dims
  318. if dim < 0:
  319. raise IndexError(
  320. f"Index is out of bounds for tensor with dimensions {n_dims}"
  321. )
  322. # find channels along dim = dim that aren't already tots 0ed out
  323. keep_channel = mask.sum(dim=[d for d in range(n_dims) if d != dim]) != 0
  324. # create slice to identify what to prune
  325. slc = [slice(None)] * n_dims
  326. slc[dim] = keep_channel
  327. elif method.PRUNING_TYPE == "global":
  328. n_dims = len(t.shape) # "is this a 2D tensor? 3D? ..."
  329. slc = [slice(None)] * n_dims
  330. else:
  331. raise ValueError(
  332. f"Unrecognized PRUNING_TYPE {method.PRUNING_TYPE}"
  333. )
  334. # compute the new mask on the unpruned slice of the tensor t
  335. partial_mask = method.compute_mask(t[slc], default_mask=mask[slc])
  336. new_mask[slc] = partial_mask.to(dtype=new_mask.dtype)
  337. return new_mask
  338. method = self._pruning_methods[-1]
  339. mask = _combine_masks(method, t, default_mask)
  340. return mask
  341. class Identity(BasePruningMethod):
  342. r"""Utility pruning method that does not prune any units but generates the pruning parametrization with a mask of ones."""
  343. PRUNING_TYPE = "unstructured"
  344. def compute_mask(self, t, default_mask):
  345. mask = default_mask
  346. return mask
  347. @classmethod
  348. def apply(cls, module, name):
  349. r"""Add pruning on the fly and reparametrization of a tensor.
  350. Adds the forward pre-hook that enables pruning on the fly and
  351. the reparametrization of a tensor in terms of the original tensor
  352. and the pruning mask.
  353. Args:
  354. module (nn.Module): module containing the tensor to prune
  355. name (str): parameter name within ``module`` on which pruning
  356. will act.
  357. """
  358. return super().apply(module, name)
  359. class RandomUnstructured(BasePruningMethod):
  360. r"""Prune (currently unpruned) units in a tensor at random.
  361. Args:
  362. name (str): parameter name within ``module`` on which pruning
  363. will act.
  364. amount (int or float): quantity of parameters to prune.
  365. If ``float``, should be between 0.0 and 1.0 and represent the
  366. fraction of parameters to prune. If ``int``, it represents the
  367. absolute number of parameters to prune.
  368. """
  369. PRUNING_TYPE = "unstructured"
  370. def __init__(self, amount):
  371. # Check range of validity of pruning amount
  372. _validate_pruning_amount_init(amount)
  373. self.amount = amount
  374. def compute_mask(self, t, default_mask):
  375. # Check that the amount of units to prune is not > than the number of
  376. # parameters in t
  377. tensor_size = t.nelement()
  378. # Compute number of units to prune: amount if int,
  379. # else amount * tensor_size
  380. nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
  381. # This should raise an error if the number of units to prune is larger
  382. # than the number of units in the tensor
  383. _validate_pruning_amount(nparams_toprune, tensor_size)
  384. mask = default_mask.clone(memory_format=torch.contiguous_format)
  385. if nparams_toprune != 0: # k=0 not supported by torch.kthvalue
  386. prob = torch.rand_like(t)
  387. topk = torch.topk(prob.view(-1), k=nparams_toprune)
  388. mask.view(-1)[topk.indices] = 0
  389. return mask
  390. @classmethod
  391. def apply(cls, module, name, amount):
  392. r"""Add pruning on the fly and reparametrization of a tensor.
  393. Adds the forward pre-hook that enables pruning on the fly and
  394. the reparametrization of a tensor in terms of the original tensor
  395. and the pruning mask.
  396. Args:
  397. module (nn.Module): module containing the tensor to prune
  398. name (str): parameter name within ``module`` on which pruning
  399. will act.
  400. amount (int or float): quantity of parameters to prune.
  401. If ``float``, should be between 0.0 and 1.0 and represent the
  402. fraction of parameters to prune. If ``int``, it represents the
  403. absolute number of parameters to prune.
  404. """
  405. return super().apply(module, name, amount=amount)
  406. class L1Unstructured(BasePruningMethod):
  407. r"""Prune (currently unpruned) units in a tensor by zeroing out the ones with the lowest L1-norm.
  408. Args:
  409. amount (int or float): quantity of parameters to prune.
  410. If ``float``, should be between 0.0 and 1.0 and represent the
  411. fraction of parameters to prune. If ``int``, it represents the
  412. absolute number of parameters to prune.
  413. """
  414. PRUNING_TYPE = "unstructured"
  415. def __init__(self, amount):
  416. # Check range of validity of pruning amount
  417. _validate_pruning_amount_init(amount)
  418. self.amount = amount
  419. def compute_mask(self, t, default_mask):
  420. # Check that the amount of units to prune is not > than the number of
  421. # parameters in t
  422. tensor_size = t.nelement()
  423. # Compute number of units to prune: amount if int,
  424. # else amount * tensor_size
  425. nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
  426. # This should raise an error if the number of units to prune is larger
  427. # than the number of units in the tensor
  428. _validate_pruning_amount(nparams_toprune, tensor_size)
  429. mask = default_mask.clone(memory_format=torch.contiguous_format)
  430. if nparams_toprune != 0: # k=0 not supported by torch.kthvalue
  431. # largest=True --> top k; largest=False --> bottom k
  432. # Prune the smallest k
  433. topk = torch.topk(torch.abs(t).view(-1), k=nparams_toprune, largest=False)
  434. # topk will have .indices and .values
  435. mask.view(-1)[topk.indices] = 0
  436. return mask
  437. @classmethod
  438. def apply(cls, module, name, amount, importance_scores=None):
  439. r"""Add pruning on the fly and reparametrization of a tensor.
  440. Adds the forward pre-hook that enables pruning on the fly and
  441. the reparametrization of a tensor in terms of the original tensor
  442. and the pruning mask.
  443. Args:
  444. module (nn.Module): module containing the tensor to prune
  445. name (str): parameter name within ``module`` on which pruning
  446. will act.
  447. amount (int or float): quantity of parameters to prune.
  448. If ``float``, should be between 0.0 and 1.0 and represent the
  449. fraction of parameters to prune. If ``int``, it represents the
  450. absolute number of parameters to prune.
  451. importance_scores (torch.Tensor): tensor of importance scores (of same
  452. shape as module parameter) used to compute mask for pruning.
  453. The values in this tensor indicate the importance of the corresponding
  454. elements in the parameter being pruned.
  455. If unspecified or None, the module parameter will be used in its place.
  456. """
  457. return super().apply(
  458. module, name, amount=amount, importance_scores=importance_scores
  459. )
  460. class RandomStructured(BasePruningMethod):
  461. r"""Prune entire (currently unpruned) channels in a tensor at random.
  462. Args:
  463. amount (int or float): quantity of parameters to prune.
  464. If ``float``, should be between 0.0 and 1.0 and represent the
  465. fraction of parameters to prune. If ``int``, it represents the
  466. absolute number of parameters to prune.
  467. dim (int, optional): index of the dim along which we define
  468. channels to prune. Default: -1.
  469. """
  470. PRUNING_TYPE = "structured"
  471. def __init__(self, amount, dim=-1):
  472. # Check range of validity of amount
  473. _validate_pruning_amount_init(amount)
  474. self.amount = amount
  475. self.dim = dim
  476. def compute_mask(self, t, default_mask):
  477. r"""Compute and returns a mask for the input tensor ``t``.
  478. Starting from a base ``default_mask`` (which should be a mask of ones
  479. if the tensor has not been pruned yet), generate a random mask to
  480. apply on top of the ``default_mask`` by randomly zeroing out channels
  481. along the specified dim of the tensor.
  482. Args:
  483. t (torch.Tensor): tensor representing the parameter to prune
  484. default_mask (torch.Tensor): Base mask from previous pruning
  485. iterations, that need to be respected after the new mask is
  486. applied. Same dims as ``t``.
  487. Returns:
  488. mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t``
  489. Raises:
  490. IndexError: if ``self.dim >= len(t.shape)``
  491. """
  492. # Check that tensor has structure (i.e. more than 1 dimension) such
  493. # that the concept of "channels" makes sense
  494. _validate_structured_pruning(t)
  495. # Check that self.dim is a valid dim to index t, else raise IndexError
  496. _validate_pruning_dim(t, self.dim)
  497. # Check that the amount of channels to prune is not > than the number of
  498. # channels in t along the dim to prune
  499. tensor_size = t.shape[self.dim]
  500. # Compute number of units to prune: amount if int,
  501. # else amount * tensor_size
  502. nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
  503. # This should raise an error if the number of units to prune is larger
  504. # than the number of units in the tensor
  505. _validate_pruning_amount(nparams_toprune, tensor_size)
  506. # Compute binary mask by initializing it to all 0s and then filling in
  507. # 1s wherever topk.indices indicates, along self.dim.
  508. # mask has the same shape as tensor t
  509. def make_mask(t, dim, nchannels, nchannels_toprune):
  510. # generate a random number in [0, 1] to associate to each channel
  511. prob = torch.rand(nchannels)
  512. # generate mask for each channel by 0ing out the channels that
  513. # got assigned the k = nchannels_toprune lowest values in prob
  514. threshold = torch.kthvalue(prob, k=nchannels_toprune).values
  515. channel_mask = prob > threshold
  516. mask = torch.zeros_like(t)
  517. slc = [slice(None)] * len(t.shape)
  518. slc[dim] = channel_mask
  519. mask[slc] = 1
  520. return mask
  521. if nparams_toprune == 0: # k=0 not supported by torch.kthvalue
  522. mask = default_mask
  523. else:
  524. # apply the new structured mask on top of prior (potentially
  525. # unstructured) mask
  526. mask = make_mask(t, self.dim, tensor_size, nparams_toprune)
  527. mask *= default_mask.to(dtype=mask.dtype)
  528. return mask
  529. @classmethod
  530. def apply(cls, module, name, amount, dim=-1):
  531. r"""Add pruning on the fly and reparametrization of a tensor.
  532. Adds the forward pre-hook that enables pruning on the fly and
  533. the reparametrization of a tensor in terms of the original tensor
  534. and the pruning mask.
  535. Args:
  536. module (nn.Module): module containing the tensor to prune
  537. name (str): parameter name within ``module`` on which pruning
  538. will act.
  539. amount (int or float): quantity of parameters to prune.
  540. If ``float``, should be between 0.0 and 1.0 and represent the
  541. fraction of parameters to prune. If ``int``, it represents the
  542. absolute number of parameters to prune.
  543. dim (int, optional): index of the dim along which we define
  544. channels to prune. Default: -1.
  545. """
  546. return super().apply(module, name, amount=amount, dim=dim)
  547. class LnStructured(BasePruningMethod):
  548. r"""Prune entire (currently unpruned) channels in a tensor based on their L\ ``n``-norm.
  549. Args:
  550. amount (int or float): quantity of channels to prune.
  551. If ``float``, should be between 0.0 and 1.0 and represent the
  552. fraction of parameters to prune. If ``int``, it represents the
  553. absolute number of parameters to prune.
  554. n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid
  555. entries for argument ``p`` in :func:`torch.norm`.
  556. dim (int, optional): index of the dim along which we define
  557. channels to prune. Default: -1.
  558. """
  559. PRUNING_TYPE = "structured"
  560. def __init__(self, amount, n, dim=-1):
  561. # Check range of validity of amount
  562. _validate_pruning_amount_init(amount)
  563. self.amount = amount
  564. self.n = n
  565. self.dim = dim
  566. def compute_mask(self, t, default_mask):
  567. r"""Compute and returns a mask for the input tensor ``t``.
  568. Starting from a base ``default_mask`` (which should be a mask of ones
  569. if the tensor has not been pruned yet), generate a mask to apply on
  570. top of the ``default_mask`` by zeroing out the channels along the
  571. specified dim with the lowest L\ ``n``-norm.
  572. Args:
  573. t (torch.Tensor): tensor representing the parameter to prune
  574. default_mask (torch.Tensor): Base mask from previous pruning
  575. iterations, that need to be respected after the new mask is
  576. applied. Same dims as ``t``.
  577. Returns:
  578. mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t``
  579. Raises:
  580. IndexError: if ``self.dim >= len(t.shape)``
  581. """
  582. # Check that tensor has structure (i.e. more than 1 dimension) such
  583. # that the concept of "channels" makes sense
  584. _validate_structured_pruning(t)
  585. # Check that self.dim is a valid dim to index t, else raise IndexError
  586. _validate_pruning_dim(t, self.dim)
  587. # Check that the amount of channels to prune is not > than the number of
  588. # channels in t along the dim to prune
  589. tensor_size = t.shape[self.dim]
  590. # Compute number of units to prune: amount if int,
  591. # else amount * tensor_size
  592. nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
  593. nparams_tokeep = tensor_size - nparams_toprune
  594. # This should raise an error if the number of units to prune is larger
  595. # than the number of units in the tensor
  596. _validate_pruning_amount(nparams_toprune, tensor_size)
  597. # Structured pruning prunes entire channels so we need to know the
  598. # L_n norm along each channel to then find the topk based on this
  599. # metric
  600. norm = _compute_norm(t, self.n, self.dim)
  601. # largest=True --> top k; largest=False --> bottom k
  602. # Keep the largest k channels along dim=self.dim
  603. topk = torch.topk(norm, k=nparams_tokeep, largest=True)
  604. # topk will have .indices and .values
  605. # Compute binary mask by initializing it to all 0s and then filling in
  606. # 1s wherever topk.indices indicates, along self.dim.
  607. # mask has the same shape as tensor t
  608. def make_mask(t, dim, indices):
  609. # init mask to 0
  610. mask = torch.zeros_like(t)
  611. # e.g.: slc = [None, None, None], if len(t.shape) = 3
  612. slc = [slice(None)] * len(t.shape)
  613. # replace a None at position=dim with indices
  614. # e.g.: slc = [None, None, [0, 2, 3]] if dim=2 & indices=[0,2,3]
  615. slc[dim] = indices
  616. # use slc to slice mask and replace all its entries with 1s
  617. # e.g.: mask[:, :, [0, 2, 3]] = 1
  618. mask[slc] = 1
  619. return mask
  620. if nparams_toprune == 0: # k=0 not supported by torch.kthvalue
  621. mask = default_mask
  622. else:
  623. mask = make_mask(t, self.dim, topk.indices)
  624. mask *= default_mask.to(dtype=mask.dtype)
  625. return mask
  626. @classmethod
  627. def apply(cls, module, name, amount, n, dim, importance_scores=None):
  628. r"""Add pruning on the fly and reparametrization of a tensor.
  629. Adds the forward pre-hook that enables pruning on the fly and
  630. the reparametrization of a tensor in terms of the original tensor
  631. and the pruning mask.
  632. Args:
  633. module (nn.Module): module containing the tensor to prune
  634. name (str): parameter name within ``module`` on which pruning
  635. will act.
  636. amount (int or float): quantity of parameters to prune.
  637. If ``float``, should be between 0.0 and 1.0 and represent the
  638. fraction of parameters to prune. If ``int``, it represents the
  639. absolute number of parameters to prune.
  640. n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid
  641. entries for argument ``p`` in :func:`torch.norm`.
  642. dim (int): index of the dim along which we define channels to
  643. prune.
  644. importance_scores (torch.Tensor): tensor of importance scores (of same
  645. shape as module parameter) used to compute mask for pruning.
  646. The values in this tensor indicate the importance of the corresponding
  647. elements in the parameter being pruned.
  648. If unspecified or None, the module parameter will be used in its place.
  649. """
  650. return super().apply(
  651. module,
  652. name,
  653. amount=amount,
  654. n=n,
  655. dim=dim,
  656. importance_scores=importance_scores,
  657. )
  658. class CustomFromMask(BasePruningMethod):
  659. PRUNING_TYPE = "global"
  660. def __init__(self, mask):
  661. self.mask = mask
  662. def compute_mask(self, t, default_mask):
  663. assert default_mask.shape == self.mask.shape
  664. mask = default_mask * self.mask.to(dtype=default_mask.dtype)
  665. return mask
  666. @classmethod
  667. def apply(cls, module, name, mask):
  668. r"""Add pruning on the fly and reparametrization of a tensor.
  669. Adds the forward pre-hook that enables pruning on the fly and
  670. the reparametrization of a tensor in terms of the original tensor
  671. and the pruning mask.
  672. Args:
  673. module (nn.Module): module containing the tensor to prune
  674. name (str): parameter name within ``module`` on which pruning
  675. will act.
  676. """
  677. return super().apply(module, name, mask=mask)
  678. def identity(module, name):
  679. r"""Apply pruning reparametrization without pruning any units.
  680. Applies pruning reparametrization to the tensor corresponding to the
  681. parameter called ``name`` in ``module`` without actually pruning any
  682. units. Modifies module in place (and also return the modified module)
  683. by:
  684. 1) adding a named buffer called ``name+'_mask'`` corresponding to the
  685. binary mask applied to the parameter ``name`` by the pruning method.
  686. 2) replacing the parameter ``name`` by its pruned version, while the
  687. original (unpruned) parameter is stored in a new parameter named
  688. ``name+'_orig'``.
  689. Note:
  690. The mask is a tensor of ones.
  691. Args:
  692. module (nn.Module): module containing the tensor to prune.
  693. name (str): parameter name within ``module`` on which pruning
  694. will act.
  695. Returns:
  696. module (nn.Module): modified (i.e. pruned) version of the input module
  697. Examples:
  698. >>> # xdoctest: +SKIP
  699. >>> m = prune.identity(nn.Linear(2, 3), 'bias')
  700. >>> print(m.bias_mask)
  701. tensor([1., 1., 1.])
  702. """
  703. Identity.apply(module, name)
  704. return module
  705. def random_unstructured(module, name, amount):
  706. r"""Prune tensor by removing random (currently unpruned) units.
  707. Prunes tensor corresponding to parameter called ``name`` in ``module``
  708. by removing the specified ``amount`` of (currently unpruned) units
  709. selected at random.
  710. Modifies module in place (and also return the modified module) by:
  711. 1) adding a named buffer called ``name+'_mask'`` corresponding to the
  712. binary mask applied to the parameter ``name`` by the pruning method.
  713. 2) replacing the parameter ``name`` by its pruned version, while the
  714. original (unpruned) parameter is stored in a new parameter named
  715. ``name+'_orig'``.
  716. Args:
  717. module (nn.Module): module containing the tensor to prune
  718. name (str): parameter name within ``module`` on which pruning
  719. will act.
  720. amount (int or float): quantity of parameters to prune.
  721. If ``float``, should be between 0.0 and 1.0 and represent the
  722. fraction of parameters to prune. If ``int``, it represents the
  723. absolute number of parameters to prune.
  724. Returns:
  725. module (nn.Module): modified (i.e. pruned) version of the input module
  726. Examples:
  727. >>> # xdoctest: +SKIP
  728. >>> m = prune.random_unstructured(nn.Linear(2, 3), 'weight', amount=1)
  729. >>> torch.sum(m.weight_mask == 0)
  730. tensor(1)
  731. """
  732. RandomUnstructured.apply(module, name, amount)
  733. return module
  734. def l1_unstructured(module, name, amount, importance_scores=None):
  735. r"""Prune tensor by removing units with the lowest L1-norm.
  736. Prunes tensor corresponding to parameter called ``name`` in ``module``
  737. by removing the specified `amount` of (currently unpruned) units with the
  738. lowest L1-norm.
  739. Modifies module in place (and also return the modified module)
  740. by:
  741. 1) adding a named buffer called ``name+'_mask'`` corresponding to the
  742. binary mask applied to the parameter ``name`` by the pruning method.
  743. 2) replacing the parameter ``name`` by its pruned version, while the
  744. original (unpruned) parameter is stored in a new parameter named
  745. ``name+'_orig'``.
  746. Args:
  747. module (nn.Module): module containing the tensor to prune
  748. name (str): parameter name within ``module`` on which pruning
  749. will act.
  750. amount (int or float): quantity of parameters to prune.
  751. If ``float``, should be between 0.0 and 1.0 and represent the
  752. fraction of parameters to prune. If ``int``, it represents the
  753. absolute number of parameters to prune.
  754. importance_scores (torch.Tensor): tensor of importance scores (of same
  755. shape as module parameter) used to compute mask for pruning.
  756. The values in this tensor indicate the importance of the corresponding
  757. elements in the parameter being pruned.
  758. If unspecified or None, the module parameter will be used in its place.
  759. Returns:
  760. module (nn.Module): modified (i.e. pruned) version of the input module
  761. Examples:
  762. >>> # xdoctest: +SKIP
  763. >>> m = prune.l1_unstructured(nn.Linear(2, 3), 'weight', amount=0.2)
  764. >>> m.state_dict().keys()
  765. odict_keys(['bias', 'weight_orig', 'weight_mask'])
  766. """
  767. L1Unstructured.apply(
  768. module, name, amount=amount, importance_scores=importance_scores
  769. )
  770. return module
  771. def random_structured(module, name, amount, dim):
  772. r"""Prune tensor by removing random channels along the specified dimension.
  773. Prunes tensor corresponding to parameter called ``name`` in ``module``
  774. by removing the specified ``amount`` of (currently unpruned) channels
  775. along the specified ``dim`` selected at random.
  776. Modifies module in place (and also return the modified module)
  777. by:
  778. 1) adding a named buffer called ``name+'_mask'`` corresponding to the
  779. binary mask applied to the parameter ``name`` by the pruning method.
  780. 2) replacing the parameter ``name`` by its pruned version, while the
  781. original (unpruned) parameter is stored in a new parameter named
  782. ``name+'_orig'``.
  783. Args:
  784. module (nn.Module): module containing the tensor to prune
  785. name (str): parameter name within ``module`` on which pruning
  786. will act.
  787. amount (int or float): quantity of parameters to prune.
  788. If ``float``, should be between 0.0 and 1.0 and represent the
  789. fraction of parameters to prune. If ``int``, it represents the
  790. absolute number of parameters to prune.
  791. dim (int): index of the dim along which we define channels to prune.
  792. Returns:
  793. module (nn.Module): modified (i.e. pruned) version of the input module
  794. Examples:
  795. >>> # xdoctest: +SKIP
  796. >>> m = prune.random_structured(
  797. ... nn.Linear(5, 3), 'weight', amount=3, dim=1
  798. ... )
  799. >>> columns_pruned = int(sum(torch.sum(m.weight, dim=0) == 0))
  800. >>> print(columns_pruned)
  801. 3
  802. """
  803. RandomStructured.apply(module, name, amount, dim)
  804. return module
  805. def ln_structured(module, name, amount, n, dim, importance_scores=None):
  806. r"""Prune tensor by removing channels with the lowest L\ ``n``-norm along the specified dimension.
  807. Prunes tensor corresponding to parameter called ``name`` in ``module``
  808. by removing the specified ``amount`` of (currently unpruned) channels
  809. along the specified ``dim`` with the lowest L\ ``n``-norm.
  810. Modifies module in place (and also return the modified module)
  811. by:
  812. 1) adding a named buffer called ``name+'_mask'`` corresponding to the
  813. binary mask applied to the parameter ``name`` by the pruning method.
  814. 2) replacing the parameter ``name`` by its pruned version, while the
  815. original (unpruned) parameter is stored in a new parameter named
  816. ``name+'_orig'``.
  817. Args:
  818. module (nn.Module): module containing the tensor to prune
  819. name (str): parameter name within ``module`` on which pruning
  820. will act.
  821. amount (int or float): quantity of parameters to prune.
  822. If ``float``, should be between 0.0 and 1.0 and represent the
  823. fraction of parameters to prune. If ``int``, it represents the
  824. absolute number of parameters to prune.
  825. n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid
  826. entries for argument ``p`` in :func:`torch.norm`.
  827. dim (int): index of the dim along which we define channels to prune.
  828. importance_scores (torch.Tensor): tensor of importance scores (of same
  829. shape as module parameter) used to compute mask for pruning.
  830. The values in this tensor indicate the importance of the corresponding
  831. elements in the parameter being pruned.
  832. If unspecified or None, the module parameter will be used in its place.
  833. Returns:
  834. module (nn.Module): modified (i.e. pruned) version of the input module
  835. Examples:
  836. >>> from torch.nn.utils import prune
  837. >>> m = prune.ln_structured(
  838. ... nn.Conv2d(5, 3, 2), 'weight', amount=0.3, dim=1, n=float('-inf')
  839. ... )
  840. """
  841. LnStructured.apply(
  842. module, name, amount, n, dim, importance_scores=importance_scores
  843. )
  844. return module
  845. def global_unstructured(parameters, pruning_method, importance_scores=None, **kwargs):
  846. r"""
  847. Globally prunes tensors corresponding to all parameters in ``parameters`` by applying the specified ``pruning_method``.
  848. Modifies modules in place by:
  849. 1) adding a named buffer called ``name+'_mask'`` corresponding to the
  850. binary mask applied to the parameter ``name`` by the pruning method.
  851. 2) replacing the parameter ``name`` by its pruned version, while the
  852. original (unpruned) parameter is stored in a new parameter named
  853. ``name+'_orig'``.
  854. Args:
  855. parameters (Iterable of (module, name) tuples): parameters of
  856. the model to prune in a global fashion, i.e. by aggregating all
  857. weights prior to deciding which ones to prune. module must be of
  858. type :class:`nn.Module`, and name must be a string.
  859. pruning_method (function): a valid pruning function from this module,
  860. or a custom one implemented by the user that satisfies the
  861. implementation guidelines and has ``PRUNING_TYPE='unstructured'``.
  862. importance_scores (dict): a dictionary mapping (module, name) tuples to
  863. the corresponding parameter's importance scores tensor. The tensor
  864. should be the same shape as the parameter, and is used for computing
  865. mask for pruning.
  866. If unspecified or None, the parameter will be used in place of its
  867. importance scores.
  868. kwargs: other keyword arguments such as:
  869. amount (int or float): quantity of parameters to prune across the
  870. specified parameters.
  871. If ``float``, should be between 0.0 and 1.0 and represent the
  872. fraction of parameters to prune. If ``int``, it represents the
  873. absolute number of parameters to prune.
  874. Raises:
  875. TypeError: if ``PRUNING_TYPE != 'unstructured'``
  876. Note:
  877. Since global structured pruning doesn't make much sense unless the
  878. norm is normalized by the size of the parameter, we now limit the
  879. scope of global pruning to unstructured methods.
  880. Examples:
  881. >>> from torch.nn.utils import prune
  882. >>> from collections import OrderedDict
  883. >>> net = nn.Sequential(OrderedDict([
  884. ... ('first', nn.Linear(10, 4)),
  885. ... ('second', nn.Linear(4, 1)),
  886. ... ]))
  887. >>> parameters_to_prune = (
  888. ... (net.first, 'weight'),
  889. ... (net.second, 'weight'),
  890. ... )
  891. >>> prune.global_unstructured(
  892. ... parameters_to_prune,
  893. ... pruning_method=prune.L1Unstructured,
  894. ... amount=10,
  895. ... )
  896. >>> print(sum(torch.nn.utils.parameters_to_vector(net.buffers()) == 0))
  897. tensor(10)
  898. """
  899. # ensure parameters is a list or generator of tuples
  900. if not isinstance(parameters, Iterable):
  901. raise TypeError("global_unstructured(): parameters is not an Iterable")
  902. importance_scores = importance_scores if importance_scores is not None else {}
  903. if not isinstance(importance_scores, dict):
  904. raise TypeError("global_unstructured(): importance_scores must be of type dict")
  905. # flatten importance scores to consider them all at once in global pruning
  906. relevant_importance_scores = torch.nn.utils.parameters_to_vector(
  907. [
  908. importance_scores.get((module, name), getattr(module, name))
  909. for (module, name) in parameters
  910. ]
  911. )
  912. # similarly, flatten the masks (if they exist), or use a flattened vector
  913. # of 1s of the same dimensions as t
  914. default_mask = torch.nn.utils.parameters_to_vector(
  915. [
  916. getattr(module, name + "_mask", torch.ones_like(getattr(module, name)))
  917. for (module, name) in parameters
  918. ]
  919. )
  920. # use the canonical pruning methods to compute the new mask, even if the
  921. # parameter is now a flattened out version of `parameters`
  922. container = PruningContainer()
  923. container._tensor_name = "temp" # to make it match that of `method`
  924. method = pruning_method(**kwargs)
  925. method._tensor_name = "temp" # to make it match that of `container`
  926. if method.PRUNING_TYPE != "unstructured":
  927. raise TypeError(
  928. 'Only "unstructured" PRUNING_TYPE supported for '
  929. f"the `pruning_method`. Found method {pruning_method} of type {method.PRUNING_TYPE}"
  930. )
  931. container.add_pruning_method(method)
  932. # use the `compute_mask` method from `PruningContainer` to combine the
  933. # mask computed by the new method with the pre-existing mask
  934. final_mask = container.compute_mask(relevant_importance_scores, default_mask)
  935. # Pointer for slicing the mask to match the shape of each parameter
  936. pointer = 0
  937. for module, name in parameters:
  938. param = getattr(module, name)
  939. # The length of the parameter
  940. num_param = param.numel()
  941. # Slice the mask, reshape it
  942. param_mask = final_mask[pointer : pointer + num_param].view_as(param)
  943. # Assign the correct pre-computed mask to each parameter and add it
  944. # to the forward_pre_hooks like any other pruning method
  945. custom_from_mask(module, name, mask=param_mask)
  946. # Increment the pointer to continue slicing the final_mask
  947. pointer += num_param
  948. def custom_from_mask(module, name, mask):
  949. r"""Prune tensor corresponding to parameter called ``name`` in ``module`` by applying the pre-computed mask in ``mask``.
  950. Modifies module in place (and also return the modified module) by:
  951. 1) adding a named buffer called ``name+'_mask'`` corresponding to the
  952. binary mask applied to the parameter ``name`` by the pruning method.
  953. 2) replacing the parameter ``name`` by its pruned version, while the
  954. original (unpruned) parameter is stored in a new parameter named
  955. ``name+'_orig'``.
  956. Args:
  957. module (nn.Module): module containing the tensor to prune
  958. name (str): parameter name within ``module`` on which pruning
  959. will act.
  960. mask (Tensor): binary mask to be applied to the parameter.
  961. Returns:
  962. module (nn.Module): modified (i.e. pruned) version of the input module
  963. Examples:
  964. >>> from torch.nn.utils import prune
  965. >>> m = prune.custom_from_mask(
  966. ... nn.Linear(5, 3), name='bias', mask=torch.tensor([0, 1, 0])
  967. ... )
  968. >>> print(m.bias_mask)
  969. tensor([0., 1., 0.])
  970. """
  971. CustomFromMask.apply(module, name, mask)
  972. return module
  973. def remove(module, name):
  974. r"""Remove the pruning reparameterization from a module and the pruning method from the forward hook.
  975. The pruned parameter named ``name`` remains permanently pruned, and the parameter
  976. named ``name+'_orig'`` is removed from the parameter list. Similarly,
  977. the buffer named ``name+'_mask'`` is removed from the buffers.
  978. Note:
  979. Pruning itself is NOT undone or reversed!
  980. Args:
  981. module (nn.Module): module containing the tensor to prune
  982. name (str): parameter name within ``module`` on which pruning
  983. will act.
  984. Examples:
  985. >>> m = random_unstructured(nn.Linear(5, 7), name='weight', amount=0.2)
  986. >>> m = remove(m, name='weight')
  987. """
  988. for k, hook in module._forward_pre_hooks.items():
  989. if isinstance(hook, BasePruningMethod) and hook._tensor_name == name:
  990. hook.remove(module)
  991. del module._forward_pre_hooks[k]
  992. return module
  993. raise ValueError(
  994. f"Parameter '{name}' of module {module} has to be pruned before pruning can be removed"
  995. )
  996. def is_pruned(module):
  997. r"""Check if a module is pruned by looking for pruning pre-hooks.
  998. Check whether ``module`` is pruned by looking for
  999. ``forward_pre_hooks`` in its modules that inherit from the
  1000. :class:`BasePruningMethod`.
  1001. Args:
  1002. module (nn.Module): object that is either pruned or unpruned
  1003. Returns:
  1004. binary answer to whether ``module`` is pruned.
  1005. Examples:
  1006. >>> from torch.nn.utils import prune
  1007. >>> m = nn.Linear(5, 7)
  1008. >>> print(prune.is_pruned(m))
  1009. False
  1010. >>> prune.random_unstructured(m, name='weight', amount=0.2)
  1011. >>> print(prune.is_pruned(m))
  1012. True
  1013. """
  1014. for _, submodule in module.named_modules():
  1015. for hook in submodule._forward_pre_hooks.values():
  1016. if isinstance(hook, BasePruningMethod):
  1017. return True
  1018. return False
  1019. def _validate_pruning_amount_init(amount):
  1020. r"""Validate helper to check the range of amount at init.
  1021. Args:
  1022. amount (int or float): quantity of parameters to prune.
  1023. If float, should be between 0.0 and 1.0 and represent the
  1024. fraction of parameters to prune. If int, it represents the
  1025. absolute number of parameters to prune.
  1026. Raises:
  1027. ValueError: if amount is a float not in [0, 1], or if it's a negative
  1028. integer.
  1029. TypeError: if amount is neither a float nor an integer.
  1030. Note:
  1031. This does not take into account the number of parameters in the
  1032. tensor to be pruned, which is known only at prune.
  1033. """
  1034. if not isinstance(amount, numbers.Real):
  1035. raise TypeError(
  1036. f"Invalid type for amount: {amount}. Must be int or float."
  1037. )
  1038. if (isinstance(amount, numbers.Integral) and amount < 0) or (
  1039. not isinstance(amount, numbers.Integral) # so it's a float
  1040. and (float(amount) > 1.0 or float(amount) < 0.0)
  1041. ):
  1042. raise ValueError(
  1043. f"amount={amount} should either be a float in the range [0, 1] or a non-negative integer"
  1044. )
  1045. def _validate_pruning_amount(amount, tensor_size):
  1046. r"""Validate that the pruning amount is meaningful wrt to the size of the data.
  1047. Validation helper to check that the amount of parameters to prune
  1048. is meaningful wrt to the size of the data (`tensor_size`).
  1049. Args:
  1050. amount (int or float): quantity of parameters to prune.
  1051. If float, should be between 0.0 and 1.0 and represent the
  1052. fraction of parameters to prune. If int, it represents the
  1053. absolute number of parameters to prune.
  1054. tensor_size (int): absolute number of parameters in the tensor
  1055. to prune.
  1056. """
  1057. # TODO: consider removing this check and allowing users to specify
  1058. # a number of units to prune that is greater than the number of units
  1059. # left to prune. In this case, the tensor will just be fully pruned.
  1060. if isinstance(amount, numbers.Integral) and amount > tensor_size:
  1061. raise ValueError(
  1062. f"amount={amount} should be smaller than the number of parameters to prune={tensor_size}"
  1063. )
  1064. def _validate_structured_pruning(t):
  1065. r"""Validate that the tensor to be pruned is at least 2-Dimensional.
  1066. Validation helper to check that the tensor to be pruned is multi-
  1067. dimensional, such that the concept of "channels" is well-defined.
  1068. Args:
  1069. t (torch.Tensor): tensor representing the parameter to prune
  1070. Raises:
  1071. ValueError: if the tensor `t` is not at least 2D.
  1072. """
  1073. shape = t.shape
  1074. if len(shape) <= 1:
  1075. raise ValueError(
  1076. "Structured pruning can only be applied to "
  1077. "multidimensional tensors. Found tensor of shape "
  1078. f"{shape} with {len(shape)} dims"
  1079. )
  1080. def _compute_nparams_toprune(amount, tensor_size):
  1081. r"""Convert the pruning amount from a percentage to absolute value.
  1082. Since amount can be expressed either in absolute value or as a
  1083. percentage of the number of units/channels in a tensor, this utility
  1084. function converts the percentage to absolute value to standardize
  1085. the handling of pruning.
  1086. Args:
  1087. amount (int or float): quantity of parameters to prune.
  1088. If float, should be between 0.0 and 1.0 and represent the
  1089. fraction of parameters to prune. If int, it represents the
  1090. absolute number of parameters to prune.
  1091. tensor_size (int): absolute number of parameters in the tensor
  1092. to prune.
  1093. Returns:
  1094. int: the number of units to prune in the tensor
  1095. """
  1096. # incorrect type already checked in _validate_pruning_amount_init
  1097. if isinstance(amount, numbers.Integral):
  1098. return amount
  1099. else:
  1100. return round(amount * tensor_size)
  1101. def _validate_pruning_dim(t, dim):
  1102. r"""Validate that the pruning dimension is within the bounds of the tensor dimension.
  1103. Args:
  1104. t (torch.Tensor): tensor representing the parameter to prune
  1105. dim (int): index of the dim along which we define channels to prune
  1106. """
  1107. if dim >= t.dim():
  1108. raise IndexError(f"Invalid index {dim} for tensor of size {t.shape}")
  1109. def _compute_norm(t, n, dim):
  1110. r"""Compute the L_n-norm of a tensor along all dimensions except for the specified dimension.
  1111. The L_n-norm will be computed across all entries in tensor `t` along all dimension
  1112. except for the one identified by dim.
  1113. Example: if `t` is of shape, say, 3x2x4 and dim=2 (the last dim),
  1114. then norm will have Size [4], and each entry will represent the
  1115. `L_n`-norm computed using the 3x2=6 entries for each of the 4 channels.
  1116. Args:
  1117. t (torch.Tensor): tensor representing the parameter to prune
  1118. n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid
  1119. entries for argument p in torch.norm
  1120. dim (int): dim identifying the channels to prune
  1121. Returns:
  1122. norm (torch.Tensor): L_n norm computed across all dimensions except
  1123. for `dim`. By construction, `norm.shape = t.shape[-1]`.
  1124. """
  1125. # dims = all axes, except for the one identified by `dim`
  1126. dims = list(range(t.dim()))
  1127. # convert negative indexing
  1128. if dim < 0:
  1129. dim = dims[dim]
  1130. dims.remove(dim)
  1131. norm = torch.norm(t, p=n, dim=dims)
  1132. return norm