# mypy: allow-untyped-defs
import torch
import torch.distributed as dist
from torch.autograd.function import Function


class SyncBatchNorm(Function):
    @staticmethod
    def forward(
        self,
        input,
        weight,
        bias,
        running_mean,
        running_var,
        eps,
        momentum,
        process_group,
        world_size,
    ):
        if not (
            input.is_contiguous(memory_format=torch.channels_last)
            or input.is_contiguous(memory_format=torch.channels_last_3d)
        ):
            input = input.contiguous()
        if weight is not None:
            weight = weight.contiguous()

        size = int(input.numel() // input.size(1))
        if size == 1 and world_size < 2:
            raise ValueError(
                f"Expected more than 1 value per channel when training, got input size {size}"
            )

        num_channels = input.shape[1]
        if input.numel() > 0:
            # calculate mean/invstd for input.
            mean, invstd = torch.batch_norm_stats(input, eps)

            count = torch.full(
                (1,),
                input.numel() // input.size(1),
                dtype=mean.dtype,
                device=mean.device,
            )

            # C, C, 1 -> (2C + 1)
            combined = torch.cat([mean, invstd, count], dim=0)
        else:
            # For an empty input, set the stats and the count to zero. Stats with
            # a zero count are filtered out later when computing the global mean
            # & invstd, but they still need to participate in the all_gather
            # collective communication to unblock the other peer processes.
            combined = torch.zeros(
                2 * num_channels + 1, dtype=input.dtype, device=input.device
            )

        # Use all_gather instead of all_reduce because the count can differ across
        # ranks; a simple all_reduce cannot give correct results.
        # batch_norm_gather_stats_with_counts calculates the global mean & invstd
        # from the gathered means, invstds and counts.
        # For the NCCL backend, use the optimized flat all_gather; the Gloo backend
        # does not support `all_gather_into_tensor`.
        if process_group._get_backend_name() != "gloo":
            # world_size * (2C + 1)
            combined_size = combined.numel()
            combined_flat = torch.empty(
                1,
                combined_size * world_size,
                dtype=combined.dtype,
                device=combined.device,
            )
            dist.all_gather_into_tensor(
                combined_flat, combined, process_group, async_op=False
            )
            combined = torch.reshape(combined_flat, (world_size, combined_size))
            # world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1
            mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)
        else:
            # world_size * (2C + 1)
            combined_list = [torch.empty_like(combined) for _ in range(world_size)]
            dist.all_gather(combined_list, combined, process_group, async_op=False)
            combined = torch.stack(combined_list, dim=0)
            # world_size * (2C + 1) -> world_size * C, world_size * C, world_size * 1
            mean_all, invstd_all, count_all = torch.split(combined, num_channels, dim=1)

        if not (torch.cuda.is_available() and torch.cuda.is_current_stream_capturing()):
            # The lines below force a synchronization between CUDA and CPU, because
            # the shape of count_all depends on the values in the mask tensor.
            # Such synchronizations break CUDA Graph capturing.
            # See https://github.com/pytorch/pytorch/issues/78549
            # FIXME: https://github.com/pytorch/pytorch/issues/78656 describes
            #        a better longer-term solution.

            # remove stats from empty inputs
            mask = count_all.squeeze(-1) >= 1
            count_all = count_all[mask]
            mean_all = mean_all[mask]
            invstd_all = invstd_all[mask]

        # calculate global mean & invstd
        counts = count_all.view(-1)
        if running_mean is not None and counts.dtype != running_mean.dtype:
            counts = counts.to(running_mean.dtype)
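        # Hedged sketch of what batch_norm_gather_stats_with_counts computes below
        # (not the exact kernel): given per-rank stats (mean_i, invstd_i, count_i)
        # with var_i = 1 / invstd_i**2 - eps,
        #   mean   = sum_i(count_i * mean_i) / sum_i(count_i)
        #   var    = sum_i(count_i * (var_i + mean_i**2)) / sum_i(count_i) - mean**2
        #   invstd = 1 / sqrt(var + eps)
        # and, when provided, running_mean / running_var are updated in place using
        # `momentum`.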
        mean, invstd = torch.batch_norm_gather_stats_with_counts(
            input,
            mean_all,
            invstd_all,
            running_mean,
            running_var,
            momentum,
            eps,
            counts,
        )

        self.save_for_backward(input, weight, mean, invstd, count_all.to(torch.int32))
        self.process_group = process_group

        # apply element-wise normalization
        if input.numel() > 0:
            return torch.batch_norm_elemt(input, weight, bias, mean, invstd, eps)
        else:
            return torch.empty_like(input)

    @staticmethod
    def backward(self, grad_output):
        if not (
            grad_output.is_contiguous(memory_format=torch.channels_last)
            or grad_output.is_contiguous(memory_format=torch.channels_last_3d)
        ):
            grad_output = grad_output.contiguous()
        saved_input, weight, mean, invstd, count_tensor = self.saved_tensors
        grad_input = grad_weight = grad_bias = None
        process_group = self.process_group

        if saved_input.numel() > 0:
            # calculate local stats as well as grad_weight / grad_bias
            sum_dy, sum_dy_xmu, grad_weight, grad_bias = torch.batch_norm_backward_reduce(
                grad_output,
                saved_input,
                mean,
                invstd,
                weight,
                self.needs_input_grad[0],
                self.needs_input_grad[1],
                self.needs_input_grad[2],
            )

            if self.needs_input_grad[0]:
                # synchronizing stats used to calculate input gradient.
                num_channels = sum_dy.shape[0]
                combined = torch.cat([sum_dy, sum_dy_xmu], dim=0)
                torch.distributed.all_reduce(
                    combined, torch.distributed.ReduceOp.SUM, process_group, async_op=False
                )
                sum_dy, sum_dy_xmu = torch.split(combined, num_channels)

                # backward pass for gradient calculation
                if weight is not None and weight.dtype != mean.dtype:
                    weight = weight.to(mean.dtype)
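                # Hedged sketch of the math batch_norm_backward_elemt applies below
                # (not the exact kernel): with N = count_tensor.sum() and
                # x_hat = (x - mean) * invstd,
                #   dx = weight * invstd
                #        * (dy - sum_dy / N - x_hat * invstd * sum_dy_xmu / N)
                # where weight is treated as 1 when it is None.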
                grad_input = torch.batch_norm_backward_elemt(
                    grad_output,
                    saved_input,
                    mean,
                    invstd,
                    weight,
                    sum_dy,
                    sum_dy_xmu,
                    count_tensor,
                )
            # Synchronizing grad_weight / grad_bias is not needed, as distributed
            # training handles their all_reduce.
            if weight is None or not self.needs_input_grad[1]:
                grad_weight = None

            if weight is None or not self.needs_input_grad[2]:
                grad_bias = None
        else:
            # This process got an empty input tensor in the forward pass.
            # Although this process can directly set grad_input as an empty
            # tensor of zeros, it still needs to participate in the collective
            # communication to unblock its peers, as other peer processes might
            # have received non-empty inputs.
            num_channels = saved_input.shape[1]
            if self.needs_input_grad[0]:
                # launch all_reduce to unblock the other peer processes
                combined = torch.zeros(
                    2 * num_channels, dtype=saved_input.dtype, device=saved_input.device
                )
                torch.distributed.all_reduce(
                    combined, torch.distributed.ReduceOp.SUM, process_group, async_op=False
                )

        # Leave grad_input, grad_weight and grad_bias as None, which will be
        # interpreted by the autograd engine as Tensors full of zeros.
        return grad_input, grad_weight, grad_bias, None, None, None, None, None, None
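

# Illustrative usage sketch (not part of the original module): torch.nn.SyncBatchNorm
# is the intended caller of the Function above and passes the same nine arguments as
# forward(). The helper name below is hypothetical; the sketch assumes that
# torch.distributed has already been initialized and that `input` is a CUDA tensor
# with more than one value per channel.
def _example_sync_batch_norm(input, weight, bias, running_mean, running_var):
    process_group = dist.group.WORLD
    world_size = dist.get_world_size(process_group)
    return SyncBatchNorm.apply(
        input,         # (N, C, ...) CUDA tensor
        weight,        # per-channel scale (gamma), or None
        bias,          # per-channel shift (beta), or None
        running_mean,  # running stats, updated in place when provided
        running_var,
        1e-5,          # eps
        0.1,           # momentum
        process_group,
        world_size,
    )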


class CrossMapLRN2d(Function):
    @staticmethod
    def forward(ctx, input, size, alpha=1e-4, beta=0.75, k=1):
        ctx.size = size
        ctx.alpha = alpha
        ctx.beta = beta
        ctx.k = k
        ctx.scale = None

        if input.dim() != 4:
            raise ValueError(
                f"CrossMapLRN2d: Expected input to be 4D, got {input.dim()}D instead."
            )

        ctx.scale = ctx.scale or input.new()
        output = input.new()

        batch_size = input.size(0)
        channels = input.size(1)
        input_height = input.size(2)
        input_width = input.size(3)

        output.resize_as_(input)
        ctx.scale.resize_as_(input)

        # use the output storage as a temporary buffer
        input_square = output
        torch.pow(input, 2, out=input_square)

        pre_pad = int((ctx.size - 1) / 2 + 1)
        pre_pad_crop = min(pre_pad, channels)

        scale_first = ctx.scale.select(1, 0)
        scale_first.zero_()
        # compute the normalization for the first feature map
        for c in range(pre_pad_crop):
            scale_first.add_(input_square.select(1, c))

        # reuse computations for the next feature maps' normalization
        # by adding the next feature map and removing the previous one
        for c in range(1, channels):
            scale_previous = ctx.scale.select(1, c - 1)
            scale_current = ctx.scale.select(1, c)
            scale_current.copy_(scale_previous)
            if c < channels - pre_pad + 1:
                square_next = input_square.select(1, c + pre_pad - 1)
                scale_current.add_(square_next, alpha=1)

            if c > pre_pad:
                square_previous = input_square.select(1, c - pre_pad)
                scale_current.add_(square_previous, alpha=-1)

        ctx.scale.mul_(ctx.alpha / ctx.size).add_(ctx.k)

        torch.pow(ctx.scale, -ctx.beta, out=output)
        output.mul_(input)

        ctx.save_for_backward(input, output)
        return output

    @staticmethod
    def backward(ctx, grad_output):
        input, output = ctx.saved_tensors
        grad_input = grad_output.new()

        batch_size = input.size(0)
        channels = input.size(1)
        input_height = input.size(2)
        input_width = input.size(3)

        padded_ratio = input.new(channels + ctx.size - 1, input_height, input_width)
        accum_ratio = input.new(input_height, input_width)

        cache_ratio_value = 2 * ctx.alpha * ctx.beta / ctx.size
        inverse_pre_pad = int(ctx.size - (ctx.size - 1) / 2)

        grad_input.resize_as_(input)
        torch.pow(ctx.scale, -ctx.beta, out=grad_input).mul_(grad_output)

        padded_ratio.zero_()
        padded_ratio_center = padded_ratio.narrow(0, inverse_pre_pad, channels)
        for n in range(batch_size):
            torch.mul(grad_output[n], output[n], out=padded_ratio_center)
            padded_ratio_center.div_(ctx.scale[n])
            torch.sum(
                padded_ratio.narrow(0, 0, ctx.size - 1), 0, keepdim=False, out=accum_ratio
            )
            for c in range(channels):
                accum_ratio.add_(padded_ratio[c + ctx.size - 1])
                grad_input[n][c].addcmul_(
                    input[n][c], accum_ratio, value=-cache_ratio_value
                )
                accum_ratio.add_(padded_ratio[c], alpha=-1)

        return grad_input, None, None, None, None
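

# Illustrative usage sketch (not part of the original module): CrossMapLRN2d applies
# local response normalization across channels of a 4D (N, C, H, W) input, roughly
#   out[n, c] = in[n, c] * (k + alpha / size * sum of in[n, c']**2 over a window
#   of `size` neighboring channels) ** (-beta).
# The helper name below is hypothetical; it shows a minimal round trip through the
# hand-written forward and backward.
def _example_cross_map_lrn2d():
    x = torch.randn(2, 8, 5, 5, requires_grad=True)
    # size=5; alpha, beta and k fall back to the defaults declared in forward()
    y = CrossMapLRN2d.apply(x, 5)
    y.sum().backward()  # exercises the hand-written backward above
    return y, x.grad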


class BackwardHookFunction(torch.autograd.Function):
    @staticmethod
    def forward(ctx, *args):
        ctx.mark_non_differentiable(*[arg for arg in args if not arg.requires_grad])
        return args

    @staticmethod
    def backward(ctx, *args):
        return args
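

# Illustrative usage sketch (not part of the original module): BackwardHookFunction
# is an identity Function that threads tensors through the autograd graph; PyTorch's
# module full-backward-hook machinery builds on this kind of pass-through node to
# observe gradients at module boundaries. The helper name below is hypothetical.
def _example_backward_hook_function():
    a = torch.randn(3, requires_grad=True)
    b = torch.randn(3)  # requires_grad=False, so it is marked non-differentiable
    out_a, out_b = BackwardHookFunction.apply(a, b)
    out_a.sum().backward()  # gradients flow straight through the identity backward
    return a.grad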