# mypy: allow-untyped-defs
import torch
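
# ScriptModule wrappers that hold their parameters as MKL-DNN (OneDNN) opaque
# tensors, so eval-mode models can run through OneDNN kernels. Use `to_mkldnn`
# at the bottom of this file to convert a dense model.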


class MkldnnLinear(torch.jit.ScriptModule):
    def __init__(self, dense_module, dtype):
        super().__init__()
        self.register_buffer('weight', dense_module.weight.to_mkldnn(dtype))
        if dense_module.bias is not None:
            # Bias can be fp32 or bf16 on the OneDNN bf16 path, but we keep it
            # fp32 for better accuracy.
            self.register_buffer('bias', dense_module.bias.to_mkldnn())
        else:
            # TODO: Remove this once ScriptModule supports registering None buffer
            self.register_buffer(
                'bias',
                torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn())

    @torch.jit.script_method
    def __getstate__(self):
        return (self.weight.to_dense(), self.bias.to_dense(), self.training)

    @torch.jit.script_method
    def __setstate__(self, state):
        self.weight = state[0].to_mkldnn()
        self.bias = state[1].to_mkldnn()
        self.training = state[2]

    @torch.jit.script_method
    def forward(self, x):
        x_mkldnn = x if x.is_mkldnn else x.to_mkldnn()
        y_mkldnn = torch._C._nn.mkldnn_linear(x_mkldnn, self.weight, self.bias)
        y = y_mkldnn if x.is_mkldnn else y_mkldnn.to_dense()
        return y
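

# A quick sanity sketch (illustrative, not part of the original module): for
# fp32, the wrapper should match the dense nn.Linear it was built from.
#
#   linear = torch.nn.Linear(20, 30).eval()
#   mkldnn_linear = MkldnnLinear(linear, torch.float)
#   x = torch.randn(5, 20)
#   assert torch.allclose(linear(x), mkldnn_linear(x), atol=1e-5)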


class _MkldnnConvNd(torch.jit.ScriptModule):
    """Common base of MkldnnConv1d, MkldnnConv2d, and MkldnnConv3d."""

    __constants__ = ['stride', 'padding', 'dilation', 'groups']

    def __init__(self, dense_module):
        super().__init__()

        self.stride = dense_module.stride
        self.padding = dense_module.padding
        self.dilation = dense_module.dilation
        self.groups = dense_module.groups

        if dense_module.bias is not None:
            self.register_buffer('bias', dense_module.bias.to_mkldnn())
        else:
            # Bias can be fp32 or bf16 on the OneDNN bf16 path, but we keep it
            # fp32 for better accuracy.
            # TODO: Remove this once ScriptModule supports registering None buffer
            self.register_buffer(
                'bias',
                torch.zeros([dense_module.weight.size(0)], dtype=torch.float).to_mkldnn())

    @torch.jit.script_method
    def __getstate__(self):
        return (self.weight.to_dense(), self.bias.to_dense(), self.training)

    @torch.jit.script_method
    def forward(self, x):
        return torch.mkldnn_convolution(
            x,
            self.weight,
            self.bias,
            self.padding,
            self.stride,
            self.dilation,
            self.groups)


class MkldnnConv1d(_MkldnnConvNd):
    def __init__(self, dense_module, dtype):
        super().__init__(dense_module)

        self.register_buffer('weight', dense_module.weight.to_mkldnn(dtype))

    @torch.jit.script_method
    def __setstate__(self, state):
        self.weight = state[0].to_mkldnn()
        self.bias = state[1].to_mkldnn()
        self.training = state[2]


class MkldnnConv2d(_MkldnnConvNd):
    def __init__(self, dense_module, dtype):
        super().__init__(dense_module)

        # Pre-reorder the weight into the blocked layout OneDNN picks for this
        # convolution config, so the reorder is not redone on every forward.
        self.register_buffer('weight', torch._C._nn.mkldnn_reorder_conv2d_weight(
            dense_module.weight.to_mkldnn(dtype),
            self.padding,
            self.stride,
            self.dilation,
            self.groups))

    @torch.jit.script_method
    def __setstate__(self, state):
        self.weight = torch._C._nn.mkldnn_reorder_conv2d_weight(
            state[0].to_mkldnn(),
            self.padding,
            self.stride,
            self.dilation,
            self.groups)
        self.bias = state[1].to_mkldnn()
        self.training = state[2]


class MkldnnConv3d(_MkldnnConvNd):
    def __init__(self, dense_module, dtype):
        super().__init__(dense_module)

        self.register_buffer('weight', torch._C._nn.mkldnn_reorder_conv3d_weight(
            dense_module.weight.to_mkldnn(dtype),
            self.padding,
            self.stride,
            self.dilation,
            self.groups))

    @torch.jit.script_method
    def __setstate__(self, state):
        self.weight = torch._C._nn.mkldnn_reorder_conv3d_weight(
            state[0].to_mkldnn(),
            self.padding,
            self.stride,
            self.dilation,
            self.groups)
        self.bias = state[1].to_mkldnn()
        self.training = state[2]


class MkldnnBatchNorm(torch.jit.ScriptModule):
    __constants__ = ['exponential_average_factor', 'eps']

    def __init__(self, dense_module):
        super().__init__()

        assert not dense_module.training
        assert dense_module.track_running_stats
        assert dense_module.affine

        if dense_module.momentum is None:
            self.exponential_average_factor = 0.0
        else:
            self.exponential_average_factor = dense_module.momentum
        self.eps = dense_module.eps

        self.register_buffer('weight', dense_module.weight.to_mkldnn())
        self.register_buffer('bias', dense_module.bias.to_mkldnn())
        self.register_buffer('running_mean', dense_module.running_mean.to_mkldnn())
        self.register_buffer('running_var', dense_module.running_var.to_mkldnn())

    @torch.jit.script_method
    def __getstate__(self):
        weight = self.weight.to_dense()
        bias = self.bias.to_dense()
        running_mean = self.running_mean.to_dense()
        running_var = self.running_var.to_dense()
        return (weight, bias, running_mean, running_var, self.training)

    @torch.jit.script_method
    def __setstate__(self, state):
        self.weight = state[0].to_mkldnn()
        self.bias = state[1].to_mkldnn()
        self.running_mean = state[2].to_mkldnn()
        self.running_var = state[3].to_mkldnn()
        self.training = state[4]

    @torch.jit.script_method
    def forward(self, x):
        return torch.batch_norm(
            x,
            self.weight,
            self.bias,
            self.running_mean,
            self.running_var,
            False,  # training
            self.exponential_average_factor,
            self.eps,
            False,  # cuda_enabled
        )


class MkldnnPrelu(torch.jit.ScriptModule):
    def __init__(self, dense_module, dtype):
        super().__init__()
        self.register_buffer('weight', dense_module.weight.to_mkldnn(dtype))

    @torch.jit.script_method
    def __getstate__(self):
        return (self.weight.to_dense(), self.training)

    @torch.jit.script_method
    def __setstate__(self, state):
        self.weight = state[0].to_mkldnn()
        self.training = state[1]

    @torch.jit.script_method
    def forward(self, x):
        x_mkldnn = x if x.is_mkldnn else x.to_mkldnn()
        y_mkldnn = torch.prelu(x_mkldnn, self.weight)
        y = y_mkldnn if x.is_mkldnn else y_mkldnn.to_dense()
        return y


def to_mkldnn(module, dtype=torch.float):
    """Convert supported dense modules in ``module`` to their MKL-DNN wrappers.

    Unsupported modules are left as-is; container modules are reused, with
    their children converted in place.
    """
    assert dtype in [torch.float, torch.bfloat16, torch.half], \
        "MKLDNN only supports float, bfloat16, and half for now"

    def m_fn(m, d):
        if isinstance(m, torch.nn.Linear):
            return MkldnnLinear(m, d)
        elif isinstance(m, torch.nn.Conv1d):
            return MkldnnConv1d(m, d)
        elif isinstance(m, torch.nn.Conv2d):
            return MkldnnConv2d(m, d)
        elif isinstance(m, torch.nn.Conv3d):
            return MkldnnConv3d(m, d)
        elif isinstance(m, (torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)):
            # On the OneDNN bf16 path, batch norm weight and bias must stay
            # fp32, so MkldnnBatchNorm takes no dtype argument.
            return MkldnnBatchNorm(m)
        elif isinstance(m, torch.nn.PReLU):
            return MkldnnPrelu(m, d)
        else:
            return m

    def m_fn_rec(m, d):
        new_m = m_fn(m, d)
        for name, sub_m in m.named_children():
            setattr(new_m, name, m_fn_rec(sub_m, d))
        return new_m

    return m_fn_rec(module, dtype)
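

if __name__ == "__main__":
    # Minimal usage sketch (not part of the original module; requires a
    # PyTorch build with MKL-DNN support). The model and shapes below are
    # arbitrary examples. Note that `to_mkldnn` rewrites a container's
    # children in place, so run the dense baseline before converting.
    model = torch.nn.Sequential(
        torch.nn.Conv2d(3, 8, kernel_size=3),
        torch.nn.BatchNorm2d(8),
    ).eval()
    x = torch.randn(1, 3, 32, 32)
    with torch.no_grad():
        y_dense = model(x)
        mkldnn_model = to_mkldnn(model)
        # The conv/batch-norm wrappers expect inputs already in MKL-DNN layout.
        y_mkldnn = mkldnn_model(x.to_mkldnn()).to_dense()
    print("max abs diff:", (y_dense - y_mkldnn).abs().max().item())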