# mypy: allow-untyped-defs
import re
from typing import Callable, List

import torch
from torch import Tensor

__all__: List[str] = []


class _CodeParser:
    def __init__(self, code_string: str):
        optional_ws = r"\s*"
        required_ws = r"\s+"
        template_params = r"(?P<template_params>\<.+\>)"
        return_type = r"(?P<return_type>\w+)"
        function_name = r"(?P<function_name>\w+)"
        function_params = r"(?P<function_params>\(.+\))"
        function_body = r"(?P<function_body>\{.+\})"

        pattern = (
            optional_ws
            + "template"
            + optional_ws
            + template_params
            + optional_ws
            + return_type
            + required_ws
            + function_name
            + optional_ws
            + function_params
            + optional_ws
            + function_body
            + optional_ws
        )

        result = re.match(
            pattern, code_string, re.DOTALL
        )  # DOTALL for matching multiline

        if result is None:
            raise Exception(  # noqa: TRY002
                f"Couldn't parse code, please check correctness:\n {code_string}"
            )

        self.template_params = result["template_params"]
        self.return_type = result["return_type"]
        self.function_name = result["function_name"]
        self.function_params = result["function_params"]
        self.function_body = result["function_body"]
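
        # For reference, a worked example of what the pattern above extracts:
        #   "template <typename T> T my_op(T x) { return x; }"
        # yields template_params="<typename T>", return_type="T",
        # function_name="my_op", function_params="(T x)", and
        # function_body="{ return x; }".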


class _JittedFunction:
    def __init__(
        self, code_string: str, return_by_ref: bool, num_outputs: int, **kwargs
    ):
        self.code_string = code_string

        assert (
            return_by_ref or num_outputs == 1
        ), "Return by value only works for single output."
        self.return_by_ref = return_by_ref
        self.num_outputs = num_outputs

        parsed_code = _CodeParser(code_string)
        self.kernel_name = parsed_code.function_name

        self.kwargs_dict = kwargs
        self.is_cuda_available = torch.cuda.is_available()

    def __call__(self, *tensors: Tensor, **kwargs):
        # Jiterator follows torch.cuda's lazy initialization behavior:
        # checking CUDA availability is deferred to function invocation time.
        assert (
            self.is_cuda_available
        ), "Jiterator is only supported on CUDA and ROCm GPUs, none are available."

        assert len(tensors) <= 8, "jiterator only supports up to 8 tensor inputs."

        expanded_kwargs = self.kwargs_dict.copy()
        for key, value in kwargs.items():
            if key in self.kwargs_dict:
                expanded_kwargs[key] = value
            else:
                raise KeyError(f"{key} is not declared in function definition")
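
        # For instance, a function created with alpha=1.0 can be called as
        # fn(a, b) or fn(a, b, alpha=2.0); fn(a, b, beta=1.0) raises KeyError
        # because beta was not declared at creation time.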
        return torch._C._cuda_jiterator_compile_and_launch_kernel(
            self.code_string,
            self.kernel_name,
            self.return_by_ref,
            self.num_outputs,
            tensors,
            expanded_kwargs,
        )


def _create_jit_fn(code_string: str, **kwargs) -> Callable:
    """
    Create a jiterator-generated cuda kernel for an elementwise op.

    The code string has to be a valid CUDA function that describes the computation for a single element. The code
    string has to follow the c++ template pattern, as shown in the example below. This function will be inlined
    into an elementwise kernel template, and compiled on the fly. The compiled kernel will be cached in memory, as
    well as in a local temp dir.

    Jiterator-generated kernels accept noncontiguous tensors, and support broadcasting and type promotion.

    Args:
        code_string (str): CUDA code string to be compiled by jiterator. The entry functor must return by value.
        kwargs (Dict, optional): Keyword arguments for generated function

    Example::

        code_string = "template <typename T> T my_kernel(T x, T y, T alpha) { return -x + alpha * y; }"
        jitted_fn = create_jit_fn(code_string, alpha=1.0)
        a = torch.rand(3, device='cuda')
        b = torch.rand(3, device='cuda')
        # invoke jitted function like a regular python function
        result = jitted_fn(a, b, alpha=3.14)

    code_string also allows multiple function definitions, and the last function will be treated as the entry function.

    Example::

        code_string = "template <typename T> T util_fn(T x, T y) { return ::sin(x) + ::cos(y); }"
        code_string += "template <typename T> T my_kernel(T x, T y, T val) { return ::min(val, util_fn(x, y)); }"
        jitted_fn = create_jit_fn(code_string, val=0.0)
        a = torch.rand(3, device='cuda')
        b = torch.rand(3, device='cuda')
        # invoke jitted function like a regular python function
        result = jitted_fn(a, b)  # using default val=0.0

    Jiterator can be used together with python registration to override an operator's cuda kernel.
    The following example overrides gelu's cuda kernel with relu:

    Example::

        code_string = "template <typename T> T my_gelu(T a) { return a > 0 ? a : 0; }"
        my_gelu = create_jit_fn(code_string)
        my_lib = torch.library.Library("aten", "IMPL")
        my_lib.impl('aten::gelu', my_gelu, "CUDA")
        # torch.nn.GELU and torch.nn.functional.gelu are now overridden
        a = torch.rand(3, device='cuda')
        torch.allclose(torch.nn.functional.gelu(a), torch.nn.functional.relu(a))

    .. warning::
        This API is in beta and may change in future releases.

    .. warning::
        This API only supports up to 8 inputs and 1 output.

    .. warning::
        All input tensors must live on a CUDA device.
    """
    return _JittedFunction(code_string, return_by_ref=False, num_outputs=1, **kwargs)


def _create_multi_output_jit_fn(
    code_string: str, num_outputs: int, **kwargs
) -> Callable:
    """
    Create a jiterator-generated cuda kernel for an elementwise op that supports returning one or more outputs.

    Args:
        code_string (str): CUDA code string to be compiled by jiterator. The entry functor must return value by reference.
        num_outputs (int): number of outputs returned by the kernel
        kwargs (Dict, optional): Keyword arguments for generated function

    Example::

        code_string = "template <typename T> void my_kernel(T x, T y, T alpha, T& out) { out = -x + alpha * y; }"
        jitted_fn = create_multi_output_jit_fn(code_string, num_outputs=1, alpha=1.0)
        a = torch.rand(3, device='cuda')
        b = torch.rand(3, device='cuda')
        # invoke jitted function like a regular python function
        result = jitted_fn(a, b, alpha=3.14)

    .. warning::
        This API is in beta and may change in future releases.

    .. warning::
        This API only supports up to 8 inputs and 8 outputs.
    """
    return _JittedFunction(
        code_string, return_by_ref=True, num_outputs=num_outputs, **kwargs
    )
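

# A minimal usage sketch, kept out of the import path: a kernel with two
# reference outputs built via _create_multi_output_jit_fn. The kernel name
# sin_cos is made up for this demo; running it assumes a CUDA (or ROCm) device
# is available and that outputs are returned in declaration order.
if __name__ == "__main__":
    demo_code = (
        "template <typename T> void sin_cos(T theta, T& s, T& c) "
        "{ s = ::sin(theta); c = ::cos(theta); }"
    )
    sin_cos = _create_multi_output_jit_fn(demo_code, num_outputs=2)
    theta = torch.rand(3, device="cuda")
    s, c = sin_cos(theta)  # one tensor per T& parameter
    print(torch.allclose(s, torch.sin(theta)), torch.allclose(c, torch.cos(theta)))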