# _pattern_matcher.py

# mypy: allow-untyped-defs
import json
import math
import os
import re
from typing import Dict, List, Optional, Set

import torch
import torch.utils.benchmark as benchmark
from torch._C._profiler import (
    _EventType,
    _ExtraFields_PyCall,
    _ExtraFields_PyCCall,
    _ExtraFields_TorchOp,
    _ProfilerEvent,
)
from torch.profiler import profile
from torch.profiler._utils import index_of_first_match, traverse_bfs, traverse_dfs


class Pattern:
    """
    Base class for all patterns. Subclass this class and implement match()
    to define custom patterns.

    In a subclass, define the description and skip properties.
    """

    def __init__(self, prof: profile, should_benchmark: bool = False):
        self.prof = prof
        self.should_benchmark = should_benchmark
        self.name = "Please specify a name for pattern"
        self.description = "Please specify a description for pattern"
        self.url = ""
        assert prof.profiler is not None and prof.profiler.kineto_results is not None
        self.event_tree = prof.profiler.kineto_results.experimental_event_tree()
        self.tid_root: Dict[int, List[_ProfilerEvent]] = {}
        for event in self.event_tree:
            self.tid_root.setdefault(event.start_tid, []).append(event)

    @property
    def skip(self):
        return False

    def report(self, event: _ProfilerEvent):
        msg = (
            f"{self.description}\n[Source Code Location] {source_code_location(event)}"
        )
        return msg

    def eventTreeTraversal(self):
        """
        Traverse the event tree and yield all events.
        Override this method in a subclass to customize the traversal.
        """
        yield from traverse_dfs(self.event_tree)

    def summary(self, events: List[_ProfilerEvent]):
        default_summary = f"{self.name}: {len(events)} events matched."
        if self.should_benchmark:
            # If benchmark summary is not empty, use it.
            return (
                self.benchmark_summary(events)
                if hasattr(self, "benchmark")  # type: ignore[attr-defined]
                else default_summary
            )
        return default_summary

    def benchmark_summary(self, events: List[_ProfilerEvent]):
        def format_time(time_ns: int):
            unit_lst = ["ns", "us", "ms"]
            for unit in unit_lst:
                if time_ns < 1000:
                    return f"{time_ns:.2f} {unit}"
                time_ns //= 1000
            return f"{time_ns:.2f} s"

        assert hasattr(self, "benchmark"), "Please implement benchmark()"
        shapes_factor_map = self.benchmark(events)  # type: ignore[attr-defined]
        original_time = sum(event.duration_time_ns for event in events)
        new_time = sum(
            shapes_factor_map[input_shapes(event)] * event.duration_time_ns
            for event in events
        )
        return (
            f"{self.name}: {len(events)} events matched. "
            f"Total Estimated Speedup: {format_time(original_time - new_time)} ({round(original_time/new_time, 2)}X)"
        )

    def match(self, event: _ProfilerEvent):
        """
        Return True if the event matches the pattern.
        This method should be overridden in a subclass.
        """
        raise NotImplementedError

    def matched_events(self):
        if self.skip:
            return []
        matched_events = []
        for event in self.eventTreeTraversal():
            if self.match(event):
                matched_events.append(event)
        return matched_events

    def root_of(self, event: _ProfilerEvent):
        while event.parent:
            event = event.parent
        return event

    def siblings_of(self, event: _ProfilerEvent):
        if event.parent:
            children = event.parent.children
        else:
            children = self.tid_root[event.start_tid]
        index = children.index(event)
        return children[:index], children[index + 1 :]

    def next_of(self, event: _ProfilerEvent):
        _, next_events = self.siblings_of(event)
        return next_events[0] if next_events else None

    def prev_of(self, event: _ProfilerEvent):
        prev_events, _ = self.siblings_of(event)
        return prev_events[-1] if prev_events else None

    def go_up_until(self, event: _ProfilerEvent, predicate):
        if not event:
            return None
        while event.parent and not predicate(event):
            event = event.parent
        return event
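

# Illustrative sketch (not referenced by report_all_anti_patterns): a minimal
# Pattern subclass following the contract described in the base class above.
# The op it flags, aten::item, and the skip condition are assumptions chosen
# only to demonstrate the API.
class _ExampleItemCallPattern(Pattern):
    def __init__(self, prof: profile, should_benchmark: bool = False):
        super().__init__(prof, should_benchmark)
        self.name = "Example Item Call Pattern"
        self.description = "Illustrative only: matched a call to aten::item."

    @property
    def skip(self):
        # Source attribution in report() is only useful with stacks recorded.
        return not self.prof.with_stack

    def match(self, event: _ProfilerEvent):
        return event.name == "aten::item"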


# Patterns


class NamePattern(Pattern):
    def __init__(self, prof: profile, name: str, should_benchmark: bool = False):
        super().__init__(prof, should_benchmark)
        self.description = f"Matched Name Event: {name}"
        self.name = name

    def match(self, event: _ProfilerEvent):
        return re.search(self.name, event.name) is not None


class ExtraCUDACopyPattern(Pattern):
    """
    This pattern identifies cases where we create a constant tensor on the CPU and
    immediately move it to the GPU.
    example: torch.zeros((100, 100)).to("cuda")

    Pattern:
    built-in method                 | built-in method
            ...                     |     aten::to
        aten::fill_/aten::zero_     |         aten::_to_copy

    Algorithm:
    We start at the node aten::to, go to the parent event's previous sibling,
    and check whether we find an aten::fill_/aten::zero_ as we keep going down the tree.
    We always select the last child in the children list when we go down the tree.
    If any step fails, it is not a match.
    """

    def __init__(self, prof: profile, should_benchmark: bool = False):
        super().__init__(prof, should_benchmark)
        self.name = "Extra CUDA Copy Pattern"
        self.description = "Filled a CPU tensor and immediately moved it to GPU. Please initialize it on GPU."
        self.url = "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#create-tensors-directly-on-the-target-device"
        self.init_ops = {
            "aten::fill_",
            "aten::zero_",
            "aten::normal_",
            "aten::uniform_",
        }

    @property
    def skip(self):
        return not self.prof.with_stack or not self.prof.record_shapes

    def match(self, event):
        # TODO: We should also check tensor identities
        if event.name != "aten::to":
            return False
        to_event = event
        if not event.children:
            return False
        event = event.children[-1]
        if event.name != "aten::_to_copy":
            return False
        if not event.children:
            return False
        event = event.children[-1]
        if event.name != "aten::copy_":
            return False
        # aten::copy_ should have the same dtype for its first two arguments
        dtypes = input_dtypes(event)
        if len(dtypes) < 2:
            return False
        if dtypes[0] is None or dtypes[0] != dtypes[1]:
            return False
        event = to_event
        # Up one level
        event = event.parent
        if event is None:
            return False
        # Check if we have an aten::fill_ in the previous leaf
        event = self.prev_of(event)
        if event is None:
            return False
        while event.children:
            event = event.children[-1]
            # aten::zero_ is a special optimization case where fill_ is not called
            if event.name in self.init_ops:
                return True
        return event.name in self.init_ops
        # TODO: Check if tensor is reused

    def benchmark(self, events: List[_ProfilerEvent]):
        shapes_factor_map = {input_shapes(event): 0.0 for event in events}
        for shape in shapes_factor_map:
            size = shape[0]
            to_timer = benchmark.Timer(
                stmt='torch.ones(size).to("cuda")', globals={"size": size}
            )
            de_timer = benchmark.Timer(
                stmt='torch.ones(size, device="cuda")', globals={"size": size}
            )
            to_time = to_timer.timeit(10).mean
            de_time = de_timer.timeit(10).mean
            shapes_factor_map[shape] = de_time / to_time
        return shapes_factor_map
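

# Illustrative sketch of the rewrite ExtraCUDACopyPattern recommends: allocate
# and initialize the tensor directly on the GPU instead of filling it on the
# CPU and copying it over.  The shape is a placeholder.
def _example_create_directly_on_gpu():
    # instead of torch.zeros((100, 100)).to("cuda")
    return torch.zeros((100, 100), device="cuda")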


class ForLoopIndexingPattern(Pattern):
    """
    This pattern identifies cases where we use a for loop to index a tensor that
    could be vectorized.
    example:
        tensor = torch.empty((100, 100))
        for i in range(100):
            tensor[i] = i

    Pattern:
    aten::select | ... | aten::select | ... (Repeat)

    Algorithm:
    We start at the node aten::select and check if we can find this alternating pattern.
    We also keep a set of visited events to avoid duplicate matches within the for loop.
    """

    def __init__(self, prof: profile, should_benchmark: bool = False):
        super().__init__(prof, should_benchmark)
        self.name = "For Loop Indexing Pattern"
        self.description = "For loop indexing detected. Vectorization recommended."
        self.visited: Set[int] = set()

    def eventTreeTraversal(self):
        """
        We need to use BFS traversal order to avoid duplicate matches.
        """
        yield from traverse_bfs(self.event_tree)

    def match(self, event: _ProfilerEvent):
        if event.name != "aten::select":
            return False
        if event.id in self.visited:
            return False
        repeat_count = 1
        _, next = self.siblings_of(event)
        if len(next) <= 1:
            return False

        # Custom event list matching
        def same_ops(list1, list2):
            if len(list1) != len(list2):
                return False
            for op1, op2 in zip(list1, list2):
                if op1.name != op2.name:
                    return False
            return True

        # Record the ops between two aten::select
        next_select_idx = index_of_first_match(next, lambda e: e.name == "aten::select")
        if next_select_idx is None:
            return False
        indexing_ops = [event] + next[:next_select_idx]
        next = next[len(indexing_ops) - 1 :]
        for i in range(0, len(next), len(indexing_ops)):
            if same_ops(indexing_ops, next[i : i + len(indexing_ops)]):
                repeat_count += 1
                self.visited.add(next[i].id)
            else:
                break
        return repeat_count >= 10
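

# Illustrative sketch of the vectorized replacement ForLoopIndexingPattern
# recommends for the per-row loop shown in its docstring.  Sizes are
# placeholders.
def _example_vectorized_indexing():
    tensor = torch.empty((100, 100))
    # instead of: for i in range(100): tensor[i] = i
    tensor[:] = torch.arange(100, dtype=tensor.dtype).unsqueeze(-1)
    return tensor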


class FP32MatMulPattern(Pattern):
    def __init__(self, prof: profile, should_benchmark: bool = False):
        super().__init__(prof, should_benchmark)
        self.name = "FP32 MatMul Pattern"
        self.description = (
            "You are currently using a GPU that supports TF32. "
            "Please enable TF32 by setting 'torch.backends.cuda.matmul.allow_tf32 = True'"
        )
        self.url = "https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"

    @property
    def skip(self):
        if torch.version.hip is not None:
            has_tf32 = False
        else:
            # Anything older than sm_80 predates Ampere and does not support TF32
            has_tf32 = all(int(arch[3:]) >= 80 for arch in torch.cuda.get_arch_list())
        return has_tf32 is False or super().skip or not self.prof.record_shapes

    def match(self, event: _ProfilerEvent):
        # If we saw this pattern once, we don't need to match it again
        if event.tag != _EventType.TorchOp:
            return False
        assert isinstance(event.extra_fields, _ExtraFields_TorchOp)
        if event.name == "aten::mm":
            if event.extra_fields.allow_tf32_cublas is False:
                return True
        return False

    def report(self, event: _ProfilerEvent):
        return self.description

    def benchmark(self, events: List[_ProfilerEvent]):
        shapes_factor_map = {input_shapes(event): 0.0 for event in events}
        for shape in shapes_factor_map:
            matrixA = torch.randn(shape[0], device="cuda", dtype=torch.float32)
            matrixB = torch.randn(shape[1], device="cuda", dtype=torch.float32)
            fp32_timer = benchmark.Timer(
                stmt="torch.mm(matrixA, matrixB)",
                globals={"matrixA": matrixA, "matrixB": matrixB},
            )
            tf32_timer = benchmark.Timer(
                stmt="torch.mm(matrixA, matrixB)",
                setup="torch.backends.cuda.matmul.allow_tf32 = True",
                globals={"matrixA": matrixA, "matrixB": matrixB},
            )
            torch.backends.cuda.matmul.allow_tf32 = False
            fp32_time = fp32_timer.timeit(10).mean
            tf32_time = tf32_timer.timeit(10).mean
            shapes_factor_map[shape] = tf32_time / fp32_time
        return shapes_factor_map
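

# Illustrative sketch of the setting FP32MatMulPattern recommends: allow TF32
# for matmuls (and, optionally, cuDNN convolutions).  Whether this is
# acceptable depends on the model's numerical tolerance.
def _example_enable_tf32():
    torch.backends.cuda.matmul.allow_tf32 = True
    torch.backends.cudnn.allow_tf32 = True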


class OptimizerSingleTensorPattern(Pattern):
    """
    This pattern identifies if we are using the single-tensor version of an optimizer.
    example:
        optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    By passing foreach=True to enable the multi-tensor optimizer, we can gain a speedup
    when the kernels are relatively small.

    Pattern:
    XXXXX: _single_tensor_<OPTIMIZER_NAME>

    Algorithm:
    String match
    """

    def __init__(self, prof: profile, should_benchmark: bool = False):
        super().__init__(prof, should_benchmark)
        self.name = "Optimizer Single Tensor Pattern"
        self.optimizers_with_foreach = ["adam", "sgd", "adamw"]
        self.description = (
            "Detected optimizer running with single tensor implementation. "
            "Please enable multi tensor implementation by passing 'foreach=True' into optimizer."
        )
        self.url = ""

    def match(self, event: _ProfilerEvent):
        for optimizer in self.optimizers_with_foreach:
            if event.name.endswith(f"_single_tensor_{optimizer}"):
                return True
        return False
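

# Illustrative sketch of the change OptimizerSingleTensorPattern recommends:
# opt into the multi-tensor (foreach) implementation.  `model` and the learning
# rate are placeholders.
def _example_foreach_optimizer(model):
    return torch.optim.SGD(model.parameters(), lr=0.1, foreach=True)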


class SynchronizedDataLoaderPattern(Pattern):
    """
    This pattern identifies if we are using num_workers=0 in DataLoader.
    example:
        torch.utils.data.DataLoader(dataset, batch_size=batch_size)
    Add num_workers=N to the arguments. N depends on system configuration.

    Pattern:
    dataloader.py(...): __iter__
        dataloader.py(...): _get_iterator
            NOT dataloader.py(...): check_worker_number_rationality

    Algorithm:
    If we don't see a check_worker_number_rationality call in the DataLoader's __iter__,
    it is not an asynchronous DataLoader.
    """

    def __init__(self, prof: profile, should_benchmark: bool = False):
        super().__init__(prof, should_benchmark)
        self.name = "Synchronized DataLoader Pattern"
        self.description = (
            "Detected DataLoader running with synchronized implementation. "
            "Please enable asynchronous dataloading by setting num_workers > 0 when initializing DataLoader."
        )
        self.url = (
            "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html"
            "#enable-async-data-loading-and-augmentation"
        )

    def match(self, event: _ProfilerEvent):
        def is_dataloader_function(name: str, function_name: str):
            return name.startswith(
                os.path.join("torch", "utils", "data", "dataloader.py")
            ) and name.endswith(function_name)

        # TODO: fixme! Due to lifetime issues of the function name, this field might
        # actually point to an already freed string when the event is a PyCall.
        # Just silently skip this to unblock testing.
        try:
            event.name
        except UnicodeDecodeError:
            return False

        if not is_dataloader_function(event.name, "__iter__"):
            return False
        if not event.children:
            return False
        event = event.children[0]
        if not is_dataloader_function(event.name, "_get_iterator"):
            return False
        if not event.children:
            return False
        event = event.children[0]
        return not is_dataloader_function(event.name, "check_worker_number_rationality")
        # TODO: We should also check if the loader is a bottleneck.
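

# Illustrative sketch of the change SynchronizedDataLoaderPattern recommends:
# load batches in background worker processes.  `dataset`, `batch_size`, and
# the worker count are placeholders; tune num_workers to the host.
def _example_async_dataloader(dataset, batch_size):
    from torch.utils.data import DataLoader

    return DataLoader(dataset, batch_size=batch_size, num_workers=4, pin_memory=True)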


class GradNotSetToNonePattern(Pattern):
    """
    This pattern identifies if we are not setting grad to None in zero_grad.
    example:
        optimizer.zero_grad()
    By setting set_to_none=True, we can gain a speedup.

    Pattern:
    XXXXX: _zero_grad
        NOT aten::zeros
            aten::zero_

    aten::zero_ is called on each parameter in the model.
    We also want to make sure it is not called by aten::zeros.

    Algorithm:
    String match
    """

    def __init__(self, prof: profile, should_benchmark: bool = False):
        super().__init__(prof, should_benchmark)
        self.name = "Gradient Set To Zero Instead of None Pattern"
        self.description = (
            "Detected gradient set to zero instead of None. "
            "Please add 'set_to_none=True' when calling zero_grad()."
        )
        self.url = (
            "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html"
            "#disable-gradient-calculation-for-validation-or-inference"
        )

    def match(self, event: _ProfilerEvent):
        if not event.name.endswith(": zero_grad"):
            return False
        if not event.children:
            return False
        for sub_event in traverse_dfs(event.children):
            if (
                sub_event.name == "aten::zero_"
                and sub_event.parent.name != "aten::zeros"
            ):
                return True
        # TODO: We should also check if the optimizer's numerical behavior will change.
        return False
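

# Illustrative sketch of the change GradNotSetToNonePattern recommends:
# reset gradients to None instead of zero-filling them.  `optimizer` is a
# placeholder for any torch.optim.Optimizer.
def _example_zero_grad_to_none(optimizer):
    optimizer.zero_grad(set_to_none=True)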


class Conv2dBiasFollowedByBatchNorm2dPattern(Pattern):
    """
    This pattern identifies if we are enabling bias in a Conv2d that is followed by BatchNorm2d.
    The bias has no effect because BatchNorm2d re-centers the output.

    Pattern:
    nn.Module: Conv2d            | nn.Module: BatchNorm2d
        ...
            aten::conv2d AND dtype of third argument is not null
    The third argument is the bias.

    Algorithm:
    String match
    """

    def __init__(self, prof: profile, should_benchmark: bool = False):
        super().__init__(prof, should_benchmark)
        self.name = "Enabling Bias in Conv2d Followed By BatchNorm Pattern"
        self.description = "Detected bias enabled in Conv2d that is followed by BatchNorm2d. Please set 'bias=False' in Conv2d."
        self.url = (
            "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html"
            "#disable-bias-for-convolutions-directly-followed-by-a-batch-norm"
        )

    @property
    def skip(self):
        return self.prof.record_shapes is False or super().skip

    def match(self, event: _ProfilerEvent):
        if event.name != "aten::conv2d":
            return False
        if len(input_dtypes(event)) < 3 or input_dtypes(event)[2] is None:
            return False
        # This means bias=True
        event = self.go_up_until(
            event, lambda e: e.name.startswith("nn.Module: Conv2d")
        )
        if not event:
            return False
        event = self.next_of(event)
        if not event:
            return False
        return event.name.startswith("nn.Module: BatchNorm2d")
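

# Illustrative sketch of the change Conv2dBiasFollowedByBatchNorm2dPattern
# recommends: drop the redundant bias when Conv2d feeds directly into
# BatchNorm2d.  Channel counts and kernel size are placeholders.
def _example_conv_bn_block(in_channels=3, out_channels=64):
    return torch.nn.Sequential(
        torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, bias=False),
        torch.nn.BatchNorm2d(out_channels),
    )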


class MatMulDimInFP16Pattern(Pattern):
    def __init__(self, prof: profile, should_benchmark: bool = False):
        super().__init__(prof, should_benchmark)
        self.name = "Matrix Multiplication Dimension Not Aligned Pattern"
        self.description = "Detected matmul with dimensions not aligned. Please use matmul with aligned dimensions."
        self.url = "https://pytorch.org/tutorials/recipes/recipes/tuning_guide.html#use-mixed-precision-and-amp"

    @property
    def skip(self):
        return not self.prof.with_stack or not self.prof.record_shapes

    def match(self, event: _ProfilerEvent):
        def multiple_of(shapes, multiple):
            return all(dim % multiple == 0 for shape in shapes for dim in shape[-2:])

        if event.name not in ("aten::mm", "aten::bmm", "aten::addmm"):
            return False
        if not input_dtypes(event):
            return False
        arg_dtype = input_dtypes(event)[0]
        if arg_dtype in (torch.bfloat16, torch.half) and not multiple_of(
            input_shapes(event), 8
        ):
            return True
        return False

    def benchmark(self, events: List[_ProfilerEvent]):
        def closest_multiple(shapes, multiple):
            return [multiple * math.ceil(shape / multiple) for shape in shapes]

        shapes_factor_map = {input_shapes(event): 0.0 for event in events}
        for shape in shapes_factor_map:
            matrixA = torch.randn(shape[0], device="cuda", dtype=torch.float16)
            matrixB = torch.randn(shape[1], device="cuda", dtype=torch.float16)
            not_aligned_dim_timer = benchmark.Timer(
                stmt="torch.mm(matrixA, matrixB)",
                globals={"matrixA": matrixA, "matrixB": matrixB},
            )
            matrixA = torch.randn(
                closest_multiple(shape[0], 8), device="cuda", dtype=torch.float16
            )
            matrixB = torch.randn(
                closest_multiple(shape[1], 8), device="cuda", dtype=torch.float16
            )
            aligned_dim_timer = benchmark.Timer(
                stmt="torch.mm(matrixA, matrixB)",
                globals={"matrixA": matrixA, "matrixB": matrixB},
            )
            not_aligned_dim_time = not_aligned_dim_timer.timeit(10).mean
            aligned_dim_time = aligned_dim_timer.timeit(10).mean
            shapes_factor_map[shape] = aligned_dim_time / not_aligned_dim_time
        return shapes_factor_map
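

# Illustrative sketch of the remedy MatMulDimInFP16Pattern points at: pad the
# last two dimensions of fp16/bf16 matmul operands up to a multiple of 8 so the
# kernels can use Tensor Cores.  Sizes are placeholders; the zero padding does
# not change the valid part of the product, which is sliced back out.
def _example_aligned_fp16_matmul():
    m, k, n = 30, 70, 50  # deliberately misaligned sizes
    a = torch.randn(m, k, device="cuda", dtype=torch.float16)
    b = torch.randn(k, n, device="cuda", dtype=torch.float16)

    def pad_to_multiple_of_8(t):
        # pad the last two dims up to the next multiple of 8
        return torch.nn.functional.pad(t, (0, -t.shape[-1] % 8, 0, -t.shape[-2] % 8))

    return torch.mm(pad_to_multiple_of_8(a), pad_to_multiple_of_8(b))[:m, :n]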


def source_code_location(event: Optional[_ProfilerEvent]):
    while event:
        if event.tag == _EventType.PyCall or event.tag == _EventType.PyCCall:
            assert isinstance(
                event.extra_fields, (_ExtraFields_PyCall, _ExtraFields_PyCCall)
            )
            if not event.extra_fields.caller.file_name.startswith("torch" + os.sep):
                return f"{event.extra_fields.caller.file_name}:{event.extra_fields.caller.line_number}"
        event = event.parent
    return "No source code location found"


def input_shapes(event: _ProfilerEvent):
    assert isinstance(event.extra_fields, _ExtraFields_TorchOp)
    return tuple(tuple(getattr(i, "sizes", ())) for i in event.extra_fields.inputs)


def input_dtypes(event: _ProfilerEvent):
    assert isinstance(event.extra_fields, _ExtraFields_TorchOp)
    return tuple(getattr(i, "dtype", None) for i in event.extra_fields.inputs)


def report_all_anti_patterns(
    prof,
    should_benchmark: bool = False,
    print_enable: bool = True,
    json_report_dir: Optional[str] = None,
):
    report_dict: Dict = {}
    anti_patterns = [
        ExtraCUDACopyPattern(prof, should_benchmark),
        # ForLoopIndexingPattern(prof, should_benchmark),
        FP32MatMulPattern(prof, should_benchmark),
        OptimizerSingleTensorPattern(prof, should_benchmark),
        SynchronizedDataLoaderPattern(prof, should_benchmark),
        GradNotSetToNonePattern(prof, should_benchmark),
        Conv2dBiasFollowedByBatchNorm2dPattern(prof, should_benchmark),
        MatMulDimInFP16Pattern(prof, should_benchmark),
    ]
    reported = set()
    summaries = []
    message_list = [f"{'-'*40}TorchTidy Report{'-'*40}"]
    message_list.append("Matched Events:")

    for anti_pattern in anti_patterns:
        matched_events = anti_pattern.matched_events()
        if not matched_events:
            continue
        summaries.append(anti_pattern.summary(matched_events))
        for event in matched_events:
            report_msg = anti_pattern.report(event)
            if report_msg not in reported:
                message_list.append(report_msg)
                reported.add(report_msg)
                # Split on the last ':' so that path separators such as Windows
                # drive letters do not break the line-number parse.
                src_location, line_no = source_code_location(event).rsplit(":", 1)
                report_dict.setdefault(src_location, []).append(
                    {
                        "line_number": int(line_no),
                        "name": anti_pattern.name,
                        "url": anti_pattern.url,
                        "message": anti_pattern.description,
                    }
                )

    if json_report_dir is not None:
        json_report_path = os.path.join(json_report_dir, "torchtidy_report.json")
        if os.path.exists(json_report_path):
            with open(json_report_path) as f:
                existing_report = json.load(f)
                existing_report.update(report_dict)
                report_dict = existing_report
        with open(json_report_path, "w") as f:
            json.dump(report_dict, f, indent=4)

    message_list.append("Summary:")
    message_list += summaries
    message_list.append(f"{'-'*40}TorchTidy Report{'-'*40}")
    if print_enable:
        print("\n".join(message_list))
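

# Example usage (illustrative sketch): collect a profile with stack and shape
# recording enabled (several patterns skip themselves without it) and run the
# report.  `run_step` is a placeholder callable containing the workload to
# profile.
def _example_report(run_step):
    with profile(with_stack=True, record_shapes=True) as prof:
        run_step()
    report_all_anti_patterns(prof, should_benchmark=False, json_report_dir=".")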