# mypy: allow-untyped-defs
# Unlike the rest of PyTorch, this file must be Python 2 compliant.
# This script outputs relevant system environment info.
# Run it with `python collect_env.py` or `python -m torch.utils.collect_env`.
import datetime
import locale
import re
import subprocess
import sys
import os
from collections import namedtuple


try:
    import torch
    TORCH_AVAILABLE = True
except (ImportError, NameError, AttributeError, OSError):
    TORCH_AVAILABLE = False


# System Environment Information
SystemEnv = namedtuple('SystemEnv', [
    'torch_version',
    'is_debug_build',
    'cuda_compiled_version',
    'gcc_version',
    'clang_version',
    'cmake_version',
    'os',
    'libc_version',
    'python_version',
    'python_platform',
    'is_cuda_available',
    'cuda_runtime_version',
    'cuda_module_loading',
    'nvidia_driver_version',
    'nvidia_gpu_models',
    'cudnn_version',
    'pip_version',  # 'pip' or 'pip3'
    'pip_packages',
    'conda_packages',
    'hip_compiled_version',
    'hip_runtime_version',
    'miopen_runtime_version',
    'caching_allocator_config',
    'is_xnnpack_available',
    'cpu_info',
])

DEFAULT_CONDA_PATTERNS = {
    "torch",
    "numpy",
    "cudatoolkit",
    "soumith",
    "mkl",
    "magma",
    "triton",
    "optree",
}

DEFAULT_PIP_PATTERNS = {
    "torch",
    "numpy",
    "mypy",
    "flake8",
    "triton",
    "optree",
    "onnx",
}


def run(command):
    """Return (return-code, stdout, stderr)."""
    shell = isinstance(command, str)
    p = subprocess.Popen(command, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE, shell=shell)
    raw_output, raw_err = p.communicate()
    rc = p.returncode
    if get_platform() == 'win32':
        enc = 'oem'
    else:
        enc = locale.getpreferredencoding()
    output = raw_output.decode(enc)
    err = raw_err.decode(enc)
    return rc, output.strip(), err.strip()


def run_and_read_all(run_lambda, command):
    """Run command using run_lambda; reads and returns entire output if rc is 0."""
    rc, out, _ = run_lambda(command)
    if rc != 0:
        return None
    return out


def run_and_parse_first_match(run_lambda, command, regex):
    """Run command using run_lambda, returns the first regex match if it exists."""
    rc, out, _ = run_lambda(command)
    if rc != 0:
        return None
    match = re.search(regex, out)
    if match is None:
        return None
    return match.group(1)


def run_and_return_first_line(run_lambda, command):
    """Run command using run_lambda and returns first line if output is not empty."""
    rc, out, _ = run_lambda(command)
    if rc != 0:
        return None
    return out.split('\n')[0]
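

# Illustrative sketch (not part of the original script): `run` is passed around
# as `run_lambda` so each probe below stays easy to stub out in tests. A
# hypothetical probe built from the helpers above could look like this;
# `_example_kernel_version` is an invented name.
def _example_kernel_version(run_lambda):
    # `uname -r` prints the kernel release on Linux; returns None on failure.
    return run_and_return_first_line(run_lambda, 'uname -r')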


def get_conda_packages(run_lambda, patterns=None):
    if patterns is None:
        patterns = DEFAULT_CONDA_PATTERNS
    conda = os.environ.get('CONDA_EXE', 'conda')
    out = run_and_read_all(run_lambda, "{} list".format(conda))
    if out is None:
        return out

    return "\n".join(
        line
        for line in out.splitlines()
        if not line.startswith("#")
        and any(name in line for name in patterns)
    )


def get_gcc_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'gcc --version', r'gcc (.*)')


def get_clang_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'clang --version', r'clang version (.*)')


def get_cmake_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'cmake --version', r'cmake (.*)')


def get_nvidia_driver_version(run_lambda):
    if get_platform() == 'darwin':
        cmd = 'kextstat | grep -i cuda'
        return run_and_parse_first_match(run_lambda, cmd,
                                         r'com[.]nvidia[.]CUDA [(](.*?)[)]')
    smi = get_nvidia_smi()
    return run_and_parse_first_match(run_lambda, smi, r'Driver Version: (.*?) ')


def get_gpu_info(run_lambda):
    if get_platform() == 'darwin' or (TORCH_AVAILABLE and hasattr(torch.version, 'hip') and torch.version.hip is not None):
        if TORCH_AVAILABLE and torch.cuda.is_available():
            if torch.version.hip is not None:
                prop = torch.cuda.get_device_properties(0)
                if hasattr(prop, "gcnArchName"):
                    gcnArch = " ({})".format(prop.gcnArchName)
                else:
                    gcnArch = "NoGCNArchNameOnOldPyTorch"
            else:
                gcnArch = ""
            return torch.cuda.get_device_name(None) + gcnArch
        return None
    smi = get_nvidia_smi()
    uuid_regex = re.compile(r' \(UUID: .+?\)')
    rc, out, _ = run_lambda(smi + ' -L')
    if rc != 0:
        return None
    # Anonymize GPUs by removing their UUID
    return re.sub(uuid_regex, '', out)


def get_running_cuda_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'nvcc --version', r'release .+ V(.*)')


def get_cudnn_version(run_lambda):
    """Return a list of libcudnn.so; it's hard to tell which one is being used."""
    if get_platform() == 'win32':
        system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
        cuda_path = os.environ.get('CUDA_PATH', "%CUDA_PATH%")
        where_cmd = os.path.join(system_root, 'System32', 'where')
        cudnn_cmd = '{} /R "{}\\bin" cudnn*.dll'.format(where_cmd, cuda_path)
    elif get_platform() == 'darwin':
        # CUDA libraries and drivers can be found in /usr/local/cuda/. See
        # https://docs.nvidia.com/cuda/cuda-installation-guide-mac-os-x/index.html#install
        # https://docs.nvidia.com/deeplearning/sdk/cudnn-install/index.html#installmac
        # Use CUDNN_LIBRARY when cudnn library is installed elsewhere.
        cudnn_cmd = 'ls /usr/local/cuda/lib/libcudnn*'
    else:
        cudnn_cmd = 'ldconfig -p | grep libcudnn | rev | cut -d" " -f1 | rev'
    rc, out, _ = run_lambda(cudnn_cmd)
    # find will return 1 if there are permission errors or if not found
    if len(out) == 0 or (rc != 1 and rc != 0):
        lib = os.environ.get('CUDNN_LIBRARY')
        if lib is not None and os.path.isfile(lib):
            return os.path.realpath(lib)
        return None
    files_set = set()
    for fn in out.split('\n'):
        fn = os.path.realpath(fn)  # eliminate symbolic links
        if os.path.isfile(fn):
            files_set.add(fn)
    if not files_set:
        return None
    # Alphabetize the result because the order is non-deterministic otherwise
    files = sorted(files_set)
    if len(files) == 1:
        return files[0]
    result = '\n'.join(files)
    return 'Probably one of the following:\n{}'.format(result)


def get_nvidia_smi():
    # Note: nvidia-smi is currently available only on Windows and Linux
    smi = 'nvidia-smi'
    if get_platform() == 'win32':
        system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
        program_files_root = os.environ.get('PROGRAMFILES', 'C:\\Program Files')
        legacy_path = os.path.join(program_files_root, 'NVIDIA Corporation', 'NVSMI', smi)
        new_path = os.path.join(system_root, 'System32', smi)
        smis = [new_path, legacy_path]
        for candidate_smi in smis:
            if os.path.exists(candidate_smi):
                smi = '"{}"'.format(candidate_smi)
                break
    return smi


# example outputs of CPU infos
#  * linux
#    Architecture:            x86_64
#    CPU op-mode(s):          32-bit, 64-bit
#    Address sizes:           46 bits physical, 48 bits virtual
#    Byte Order:              Little Endian
#    CPU(s):                  128
#    On-line CPU(s) list:     0-127
#    Vendor ID:               GenuineIntel
#    Model name:              Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
#    CPU family:              6
#    Model:                   106
#    Thread(s) per core:      2
#    Core(s) per socket:      32
#    Socket(s):               2
#    Stepping:                6
#    BogoMIPS:                5799.78
#    Flags:                   fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr
#                             sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon rep_good nopl
#                             xtopology nonstop_tsc cpuid aperfmperf tsc_known_freq pni pclmulqdq monitor ssse3 fma cx16
#                             pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand
#                             hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced
#                             fsgsbase tsc_adjust bmi1 avx2 smep bmi2 erms invpcid avx512f avx512dq rdseed adx smap
#                             avx512ifma clflushopt clwb avx512cd sha_ni avx512bw avx512vl xsaveopt xsavec xgetbv1
#                             xsaves wbnoinvd ida arat avx512vbmi pku ospke avx512_vbmi2 gfni vaes vpclmulqdq
#                             avx512_vnni avx512_bitalg tme avx512_vpopcntdq rdpid md_clear flush_l1d arch_capabilities
#    Virtualization features:
#      Hypervisor vendor:     KVM
#      Virtualization type:   full
#    Caches (sum of all):
#      L1d:                   3 MiB (64 instances)
#      L1i:                   2 MiB (64 instances)
#      L2:                    80 MiB (64 instances)
#      L3:                    108 MiB (2 instances)
#    NUMA:
#      NUMA node(s):          2
#      NUMA node0 CPU(s):     0-31,64-95
#      NUMA node1 CPU(s):     32-63,96-127
#    Vulnerabilities:
#      Itlb multihit:         Not affected
#      L1tf:                  Not affected
#      Mds:                   Not affected
#      Meltdown:              Not affected
#      Mmio stale data:       Vulnerable: Clear CPU buffers attempted, no microcode; SMT Host state unknown
#      Retbleed:              Not affected
#      Spec store bypass:     Mitigation; Speculative Store Bypass disabled via prctl and seccomp
#      Spectre v1:            Mitigation; usercopy/swapgs barriers and __user pointer sanitization
#      Spectre v2:            Mitigation; Enhanced IBRS, IBPB conditional, RSB filling, PBRSB-eIBRS SW sequence
#      Srbds:                 Not affected
#      Tsx async abort:       Not affected
#  * win32
#    Architecture=9
#    CurrentClockSpeed=2900
#    DeviceID=CPU0
#    Family=179
#    L2CacheSize=40960
#    L2CacheSpeed=
#    Manufacturer=GenuineIntel
#    MaxClockSpeed=2900
#    Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
#    ProcessorType=3
#    Revision=27142
#
#    Architecture=9
#    CurrentClockSpeed=2900
#    DeviceID=CPU1
#    Family=179
#    L2CacheSize=40960
#    L2CacheSpeed=
#    Manufacturer=GenuineIntel
#    MaxClockSpeed=2900
#    Name=Intel(R) Xeon(R) Platinum 8375C CPU @ 2.90GHz
#    ProcessorType=3
#    Revision=27142
def get_cpu_info(run_lambda):
    rc, out, err = 0, '', ''
    if get_platform() == 'linux':
        rc, out, err = run_lambda('lscpu')
    elif get_platform() == 'win32':
        rc, out, err = run_lambda(
            'wmic cpu get Name,Manufacturer,Family,Architecture,ProcessorType,DeviceID,'
            ' CurrentClockSpeed,MaxClockSpeed,L2CacheSize,L2CacheSpeed,Revision /VALUE')
    elif get_platform() == 'darwin':
        rc, out, err = run_lambda("sysctl -n machdep.cpu.brand_string")
    cpu_info = 'None'
    if rc == 0:
        cpu_info = out
    else:
        cpu_info = err
    return cpu_info


def get_platform():
    if sys.platform.startswith('linux'):
        return 'linux'
    elif sys.platform.startswith('win32'):
        return 'win32'
    elif sys.platform.startswith('cygwin'):
        return 'cygwin'
    elif sys.platform.startswith('darwin'):
        return 'darwin'
    else:
        return sys.platform


def get_mac_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'sw_vers -productVersion', r'(.*)')


def get_windows_version(run_lambda):
    system_root = os.environ.get('SYSTEMROOT', 'C:\\Windows')
    wmic_cmd = os.path.join(system_root, 'System32', 'Wbem', 'wmic')
    findstr_cmd = os.path.join(system_root, 'System32', 'findstr')
    return run_and_read_all(run_lambda, '{} os get Caption | {} /v Caption'.format(wmic_cmd, findstr_cmd))


def get_lsb_version(run_lambda):
    return run_and_parse_first_match(run_lambda, 'lsb_release -a', r'Description:\t(.*)')


def check_release_file(run_lambda):
    return run_and_parse_first_match(run_lambda, 'cat /etc/*-release',
                                     r'PRETTY_NAME="(.*)"')


def get_os(run_lambda):
    from platform import machine
    platform = get_platform()

    if platform == 'win32' or platform == 'cygwin':
        return get_windows_version(run_lambda)

    if platform == 'darwin':
        version = get_mac_version(run_lambda)
        if version is None:
            return None
        return 'macOS {} ({})'.format(version, machine())

    if platform == 'linux':
        # Ubuntu/Debian based
        desc = get_lsb_version(run_lambda)
        if desc is not None:
            return '{} ({})'.format(desc, machine())

        # Try reading /etc/*-release
        desc = check_release_file(run_lambda)
        if desc is not None:
            return '{} ({})'.format(desc, machine())

        return '{} ({})'.format(platform, machine())

    # Unknown platform
    return platform


def get_python_platform():
    import platform
    return platform.platform()


def get_libc_version():
    import platform
    if get_platform() != 'linux':
        return 'N/A'
    return '-'.join(platform.libc_ver())


def get_pip_packages(run_lambda, patterns=None):
    """Return `pip list` output. Note: will also find conda-installed pytorch and numpy packages."""
    if patterns is None:
        patterns = DEFAULT_PIP_PATTERNS

    # People generally have `pip` as `pip` or `pip3`
    # But here it is invoked as `python -mpip`
    def run_with_pip(pip):
        out = run_and_read_all(run_lambda, pip + ["list", "--format=freeze"])
        if out is None:
            # `pip list` failed; report nothing rather than crash on None
            return None
        return "\n".join(
            line
            for line in out.splitlines()
            if any(name in line for name in patterns)
        )

    pip_version = 'pip3' if sys.version[0] == '3' else 'pip'
    out = run_with_pip([sys.executable, '-mpip'])
    return pip_version, out
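

# Illustrative sketch (assumption, not in the original script): both package
# probes accept a custom `patterns` set, so a caller could narrow the report
# to torch-related packages only. `_example_torch_packages` is an invented name.
def _example_torch_packages():
    _, pip_pkgs = get_pip_packages(run, patterns={'torch'})
    conda_pkgs = get_conda_packages(run, patterns={'torch'})
    return pip_pkgs, conda_pkgs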


def get_cachingallocator_config():
    ca_config = os.environ.get('PYTORCH_CUDA_ALLOC_CONF', '')
    return ca_config


def get_cuda_module_loading_config():
    if TORCH_AVAILABLE and torch.cuda.is_available():
        torch.cuda.init()
        config = os.environ.get('CUDA_MODULE_LOADING', '')
        return config
    else:
        return "N/A"


def is_xnnpack_available():
    if TORCH_AVAILABLE:
        import torch.backends.xnnpack
        return str(torch.backends.xnnpack.enabled)  # type: ignore[attr-defined]
    else:
        return "N/A"


def get_env_info():
    """
    Collect environment information to aid in debugging.

    The returned environment information contains details on the torch version,
    whether it is a debug build, the CUDA compiled version, gcc version, clang
    version, cmake version, operating system, libc version, python version,
    python platform, CUDA availability, CUDA runtime version, CUDA module
    loading config, GPU model and configuration, Nvidia driver version, cuDNN
    version, pip version and versions of relevant pip and conda packages, HIP
    runtime version, MIOpen runtime version, caching allocator config, XNNPACK
    availability and CPU information.

    Returns:
        SystemEnv (namedtuple): A tuple containing various environment details
        and system information.
    """
    run_lambda = run
    pip_version, pip_list_output = get_pip_packages(run_lambda)

    if TORCH_AVAILABLE:
        version_str = torch.__version__
        debug_mode_str = str(torch.version.debug)
        cuda_available_str = str(torch.cuda.is_available())
        cuda_version_str = torch.version.cuda
        if not hasattr(torch.version, 'hip') or torch.version.hip is None:  # cuda version
            hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'
        else:  # HIP version
            def get_version_or_na(cfg, prefix):
                _lst = [s.rsplit(None, 1)[-1] for s in cfg if prefix in s]
                return _lst[0] if _lst else 'N/A'

            cfg = torch._C._show_config().split('\n')
            hip_runtime_version = get_version_or_na(cfg, 'HIP Runtime')
            miopen_runtime_version = get_version_or_na(cfg, 'MIOpen')
            cuda_version_str = 'N/A'
            hip_compiled_version = torch.version.hip
    else:
        version_str = debug_mode_str = cuda_available_str = cuda_version_str = 'N/A'
        hip_compiled_version = hip_runtime_version = miopen_runtime_version = 'N/A'

    sys_version = sys.version.replace("\n", " ")

    conda_packages = get_conda_packages(run_lambda)

    return SystemEnv(
        torch_version=version_str,
        is_debug_build=debug_mode_str,
        python_version='{} ({}-bit runtime)'.format(sys_version, sys.maxsize.bit_length() + 1),
        python_platform=get_python_platform(),
        is_cuda_available=cuda_available_str,
        cuda_compiled_version=cuda_version_str,
        cuda_runtime_version=get_running_cuda_version(run_lambda),
        cuda_module_loading=get_cuda_module_loading_config(),
        nvidia_gpu_models=get_gpu_info(run_lambda),
        nvidia_driver_version=get_nvidia_driver_version(run_lambda),
        cudnn_version=get_cudnn_version(run_lambda),
        hip_compiled_version=hip_compiled_version,
        hip_runtime_version=hip_runtime_version,
        miopen_runtime_version=miopen_runtime_version,
        pip_version=pip_version,
        pip_packages=pip_list_output,
        conda_packages=conda_packages,
        os=get_os(run_lambda),
        libc_version=get_libc_version(),
        gcc_version=get_gcc_version(run_lambda),
        clang_version=get_clang_version(run_lambda),
        cmake_version=get_cmake_version(run_lambda),
        caching_allocator_config=get_cachingallocator_config(),
        is_xnnpack_available=is_xnnpack_available(),
        cpu_info=get_cpu_info(run_lambda),
    )
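

# Illustrative usage sketch (assumption, not in the original script): SystemEnv
# is a plain namedtuple, so fields can be read directly without the
# pretty-printer defined below. `_example_cuda_summary` is an invented name.
def _example_cuda_summary():
    info = get_env_info()
    return 'torch {} / CUDA available: {}'.format(
        info.torch_version, info.is_cuda_available)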


env_info_fmt = """
PyTorch version: {torch_version}
Is debug build: {is_debug_build}
CUDA used to build PyTorch: {cuda_compiled_version}
ROCM used to build PyTorch: {hip_compiled_version}

OS: {os}
GCC version: {gcc_version}
Clang version: {clang_version}
CMake version: {cmake_version}
Libc version: {libc_version}

Python version: {python_version}
Python platform: {python_platform}
Is CUDA available: {is_cuda_available}
CUDA runtime version: {cuda_runtime_version}
CUDA_MODULE_LOADING set to: {cuda_module_loading}
GPU models and configuration: {nvidia_gpu_models}
Nvidia driver version: {nvidia_driver_version}
cuDNN version: {cudnn_version}
HIP runtime version: {hip_runtime_version}
MIOpen runtime version: {miopen_runtime_version}
Is XNNPACK available: {is_xnnpack_available}

CPU:
{cpu_info}

Versions of relevant libraries:
{pip_packages}
{conda_packages}
""".strip()


def pretty_str(envinfo):
    def replace_nones(dct, replacement='Could not collect'):
        for key in dct.keys():
            if dct[key] is not None:
                continue
            dct[key] = replacement
        return dct

    def replace_bools(dct, true='Yes', false='No'):
        for key in dct.keys():
            if dct[key] is True:
                dct[key] = true
            elif dct[key] is False:
                dct[key] = false
        return dct

    def prepend(text, tag='[prepend]'):
        lines = text.split('\n')
        updated_lines = [tag + line for line in lines]
        return '\n'.join(updated_lines)

    def replace_if_empty(text, replacement='No relevant packages'):
        if text is not None and len(text) == 0:
            return replacement
        return text

    def maybe_start_on_next_line(string):
        # If `string` is multiline, prepend a \n to it.
        if string is not None and len(string.split('\n')) > 1:
            return '\n{}\n'.format(string)
        return string

    mutable_dict = envinfo._asdict()

    # If nvidia_gpu_models is multiline, start on the next line
    mutable_dict['nvidia_gpu_models'] = \
        maybe_start_on_next_line(envinfo.nvidia_gpu_models)

    # If the machine doesn't have CUDA, report some fields as 'No CUDA'
    dynamic_cuda_fields = [
        'cuda_runtime_version',
        'nvidia_gpu_models',
        'nvidia_driver_version',
    ]
    all_cuda_fields = dynamic_cuda_fields + ['cudnn_version']
    all_dynamic_cuda_fields_missing = all(
        mutable_dict[field] is None for field in dynamic_cuda_fields)
    if TORCH_AVAILABLE and not torch.cuda.is_available() and all_dynamic_cuda_fields_missing:
        for field in all_cuda_fields:
            mutable_dict[field] = 'No CUDA'
        if envinfo.cuda_compiled_version is None:
            mutable_dict['cuda_compiled_version'] = 'None'

    # Replace True with Yes, False with No
    mutable_dict = replace_bools(mutable_dict)

    # Replace all None objects with 'Could not collect'
    mutable_dict = replace_nones(mutable_dict)

    # If either of these are '', replace with 'No relevant packages'
    mutable_dict['pip_packages'] = replace_if_empty(mutable_dict['pip_packages'])
    mutable_dict['conda_packages'] = replace_if_empty(mutable_dict['conda_packages'])

    # Tag conda and pip packages with a prefix
    # If they were previously None, they'll show up as ie '[conda] Could not collect'
    if mutable_dict['pip_packages']:
        mutable_dict['pip_packages'] = prepend(mutable_dict['pip_packages'],
                                               '[{}] '.format(envinfo.pip_version))
    if mutable_dict['conda_packages']:
        mutable_dict['conda_packages'] = prepend(mutable_dict['conda_packages'],
                                                 '[conda] ')
    mutable_dict['cpu_info'] = envinfo.cpu_info
    return env_info_fmt.format(**mutable_dict)


def get_pretty_env_info():
    """
    Return a pretty string of environment information.

    This function retrieves environment information by calling the `get_env_info`
    function and then formats the information into a human-readable string.
    The retrieved environment information is listed in the documentation of
    `get_env_info`. This function is used in `python collect_env.py`, which
    should be executed when reporting a bug.

    Returns:
        str: A pretty string of the environment information.
    """
    return pretty_str(get_env_info())


def main():
    print("Collecting environment information...")
    output = get_pretty_env_info()
    print(output)

    if TORCH_AVAILABLE and hasattr(torch, 'utils') and hasattr(torch.utils, '_crash_handler'):
        minidump_dir = torch.utils._crash_handler.DEFAULT_MINIDUMP_DIR
        if sys.platform == "linux" and os.path.exists(minidump_dir):
            dumps = [os.path.join(minidump_dir, dump) for dump in os.listdir(minidump_dir)]
            if dumps:  # the directory may exist but contain no minidumps
                latest = max(dumps, key=os.path.getctime)
                ctime = os.path.getctime(latest)
                creation_time = datetime.datetime.fromtimestamp(ctime).strftime('%Y-%m-%d %H:%M:%S')
                msg = "\n*** Detected a minidump at {} created on {}, ".format(latest, creation_time) + \
                      "if this is related to your bug please include it when you file a report ***"
                print(msg, file=sys.stderr)


if __name__ == '__main__':
    main()