# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
`torch/ao/quantization/quantize_fx.py`, while adding an import statement
here.
"""
from torch.ao.quantization.fx.graph_module import ObservedGraphModule
from torch.ao.quantization.quantize_fx import (
    _check_is_graph_module,
    _convert_fx,
    _convert_standalone_module_fx,
    _fuse_fx,
    _prepare_fx,
    _prepare_standalone_module_fx,
    _swap_ff_with_fxff,
    convert_fx,
    fuse_fx,
    prepare_fx,
    prepare_qat_fx,
    QuantizationTracer,
    Scope,
    ScopeContextManager,
)