# flake8: noqa: F401
r"""
This file is in the process of migration to `torch/ao/quantization`, and
is kept here for compatibility while the migration process is ongoing.
If you are adding a new entry/functionality, please, add it to the
appropriate files under `torch/ao/quantization/fx/`, while adding an import statement
here.
"""
from torch.ao.quantization.fx._equalize import (
    _convert_equalization_ref,
    _InputEqualizationObserver,
    _WeightEqualizationObserver,
    calculate_equalization_scale,
    clear_weight_quant_obs_node,
    convert_eq_obs,
    CUSTOM_MODULE_SUPP_LIST,
    custom_module_supports_equalization,
    default_equalization_qconfig,
    EqualizationQConfig,
    fused_module_supports_equalization,
    get_equalization_qconfig_dict,
    get_layer_sqnr_dict,
    get_op_node_and_weight_eq_obs,
    input_equalization_observer,
    is_equalization_observer,
    maybe_get_next_equalization_scale,
    maybe_get_next_input_eq_obs,
    maybe_get_weight_eq_obs_node,
    nn_module_supports_equalization,
    node_supports_equalization,
    remove_node,
    reshape_scale,
    scale_input_observer,
    scale_weight_functional,
    scale_weight_node,
    update_obs_for_equalization,
    weight_equalization_observer,
)