From bcbdec74f4c8c3104a35b0bf87deec7c0fa5f14c Mon Sep 17 00:00:00 2001
From: Jongsoo Park
Date: Thu, 26 Dec 2019 14:18:33 -0800
Subject: [PATCH] add more comments on small scale handling; remove redundant
 if (#232)

Summary:
Pull Request resolved: https://github.com/pytorch/FBGEMM/pull/232

As title

Reviewed By: dskhudia

Differential Revision: D19231440

fbshipit-source-id: fe4db92bc6b5dd2822dbdac75f29b779d80cee65
---
 src/PackAWithQuantRowOffset.cc | 2 +-
 src/QuantUtils.cc              | 4 +++-
 2 files changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/PackAWithQuantRowOffset.cc b/src/PackAWithQuantRowOffset.cc
index fb90101332..f99ba27989 100644
--- a/src/PackAWithQuantRowOffset.cc
+++ b/src/PackAWithQuantRowOffset.cc
@@ -46,7 +46,7 @@ PackAWithQuantRowOffset<T, accT>::PackAWithQuantRowOffset(
   if (!cpuinfo_initialize()) {
     throw std::runtime_error("Failed to initialize cpuinfo!");
   }
-  if (scale_ == 0.0f || std::isinf(1.0f / scale_)) {
+  if (scale_ == 0.0f) {
     throw std::runtime_error("scale cannot be zero");
   }
   if (std::isinf(1.0f / scale_)) {
diff --git a/src/QuantUtils.cc b/src/QuantUtils.cc
index 2a2d379b5b..be911e3608 100644
--- a/src/QuantUtils.cc
+++ b/src/QuantUtils.cc
@@ -42,7 +42,9 @@ TensorQuantizationParams ChooseQuantizationParams(
   // final number to reflect the actual number used during quantization.
   float scale = (static_cast<double>(max) - min) / (qmax - qmin);
   // If scale is 0 or too small so its reciprocal is infinity, we arbitrary
-  // adjust the scale to 0.1
+  // adjust the scale to 0.1. We want to avoid scale's reciprocal being infinity
+  // because some of the fbgemm code pre-computes scale's reciprocal to do
+  // multiplication instead of division in the time-critical part of the code.
   if (scale == 0.0f || isinf(1.0f / scale)) {
     scale = 0.1;
   }
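
The comment added above is the heart of the change: fbgemm pre-computes 1/scale
once and multiplies inside the hot loop, so a scale whose reciprocal overflows
to infinity would poison every quantized value. Below is a minimal standalone
sketch of that pattern, assuming a made-up quantize_row helper; it is not
FBGEMM's actual kernel, and the names and signature are hypothetical.

// Minimal sketch (not FBGEMM code) of the pattern the new comment describes:
// the reciprocal of scale is pre-computed once so the hot loop can multiply
// instead of divide, which is why an infinite 1/scale must be ruled out early.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <vector>

std::vector<std::uint8_t> quantize_row(
    const std::vector<float>& src, float scale, std::int32_t zero_point) {
  // Same guard as ChooseQuantizationParams: a denormal scale such as 1e-42f
  // makes 1.0f / scale overflow to +inf, so fall back to 0.1 first.
  if (scale == 0.0f || std::isinf(1.0f / scale)) {
    scale = 0.1f;
  }
  const float inv_scale = 1.0f / scale;  // hoisted out of the hot loop
  std::vector<std::uint8_t> dst(src.size());
  for (std::size_t i = 0; i < src.size(); ++i) {
    // Multiply by the pre-computed reciprocal instead of dividing by scale.
    float transformed = zero_point + src[i] * inv_scale;
    dst[i] = static_cast<std::uint8_t>(
        std::min(255.0f, std::max(0.0f, std::nearbyint(transformed))));
  }
  return dst;
}

With a denormal scale such as 1e-42f and no guard, inv_scale would be +inf and
the loop would produce saturated or NaN-derived outputs, which is exactly the
failure mode the fallback to 0.1 avoids.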