Compile without -Wno-unused-variable (take 2) (pytorch#66041)
Summary:
Delete `-Wno-unused-variable` from the top-level `CMakeLists.txt`
Still suppress those warnings for tests and `torch_python`

Delete a number of unused variables from the caffe2 code
Use `(void)var;` to suppress unused-variable warnings in range-for loops
Use `C10_UNUSED` for global constructors and `constexpr` instead of `static` for global constants

Do not delete `caffe2::OperatorBase::Output` calls as they have side effects
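
As a minimal standalone sketch of the three suppression patterns named above — none of this code is in the diff itself, and the fallback `C10_UNUSED` definition and the `Registerer` type are illustrative stand-ins (the real macro lives in `c10/macros/Macros.h`):

```cpp
#include <vector>

// Fallback so the sketch compiles on its own; PyTorch defines this
// macro in c10/macros/Macros.h.
#ifndef C10_UNUSED
#define C10_UNUSED __attribute__((__unused__))
#endif

// Pattern: constexpr instead of static for a global constant; an
// unused constexpr constant does not trigger -Wunused-variable.
constexpr int kMaxThreads = 8;

// Pattern: C10_UNUSED for a global kept alive only for its
// constructor's side effect (e.g. operator registration).
struct Registerer {
  Registerer() { /* registration side effect */ }
};
C10_UNUSED static Registerer g_registerer;

int main() {
  std::vector<int> streams;
  // Pattern: (void)i marks the loop variable as used when a range-for
  // loop only repeats its body a fixed number of times.
  for (const auto i : {0, 1, 2}) {
    (void)i;
    streams.push_back(0);
  }
  return static_cast<int>(streams.size()) - 3;  // 0 on success
}
```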

Pull Request resolved: pytorch#66041

Reviewed By: ngimel

Differential Revision: D31360142

Pulled By: malfet

fbshipit-source-id: 6fdfb9f91efdc49ca984a2f2a17ee377d28210c8
malfet authored and facebook-github-bot committed Oct 5, 2021
1 parent 6b0aa29 commit 4c4525f
Showing 62 changed files with 90 additions and 100 deletions.
4 changes: 3 additions & 1 deletion CMakeLists.txt
@@ -744,7 +744,6 @@ if(NOT MSVC)
string(APPEND CMAKE_CXX_FLAGS " -Wno-unknown-pragmas")
string(APPEND CMAKE_CXX_FLAGS " -Wno-sign-compare")
string(APPEND CMAKE_CXX_FLAGS " -Wno-unused-parameter")
string(APPEND CMAKE_CXX_FLAGS " -Wno-unused-variable")
string(APPEND CMAKE_CXX_FLAGS " -Wno-unused-function")
string(APPEND CMAKE_CXX_FLAGS " -Wno-unused-result")
string(APPEND CMAKE_CXX_FLAGS " -Wno-unused-local-typedefs")
@@ -1068,6 +1067,9 @@ endif()

# ---[ JNI
if(BUILD_JNI)
if(NOT MSVC)
string(APPEND CMAKE_CXX_FLAGS " -Wno-unused-variable")
endif()
set(BUILD_LIBTORCH_WITH_JNI 1)
set(FBJNI_SKIP_TESTS 1)
add_subdirectory(android/pytorch_android)
2 changes: 1 addition & 1 deletion aten/src/ATen/mkl/Limits.h
@@ -6,6 +6,6 @@ namespace at { namespace native {

// Since size of MKL_LONG varies on different platforms (linux 64 bit, windows
// 32 bit), we need to programmatically calculate the max.
static int64_t MKL_LONG_MAX = ((1LL << (sizeof(MKL_LONG) * 8 - 2)) - 1) * 2 + 1;
constexpr int64_t MKL_LONG_MAX = ((1LL << (sizeof(MKL_LONG) * 8 - 2)) - 1) * 2 + 1;

}} // namespace
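
For context, the expression builds the maximum value of a signed type of unknown width without ever shifting into the sign bit: shifting by `bits - 2`, then widening via `* 2 + 1`, reaches `2^63 - 1` for an 8-byte `MKL_LONG`, whereas `1LL << 63` would be undefined behavior. A standalone check, using a hypothetical 4-byte stand-in type for `MKL_LONG`:

```cpp
#include <cstdint>
#include <limits>

// Hypothetical stand-in for MKL_LONG, which is 4 bytes on Windows.
using FakeMklLong = std::int32_t;

// Same expression as in Limits.h: shift by (bits - 2), then widen to
// the full range via "* 2 + 1" to avoid touching the sign bit.
constexpr std::int64_t kMax =
    ((1LL << (sizeof(FakeMklLong) * 8 - 2)) - 1) * 2 + 1;

// (2^30 - 1) * 2 + 1 == 2^31 - 1 == INT32_MAX
static_assert(kMax == std::numeric_limits<FakeMklLong>::max(),
              "expression must reproduce the type's max");

int main() { return 0; }
```

The switch from `static` to `constexpr` also makes the constant `const`, which is what silences `-Wunused-variable` in every translation unit that includes the header.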
1 change: 1 addition & 0 deletions aten/src/ATen/test/cuda_stream_test.cpp
@@ -173,6 +173,7 @@ TEST(TestStream, StreamPoolTest) {
if (!at::cuda::is_available()) return;
std::vector<at::cuda::CUDAStream> streams{};
for (const auto i : c10::irange(200)) {
(void)i;
streams.emplace_back(at::cuda::getStreamFromPool());
}

2 changes: 2 additions & 0 deletions aten/src/ATen/test/cuda_tensor_interop_test.cpp
@@ -84,6 +84,8 @@ TEST(CUDAPytorchToCaffe2, Op) {

auto* c2_tensor_a = BlobSetTensor(workspace.CreateBlob("a"), caffe2::Tensor(at_tensor_a));
auto* c2_tensor_b = BlobSetTensor(workspace.CreateBlob("b"), caffe2::Tensor(at_tensor_b));
(void)c2_tensor_a;
(void)c2_tensor_b;

// Test Alias
{
2 changes: 0 additions & 2 deletions aten/src/ATen/test/math_kernel_test.cpp
@@ -54,8 +54,6 @@ TEST(MathKernelTest, NativeGroupNorm) {
TEST(MathKernelTest, NativeLayerNorm) {
const auto input = rand({20, 10, 10, 10});
const auto input_shape = input.sizes();
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores)
const auto input_ndim = input.dim();

double eps = 1e-05;
for (bool undef_weight: {true, false}) {
4 changes: 4 additions & 0 deletions benchmarks/cpp/nvfuser/CMakeLists.txt
@@ -15,4 +15,8 @@ if(USE_CUDA)
main.cpp)

target_link_libraries(nvfuser_bench PRIVATE torch_library benchmark)
if(NOT MSVC)
target_compile_options(nvfuser_bench PRIVATE -Wno-unused-variable)
endif()

endif()
3 changes: 3 additions & 0 deletions c10/test/CMakeLists.txt
@@ -6,6 +6,9 @@ if(BUILD_TEST)
get_filename_component(test_file_name ${test_src} NAME_WE)
set(test_name "c10_${test_file_name}")
add_executable(${test_name} "${test_src}")
if(NOT MSVC)
target_compile_options(${test_name} PRIVATE -Wno-unused-variable)
endif()
target_link_libraries(${test_name} c10 gmock gtest gtest_main)
add_test(NAME ${test_name} COMMAND $<TARGET_FILE:${test_name}>)
if(INSTALL_TEST)
3 changes: 3 additions & 0 deletions caffe2/CMakeLists.txt
@@ -1762,6 +1762,9 @@ if(BUILD_TEST)
target_include_directories(${test_name} PRIVATE $<INSTALL_INTERFACE:include>)
target_include_directories(${test_name} PRIVATE $<BUILD_INTERFACE:${CMAKE_BINARY_DIR}/include>)
target_include_directories(${test_name} PRIVATE ${Caffe2_CPU_INCLUDE})
if(NOT MSVC)
target_compile_options(${test_name} PRIVATE -Wno-unused-variable)
endif()
add_test(NAME ${test_name} COMMAND $<TARGET_FILE:${test_name}>)
if(INSTALL_TEST)
install(TARGETS ${test_name} DESTINATION test)
2 changes: 1 addition & 1 deletion caffe2/core/blob_serialization.cc
@@ -399,8 +399,8 @@ void TensorSerializer::SerializeWithOptions(
std::vector<std::future<void>> futures;
if (tensor.numel() > chunk_size) {
futures.reserve(FLAGS_caffe2_max_tensor_serializer_threads);
// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores,clang-diagnostic-unused-variable)
for (const auto i : c10::irange(FLAGS_caffe2_max_tensor_serializer_threads)) {
(void)i;
futures.emplace_back(std::async(std::launch::async, task));
}
}
1 change: 0 additions & 1 deletion caffe2/operators/cast_op.h
@@ -19,7 +19,6 @@ class CastOp : public Operator<Context> {
: Operator<Context>(operator_def, ws) {
const ArgumentHelper helper(operator_def);
TensorProto_DataType to = cast::GetCastDataType(helper, "to");
TensorProto_DataType from = cast::GetCastDataType(helper, "from_type");

SetBody(to);
}
4 changes: 4 additions & 0 deletions caffe2/operators/conv_op_cudnn.cc
@@ -576,7 +576,9 @@ bool CudnnConvOp::DoRunWithType() {
return true;
}

#if !CUDNN_VERSION_MIN(7, 0, 0)
int group_offset_filter = filter.numel() / group_;
#endif

// Set up the cudnn algorithms & workspace if necessary
bool input_changed = (X.sizes() != cudnn_input_dims_);
@@ -951,7 +953,9 @@ bool CudnnConvGradientOp::DoRunWithType() {
"If you set group, the number of output channels should be divisible "
"by group.");

#if !CUDNN_VERSION_MIN(7, 0, 0)
int group_offset_filter = filter.numel() / group_;
#endif
if (kernel_.size() == 1) {
ConvPoolOpBase<CUDAContext>::ComputePads({H});
} else if (kernel_.size() == 2) {
3 changes: 1 addition & 2 deletions caffe2/operators/fused_rowwise_nbitfake_conversion_ops.cc
@@ -25,11 +25,10 @@ float compress_uniform_simplified_(
float inverse_scale = 1.0f / scale;

float norm = 0.0f;
// NOLINTNEXTLINE(clang-diagnostic-unused-variable)
constexpr int VLEN = 8;
int i = 0;

#ifdef __AVX__
constexpr int VLEN = 8;
// vectorized loop
__m256 norm_v = _mm256_setzero_ps();
for (; i < N / VLEN * VLEN; i += VLEN) {
1 change: 0 additions & 1 deletion caffe2/operators/fused_rowwise_nbitfake_conversion_ops.h
@@ -72,7 +72,6 @@ class FloatToFusedNBitFakeRowwiseQuantizedOp final
CAFFE_THROW("Unsupported data type");
}

bool use_openmp = GREEDY;
#ifdef _OPENMP
vector<float> tmp_vec(input_columns * (GREEDY ? omp_get_max_threads() : 1));
#else
1 change: 0 additions & 1 deletion caffe2/operators/gather_fused_8bit_rowwise_op.h
@@ -30,7 +30,6 @@ class GatherFused8BitRowwiseOp : public Operator<Context> {
const std::vector<int64_t> shape = {indices.size(0), data.size(1) - 8};
auto* output = Output(0, shape, at::dtype<float>());

int block_size = shape[1];
auto block_bytesize = data.size_from_dim(1) * data.dtype().itemsize();
int N = indices.numel();

6 changes: 2 additions & 4 deletions caffe2/operators/generate_proposals_op_util_nms.h
@@ -133,8 +133,7 @@ std::vector<int> soft_nms_cpu_upright(

// Find proposal with max score among remaining proposals
int max_pos;
// NOLINTNEXTLINE(clang-diagnostic-unused-variable)
auto max_score = GetSubArray(*out_scores, pending).maxCoeff(&max_pos);
GetSubArray(*out_scores, pending).maxCoeff(&max_pos);
int i = pending[max_pos];
keep.push_back(i);

@@ -635,8 +634,7 @@ std::vector<int> soft_nms_cpu_rotated(

// Find proposal with max score among remaining proposals
int max_pos;
// NOLINTNEXTLINE(clang-diagnostic-unused-variable)
auto max_score = GetSubArray(*out_scores, pending).maxCoeff(&max_pos);
GetSubArray(*out_scores, pending).maxCoeff(&max_pos);
int i = pending[max_pos];
keep.push_back(i);

2 changes: 0 additions & 2 deletions caffe2/operators/h_softmax_op.cc
@@ -458,8 +458,6 @@ bool HuffmanTreeHierarchyOp<T, Context>::RunOnDevice() {
std::vector<int> labelIndices;
labelIndices.resize(num_classes_);

// NOLINTNEXTLINE(clang-diagnostic-unused-variable)
int current_node_index = 0;
for (int i = 0; i < num_classes_; ++i) {
Node node(i, labelCounts[i]);
nodes.push(node);
2 changes: 0 additions & 2 deletions caffe2/operators/layer_norm_op.h
@@ -132,8 +132,6 @@ class LayerNormGradientOp final : public Operator<Context> {
template <typename T>
bool DoRunWithType() {
const auto& dY = Input(0);
// NOLINTNEXTLINE(clang-diagnostic-unused-variable)
const auto& Y = Input(1);
const auto& mean = Input(2);
const auto& sigma = Input(3);
const auto& X = Input(4);
1 change: 0 additions & 1 deletion caffe2/operators/lengths_reducer_rowwise_8bit_ops.h
@@ -53,7 +53,6 @@ class SparseLengths8BitsRowwiseOp : public Operator<Context> {
"the second dim of scale_bias has to be equal to 2");
CAFFE_ENFORCE_EQ(1, indicesInput.dim(), "INDICES must be a vector");
const IndexType* indices = indicesInput.template data<IndexType>();
int64_t dataToReduceSize = indicesInput.size(0);

const int* lengths = lengthsInput.template data<int>();
vector<int64_t> shape = dataInput.sizes().vec();
2 changes: 0 additions & 2 deletions caffe2/operators/local_response_normalization_op_cudnn.cc
@@ -193,8 +193,6 @@ bool CuDNNLRNGradientOp::DoRunWithType() {

bool CuDNNLRNGradientOp::RunOnDevice() {
// dispatch based on contents of tensor(s)
const auto& X = Input(0);
const auto& Y = Input(1);
const auto& dY = Input(2);
auto* dX = Output(0);

2 changes: 2 additions & 0 deletions caffe2/operators/quantized/int8_add_op.h
@@ -55,8 +55,10 @@ class Int8AddOp final : public Operator<CPUContext> {

initQNNPACK();

#if !defined(FBCODE_CAFFE2) && defined(USE_INTERNAL_PTHREADPOOL_IMPL)
pthreadpool_t threadpool =
reinterpret_cast<pthreadpool_t>(ws_->GetThreadPool());
#endif

if (this->qnnpackOperator_ == nullptr) {
const qnnp_status createStatus = qnnp_create_add_nc_q8(
1 change: 0 additions & 1 deletion caffe2/operators/quantized/int8_channel_shuffle_op.h
@@ -47,7 +47,6 @@ class Int8ChannelShuffleOp final : public ConvPoolOpBase<CPUContext> {
const auto C = X.t.dim32(3);
const auto G = this->group_;
CAFFE_ENFORCE(C % G == 0, "");
const auto B = X.t.numel() / C;

initQNNPACK();

2 changes: 2 additions & 0 deletions caffe2/operators/quantized/int8_conv_op.h
@@ -60,8 +60,10 @@ class Int8ConvOp final : public ConvPoolOpBase<CPUContext> {
runWithSharedBuffer<CPUContext>(ws_, [&](Tensor* buffer) {
initQNNPACK();

#if !defined(FBCODE_CAFFE2) && defined(USE_INTERNAL_PTHREADPOOL_IMPL)
pthreadpool_t threadpool =
reinterpret_cast<pthreadpool_t>(ws_->GetThreadPool());
#endif

if (this->qnnpackObject_ == nullptr) {
CAFFE_ENFORCE(
7 changes: 2 additions & 5 deletions caffe2/operators/quantized/int8_conv_transpose_op.h
@@ -39,17 +39,12 @@ class Int8ConvTransposeOp final : public ConvTransposeUnpoolBase<CPUContext> {
const auto& W = Inputs()[1]->template Get<Int8TensorCPU>();
const auto& B = Inputs()[2]->template Get<Int8TensorCPU>();
auto* Y = Outputs()[0]->template GetMutable<Int8TensorCPU>();
const auto X_offset = -X.zero_point;
const auto W_offset = -W.zero_point;
const int32_t Y_offset =
this->template GetSingleArgument<int>("Y_zero_point", 0);
double Y_scale = this->template GetSingleArgument<float>("Y_scale", 1);
Y->scale = Y_scale;
Y->zero_point = Y_offset;

const auto N = X.t.size(0);
const auto IH = X.t.size(1);
const auto IW = X.t.size(2);
const auto IC = X.t.size(3);

CHECK_EQ(IC, W.t.size(0));
@@ -64,8 +59,10 @@ class Int8ConvTransposeOp final : public ConvTransposeUnpoolBase<CPUContext> {
runWithSharedBuffer<CPUContext>(ws_, [&](Tensor* buffer) {
initQNNPACK();

#if !defined(FBCODE_CAFFE2) && defined(USE_INTERNAL_PTHREADPOOL_IMPL)
pthreadpool_t threadpool =
reinterpret_cast<pthreadpool_t>(ws_->GetThreadPool());
#endif

if (this->qnnpackObject_ == nullptr) {
const qnnp_status createStatus = qnnp_create_deconvolution2d_nhwc_q8(
2 changes: 2 additions & 0 deletions caffe2/operators/quantized/int8_fc_op.h
@@ -47,8 +47,10 @@ class Int8FCOp final : public Operator<CPUContext> {
runWithSharedBuffer<CPUContext>(ws_, [&](Tensor* buffer) {
initQNNPACK();

#if !defined(FBCODE_CAFFE2) && defined(USE_INTERNAL_PTHREADPOOL_IMPL)
pthreadpool_t threadpool =
reinterpret_cast<pthreadpool_t>(ws_->GetThreadPool());
#endif

if (this->qnnpackObject_ == nullptr) {
const qnnp_status createStatus = qnnp_create_fully_connected_nc_q8(
2 changes: 1 addition & 1 deletion caffe2/operators/quantized/int8_quantize_op.h
@@ -19,10 +19,10 @@ void Int8Quantize(
const int64_t N,
const float Y_scale,
const int32_t Y_offset) {
const float inv_scale = 1.0f / Y_scale;
uint32_t i = 0;

#ifdef INT8_NEON_SIMD
const float inv_scale = 1.0f / Y_scale;
const float32x4_t vinv_scale = vdupq_n_f32(inv_scale);
// magic float and magic int to take care of rounding
// int magic_round(float f): interpret_int32(f + 12582912.0f) - 0x4B400000
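
The "magic float" comment kept in this hunk refers to a standard rounding trick: adding `12582912.0f` (which is `1.5 * 2^23` and has bit pattern `0x4B400000`) forces a float's rounded integer value into the low mantissa bits, so reinterpreting the sum as an `int32_t` and subtracting `0x4B400000` yields round-to-nearest-even. A minimal standalone sketch, valid only for inputs well inside ±2^22:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// magic_round(f): interpret_int32(f + 12582912.0f) - 0x4B400000,
// per the comment in int8_quantize_op.h.
int32_t magic_round(float f) {
  float biased = f + 12582912.0f;  // float ulp is 1.0 at this magnitude
  int32_t bits;
  std::memcpy(&bits, &biased, sizeof(bits));  // the "interpret_int32"
  return bits - 0x4B400000;
}

int main() {
  assert(magic_round(2.4f) == 2);
  assert(magic_round(-2.6f) == -3);
  assert(magic_round(2.5f) == 2);  // ties round to even
  return 0;
}
```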
1 change: 0 additions & 1 deletion caffe2/operators/quantized/int8_softmax_op.h
@@ -38,7 +38,6 @@ class Int8SoftmaxOp final : public Operator<CPUContext> {
* in-place, we may overwrite these parameters later, when we set
* quantization parameters for output tensor.
*/
const uint8_t X_zero_point = X.zero_point;
const float X_scale = X.scale;

Y->scale = Y_scale;
2 changes: 0 additions & 2 deletions caffe2/operators/text_file_reader.cc
@@ -141,8 +141,6 @@ class TextFileReaderReadOp : public Operator<CPUContext> {
(field > 0 && token.startDelimId == 1),
"Invalid number of columns at row ",
instance->rowsRead + rowsRead + 1);
// NOLINTNEXTLINE(clang-diagnostic-unused-variable)
const auto& meta = instance->fieldMetas[field];
char*& data = datas[field];
convert(
(TensorProto_DataType)instance->fieldTypes[field],
3 changes: 1 addition & 2 deletions caffe2/operators/utility_ops.h
@@ -686,8 +686,7 @@ class ScatterAssignOp : public Operator<Context> {
const auto dataType = TypeMetaToDataType(data.dtype());
const auto slicesType = TypeMetaToDataType(slices.dtype());
const auto indicesType = TypeMetaToDataType(indices.dtype());
// NOLINTNEXTLINE(clang-diagnostic-unused-variable)
auto* output = Output(0);
C10_UNUSED auto* output = Output(0);

auto runner = GetRunner(dataType, slicesType, indicesType);
(this->*runner)();
3 changes: 1 addition & 2 deletions caffe2/operators/weighted_sample_op.cc
@@ -57,8 +57,7 @@ bool WeightedSampleOp<float, CPUContext>::RunOnDevice() {
}
}
} else {
// NOLINTNEXTLINE(clang-diagnostic-unused-variable,clang-analyzer-deadcode.DeadStores)
auto* out_idx = Output(0, {0}, at::dtype<int>());
C10_UNUSED auto* out_idx = Output(0, {0}, at::dtype<int>());
if (OutputSize() == 2) {
auto* out_value = Output(1, {0}, at::dtype<float>());
out_value->template mutable_data<float>();
7 changes: 0 additions & 7 deletions caffe2/opt/bound_shape_inferencer.cc
@@ -436,13 +436,6 @@ void BoundShapeInferencer::InferSparseLengthsSum(const OperatorDef& op) {
op.type() == "SparseLengthsWeightedSum4BitRowwiseSparse" ||
op.type() == "SparseLengthsSum4BitRowwiseSparse");

// NOLINTNEXTLINE(clang-analyzer-deadcode.DeadStores,clang-diagnostic-unused-variable)
const bool isSparse =
(op.type() == "SparseLengthsSum4BitRowwiseSparse" ||
op.type() == "SparseLengthsWeightedSum4BitRowwiseSparse" ||
op.type() == "SparseLengthsSum8BitRowwiseSparse" ||
op.type() == "SparseLengthsWeightedSum8BitRowwiseSparse");

if (weight) {
CAFFE_ENFORCE_GE(
op.input_size(),
6 changes: 0 additions & 6 deletions caffe2/opt/optimize_ideep.cc
@@ -533,8 +533,6 @@ bool fuseActivation(repr::NNModule* nn, caffe2::Workspace* ws) {
continue;
}
auto relu_node = consumers.front();
// NOLINTNEXTLINE(clang-diagnostic-unused-variable,clang-analyzer-deadcode.DeadStores)
auto relu = repr::nn::get<repr::Relu>(relu_node);

auto relu_outputs = repr::nn::getOutputs(relu_node);
if (relu_outputs.size() != 1) {
@@ -893,10 +891,6 @@ void preConvertFiltersFormat(repr::NNModule* nn, caffe2::Workspace* ws) {
initValue(strides, {1, 1});
auto pads = convTranspose->getPads();
initValue(pads, {0, 0, 0, 0});
// NOLINTNEXTLINE(clang-diagnostic-unused-variable,clang-analyzer-deadcode.DeadStores)
auto* op = getMutableOpDef(*convTranspose);
// NOLINTNEXTLINE(clang-diagnostic-unused-variable)
auto aalgorithm = ialgo::deconvolution_direct;
auto dataType = filter->get_data_type();
ideep::tensor::dims filter_dims_mkldnn{filter->get_dim(1),
filter->get_dim(0),
2 changes: 1 addition & 1 deletion caffe2/quantization/server/dynamic_histogram.cc
@@ -64,12 +64,12 @@ void RemapHistograms(Histogram& src_hist, Histogram& dst_hist) {
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
float dst_bin_begin = dst_hist.Min() + dst_bin_width * dst_bin;
float dst_bin_end = dst_bin_begin + dst_bin_width;
// NOLINTNEXTLINE(clang-diagnostic-unused-variable,clang-analyzer-deadcode.DeadStores)
int dst_bin2 =
// NOLINTNEXTLINE(bugprone-narrowing-conversions,cppcoreguidelines-narrowing-conversions)
dst_bin_width == 0 ? 0 : (src_bin_end - dst_hist.Min()) / dst_bin_width;
// 1 src_bin is mapped to at most 2 dst bin
assert(dst_bin2 <= dst_bin + 2);
(void)dst_bin2;

// dst_bin_cnt is the count from src_bin that should go to dst_bin
// The remainder should go to dst_bin2
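
The `(void)dst_bin2;` added in this hunk pairs with the `assert` above it: under `NDEBUG` the assert expands to nothing, leaving `dst_bin2` otherwise unused in release builds. A minimal sketch of the pattern, with a hypothetical `checked_halve` helper:

```cpp
#include <cassert>

int checked_halve(int n) {
  const int remainder = n % 2;
  assert(remainder == 0);
  // Without this, release builds (-DNDEBUG) would flag `remainder`
  // as unused once the assert compiles away.
  (void)remainder;
  return n / 2;
}

int main() { return checked_halve(4) - 2; }  // exits 0
```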
10 changes: 4 additions & 6 deletions caffe2/quantization/server/fbgemm_pack_op.cc
@@ -698,9 +698,8 @@ TypeIdentifier Int8ConvDNNLowpPackedWeightBlobShapeFunctions::GetTypeMetaId() {

TypeMeta Int8FCDNNLowpPackedWeightBlobShapeFunctions::GetExternalTensorType(
const void* c) {
// NOLINTNEXTLINE(clang-diagnostic-unused-variable)
const Int8FCDNNLowPPackedWeightBlob* int8_tensor =
reinterpret_cast<const Int8FCDNNLowPPackedWeightBlob*>(c);
// const Int8FCDNNLowPPackedWeightBlob* int8_tensor =
// reinterpret_cast<const Int8FCDNNLowPPackedWeightBlob*>(c);
// We forced the output type to be uint8_t since we know it always is.
// If it is going to be implemented elsewhere, we might need to change here.
// return (int8_tensor->original_tensor).dtype();
@@ -709,9 +708,8 @@ TypeMeta Int8FCDNNLowpPackedWeightBlobShapeFunctions::GetExternalTensorType(

TypeMeta Int8ConvDNNLowpPackedWeightBlobShapeFunctions::GetExternalTensorType(
const void* c) {
// NOLINTNEXTLINE(clang-diagnostic-unused-variable)
const Int8ConvDNNLowPPackedWeightBlob* int8_tensor =
reinterpret_cast<const Int8ConvDNNLowPPackedWeightBlob*>(c);
// const Int8ConvDNNLowPPackedWeightBlob* int8_tensor =
// reinterpret_cast<const Int8ConvDNNLowPPackedWeightBlob*>(c);
// return (int8_tensor->original_tensor).dtype();
return TypeMeta::Make<uint8_t>();
}
