diff --git a/runtime/libs/tflite/include/tflite/Diff.h b/runtime/libs/tflite/include/tflite/Diff.h
index 11347b1e586..fdc1a310b6f 100644
--- a/runtime/libs/tflite/include/tflite/Diff.h
+++ b/runtime/libs/tflite/include/tflite/Diff.h
@@ -85,64 +85,4 @@ class TfLiteInterpMatchApp
   const nnfw::misc::tensor::Comparator &_comparator;
 };
 
-#include "tflite/interp/Builder.h"
-#include "tflite/Quantization.h"
-
-/**
- * @brief Structure for NNAPI correctness test
- */
-struct RandomTestParam
-{
-  int verbose;               //!< Verbosity of debug information
-  int tolerance;             //!< Torlerance of value difference
-  int tensor_logging = 0;    //!< Save logging to a file if not 0
-  std::string log_path = ""; //!< Path of log file, meaningful only when tensor_logging is 1
-};
-
-/**
- * @brief Class to define Random test runner
- */
-class RandomTestRunner
-{
-public:
-  /**
-   * @brief Construct a new RandomTestRunner object
-   * @param[in] seed Random seed value
-   * @param[in] param RandomTestParam object for test runner
-   * @param[in] quantization TfLiteQuantizationParams type to represent quantization value
-   */
-  RandomTestRunner(uint32_t seed, const RandomTestParam &param)
-    : _randgen{seed, 0.0f, 2.0f}, _param{param}
-  {
-    // DO NOTHING
-  }
-
-public:
-  /**
-   * @brief Run the random test runner
-   * @param[in] builder Interpreter Builder used to run
-   * @return 0 if test succeeds, otherwise failure
-   */
-  int run(const nnfw::tflite::Builder &builder);
-
-public:
-  /**
-   * @brief Get RandomGenerator reference
-   * @return RandomGenerator reference
-   */
-  nnfw::misc::RandomGenerator &generator() { return _randgen; };
-
-private:
-  nnfw::misc::RandomGenerator _randgen;
-  const RandomTestParam _param;
-
-public:
-  /**
-   * @brief Create a RandomTestRunner object
-   * @param[in] seed Random seed value
-   * @return RandomGenerator object
-   */
-  static RandomTestRunner make(uint32_t seed);
-};
-
 #endif // __NNFW_TFLITE_DIFF_H__
diff --git a/runtime/libs/tflite/include/tflite/RandomTestRunner.h b/runtime/libs/tflite/include/tflite/RandomTestRunner.h
new file mode 100644
index 00000000000..afeaab74d1c
--- /dev/null
+++ b/runtime/libs/tflite/include/tflite/RandomTestRunner.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @file RandomTestRunner.h
+ * @brief This file contains class for random input testing
+ */
+
+#ifndef __NNFW_TFLITE_RANDOM_TEST_RUNNER_H__
+#define __NNFW_TFLITE_RANDOM_TEST_RUNNER_H__
+
+#include "tflite/interp/Builder.h"
+
+#include <misc/RandomGenerator.h>
+
+namespace nnfw
+{
+namespace tflite
+{
+
+/**
+ * @brief Structure for NNAPI correctness test
+ */
+struct RandomTestParam
+{
+  int verbose;               //!< Verbosity of debug information
+  int tolerance;             //!< Tolerance of value difference
+  int tensor_logging = 0;    //!< Save logging to a file if not 0
+  std::string log_path = ""; //!< Path of log file, meaningful only when tensor_logging is 1
+};
+
+/**
+ * @brief Class to define Random test runner
+ */
+class RandomTestRunner
+{
+public:
+  /**
+   * @brief Construct a new RandomTestRunner object
+   * @param[in] seed Random seed value
+   * @param[in] param RandomTestParam object for test runner
+   */
+  RandomTestRunner(uint32_t seed, const RandomTestParam &param)
+    : _randgen{seed, 0.0f, 2.0f}, _param{param}
+  {
+    // DO NOTHING
+  }
+
+public:
+  /**
+   * @brief Run the random test runner
+   * @param[in] builder Interpreter Builder used to run
+   * @return 0 if test succeeds, otherwise failure
+   */
+  int run(const nnfw::tflite::Builder &builder);
+
+public:
+  /**
+   * @brief Get RandomGenerator reference
+   * @return RandomGenerator reference
+   */
+  nnfw::misc::RandomGenerator &generator() { return _randgen; };
+
+private:
+  nnfw::misc::RandomGenerator _randgen;
+  const RandomTestParam _param;
+
+public:
+  /**
+   * @brief Create a RandomTestRunner object
+   * @param[in] seed Random seed value
+   * @return RandomTestRunner object
+   */
+  static RandomTestRunner make(uint32_t seed);
+};
+
+} // namespace tflite
+} // namespace nnfw
+
+#endif // __NNFW_TFLITE_RANDOM_TEST_RUNNER_H__
diff --git a/runtime/libs/tflite/src/Diff.cpp b/runtime/libs/tflite/src/Diff.cpp
index 919d0bfa0be..39f99435211 100644
--- a/runtime/libs/tflite/src/Diff.cpp
+++ b/runtime/libs/tflite/src/Diff.cpp
@@ -15,17 +15,13 @@
  */
 
 #include "tflite/Diff.h"
-#include "tflite/ext/nnapi_delegate.h"
 
 #include "misc/fp32.h"
 
-#include "misc/tensor/IndexIterator.h"
 #include "misc/tensor/IndexFormatter.h"
 #include "misc/tensor/Zipper.h"
 #include "misc/tensor/Comparator.h"
 
-#include "misc/EnvVar.h"
-
 #include <iostream>
 #include <cassert>
 
@@ -257,332 +253,3 @@ bool TfLiteInterpMatchApp::run(::tflite::Interpreter &interp, ::tflite::Interpre
   return all_matched;
 }
-
-#include "misc/tensor/Object.h"
-
-using namespace std::placeholders;
-
-#include "tflite/TensorLogger.h"
-//
-// Random Test Runner
-//
-int RandomTestRunner::run(const nnfw::tflite::Builder &builder)
-{
-  auto tfl_interp = builder.build();
-  auto nnapi = builder.build();
-
-  tfl_interp->UseNNAPI(false);
-
-  // Allocate Tensors
-  tfl_interp->AllocateTensors();
-  nnapi->AllocateTensors();
-
-  assert(tfl_interp->inputs() == nnapi->inputs());
-
-  using ::tflite::Interpreter;
-  using Initializer = std::function<void(int id, Interpreter *, Interpreter *)>;
-
-  std::map<TfLiteType, Initializer> initializers;
-  std::map<TfLiteType, Initializer> reseters;
-
-  // Generate singed 32-bit integer (s32) input
-  initializers[kTfLiteInt32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
-    assert(tfl_interp->tensor(id)->type == kTfLiteInt32);
-    assert(nnapi->tensor(id)->type == kTfLiteInt32);
-
-    auto tfl_interp_view = nnfw::tflite::TensorView<int32_t>::make(*tfl_interp, id);
-    auto nnapi_view = nnfw::tflite::TensorView<int32_t>::make(*nnapi, id);
-
-    assert(tfl_interp_view.shape() == nnapi_view.shape());
-
-    int32_t value = 0;
-
-    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
-      << [&](const nnfw::misc::tensor::Index &ind) {
-           // TODO Generate random values
-           tfl_interp_view.at(ind) = value;
-           nnapi_view.at(ind) = value;
-           ++value;
-         };
-  };
-
-  // Generate singed 32-bit integer (s32) input
-  reseters[kTfLiteInt32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
-    assert(tfl_interp->tensor(id)->type == kTfLiteInt32);
-    assert(nnapi->tensor(id)->type == kTfLiteInt32);
-
-    auto tfl_interp_view = nnfw::tflite::TensorView<int32_t>::make(*tfl_interp, id);
-    auto nnapi_view = nnfw::tflite::TensorView<int32_t>::make(*nnapi, id);
-
-    assert(tfl_interp_view.shape() == nnapi_view.shape());
-
-    int32_t value = 0;
-
-    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
-      << [&](const nnfw::misc::tensor::Index &ind) {
-           // TODO Generate random values
-           tfl_interp_view.at(ind) = value;
-           nnapi_view.at(ind) = value;
-         };
-  };
-
-  initializers[kTfLiteUInt8] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
-    assert(tfl_interp->tensor(id)->type == kTfLiteUInt8);
-    assert(nnapi->tensor(id)->type == kTfLiteUInt8);
-
-    auto tfl_interp_view = nnfw::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
-    auto nnapi_view = nnfw::tflite::TensorView<uint8_t>::make(*nnapi, id);
-
-    assert(tfl_interp_view.shape() == nnapi_view.shape());
-
-    auto fp = static_cast<uint8_t (nnfw::misc::RandomGenerator::*)(
-      const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
-      &nnfw::misc::RandomGenerator::generate<uint8_t>);
-    const nnfw::misc::tensor::Object<uint8_t> data(tfl_interp_view.shape(),
-                                                   std::bind(fp, _randgen, _1, _2));
-    assert(tfl_interp_view.shape() == data.shape());
-
-    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
-      << [&](const nnfw::misc::tensor::Index &ind) {
-           const auto value = data.at(ind);
-
-           tfl_interp_view.at(ind) = value;
-           nnapi_view.at(ind) = value;
-         };
-  };
-
-  reseters[kTfLiteUInt8] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
-    assert(tfl_interp->tensor(id)->type == kTfLiteUInt8);
-    assert(nnapi->tensor(id)->type == kTfLiteUInt8);
-
-    auto tfl_interp_view = nnfw::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
-    auto nnapi_view = nnfw::tflite::TensorView<uint8_t>::make(*nnapi, id);
-
-    assert(tfl_interp_view.shape() == nnapi_view.shape());
-
-    auto fp = static_cast<uint8_t (nnfw::misc::RandomGenerator::*)(
-      const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
-      &nnfw::misc::RandomGenerator::generate<uint8_t>);
-    const nnfw::misc::tensor::Object<uint8_t> data(tfl_interp_view.shape(),
-                                                   std::bind(fp, _randgen, _1, _2));
-    assert(tfl_interp_view.shape() == data.shape());
-
-    uint8_t value = 0;
-
-    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
-      << [&](const nnfw::misc::tensor::Index &ind) {
-           tfl_interp_view.at(ind) = value;
-           nnapi_view.at(ind) = value;
-         };
-  };
-
-  initializers[kTfLiteFloat32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
-    assert(tfl_interp->tensor(id)->type == kTfLiteFloat32);
-    assert(nnapi->tensor(id)->type == kTfLiteFloat32);
-
-    auto tfl_interp_view = nnfw::tflite::TensorView<float>::make(*tfl_interp, id);
-    auto nnapi_view = nnfw::tflite::TensorView<float>::make(*nnapi, id);
-
-    assert(tfl_interp_view.shape() == nnapi_view.shape());
-
-    auto fp = static_cast<float (nnfw::misc::RandomGenerator::*)(
-      const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
-      &nnfw::misc::RandomGenerator::generate<float>);
-    const nnfw::misc::tensor::Object<float> data(tfl_interp_view.shape(),
-                                                 std::bind(fp, _randgen, _1, _2));
-
-    assert(tfl_interp_view.shape() == data.shape());
-
-    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
-      << [&](const nnfw::misc::tensor::Index &ind) {
-           const auto value = data.at(ind);
-
-           tfl_interp_view.at(ind) = value;
-           nnapi_view.at(ind) = value;
-         };
-  };
-
-  reseters[kTfLiteFloat32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
-    assert(tfl_interp->tensor(id)->type == kTfLiteFloat32);
-    assert(nnapi->tensor(id)->type == kTfLiteFloat32);
-
-    auto tfl_interp_view = nnfw::tflite::TensorView<float>::make(*tfl_interp, id);
-    auto nnapi_view = nnfw::tflite::TensorView<float>::make(*nnapi, id);
-
-    assert(tfl_interp_view.shape() == nnapi_view.shape());
-
-    auto fp = static_cast<float (nnfw::misc::RandomGenerator::*)(
-      const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
-      &nnfw::misc::RandomGenerator::generate<float>);
-    const nnfw::misc::tensor::Object<float> data(tfl_interp_view.shape(),
-                                                 std::bind(fp, _randgen, _1, _2));
-
-    assert(tfl_interp_view.shape() == data.shape());
-
-    float value = 0;
-
-    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
-      << [&](const nnfw::misc::tensor::Index &ind) {
-           tfl_interp_view.at(ind) = value;
-           nnapi_view.at(ind) = value;
-         };
-  };
-
-  initializers[kTfLiteBool] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
-    assert(tfl_interp->tensor(id)->type == kTfLiteBool);
-    assert(nnapi->tensor(id)->type == kTfLiteBool);
-
-    auto tfl_interp_view = nnfw::tflite::TensorView<bool>::make(*tfl_interp, id);
-    auto nnapi_view = nnfw::tflite::TensorView<bool>::make(*nnapi, id);
-
-    assert(tfl_interp_view.shape() == nnapi_view.shape());
-
-    auto fp = static_cast<bool (nnfw::misc::RandomGenerator::*)(
-      const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
-      &nnfw::misc::RandomGenerator::generate<bool>);
-    const nnfw::misc::tensor::Object<bool> data(tfl_interp_view.shape(),
-                                                std::bind(fp, _randgen, _1, _2));
-
-    assert(tfl_interp_view.shape() == data.shape());
-
-    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
-      << [&](const nnfw::misc::tensor::Index &ind) {
-           const auto value = data.at(ind);
-
-           tfl_interp_view.at(ind) = value;
-           nnapi_view.at(ind) = value;
-         };
-  };
-
-  reseters[kTfLiteBool] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
-    assert(tfl_interp->tensor(id)->type == kTfLiteBool);
-    assert(nnapi->tensor(id)->type == kTfLiteBool);
-
-    auto tfl_interp_view = nnfw::tflite::TensorView<bool>::make(*tfl_interp, id);
-    auto nnapi_view = nnfw::tflite::TensorView<bool>::make(*nnapi, id);
-
-    assert(tfl_interp_view.shape() == nnapi_view.shape());
-
-    auto fp = static_cast<bool (nnfw::misc::RandomGenerator::*)(
-      const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
-      &nnfw::misc::RandomGenerator::generate<bool>);
-    const nnfw::misc::tensor::Object<bool> data(tfl_interp_view.shape(),
-                                                std::bind(fp, _randgen, _1, _2));
-
-    assert(tfl_interp_view.shape() == data.shape());
-
-    bool value = false;
-
-    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
-      << [&](const nnfw::misc::tensor::Index &ind) {
-           tfl_interp_view.at(ind) = value;
-           nnapi_view.at(ind) = value;
-         };
-  };
-
-  // Fill IFM with random numbers
-  for (const auto id : tfl_interp->inputs())
-  {
-    assert(tfl_interp->tensor(id)->type == nnapi->tensor(id)->type);
-
-    auto it = initializers.find(tfl_interp->tensor(id)->type);
-
-    if (it == initializers.end())
-    {
-      throw std::runtime_error{"Not supported input type"};
-    }
-
-    it->second(id, tfl_interp.get(), nnapi.get());
-  }
-
-  // Fill OFM with 0
-  for (const auto id : tfl_interp->outputs())
-  {
-    assert(tfl_interp->tensor(id)->type == nnapi->tensor(id)->type);
-
-    auto it = reseters.find(tfl_interp->tensor(id)->type);
-
-    if (it == reseters.end())
-    {
-      throw std::runtime_error{"Not supported input type"};
-    }
-
-    it->second(id, tfl_interp.get(), nnapi.get());
-  }
-
-  std::cout << "[NNAPI TEST] Run T/F Lite Interpreter without NNAPI" << std::endl;
-  tfl_interp->Invoke();
-
-  std::cout << "[NNAPI TEST] Run T/F Lite Interpreter with NNAPI" << std::endl;
-
-  char *env = getenv("UPSTREAM_DELEGATE");
-
-  if (env && !std::string(env).compare("1"))
-  {
-    nnapi->UseNNAPI(true);
-    nnapi->Invoke();
-  }
-  else
-  {
-    nnfw::tflite::NNAPIDelegate d;
-
-    // WARNING
-    // primary_subgraph: Experimental interface. Return 1st sugbraph
-    if (d.BuildGraph(&nnapi.get()->primary_subgraph()))
-    {
-      throw std::runtime_error{"Failed to BuildGraph"};
-    }
-
-    if (d.Invoke(&nnapi.get()->primary_subgraph()))
-    {
-      throw std::runtime_error{"Failed to BuildGraph"};
-    }
-  }
-
-  // Compare OFM
-  std::cout << "[NNAPI TEST] Compare the result" << std::endl;
-
-  const auto tolerance = _param.tolerance;
-
-  auto equals = [tolerance](float lhs, float rhs) {
-    // NOTE Hybrid approach
-    // TODO Allow users to set tolerance for absolute_epsilon_equal
-    if (nnfw::misc::fp32::absolute_epsilon_equal(lhs, rhs))
-    {
-      return true;
-    }
-
-    return nnfw::misc::fp32::epsilon_equal(lhs, rhs, tolerance);
-  };
-
-  nnfw::misc::tensor::Comparator comparator(equals);
-  TfLiteInterpMatchApp app(comparator);
-
-  app.verbose() = _param.verbose;
-
-  bool res = app.run(*tfl_interp, *nnapi);
-
-  if (!res)
-  {
-    return 255;
-  }
-
-  std::cout << "[NNAPI TEST] PASSED" << std::endl;
-
-  if (_param.tensor_logging)
-    nnfw::tflite::TensorLogger::get().save(_param.log_path, *tfl_interp);
-
-  return 0;
-}
-
-RandomTestRunner RandomTestRunner::make(uint32_t seed)
-{
-  RandomTestParam param;
-
-  param.verbose = nnfw::misc::EnvVar("VERBOSE").asInt(0);
-  param.tolerance = nnfw::misc::EnvVar("TOLERANCE").asInt(1);
-  param.tensor_logging = nnfw::misc::EnvVar("TENSOR_LOGGING").asBool(false);
-  param.log_path = nnfw::misc::EnvVar("TENSOR_LOGGING").asString("tensor_log.txt");
-
-  return RandomTestRunner{seed, param};
-}
diff --git a/runtime/libs/tflite/src/RandomTestRunner.cpp b/runtime/libs/tflite/src/RandomTestRunner.cpp
new file mode 100644
index 00000000000..217df9134ad
--- /dev/null
+++ b/runtime/libs/tflite/src/RandomTestRunner.cpp
@@ -0,0 +1,361 @@
+/*
+ * Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "tflite/RandomTestRunner.h"
+#include "tflite/Diff.h"
+#include "tflite/TensorLogger.h"
+#include "tflite/ext/nnapi_delegate.h"
+
+#include <misc/tensor/IndexIterator.h>
+#include <misc/tensor/Object.h>
+#include <misc/EnvVar.h>
+#include <misc/fp32.h>
+
+#include <iostream>
+#include <map>
+#include <functional>
+#include <cassert>
+
+namespace nnfw
+{
+namespace tflite
+{
+
+using namespace std::placeholders;
+
+int RandomTestRunner::run(const nnfw::tflite::Builder &builder)
+{
+  auto tfl_interp = builder.build();
+  auto nnapi = builder.build();
+
+  tfl_interp->UseNNAPI(false);
+
+  // Allocate Tensors
+  tfl_interp->AllocateTensors();
+  nnapi->AllocateTensors();
+
+  assert(tfl_interp->inputs() == nnapi->inputs());
+
+  using ::tflite::Interpreter;
+  using Initializer = std::function<void(int id, Interpreter *, Interpreter *)>;
+
+  std::map<TfLiteType, Initializer> initializers;
+  std::map<TfLiteType, Initializer> reseters;
+
+  // Generate signed 32-bit integer (s32) input
+  initializers[kTfLiteInt32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+    assert(tfl_interp->tensor(id)->type == kTfLiteInt32);
+    assert(nnapi->tensor(id)->type == kTfLiteInt32);
+
+    auto tfl_interp_view = nnfw::tflite::TensorView<int32_t>::make(*tfl_interp, id);
+    auto nnapi_view = nnfw::tflite::TensorView<int32_t>::make(*nnapi, id);
+
+    assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+    int32_t value = 0;
+
+    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
+      << [&](const nnfw::misc::tensor::Index &ind) {
+           // TODO Generate random values
+           tfl_interp_view.at(ind) = value;
+           nnapi_view.at(ind) = value;
+           ++value;
+         };
+  };
+
+  // Generate signed 32-bit integer (s32) input
+  reseters[kTfLiteInt32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+    assert(tfl_interp->tensor(id)->type == kTfLiteInt32);
+    assert(nnapi->tensor(id)->type == kTfLiteInt32);
+
+    auto tfl_interp_view = nnfw::tflite::TensorView<int32_t>::make(*tfl_interp, id);
+    auto nnapi_view = nnfw::tflite::TensorView<int32_t>::make(*nnapi, id);
+
+    assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+    int32_t value = 0;
+
+    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
+      << [&](const nnfw::misc::tensor::Index &ind) {
+           // TODO Generate random values
+           tfl_interp_view.at(ind) = value;
+           nnapi_view.at(ind) = value;
+         };
+  };
+
+  initializers[kTfLiteUInt8] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+    assert(tfl_interp->tensor(id)->type == kTfLiteUInt8);
+    assert(nnapi->tensor(id)->type == kTfLiteUInt8);
+
+    auto tfl_interp_view = nnfw::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
+    auto nnapi_view = nnfw::tflite::TensorView<uint8_t>::make(*nnapi, id);
+
+    assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+    auto fp = static_cast<uint8_t (nnfw::misc::RandomGenerator::*)(
+      const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
+      &nnfw::misc::RandomGenerator::generate<uint8_t>);
+    const nnfw::misc::tensor::Object<uint8_t> data(tfl_interp_view.shape(),
+                                                   std::bind(fp, _randgen, _1, _2));
+    assert(tfl_interp_view.shape() == data.shape());
+
+    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
+      << [&](const nnfw::misc::tensor::Index &ind) {
+           const auto value = data.at(ind);
+
+           tfl_interp_view.at(ind) = value;
+           nnapi_view.at(ind) = value;
+         };
+  };
+
+  reseters[kTfLiteUInt8] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+    assert(tfl_interp->tensor(id)->type == kTfLiteUInt8);
+    assert(nnapi->tensor(id)->type == kTfLiteUInt8);
+
+    auto tfl_interp_view = nnfw::tflite::TensorView<uint8_t>::make(*tfl_interp, id);
+    auto nnapi_view = nnfw::tflite::TensorView<uint8_t>::make(*nnapi, id);
+
+    assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+    auto fp = static_cast<uint8_t (nnfw::misc::RandomGenerator::*)(
+      const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
+      &nnfw::misc::RandomGenerator::generate<uint8_t>);
+    const nnfw::misc::tensor::Object<uint8_t> data(tfl_interp_view.shape(),
+                                                   std::bind(fp, _randgen, _1, _2));
+    assert(tfl_interp_view.shape() == data.shape());
+
+    uint8_t value = 0;
+
+    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
+      << [&](const nnfw::misc::tensor::Index &ind) {
+           tfl_interp_view.at(ind) = value;
+           nnapi_view.at(ind) = value;
+         };
+  };
+
+  initializers[kTfLiteFloat32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+    assert(tfl_interp->tensor(id)->type == kTfLiteFloat32);
+    assert(nnapi->tensor(id)->type == kTfLiteFloat32);
+
+    auto tfl_interp_view = nnfw::tflite::TensorView<float>::make(*tfl_interp, id);
+    auto nnapi_view = nnfw::tflite::TensorView<float>::make(*nnapi, id);
+
+    assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+    auto fp = static_cast<float (nnfw::misc::RandomGenerator::*)(
+      const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
+      &nnfw::misc::RandomGenerator::generate<float>);
+    const nnfw::misc::tensor::Object<float> data(tfl_interp_view.shape(),
+                                                 std::bind(fp, _randgen, _1, _2));
+
+    assert(tfl_interp_view.shape() == data.shape());
+
+    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
+      << [&](const nnfw::misc::tensor::Index &ind) {
+           const auto value = data.at(ind);
+
+           tfl_interp_view.at(ind) = value;
+           nnapi_view.at(ind) = value;
+         };
+  };
+
+  reseters[kTfLiteFloat32] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+    assert(tfl_interp->tensor(id)->type == kTfLiteFloat32);
+    assert(nnapi->tensor(id)->type == kTfLiteFloat32);
+
+    auto tfl_interp_view = nnfw::tflite::TensorView<float>::make(*tfl_interp, id);
+    auto nnapi_view = nnfw::tflite::TensorView<float>::make(*nnapi, id);
+
+    assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+    auto fp = static_cast<float (nnfw::misc::RandomGenerator::*)(
+      const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
+      &nnfw::misc::RandomGenerator::generate<float>);
+    const nnfw::misc::tensor::Object<float> data(tfl_interp_view.shape(),
+                                                 std::bind(fp, _randgen, _1, _2));
+
+    assert(tfl_interp_view.shape() == data.shape());
+
+    float value = 0;
+
+    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
+      << [&](const nnfw::misc::tensor::Index &ind) {
+           tfl_interp_view.at(ind) = value;
+           nnapi_view.at(ind) = value;
+         };
+  };
+
+  initializers[kTfLiteBool] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+    assert(tfl_interp->tensor(id)->type == kTfLiteBool);
+    assert(nnapi->tensor(id)->type == kTfLiteBool);
+
+    auto tfl_interp_view = nnfw::tflite::TensorView<bool>::make(*tfl_interp, id);
+    auto nnapi_view = nnfw::tflite::TensorView<bool>::make(*nnapi, id);
+
+    assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+    auto fp = static_cast<bool (nnfw::misc::RandomGenerator::*)(
+      const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
+      &nnfw::misc::RandomGenerator::generate<bool>);
+    const nnfw::misc::tensor::Object<bool> data(tfl_interp_view.shape(),
+                                                std::bind(fp, _randgen, _1, _2));
+
+    assert(tfl_interp_view.shape() == data.shape());
+
+    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
+      << [&](const nnfw::misc::tensor::Index &ind) {
+           const auto value = data.at(ind);
+
+           tfl_interp_view.at(ind) = value;
+           nnapi_view.at(ind) = value;
+         };
+  };
+
+  reseters[kTfLiteBool] = [&](int id, Interpreter *tfl_interp, Interpreter *nnapi) {
+    assert(tfl_interp->tensor(id)->type == kTfLiteBool);
+    assert(nnapi->tensor(id)->type == kTfLiteBool);
+
+    auto tfl_interp_view = nnfw::tflite::TensorView<bool>::make(*tfl_interp, id);
+    auto nnapi_view = nnfw::tflite::TensorView<bool>::make(*nnapi, id);
+
+    assert(tfl_interp_view.shape() == nnapi_view.shape());
+
+    auto fp = static_cast<bool (nnfw::misc::RandomGenerator::*)(
+      const ::nnfw::misc::tensor::Shape &, const ::nnfw::misc::tensor::Index &)>(
+      &nnfw::misc::RandomGenerator::generate<bool>);
+    const nnfw::misc::tensor::Object<bool> data(tfl_interp_view.shape(),
+                                                std::bind(fp, _randgen, _1, _2));
+
+    assert(tfl_interp_view.shape() == data.shape());
+
+    bool value = false;
+
+    nnfw::misc::tensor::iterate(tfl_interp_view.shape())
+      << [&](const nnfw::misc::tensor::Index &ind) {
+           tfl_interp_view.at(ind) = value;
+           nnapi_view.at(ind) = value;
+         };
+  };
+
+  // Fill IFM with random numbers
+  for (const auto id : tfl_interp->inputs())
+  {
+    assert(tfl_interp->tensor(id)->type == nnapi->tensor(id)->type);
+
+    auto it = initializers.find(tfl_interp->tensor(id)->type);
+
+    if (it == initializers.end())
+    {
+      throw std::runtime_error{"Not supported input type"};
+    }
+
+    it->second(id, tfl_interp.get(), nnapi.get());
+  }
+
+  // Fill OFM with 0
+  for (const auto id : tfl_interp->outputs())
+  {
+    assert(tfl_interp->tensor(id)->type == nnapi->tensor(id)->type);
+
+    auto it = reseters.find(tfl_interp->tensor(id)->type);
+
+    if (it == reseters.end())
+    {
+      throw std::runtime_error{"Not supported output type"};
+    }
+
+    it->second(id, tfl_interp.get(), nnapi.get());
+  }
+
+  std::cout << "[NNAPI TEST] Run T/F Lite Interpreter without NNAPI" << std::endl;
+  tfl_interp->Invoke();
+
+  std::cout << "[NNAPI TEST] Run T/F Lite Interpreter with NNAPI" << std::endl;
+
+  char *env = getenv("UPSTREAM_DELEGATE");
+
+  if (env && !std::string(env).compare("1"))
+  {
+    nnapi->UseNNAPI(true);
+    nnapi->Invoke();
+  }
+  else
+  {
+    nnfw::tflite::NNAPIDelegate d;
+
+    // WARNING
+    // primary_subgraph: Experimental interface. Returns 1st subgraph
+    if (d.BuildGraph(&nnapi.get()->primary_subgraph()))
+    {
+      throw std::runtime_error{"Failed to BuildGraph"};
+    }
+
+    if (d.Invoke(&nnapi.get()->primary_subgraph()))
+    {
+      throw std::runtime_error{"Failed to Invoke"};
+    }
+  }
+
+  // Compare OFM
+  std::cout << "[NNAPI TEST] Compare the result" << std::endl;
+
+  const auto tolerance = _param.tolerance;
+
+  auto equals = [tolerance](float lhs, float rhs) {
+    // NOTE Hybrid approach
+    // TODO Allow users to set tolerance for absolute_epsilon_equal
+    if (nnfw::misc::fp32::absolute_epsilon_equal(lhs, rhs))
+    {
+      return true;
+    }
+
+    return nnfw::misc::fp32::epsilon_equal(lhs, rhs, tolerance);
+  };
+
+  nnfw::misc::tensor::Comparator comparator(equals);
+  TfLiteInterpMatchApp app(comparator);
+
+  app.verbose() = _param.verbose;
+
+  bool res = app.run(*tfl_interp, *nnapi);
+
+  if (!res)
+  {
+    return 255;
+  }
+
+  std::cout << "[NNAPI TEST] PASSED" << std::endl;
+
+  if (_param.tensor_logging)
+    nnfw::tflite::TensorLogger::get().save(_param.log_path, *tfl_interp);
+
+  return 0;
+}
+
+RandomTestRunner RandomTestRunner::make(uint32_t seed)
+{
+  RandomTestParam param;
+
+  param.verbose = nnfw::misc::EnvVar("VERBOSE").asInt(0);
+  param.tolerance = nnfw::misc::EnvVar("TOLERANCE").asInt(1);
+  param.tensor_logging = nnfw::misc::EnvVar("TENSOR_LOGGING").asBool(false);
+  param.log_path = nnfw::misc::EnvVar("TENSOR_LOGGING").asString("tensor_log.txt");
+
+  return RandomTestRunner{seed, param};
+}
+
+} // namespace tflite
+} // namespace nnfw
diff --git a/tests/tools/nnapi_test/src/nnapi_test.cc b/tests/tools/nnapi_test/src/nnapi_test.cc
index d925831f166..d3c72d5b7c9 100644
--- a/tests/tools/nnapi_test/src/nnapi_test.cc
+++ b/tests/tools/nnapi_test/src/nnapi_test.cc
@@ -18,7 +18,7 @@
 #include "tensorflow/lite/model.h"
 #include "tflite/interp/FlatBufferBuilder.h"
-#include "tflite/Diff.h"
+#include "tflite/RandomTestRunner.h"
 
 #include <iostream>
 #include <stdexcept>
 
@@ -51,7 +51,7 @@ int main(const int argc, char **argv)
 
   try
   {
-    return RandomTestRunner::make(0).run(builder);
+    return nnfw::tflite::RandomTestRunner::make(0).run(builder);
   }
   catch (const std::exception &e)
   {