Skip to content

Commit

Permalink
[onert-micro] Refactor onert-micro (Samsung#10475)
Browse files Browse the repository at this point in the history
This PR refactors onert-micro to reduce runtime memory usage and shrink the binary size.

ONE-DCO-1.0-Signed-off-by: Artem Balyshev <[email protected]>

Co-authored-by: Artem Balyshev <[email protected]>
  • Loading branch information
BalyshevArtem and Artem Balyshev authored Mar 14, 2023
1 parent aa27166 commit d52f5e7
Show file tree
Hide file tree
Showing 76 changed files with 2,035 additions and 4,541 deletions.
22 changes: 22 additions & 0 deletions onert-micro/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,25 @@ else()
return()
endif()

# Static Memory Manager support is not implemented yet: abort the configure
# step early instead of producing a build with an unsupported allocator.
if (USE_STATIC_ALLOC)
  # TODO: enable it
  message(STATUS "FAILED: ONERT-MICRO does not support the Static Memory Manager yet")
  return()
else()
  message(STATUS "USE_STATIC_ALLOC variable is not defined, default dynamic memory manager will be used")
endif()

# DIS_QUANT: opt-out switch that compiles out quantized-model support.
# Defines the DIS_QUANT preprocessor macro, which the C++ sources use to
# exclude the corresponding kernel code (reduces binary size).
if (DIS_QUANT)
message(STATUS "ONERT-MICRO will not use part for QUANTIZED models")
add_definitions(-DDIS_QUANT)
endif()

# DIS_FLOAT: opt-out switch that compiles out float-model support.
# Defines the DIS_FLOAT preprocessor macro, which the C++ sources use to
# exclude the corresponding kernel code (reduces binary size).
if (DIS_FLOAT)
message(STATUS "ONERT-MICRO will not use part for FLOAT models")
add_definitions(-DDIS_FLOAT)
endif()


set(CMAKE_ARM_OPTIONS
-DLUCI_INTERPRETER_STATIC=ON
-DLUCI_STATIC=ON
Expand Down Expand Up @@ -122,6 +141,9 @@ endif ()
# Remove configuration variables from the CMake cache so that stale values do
# not leak into subsequent configure runs.
unset(GENERATE_KERNELS_LIST_FROM CACHE)
unset(KERNELS CACHE)
# NOTE(review): the option checked above is USE_STATIC_ALLOC, not
# USE_STATIC_KERNEL (which appears nowhere else in this file) — unsetting the
# wrong name would leave the real cache entry stale across reconfigures.
unset(USE_STATIC_ALLOC CACHE)
unset(DIS_QUANT CACHE)
unset(DIS_FLOAT CACHE)

set(MICRO_ARM_BINARY "${MICRO_ARM_BUILD_DIR}/luci-interpreter/src/libluci_interpreter_micro.a")

Expand Down
63 changes: 16 additions & 47 deletions onert-micro/eval-driver/Driver.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -48,14 +48,6 @@ void writeDataToFile(const std::string &filename, const char *data, size_t data_
}
}

// Returns the total size of a tensor's payload in bytes:
// size of one element (by data type) times the number of elements.
size_t getTensorSize(const luci_interpreter::Tensor *tensor)
{
  const size_t element_size = luci_interpreter::size(tensor->element_type());
  return element_size * tensor->shape().num_elements();
}

} // namespace

/*
Expand Down Expand Up @@ -107,53 +99,30 @@ int entry(int argc, char **argv)
// Set input.
// Data for n'th input is read from ${input_prefix}n
// (ex: Add.circle.input0, Add.circle.input1 ..)
const auto input_tensors = interpreter.getInputTensors();
assert(num_inputs == input_tensors.size());
for (int32_t i = 0; i < num_inputs; i++)
int num_inference = 1;
for (int j = 0; j < num_inference; ++j)
{
auto *input_tensor = input_tensors[i];
std::vector<char> input_data(getTensorSize(input_tensor));
readDataFromFile(std::string(input_prefix) + std::to_string(i), input_data.data(),
input_data.size());
luci_interpreter::Interpreter::writeInputTensor(input_tensor, input_data.data(),
input_data.size());
}
for (int32_t i = 0; i < num_inputs; i++)
{
auto input_data = reinterpret_cast<char *>(interpreter.allocateInputTensor(i));
readDataFromFile(std::string(input_prefix) + std::to_string(i), input_data,
interpreter.getInputDataSizeByIndex(i));
}

// Do inference.
interpreter.interpret();
// Do inference.
interpreter.interpret();
}

// Get output.
const auto output_tensors = interpreter.getOutputTensors();
for (int i = 0; i < output_tensors.size(); i++)
int num_outputs = 1;
for (int i = 0; i < num_outputs; i++)
{
const auto *output_tensor = output_tensors[i];
std::vector<char> output_data(getTensorSize(output_tensor));
luci_interpreter::Interpreter::readOutputTensor(output_tensor, output_data.data(),
output_data.size());
auto data = interpreter.readOutputTensor(i);

// Output data is written in ${output_file}
// (ex: Add.circle.output0)
// Output shape is written in ${output_file}.shape
// (ex: Add.circle.output0.shape)
writeDataToFile(std::string(output_file) + std::to_string(i), output_data.data(),
output_data.size());
// In case of Tensor output is Scalar value.
// The output tensor with rank 0 is treated as a scalar with shape (1)
if (output_tensor->shape().num_dims() == 0)
{
writeDataToFile(std::string(output_file) + std::to_string(i) + ".shape", "1", 1);
}
else
{
auto shape_str = std::to_string(output_tensor->shape().dim(0));
for (int j = 1; j < output_tensor->shape().num_dims(); j++)
{
shape_str += ",";
shape_str += std::to_string(output_tensor->shape().dim(j));
}
const auto tensor_shape_file = std::string(output_file) + std::to_string(i) + ".shape";
writeDataToFile(tensor_shape_file, shape_str.c_str(), shape_str.size());
}
writeDataToFile(std::string(output_file) + std::to_string(i), reinterpret_cast<char *>(data),
interpreter.getOutputDataSizeByIndex(i));
}
return EXIT_SUCCESS;
}
Expand Down

This file was deleted.

28 changes: 16 additions & 12 deletions onert-micro/luci-interpreter/include/luci_interpreter/Interpreter.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,13 +18,16 @@
#define LUCI_INTERPRETER_INTERPRETER_H

#include "luci_interpreter/core/Tensor.h"
#include "luci_interpreter/InterpreterConfigure.h"

#include "memory_managers/MemoryManager.h"
#ifdef USE_STATIC_ALLOC
#include "luci_interpreter/InterpreterConfigure.h"
#include "memory_managers/StaticMemoryManager.h"
#else
#include "memory_managers/SimpleMemoryManager.h"
#endif // USE_STATIC_ALLOC

#include "loader/ModuleLoader.h"
#include <memory>
#include <vector>
#include <unordered_map>

namespace luci_interpreter
{
Expand All @@ -35,27 +38,28 @@ class Interpreter
// Construct default interpreter with dynamic allocations and with input allocations
explicit Interpreter(const char *model_data_raw);

#ifdef USE_STATIC_ALLOC
// Construct interpreter with configurations
explicit Interpreter(const char *model_data_raw, const InterpreterConfigure &configuration);
#endif // USE_STATIC_ALLOC

~Interpreter();

static void writeInputTensor(Tensor *input_tensor, const void *data, size_t data_size);
void allocateAndWriteInputTensor(int32_t input_tensor_index, const void *data, size_t data_size);
uint8_t *allocateInputTensor(int32_t input_tensor_index);

static void writeInputTensorWithoutCopy(Tensor *input_tensor, const void *data);
uint8_t *readOutputTensor(int32_t output_tensor_index);

static void readOutputTensor(const Tensor *output_tensor, void *data, size_t data_size);
int32_t getInputDataSizeByIndex(int32_t input_tensor_index);
int32_t getOutputDataSizeByIndex(int32_t output_tensor_index);

void interpret();

std::vector<Tensor *> getInputTensors();
std::vector<Tensor *> getOutputTensors();

private:
// _default_memory_manager should be before _runtime_module due to
// the order of deletion in the destructor
std::unique_ptr<IMemoryManager> _memory_manager;
std::unique_ptr<class RuntimeModule> _runtime_module;
MemoryManager _memory_manager{};
RuntimeModule _runtime_module{};
};

} // namespace luci_interpreter
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -19,6 +19,7 @@

namespace luci_interpreter
{
#ifdef USE_STATIC_ALLOC

enum MemoryManagerType
{
Expand Down Expand Up @@ -72,6 +73,8 @@ class InterpreterConfigure
uint32_t _output_buf_size = 0;
};

#endif

} // namespace luci_interpreter

#endif // ONERT_MICRO_INTERPRETER_CONFIGURE_H
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@

namespace luci_interpreter
{

// TODO: check whether this can be removed
/**
* @brief "scalar" value type
*/
Expand All @@ -49,9 +49,6 @@ enum class DataType
// WARNING the size of Bool may vary for NN frameworks
// TODO we need to find a way to resolve this issue
BOOL, // Boolean

// WARNING STRING is NOT fully supported yet
STRING, // String
};

/**
Expand Down Expand Up @@ -137,12 +134,6 @@ template <> struct DataTypeImpl<DataType::BOOL>
using Type = uint8_t;
};

template <> struct DataTypeImpl<DataType::STRING>
{
// Use C++ std::string type for STRING
using Type = std::string;
};

/**
* @brief Returns the size of the data type.
* @note If you need the size at compile time, use `sizeof(typename DataTypeImpl<DT>::Type)`.
Expand Down Expand Up @@ -175,9 +166,6 @@ inline uint32_t size(DataType data_type)
return sizeof(DataTypeImpl<DataType::FLOAT64>::Type);
case DataType::BOOL:
return sizeof(DataTypeImpl<DataType::BOOL>::Type);
case DataType::STRING:
// STRING is variable length. Cannot decide size by type
assert(false && "Invalid size call with STRING type");
default:
// TODO Support remaining data types.
assert(false);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@

namespace luci_interpreter
{

// TODO: check whether this can be removed
enum class FusedActFunc
{
UNDEFINED, // This is not defined by TFLite or Circle. This was added to
Expand Down
Loading

0 comments on commit d52f5e7

Please sign in to comment.