Skip to content

Commit

Permalink
[onert/gpu-cl] Revise ClTensorBuilder (Samsung#8059)
Browse files Browse the repository at this point in the history
This commit changes the template class ClTensorBuilder into a general (non-template) class and removes the class alias TensorBuilder.

Signed-off-by: Hyeongseok Oh <[email protected]>
  • Loading branch information
hseok-oh authored Dec 2, 2021
1 parent df85b4f commit f7f819e
Show file tree
Hide file tree
Showing 2 changed files with 98 additions and 40 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -14,12 +14,11 @@
* limitations under the License.
*/

#ifndef __ONERT_BACKEND_CL_TENSOR_BUILDER_H__
#define __ONERT_BACKEND_CL_TENSOR_BUILDER_H__

#include <memory>
#include <queue>

#include "TensorBuilder.h"

#include "ClTensorManager.h"
#include "ClTensorRegistry.h"
#include "ParentInfo.h"
Expand All @@ -34,6 +33,8 @@
#include <ir/Operands.h>
#include <util/Utils.h>

// TODO Remove below
#if 0
namespace onert
{
namespace backend
Expand Down Expand Up @@ -121,6 +122,7 @@ template <typename T_ITensor, typename T_Tensor> class ClTensorBuilder
} // namespace gpu_cl
} // namespace backend
} // namespace onert
#endif

#include <cassert>
#include <stack>
Expand All @@ -134,22 +136,17 @@ namespace backend
namespace gpu_cl
{

template <typename T_ITensor, typename T_Tensor>
ClTensorBuilder<T_ITensor, T_Tensor>::ClTensorBuilder(
const ir::Operands &operands, TensorManager *tensor_mgr,
tflite::gpu::cl::InferenceContext::CreateInferenceInfo create_info,
const std::shared_ptr<tflite::gpu::cl::Environment> &environment)
// Construct a TensorBuilder bound to the graph operands, a tensor manager,
// and the OpenCL inference configuration/environment it will build against.
TensorBuilder::TensorBuilder(const ir::Operands &operands, TensorManager *tensor_mgr,
                             tflite::gpu::cl::InferenceContext::CreateInferenceInfo create_info,
                             const std::shared_ptr<tflite::gpu::cl::Environment> &environment)
  : _operands{operands}, _tensor_mgr{tensor_mgr}, _create_info{create_info},
    _environment{environment}
{
  // Every later build/allocate step dereferences the manager, so fail fast here.
  assert(_tensor_mgr);
}

template <typename T_ITensor, typename T_Tensor>
void ClTensorBuilder<T_ITensor, T_Tensor>::registerTensorInfo(const ir::OperandIndex &ind,
const ir::OperandInfo &info,
ir::Layout backend_layout,
TensorType type)
void TensorBuilder::registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info,
ir::Layout backend_layout, TensorType type)
{
assert(_tensor_mgr->constTensors().size() == 0);
assert(_tensor_mgr->nonconstTensors().size() == 0);
Expand All @@ -162,32 +159,24 @@ void ClTensorBuilder<T_ITensor, T_Tensor>::registerTensorInfo(const ir::OperandI
_tensor_layout_map.insert({ind, backend_layout});
}

template <typename T_ITensor, typename T_Tensor>
void ClTensorBuilder<T_ITensor, T_Tensor>::notifyFirstUse(const ir::OperandIndex &ind)
// Record in the lifetime sequence that operand `ind` becomes live here.
void TensorBuilder::notifyFirstUse(const ir::OperandIndex &ind)
{
  _lifetime_seq.push_back({UsesType::FIRST, ind});
}

template <typename T_ITensor, typename T_Tensor>
void ClTensorBuilder<T_ITensor, T_Tensor>::notifyLastUse(const ir::OperandIndex &ind)
// Record in the lifetime sequence that operand `ind` is used for the last time here.
void TensorBuilder::notifyLastUse(const ir::OperandIndex &ind)
{
  _lifetime_seq.push_back({UsesType::LAST, ind});
}

template <typename T_ITensor, typename T_Tensor>
bool ClTensorBuilder<T_ITensor, T_Tensor>::isRegistered(const ir::OperandIndex &ind) const
// True iff tensor info for `ind` was previously registered via registerTensorInfo().
bool TensorBuilder::isRegistered(const ir::OperandIndex &ind) const
{
  return _tensor_info_map.count(ind) != 0;
}

template <typename T_ITensor, typename T_Tensor>
void ClTensorBuilder<T_ITensor, T_Tensor>::prepare(void)
{
buildTensors();
}
// Build every registered tensor; invoked once before allocation.
void TensorBuilder::prepare(void)
{
  buildTensors();
}

template <typename T_ITensor, typename T_Tensor>
void ClTensorBuilder<T_ITensor, T_Tensor>::allocate(void)
void TensorBuilder::allocate(void)
{
// Update lifetime sequence to apply subtensor optimization

Expand Down Expand Up @@ -261,14 +250,9 @@ void ClTensorBuilder<T_ITensor, T_Tensor>::allocate(void)
_tensor_mgr->allocateNonconsts();
}

template <typename T_ITensor, typename T_Tensor>
void ClTensorBuilder<T_ITensor, T_Tensor>::postFunctionPrepare(void)
{
_tensor_mgr->tryDeallocConstants();
}
// Once kernels are prepared, ask the manager to free constant tensors it no
// longer needs to keep around.
void TensorBuilder::postFunctionPrepare(void)
{
  _tensor_mgr->tryDeallocConstants();
}

template <typename T_ITensor, typename T_Tensor>
void ClTensorBuilder<T_ITensor, T_Tensor>::buildTensors(void)
void TensorBuilder::buildTensors(void)
{
assert(_tensor_mgr->constTensors().size() == 0);
assert(_tensor_mgr->nonconstTensors().size() == 0);
Expand All @@ -288,5 +272,3 @@ void ClTensorBuilder<T_ITensor, T_Tensor>::buildTensors(void)
} // namespace gpu_cl
} // namespace backend
} // namespace onert

#endif // __ONERT_BACKEND_ACL_COMMON_TEMPL_TENSOR_BUILDER_H__
86 changes: 81 additions & 5 deletions runtime/onert/backend/gpu_cl/TensorBuilder.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,10 +17,11 @@
#ifndef __ONERT_BACKEND_GPU_CL_TENSOR_BUILDER_H__
#define __ONERT_BACKEND_GPU_CL_TENSOR_BUILDER_H__

#include <backend/basic/TensorBuilder.h>
#include "operand/ICLTensor.h"
#include "operand/CLTensor.h"
#include "ClTensorBuilder.h"
#include "ClTensorManager.h"
#include "ParentInfo.h"

#include <ir/Operands.h>
#include <ir/OperandIndexSequence.h>

namespace onert
{
Expand All @@ -29,7 +30,82 @@ namespace backend
namespace gpu_cl
{

using TensorBuilder = ClTensorBuilder<operand::ICLTensor, operand::CLTensor>;
// Tags an entry of the tensor lifetime sequence (_lifetime_seq):
// FIRST marks the operand's first use, LAST its final use.
enum class UsesType
{
  FIRST,
  LAST
};

/**
 * @brief Registers tensor metadata, tracks operand lifetimes, and drives
 *        tensor construction/allocation for the gpu_cl backend via a
 *        TensorManager.
 */
class TensorBuilder
{
public:
  // NOTE(review): the raw TensorManager* is stored in a std::unique_ptr member,
  // so the builder takes ownership of `tensor_mgr` — confirm callers do not
  // delete it themselves.
  TensorBuilder(const ir::Operands &operands, TensorManager *tensor_mgr,
                tflite::gpu::cl::InferenceContext::CreateInferenceInfo create_info,
                const std::shared_ptr<tflite::gpu::cl::Environment> &environment);

  /**
   * @brief Register tensor information to allocate on the gpu_cl backend
   * @param[in] ind            Operand index
   * @param[in] info           Tensor information
   * @param[in] backend_layout Tensor data layout
   * @param[in] type           Backend tensor type
   */
  void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info,
                          ir::Layout backend_layout, TensorType type);

  // Append FIRST/LAST lifetime events for the given operand.
  void notifyFirstUse(const ir::OperandIndex &);
  void notifyLastUse(const ir::OperandIndex &);

  // Whether registerTensorInfo() has been called for this operand.
  bool isRegistered(const ir::OperandIndex &) const;

  // Build, allocate, and post-process tensors (see the .cc for details).
  void prepare();
  void allocate();
  void postFunctionPrepare();

  // Non-owning access to the underlying tensor manager.
  TensorManager *cl_tensor_manager(void) { return _tensor_mgr.get(); }

  // Record the expected number of uses for an operand; re-registering with a
  // different count is a programming error (checked by the assert).
  void setUsesCount(const ir::OperandIndex &index, size_t num_uses)
  {
    assert(_uses_count_map.find(index) != _uses_count_map.end() ? _uses_count_map[index] == num_uses
                                                                : true);
    _uses_count_map[index] = num_uses;
  }

  // Install the child->parent mapping used for subtensor (concat-elimination) handling.
  void parent_map(std::unordered_map<ir::OperandIndex, ParentInfo> &&parent_map)
  {
    _parent_map = std::move(parent_map);
  }

  bool areSubTensorsOf(const ir::OperandIndex &parent, const ir::OperandIndexSequence &seq);

  /**
   * @brief Check child tensor is allocated as subtensor of parent tensor
   * @param[in] parent Index of parent
   * @param[in] child Index of child
   * @return @c true if child is allocated as subtensor of parent, otherwise @c false
   */
  bool isSubTensorOf(const ir::OperandIndex &parent, const ir::OperandIndex &child);

private:
  // Create backend tensors for all registered operands.
  void buildTensors(void);
  // Follow _parent_map to the topmost ancestor of `index`.
  ir::OperandIndex findRootParent(ir::OperandIndex index);

private:
  const ir::Operands &_operands;
  ir::OperandIndexMap<ir::OperandInfo> _tensor_info_map;
  ir::OperandIndexMap<ir::Layout> _tensor_layout_map;
  ir::OperandIndexMap<TensorType> _tensor_type_map;
  ir::OperandIndexMap<size_t> _uses_count_map;

  std::unique_ptr<TensorManager> _tensor_mgr;
  tflite::gpu::cl::InferenceContext::CreateInferenceInfo _create_info;
  std::shared_ptr<tflite::gpu::cl::Environment> _environment;

  // for linear executor: ordered FIRST/LAST lifetime events per operand
  std::vector<std::pair<UsesType, ir::OperandIndex>> _lifetime_seq;

  // Extra info for concat elimination
  ir::OperandIndexMap<ParentInfo> _parent_map;
};

} // namespace gpu_cl
} // namespace backend
Expand Down

0 comments on commit f7f819e

Please sign in to comment.