[onert] Implement ReduceAny operation in cpu backend (Samsung#649)
- Support the ReduceAny operation
- This operation is currently supported only on the CPU backend

Signed-off-by: sungho22.lee <[email protected]>
intom authored May 18, 2020
1 parent b3c9a6f commit 56e2efd
Showing 25 changed files with 340 additions and 1 deletion.
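
For context, ReduceAny computes a logical OR over the elements of a boolean tensor along the given axes, keeping or dropping the reduced dimensions according to keep_dims. The following is a minimal standalone sketch of that semantics, independent of the onert/cker kernel code; the function and parameter names are illustrative only.

#include <cassert>
#include <cstddef>
#include <iostream>
#include <vector>

// Illustrative only: reduce a row-major [rows x cols] boolean matrix along
// axis 1, i.e. out[r] = in[r][0] || in[r][1] || ... || in[r][cols - 1].
// This mirrors the accumulator used in the kernel change below:
// initial value false, combiner (in || current).
std::vector<bool> ReduceAnyAxis1(const std::vector<bool> &in, std::size_t rows, std::size_t cols)
{
  assert(in.size() == rows * cols);
  std::vector<bool> out(rows, false); // false is the reduction's initial value
  for (std::size_t r = 0; r < rows; ++r)
    for (std::size_t c = 0; c < cols; ++c)
      out[r] = out[r] || in[r * cols + c];
  return out;
}

int main()
{
  // 2x3 input {{false, false, true}, {false, false, false}} reduces to {true, false}
  const std::vector<bool> in = {false, false, true, false, false, false};
  const auto out = ReduceAnyAxis1(in, 2, 3);
  std::cout << out[0] << ' ' << out[1] << '\n'; // prints "1 0"
}
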
7 changes: 6 additions & 1 deletion runtime/libs/tflite/port/1.13.1/src/nnapi_delegate.cpp
@@ -806,6 +806,11 @@ TfLiteStatus AddOpsAndParams(
nn_op_type = ANEURALNETWORKS_REDUCE_SUM;
add_reducer_params(node.builtin_data);
break;
case tflite::BuiltinOperator_REDUCE_ANY:
add_reducer_params(node.builtin_data);
nnapi_version = 12; // require NNAPI 1.2
nn_op_type = ANEURALNETWORKS_REDUCE_ANY;
break;
case tflite::BuiltinOperator_REDUCE_MAX:
add_reducer_params(node.builtin_data);
nnapi_version = 12; // require NNAPI 1.2
@@ -959,7 +964,7 @@ TfLiteStatus AddOpsAndParams(
//case tflite::BuiltinOperator_LOGICAL_NOT:
//case tflite::BuiltinOperator_UNPACK:
case tflite::BuiltinOperator_FLOOR_DIV:
case tflite::BuiltinOperator_REDUCE_ANY:
//case tflite::BuiltinOperator_REDUCE_ANY:
case tflite::BuiltinOperator_SQUARE:
//case tflite::BuiltinOperator_ZEROS_LIKE:
case tflite::BuiltinOperator_FILL:
16 changes: 16 additions & 0 deletions runtime/onert/backend/cpu/KernelGenerator.cc
@@ -817,6 +817,22 @@ void KernelGenerator::visit(const ir::operation::ReduceSum &node)
_return_fn = std::move(fn);
}

void KernelGenerator::visit(const ir::operation::ReduceAny &node)
{
const auto output_index{node.getOutputs().at(0)};
const auto input_index{node.getInputs().at(ir::operation::ReduceAny::Input::INPUT)};

auto output_alloc = _tensor_builder->at(output_index).get();
auto input_alloc = _tensor_builder->at(input_index).get();

auto fn = std::make_unique<kernel::ReduceLayer>();

fn->configure(input_alloc, output_alloc, kernel::ReduceType::kAny, node.param().axes,
node.param().keep_dims);

_return_fn = std::move(fn);
}

void KernelGenerator::visit(const ir::operation::ReduceMax &node)
{
const auto output_index{node.getOutputs().at(0)};
1 change: 1 addition & 0 deletions runtime/onert/backend/cpu/KernelGenerator.h
@@ -71,6 +71,7 @@ class KernelGenerator : public IKernelGenerator
void visit(const ir::operation::Cast &) override;
void visit(const ir::operation::Transpose &) override;
void visit(const ir::operation::ReduceSum &) override;
void visit(const ir::operation::ReduceAny &) override;
void visit(const ir::operation::ReduceMax &) override;
void visit(const ir::operation::ReduceMin &) override;
void visit(const ir::operation::Select &) override;
2 changes: 2 additions & 0 deletions runtime/onert/backend/cpu/ShapeFixer.cc
@@ -161,6 +161,8 @@ void ShapeFixer::visit(const ir::operation::Transpose &) { /* DO NOTHING */}

void ShapeFixer::visit(const ir::operation::ReduceSum &) { /* DO NOTHING */}

void ShapeFixer::visit(const ir::operation::ReduceAny &) { /* DO NOTHING */}

void ShapeFixer::visit(const ir::operation::ReduceMax &) { /* DO NOTHING */}

void ShapeFixer::visit(const ir::operation::ReduceMin &) { /* DO NOTHING */}
1 change: 1 addition & 0 deletions runtime/onert/backend/cpu/ShapeFixer.h
@@ -66,6 +66,7 @@ class ShapeFixer : public IShapeFixer
void visit(const ir::operation::Cast &) override;
void visit(const ir::operation::Transpose &) override;
void visit(const ir::operation::ReduceSum &) override;
void visit(const ir::operation::ReduceAny &) override;
void visit(const ir::operation::ReduceMax &) override;
void visit(const ir::operation::ReduceMin &) override;
void visit(const ir::operation::Select &) override;
23 changes: 23 additions & 0 deletions runtime/onert/backend/cpu/kernel/ReduceLayer.cc
@@ -78,6 +78,24 @@ void evalType(const operand::Tensor *input, operand::Tensor *output, const std::
}
}

// Template specialization for bool type
template <>
void evalType<bool>(const operand::Tensor *input, operand::Tensor *output,
const std::vector<int> &axes, bool keep_dims, nnfw::cker::Reduce &reduce_kernel,
ReduceType reduce_type)
{
switch (reduce_type)
{
case ReduceType::kAny:
return evalLogic<bool>(
input, output, axes, keep_dims, false, reduce_kernel,
[](const bool current, const bool in) -> bool { return in || current; });
break;
default:
throw std::runtime_error{"Reduce: Unsupported reduce type"};
}
}

template <ReduceType reduce_type>
void evalGeneric(const operand::Tensor *input, operand::Tensor *output,
const std::vector<int> &axes, bool keep_dims, nnfw::cker::Reduce &reduce_kernel)
@@ -88,6 +106,8 @@ void evalGeneric(const operand::Tensor *input, operand::Tensor *output,
return evalType<float>(input, output, axes, keep_dims, reduce_kernel, reduce_type);
case OperandType::INT32:
return evalType<int32_t>(input, output, axes, keep_dims, reduce_kernel, reduce_type);
case OperandType::BOOL8:
return evalType<bool>(input, output, axes, keep_dims, reduce_kernel, reduce_type);
default:
throw std::runtime_error{"Reduce(generic): Unsupported input type"};
}
@@ -129,6 +149,9 @@ void ReduceLayer::run()
case ReduceType::kMin:
evalGeneric<ReduceType::kMin>(_input, _output, _axes, _keep_dims, *_reduce_kernel);
break;
case ReduceType::kAny:
evalGeneric<ReduceType::kAny>(_input, _output, _axes, _keep_dims, *_reduce_kernel);
break;
default:
throw std::runtime_error{"ReduceSum: Unsupported reduce type"};
}
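
The pattern in this file — a generic evalType<T> plus a full specialization for bool that only accepts ReduceType::kAny — can be illustrated with a self-contained toy version; the names and types below are illustrative, not the onert ones.

#include <iostream>
#include <stdexcept>

enum class ReduceType { kSum, kAny };

// Generic element-wise combiner used by arithmetic reductions.
template <typename T> T Combine(ReduceType type, T current, T in)
{
  switch (type)
  {
    case ReduceType::kSum:
      return current + in;
    default:
      throw std::runtime_error{"unsupported reduce type for this element type"};
  }
}

// Full specialization for bool: only the logical reduction makes sense here.
template <> bool Combine<bool>(ReduceType type, bool current, bool in)
{
  switch (type)
  {
    case ReduceType::kAny:
      return in || current;
    default:
      throw std::runtime_error{"unsupported reduce type for bool"};
  }
}

int main()
{
  std::cout << Combine<int>(ReduceType::kSum, 2, 3) << '\n';        // prints 5
  std::cout << Combine<bool>(ReduceType::kAny, false, true) << '\n'; // prints 1
}
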
1 change: 1 addition & 0 deletions runtime/onert/core/include/ir/Operations.Include.h
@@ -42,6 +42,7 @@
#include "ir/operation/Div.h"
#include "ir/operation/Exp.h"
#include "ir/operation/ExpandDims.h"
#include "ir/operation/ReduceAny.h"
#include "ir/operation/ReduceMax.h"
#include "ir/operation/Comparison.h"
#include "ir/operation/LogicalAnd.h"
1 change: 1 addition & 0 deletions runtime/onert/core/include/ir/Operations.lst
@@ -43,6 +43,7 @@ OP(Div)
OP(Transpose)
OP(Exp)
OP(ExpandDims)
OP(ReduceAny)
OP(ReduceMax)
OP(Comparison)
OP(LogicalAnd)
65 changes: 65 additions & 0 deletions runtime/onert/core/include/ir/operation/ReduceAny.h
@@ -0,0 +1,65 @@
/*
* Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef __ONERT_IR_OPERATION_REDUCEANY_H__
#define __ONERT_IR_OPERATION_REDUCEANY_H__

#include <memory>

#include "ir/Operation.h"

namespace onert
{
namespace ir
{
namespace operation
{

class ReduceAny : public Operation
{
public:
enum Input
{
INPUT = 0
};

struct Param
{
std::vector<int> axes;
bool keep_dims;
int32_t rank;
};

public:
ReduceAny(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
const Param &param);

public:
void accept(OperationVisitor &v) const override;
OpCode opcode() const final { return OpCode::ReduceAny; }

public:
const Param &param() const { return _param; }

private:
Param _param;
};

} // namespace operation
} // namespace ir
} // namespace onert

#endif // __ONERT_IR_OPERATION_REDUCEANY_H__
50 changes: 50 additions & 0 deletions runtime/onert/core/src/compiler/OperationValidator.cc
@@ -236,6 +236,56 @@ void OperationValidator::visit(const ir::operation::Transpose &node)
assert(input_shape.rank() == output_shape.rank());
}

void OperationValidator::visit(const ir::operation::ReduceAny &node)
{
const auto output_index{node.getOutputs().at(0)};
const auto input_index{node.getInputs().at(ir::operation::ReduceAny::Input::INPUT)};
const auto &axes = node.param().axes;

auto output_shape = _ctx.at(output_index).shape();
auto input_shape = _ctx.at(input_index).shape();

UNUSED_RELEASE(output_shape);
UNUSED_RELEASE(input_shape);
UNUSED_RELEASE(axes);

assert(input_shape.rank() <= 4);
assert(output_shape.rank() <= input_shape.rank());

// NOTE For the 4-dimensions, if the rank of input and output are different, this runtime only
// supports cases reducing height and width or reducing depth.
// TODO We have to support all cases of dimensions up to 4.
// For correct permuting, we have to set output's shape to be equal in dimension position of the
// input. But the positions of the same dimensions in the input and output may be set differently.
// For example {2,3,4,5}(input's shape) can be reduced to {3,5}(output's shape). The original
// output shape should be {1,3,1,5}, but real output shape may be {3,5}. If you simply try to
// extend it in 4 dimensions, it should be {1,1,3,5}.
// Even if output shape is changed to {1,3,1,5}, there is another problem. It is that shape of
// output tensor used at next operation is changed to {1,3,1,5} after this operation even if the
// next operation is not desired.
if (input_shape.rank() == 4 && input_shape.rank() != output_shape.rank())
{
if (output_shape.rank() == 2)
{
// Reducing HW
assert(input_shape.dim(0) == output_shape.dim(0) &&
input_shape.dim(3) == output_shape.dim(1));
}
else if (output_shape.rank() == 3)
{
// Reducing C or
// (Reducing H and C(ifm and ofm) == 1) or (Reducing W and C(ifm and ofm) == 1)
assert((input_shape.dim(0) == output_shape.dim(0) &&
input_shape.dim(1) == output_shape.dim(1) &&
input_shape.dim(2) == output_shape.dim(2)) ||
(input_shape.dim(0) == output_shape.dim(0) &&
(input_shape.dim(1) == output_shape.dim(1) ||
input_shape.dim(2) == output_shape.dim(1)) &&
input_shape.dim(3) == 1 && output_shape.dim(2) == 1));
}
}
}

void OperationValidator::visit(const ir::operation::ReduceMax &node)
{
const auto output_index{node.getOutputs().at(0)};
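
To make the 4-D constraint described in the NOTE above concrete, here is a hedged, standalone sketch of the rank-4 to rank-2 case (reducing H and W in NHWC layout); the shapes in main are hypothetical examples, not taken from this commit.

#include <array>
#include <iostream>

// Reducing H and W of an NHWC tensor is accepted when N and C are preserved:
// input {N, H, W, C} -> output {N, C}.
bool ReducesHW(const std::array<int, 4> &in, const std::array<int, 2> &out)
{
  return in[0] == out[0] && in[3] == out[1];
}

int main()
{
  const std::array<int, 4> in{2, 3, 4, 5};
  std::cout << ReducesHW(in, {2, 5}) << '\n'; // 1: H and W reduced, N and C kept
  std::cout << ReducesHW(in, {3, 5}) << '\n'; // 0: the {3,5} case from the NOTE is not the reducing-HW pattern
}
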
1 change: 1 addition & 0 deletions runtime/onert/core/src/compiler/OperationValidator.h
@@ -54,6 +54,7 @@ class OperationValidator : public ir::OperationVisitor
void visit(const ir::operation::Permute &node) override;
void visit(const ir::operation::ReduceSum &node) override;
void visit(const ir::operation::Transpose &node) override;
void visit(const ir::operation::ReduceAny &node) override;
void visit(const ir::operation::ReduceMax &node) override;
void visit(const ir::operation::RNN &node) override;
void visit(const ir::operation::Round &node) override;
8 changes: 8 additions & 0 deletions runtime/onert/core/src/ir/OperationDumper.cc
@@ -411,6 +411,14 @@ void OperationDumper::visit(const PReLU &node)
VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
}

void OperationDumper::visit(const ReduceAny &node)
{
VERBOSE(LIR) << "* ReduceAny" << std::endl;
VERBOSE(LIR) << " - Inputs : Input(" << node.getInputs().at(ReduceAny::Input::INPUT).value()
<< ")" << std::endl;
VERBOSE(LIR) << " - Output : Output(" << node.getOutputs().at(0).value() << ")" << std::endl;
}

void OperationDumper::visit(const ReduceMax &node)
{
VERBOSE(LIR) << "* ReduceMax" << std::endl;
1 change: 1 addition & 0 deletions runtime/onert/core/src/ir/OperationDumper.h
@@ -68,6 +68,7 @@ class OperationDumper : public OperationVisitor
void visit(const operation::Permute &node) override;
void visit(const operation::Pow &node) override;
void visit(const operation::PReLU &) override;
void visit(const operation::ReduceAny &) override;
void visit(const operation::ReduceMax &) override;
void visit(const operation::ReduceMin &) override;
void visit(const operation::ReduceSum &) override;
40 changes: 40 additions & 0 deletions runtime/onert/core/src/ir/operation/ReduceAny.cc
@@ -0,0 +1,40 @@
/*
* Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "ir/operation/ReduceAny.h"

#include <cassert>

#include "ir/OperationVisitor.h"

namespace onert
{
namespace ir
{
namespace operation
{

void ReduceAny::accept(OperationVisitor &v) const { v.visit(*this); }

ReduceAny::ReduceAny(const OperandIndexSequence &inputs, const OperandIndexSequence &outputs,
const Param &param)
: Operation{OperandConstraint::createExact(1u), inputs, outputs}, _param{param}
{
}

} // namespace operation
} // namespace ir
} // namespace onert
27 changes: 27 additions & 0 deletions runtime/onert/frontend/base_loader/include/base_loader.h
@@ -110,6 +110,7 @@ template <typename LoaderDomain, typename SpecificLoader> class BaseLoader
void loadTanh(const Operator *op, ir::Graph &subg);
void loadTranspose(const Operator *op, ir::Graph &subg);
void loadMean(const Operator *op, ir::Graph &subg);
void loadReduceAny(const Operator *op, ir::Graph &subg);
void loadReduceMax(const Operator *op, ir::Graph &subg);
void loadReverseV2(const Operator *op, ir::Graph &subg);
void loadPad(const Operator *op, ir::Graph &subg);
@@ -798,6 +799,29 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadMean(const Operator *op, ir::
subg.addOperation(std::move(new_op));
}

template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadReduceAny(const Operator *op, ir::Graph &subg)
{
ir::OperandIndexSequence inputs;
ir::OperandIndexSequence outputs;

loadOperationIO(op, inputs, outputs);
auto input = inputs.at(0);
auto axes = inputs.at(1);

// FIXME Handle ReducerOptions.
if (!subg.operands().at(axes).isConstant())
throw std::runtime_error("ReduceAny: non-constant 'axes' is not supported.");

ir::operation::ReduceAny::Param param;
param.axes = subg.operands().at(axes).template asVector<int>();
param.keep_dims = op->builtin_options_as_ReducerOptions()->keep_dims();
param.rank = subg.operands().at(inputs.at(0)).shape().rank();

std::unique_ptr<ir::Operation> new_op(new ir::operation::ReduceAny({input}, outputs, param));
subg.addOperation(std::move(new_op));
}

template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadReduceMax(const Operator *op, ir::Graph &subg)
{
@@ -1505,6 +1529,9 @@ void BaseLoader<LoaderDomain, SpecificLoader>::loadOperation(const Operator *op,
case BuiltinOperator::BuiltinOperator_MEAN:
loadMean(op, subg);
return;
case BuiltinOperator::BuiltinOperator_REDUCE_ANY:
loadReduceAny(op, subg);
return;
case BuiltinOperator::BuiltinOperator_REDUCE_MAX:
loadReduceMax(op, subg);
return;
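
As a concrete (hypothetical) example of what loadReduceAny produces: for a model that applies ReduceAny to a rank-3 boolean tensor over axis 1 with keep_dims = false, the resulting Param would be populated roughly as in this self-contained mirror of the struct; the values are assumptions chosen for illustration.

#include <cstdint>
#include <vector>

// Mirror of ir::operation::ReduceAny::Param (declared in the new header above),
// filled with hypothetical values for a rank-3 bool input reduced over axis 1.
struct Param
{
  std::vector<int> axes;
  bool keep_dims;
  int32_t rank;
};

int main()
{
  Param param;
  param.axes = {1};        // read from the constant 'axes' input operand
  param.keep_dims = false; // read from the flatbuffer ReducerOptions
  param.rank = 3;          // rank of the INPUT operand's shape
}
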
