[onert/cpu] CPU backend Logical_and operation (Samsung#5056)
Support CPU backend logical_and operation

Signed-off-by: Hyeongseok Oh <[email protected]>
hseok-oh authored Nov 18, 2020
1 parent 56d3d0f commit b006d77
Showing 8 changed files with 116 additions and 32 deletions.
80 changes: 80 additions & 0 deletions compute/cker/include/cker/operation/LogicalAnd.h
@@ -0,0 +1,80 @@
/*
* Copyright (c) 2020 Samsung Electronics Co., Ltd. All Rights Reserved
* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef __NNFW_CKER_LOGICAL_AND_H__
#define __NNFW_CKER_LOGICAL_AND_H__

#include "cker/Shape.h"
#include "cker/Utils.h"

namespace nnfw
{
namespace cker
{

template <typename T>
inline void LogicalAndBroadcast(const Shape &unextended_input1_shape, const T *input1_data,
const Shape &unextended_input2_shape, const T *input2_data,
const Shape &unextended_output_shape, T *output_data)
{
assert(unextended_input1_shape.DimensionsCount() <= 4);
assert(unextended_input2_shape.DimensionsCount() <= 4);
assert(unextended_output_shape.DimensionsCount() <= 4);
const Shape output_shape = Shape::ExtendedShape(4, unextended_output_shape);

NdArrayDesc<4> desc1;
NdArrayDesc<4> desc2;
NdArrayDescsForElementwiseBroadcast(unextended_input1_shape, unextended_input2_shape, &desc1,
&desc2);

for (int b = 0; b < output_shape.Dims(0); ++b)
{
for (int y = 0; y < output_shape.Dims(1); ++y)
{
for (int x = 0; x < output_shape.Dims(2); ++x)
{
for (int c = 0; c < output_shape.Dims(3); ++c)
{
auto out_idx = Offset(output_shape, b, y, x, c);
auto in1_idx = SubscriptToIndex(desc1, b, y, x, c);
auto in2_idx = SubscriptToIndex(desc2, b, y, x, c);
auto in1_val = input1_data[in1_idx];
auto in2_val = input2_data[in2_idx];
output_data[out_idx] = in1_val && in2_val;
}
}
}
}
}

template <typename T>
inline void LogicalAndElementwise(const Shape &shape, const T *input1_data, const T *input2_data,
T *output_data)
{
int num_elements = shape.FlatSize();

for (int t = 0; t < num_elements; t++)
{
output_data[t] = input1_data[t] && input2_data[t];
}
}

} // namespace cker
} // namespace nnfw

#endif // __NNFW_CKER_LOGICAL_AND_H__
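
For context, here is a hypothetical caller of the two kernels added above (not part of the commit). The `Shape{...}` initializer-list construction is an assumption carried over from TensorFlow Lite's `RuntimeShape`, on which cker's `Shape` is modeled; the kernels themselves are used as declared in this header — the element-wise path for identical shapes, the broadcast path otherwise.

/*
 * Hypothetical usage sketch; assumes nnfw::cker::Shape accepts an
 * initializer list of dimensions, as TFLite's RuntimeShape does.
 */
#include <cker/operation/LogicalAnd.h>

#include <cassert>

int main()
{
  using nnfw::cker::Shape;

  // Same-shape inputs take the element-wise path.
  const bool a[4] = {true, true, false, false};
  const bool b[4] = {true, false, true, false};
  bool out[4] = {};
  nnfw::cker::LogicalAndElementwise<bool>(Shape{4}, a, b, out);
  assert(out[0] && !out[1] && !out[2] && !out[3]);

  // Mismatched shapes take the broadcast path: {2,1} AND {1,3} -> {2,3}.
  const bool lhs[2] = {true, false};
  const bool rhs[3] = {true, false, true};
  bool bcast[6] = {};
  nnfw::cker::LogicalAndBroadcast<bool>(Shape{2, 1}, lhs, Shape{1, 3}, rhs,
                                        Shape{2, 3}, bcast);
  // Row 0 (lhs true) copies rhs; row 1 (lhs false) is all false.
  assert(bcast[0] && !bcast[1] && bcast[2]);
  assert(!bcast[3] && !bcast[4] && !bcast[5]);
  return 0;
}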
2 changes: 2 additions & 0 deletions runtime/onert/backend/cpu/KernelGenerator.cc
@@ -124,6 +124,8 @@ convertElementwiseBinaryType(ir::operation::ElementwiseBinary::ElementwiseBinary
{
switch (type_ir)
{
case ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_AND:
return ops::ElementwiseBinaryType::kLogicalAnd;
case ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR:
return ops::ElementwiseBinaryType::kLogicalOr;
case ir::operation::ElementwiseBinary::ElementwiseBinaryType::MAX:
30 changes: 30 additions & 0 deletions runtime/onert/backend/cpu/ops/ElementwiseBinaryLayer.cc
@@ -18,6 +18,7 @@

#include "OperationUtils.h"

#include <cker/operation/LogicalAnd.h>
#include <cker/operation/LogicalOr.h>
#include <cker/operation/MaxMin.h>

@@ -32,6 +33,25 @@ namespace ops

namespace
{
template <typename T>
void logicalAndGeneric(const IPortableTensor *lhs, const IPortableTensor *rhs,
IPortableTensor *output)
{
if (!HaveSameShapes(lhs, rhs))
{
nnfw::cker::LogicalAndBroadcast<T>(
getTensorShape(lhs), reinterpret_cast<const T *>(lhs->buffer()), getTensorShape(rhs),
reinterpret_cast<const T *>(rhs->buffer()), getTensorShape(output),
reinterpret_cast<T *>(output->buffer()));
}
else
{
nnfw::cker::LogicalAndElementwise<T>(
getTensorShape(lhs), reinterpret_cast<const T *>(lhs->buffer()),
reinterpret_cast<const T *>(rhs->buffer()), reinterpret_cast<T *>(output->buffer()));
}
}

template <typename T>
void logicalOrGeneric(const IPortableTensor *lhs, const IPortableTensor *rhs,
IPortableTensor *output)
@@ -88,6 +108,16 @@ void ElementwiseBinaryLayer::configure(const IPortableTensor *lhs, const IPortab

switch (op_type)
{
case ElementwiseBinaryType::kLogicalAnd:
if ((_lhs->data_type() == OperandType::BOOL8) && (_rhs->data_type() == OperandType::BOOL8))
{
_kernel = logicalAndGeneric<bool>;
}
else
{
throw std::runtime_error{"LogicalAnd: Unsupported data type"};
}
break;
case ElementwiseBinaryType::kLogicalOr:
if ((_lhs->data_type() == OperandType::BOOL8) && (_rhs->data_type() == OperandType::BOOL8))
{
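
The new kLogicalAnd case mirrors the existing kLogicalOr one: configure validates that both operands are BOOL8, then binds the typed kernel into the layer's _kernel member, which is invoked when the layer runs. A reduced sketch of that dispatch pattern follows; only _lhs, _rhs, and _kernel are taken from the diff, everything else is a stand-in.

/*
 * Reduced sketch of the configure-time dispatch pattern (assumed
 * structure; Tensor and the run() plumbing are stand-ins, not the
 * real IPortableTensor API).
 */
#include <functional>
#include <stdexcept>

enum class OperandType { BOOL8, FLOAT32 };

struct Tensor // stand-in for IPortableTensor
{
  OperandType type;
};

struct ElementwiseBinaryLayerSketch
{
  const Tensor *_lhs = nullptr;
  const Tensor *_rhs = nullptr;
  Tensor *_output = nullptr;
  std::function<void(const Tensor *, const Tensor *, Tensor *)> _kernel;

  void configure(const Tensor *lhs, const Tensor *rhs, Tensor *output)
  {
    _lhs = lhs; _rhs = rhs; _output = output;
    if (lhs->type == OperandType::BOOL8 && rhs->type == OperandType::BOOL8)
    {
      // In the real layer this binds logicalAndGeneric<bool>.
      _kernel = [](const Tensor *l, const Tensor *r, Tensor *o) {
        (void)l; (void)r; (void)o; // kernel body elided in this sketch
      };
    }
    else
    {
      throw std::runtime_error{"LogicalAnd: Unsupported data type"};
    }
  }

  void run() { _kernel(_lhs, _rhs, _output); }
};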
4 changes: 4 additions & 0 deletions runtime/onert/frontend/base_loader/include/base_loader.h
@@ -1513,6 +1513,10 @@ void BaseLoader<LoaderDomain>::loadOperation(const Operator *op, ir::Graph &subg
case BuiltinOperator::BuiltinOperator_LOGICAL_NOT:
loadElementwiseUnary(op, subg, ir::operation::ElementwiseUnary::Type::LOGICAL_NOT);
return;
case BuiltinOperator::BuiltinOperator_LOGICAL_AND:
loadElementwiseBinary(op, subg,
ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_AND);
return;
case BuiltinOperator::BuiltinOperator_LOGICAL_OR:
loadElementwiseBinary(op, subg,
ir::operation::ElementwiseBinary::ElementwiseBinaryType::LOGICAL_OR);
8 changes: 0 additions & 8 deletions tests/nnapi/nnapi_gtest.skip.aarch64-android.cpu
@@ -46,14 +46,6 @@ GeneratedTests.local_response_norm_float_1
GeneratedTests.local_response_norm_float_2
GeneratedTests.local_response_norm_float_3
GeneratedTests.local_response_norm_float_4
GeneratedTests.logical_and_1D_nnfw
GeneratedTests.logical_and_2D_nnfw
GeneratedTests.logical_and_3D_nnfw
GeneratedTests.logical_and_4D_nnfw
GeneratedTests.logical_and_broadcast
GeneratedTests.logical_and_broadcast_4D_2D_nnfw
GeneratedTests.logical_and_broadcast_nnfw
GeneratedTests.logical_and_simple
GeneratedTests.logical_not
GeneratedTests.lsh_projection
GeneratedTests.lsh_projection_2
8 changes: 0 additions & 8 deletions tests/nnapi/nnapi_gtest.skip.aarch64-linux.cpu
@@ -46,14 +46,6 @@ GeneratedTests.local_response_norm_float_1
GeneratedTests.local_response_norm_float_2
GeneratedTests.local_response_norm_float_3
GeneratedTests.local_response_norm_float_4
GeneratedTests.logical_and_1D_nnfw
GeneratedTests.logical_and_2D_nnfw
GeneratedTests.logical_and_3D_nnfw
GeneratedTests.logical_and_4D_nnfw
GeneratedTests.logical_and_broadcast
GeneratedTests.logical_and_broadcast_4D_2D_nnfw
GeneratedTests.logical_and_broadcast_nnfw
GeneratedTests.logical_and_simple
GeneratedTests.logical_not
GeneratedTests.lsh_projection
GeneratedTests.lsh_projection_2
8 changes: 0 additions & 8 deletions tests/nnapi/nnapi_gtest.skip.armv7l-linux.cpu
@@ -46,14 +46,6 @@ GeneratedTests.local_response_norm_float_1
GeneratedTests.local_response_norm_float_2
GeneratedTests.local_response_norm_float_3
GeneratedTests.local_response_norm_float_4
GeneratedTests.logical_and_1D_nnfw
GeneratedTests.logical_and_2D_nnfw
GeneratedTests.logical_and_3D_nnfw
GeneratedTests.logical_and_4D_nnfw
GeneratedTests.logical_and_broadcast
GeneratedTests.logical_and_broadcast_4D_2D_nnfw
GeneratedTests.logical_and_broadcast_nnfw
GeneratedTests.logical_and_simple
GeneratedTests.logical_not
GeneratedTests.lsh_projection
GeneratedTests.lsh_projection_2
8 changes: 0 additions & 8 deletions tests/nnapi/nnapi_gtest.skip.x86_64-linux.cpu
@@ -46,14 +46,6 @@ GeneratedTests.local_response_norm_float_1
GeneratedTests.local_response_norm_float_2
GeneratedTests.local_response_norm_float_3
GeneratedTests.local_response_norm_float_4
GeneratedTests.logical_and_1D_nnfw
GeneratedTests.logical_and_2D_nnfw
GeneratedTests.logical_and_3D_nnfw
GeneratedTests.logical_and_4D_nnfw
GeneratedTests.logical_and_broadcast
GeneratedTests.logical_and_broadcast_4D_2D_nnfw
GeneratedTests.logical_and_broadcast_nnfw
GeneratedTests.logical_and_simple
GeneratedTests.logical_not
GeneratedTests.lsh_projection
GeneratedTests.lsh_projection_2
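
With the CPU kernel wired up end to end, the eight logical_and entries are dropped from all four platform skip lists above, so the previously skipped NNAPI GeneratedTests for logical_and now run against the CPU backend.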
