Skip to content

Commit

Permalink
Merge pull request PaddlePaddle#675 from joey12300/add_functions
Browse files Browse the repository at this point in the history
[Functions] Add functions for FDTensor
- maximum, sqrt, exp, round, log and clip
  • Loading branch information
joey12300 authored Nov 23, 2022
2 parents 243f280 + f80775b commit 1af54c4
Show file tree
Hide file tree
Showing 11 changed files with 477 additions and 0 deletions.
58 changes: 58 additions & 0 deletions fastdeploy/function/clip.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,58 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/function/clip.h"
#include <algorithm>

namespace fastdeploy {
namespace function {

// Functor that clamps a single element into the closed range [min_, max_].
template <typename T> class ClipFunctor {
 public:
  explicit ClipFunctor(const T min, const T max) : min_(min), max_(max) {}

  // Clamp: values below min_ become min_, values above max_ become max_,
  // everything else (including NaN for floating types) passes through.
  T operator()(const T x) const { return std::min(std::max(x, min_), max_); }

 private:
  T min_;  // lower bound of the clip range
  T max_;  // upper bound of the clip range
};

// Typed clip kernel: clamps every element of x into [min, max] and writes the
// result into out (allocated here with x's shape and dtype).
template <typename T>
void ClipKernel(const FDTensor& x, double min, double max, FDTensor* out) {
  T max_ = static_cast<T>(max);
  T min_ = static_cast<T>(min);

  // BUGFIX: the message promises "greater than or equal to min", but the
  // original check used a strict '<', rejecting the valid degenerate range
  // min == max (which clips every element to that single value).
  FDASSERT(min_ <= max_,
           "max should be greater than or equal to min. But received min = %f, "
           "max = %f",
           static_cast<float>(min_), static_cast<float>(max_));

  out->Allocate(x.Shape(), x.Dtype());
  const T* x_data = reinterpret_cast<const T*>(x.Data());

  int64_t numel = x.Numel();
  T* out_data = reinterpret_cast<T*>(out->Data());

  // Element-wise transform; bounds are compared after the cast to T so the
  // assertion reflects the values actually used for clamping.
  std::transform(x_data, x_data + numel, out_data, ClipFunctor<T>(min_, max_));
}

// Public entry point: dispatches ClipKernel<T> on x's dtype. Supported dtypes
// are the int/float set accepted by FD_VISIT_INT_FLOAT_TYPES.
void Clip(const FDTensor& x, double min, double max, FDTensor* out) {
  FD_VISIT_INT_FLOAT_TYPES(x.dtype, "ClipKernel",
                           ([&] { ClipKernel<data_t>(x, min, max, out); }));
}

} // namespace function
} // namespace fastdeploy
32 changes: 32 additions & 0 deletions fastdeploy/function/clip.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,32 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "fastdeploy/core/fd_tensor.h"

namespace fastdeploy {
namespace function {

/** This operator clips all elements in input into the range [ min, max ].
    Support float32, float64, int32, int64.
@param x The input tensor.
@param min The lower bound.
@param max The upper bound.
@param out The output tensor which stores the result.
*/
FASTDEPLOY_DECL void Clip(const FDTensor& x, double min, double max,
                          FDTensor* out);

} // namespace function
} // namespace fastdeploy
13 changes: 13 additions & 0 deletions fastdeploy/function/elementwise.cc
Original file line number Diff line number Diff line change
Expand Up @@ -71,5 +71,18 @@ FDTensor operator/(const FDTensor& x, const FDTensor& y) {
return out;
}

// Typed kernel: forwards to the generic broadcasting ElementwiseCompute with
// MaximumFunctor<T>. `axis` selects the broadcast alignment axis (-1 means
// the default, trailing-dimension alignment).
template <typename T> struct MaximumRawKernel {
  void operator()(const FDTensor& x, const FDTensor& y, int axis,
                  FDTensor* out) {
    ElementwiseCompute<MaximumFunctor<T>, T>(x, y, axis, MaximumFunctor<T>(),
                                             out);
  }
};

// Public entry point: dispatches MaximumRawKernel on x's dtype with the
// default broadcast axis (-1). NOTE(review): assumes y has the same dtype as
// x — the visitor only inspects x.dtype.
void Maximum(const FDTensor& x, const FDTensor& y, FDTensor* out) {
  FD_VISIT_ALL_TYPES(x.dtype, "MaximumRawKernel",
                     ([&] { MaximumRawKernel<data_t>()(x, y, -1, out); }));
}

} // namespace function
} // namespace fastdeploy
8 changes: 8 additions & 0 deletions fastdeploy/function/elementwise.h
Original file line number Diff line number Diff line change
Expand Up @@ -56,5 +56,13 @@ FASTDEPLOY_DECL void Divide(const FDTensor& x, const FDTensor& y,
FDTensor* out);
FASTDEPLOY_DECL FDTensor operator/(const FDTensor& x, const FDTensor& y);

/** Execute the maximum operation for input FDTensors. *out = max(x, y).
@param x The input tensor.
@param y The input tensor.
@param out The output tensor which stores the result.
*/
FASTDEPLOY_DECL void Maximum(const FDTensor& x, const FDTensor& y,
                             FDTensor* out);

} // namespace function
} // namespace fastdeploy
5 changes: 5 additions & 0 deletions fastdeploy/function/elementwise_functor.h
Original file line number Diff line number Diff line change
Expand Up @@ -122,5 +122,10 @@ template <typename T, typename Enable = void> struct InverseDivideFunctor {
inline T operator()(const T a, const T b) const { return b / a; }
};

// Maximum: yields the larger of the two operands (b when they compare equal,
// matching the behavior of a plain `a > b ? a : b`).
template <typename T> struct MaximumFunctor {
  inline T operator()(const T a, const T b) const { return (b < a) ? a : b; }
};

} // namespace function
} // namespace fastdeploy
64 changes: 64 additions & 0 deletions fastdeploy/function/math.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/function/math.h"
#include "fastdeploy/function/eigen.h"
#include "fastdeploy/function/math_functor.h"

namespace fastdeploy {
namespace function {

// Generates `name##Kernel<T>(x, out)`, which applies functor_class<T>
// element-wise to x through ActivationImpl.
#define DEFINE_ACTIVATION_KERNEL(name, functor_class)                          \
  template <typename T> void name##Kernel(const FDTensor& x, FDTensor* out) {  \
    functor_class<T> functor;                                                  \
    ActivationImpl<T, functor_class<T>>(x, out, functor);                      \
  }

// Shared implementation for element-wise unary ops: flattens X to an Eigen
// 1-D vector, allocates Out with X's shape/dtype, and evaluates `functor`
// on the singleton Eigen device.
template <typename T, typename Functor>
void ActivationImpl(const FDTensor& X, FDTensor* Out, const Functor& functor) {
  FDASSERT(Out != nullptr, "Output Out should not be nullptr");
  // Flatten X before allocating Out so the call is safe when Out aliases X
  // (the Eigen map of the input is taken first).
  auto x = EigenVector<T>::Flatten(X);
  Out->Allocate(X.Shape(), X.Dtype());
  auto out = EigenVector<T>::Flatten(*Out);
  const auto& dev = *EigenDeviceWrapper::GetInstance()->GetDevice();
  functor(dev, x, out);
}

// Instantiate SqrtKernel, LogKernel, RoundKernel and ExpKernel.
DEFINE_ACTIVATION_KERNEL(Sqrt, SqrtFunctor)
DEFINE_ACTIVATION_KERNEL(Log, LogFunctor)
DEFINE_ACTIVATION_KERNEL(Round, RoundFunctor)
DEFINE_ACTIVATION_KERNEL(Exp, ExpFunctor)

// Public entry points. Each dispatches the typed kernel on x's dtype;
// FD_VISIT_FLOAT_TYPES restricts all four ops to floating-point tensors.

// Element-wise square root.
void Sqrt(const FDTensor& x, FDTensor* out) {
  FD_VISIT_FLOAT_TYPES(x.dtype, "SqrtKernel",
                       ([&] { SqrtKernel<data_t>(x, out); }));
}

// Element-wise natural logarithm.
void Log(const FDTensor& x, FDTensor* out) {
  FD_VISIT_FLOAT_TYPES(x.dtype, "LogKernel",
                       ([&] { LogKernel<data_t>(x, out); }));
}

// Element-wise rounding to the nearest integer value.
void Round(const FDTensor& x, FDTensor* out) {
  FD_VISIT_FLOAT_TYPES(x.dtype, "RoundKernel",
                       ([&] { RoundKernel<data_t>(x, out); }));
}

// Element-wise e^x.
void Exp(const FDTensor& x, FDTensor* out) {
  FD_VISIT_FLOAT_TYPES(x.dtype, "ExpKernel",
                       ([&] { ExpKernel<data_t>(x, out); }));
}

} // namespace function
} // namespace fastdeploy
47 changes: 47 additions & 0 deletions fastdeploy/function/math.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "fastdeploy/core/fd_tensor.h"

namespace fastdeploy {
namespace function {

/** Calculates the sqrt of the given input Tensor, element-wise. Only for float type FDTensor
@param x The input tensor.
@param out The output tensor which stores the result.
*/
FASTDEPLOY_DECL void Sqrt(const FDTensor& x, FDTensor* out);

/** Calculates the natural log of the given input Tensor, element-wise. Only for float type FDTensor
@param x The input tensor.
@param out The output tensor which stores the result.
*/
FASTDEPLOY_DECL void Log(const FDTensor& x, FDTensor* out);

/** Rounds the values in the input to the nearest integer value, element-wise. Only for float type FDTensor
@param x The input tensor.
@param out The output tensor which stores the result.
*/
FASTDEPLOY_DECL void Round(const FDTensor& x, FDTensor* out);

/** Computes exp of x with a natural number e as the base, element-wise. Only for float type FDTensor
@param x The input tensor.
@param out The output tensor which stores the result.
*/
FASTDEPLOY_DECL void Exp(const FDTensor& x, FDTensor* out);

} // namespace function
} // namespace fastdeploy
56 changes: 56 additions & 0 deletions fastdeploy/function/math_functor.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,56 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "fastdeploy/function/eigen.h"

namespace fastdeploy {
namespace function {

// Eigen-expression functors for the unary math kernels. Each takes an Eigen
// device, an input expression x and an output expression out, and assigns the
// transformed expression to out on that device.

// log(x) = natural logarithm of x
template <typename T> struct LogFunctor {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.log();
  }
};

// exp(x) = e^x
template <typename T> struct ExpFunctor {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.exp();
  }
};

// round(x) = x rounded to the nearest integer value
template <typename T> struct RoundFunctor {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.round();
  }
};

// sqrt(x) = x^(1/2)
template <typename T> struct SqrtFunctor {
  template <typename Device, typename X, typename Out>
  void operator()(Device d, X x, Out out) const {
    out.device(d) = x.sqrt();
  }
};

} // namespace function
} // namespace fastdeploy
54 changes: 54 additions & 0 deletions tests/function/test_clip.cc
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/core/fd_tensor.h"
#include "fastdeploy/function/clip.h"
#include "glog/logging.h"
#include "gtest_utils.h"
#include "gtest/gtest.h"
#include <array>
#include <vector>

namespace fastdeploy {
namespace function {

// Builds the fixed fp32 fixture used by the clip test; callers interpret it
// as a tensor of shape [2, 3, 4] (24 elements).
std::vector<float> CreateTestData() {
  return {0.8428625,  0.6461913, 0.13740455, 0.11430702, 0.659926,  0.535816,
          0.7429162,  0.8456049, 0.21228176, 0.29970083, 0.8621713, 0.40894133,
          0.12684688, 0.1566195, 0.42884097, 0.8476526,  0.2458633, 0.669046,
          0.87888306, 0.6762589, 0.666453,   0.32523027, 0.4139388, 0.8341406};
}

// Verifies Clip on an fp32 [2, 3, 4] tensor: every element is clamped into
// [0.2, 0.8] and both the output shape and values are checked against
// precomputed expectations.
TEST(fastdeploy, clip) {
  CheckShape check_shape;
  CheckData check_data;
  FDTensor x, y;
  auto test_data = CreateTestData();
  // SetExternalData borrows test_data's buffer — test_data must outlive x.
  x.SetExternalData({2, 3, 4}, FDDataType::FP32, test_data.data());

  Clip(x, /* min = */ 0.2, /* max = */ 0.8, &y);
  // Expected: inputs below 0.2 -> 0.2, above 0.8 -> 0.8, rest unchanged
  // (values rounded to 6 significant digits for the comparison tolerance).
  std::vector<float> result = {
      0.8, 0.646191, 0.2, 0.2, 0.659926, 0.535816, 0.742916, 0.8,
      0.212282, 0.299701, 0.8, 0.408941, 0.2, 0.2, 0.428841, 0.8,
      0.245863, 0.669046, 0.8, 0.676259, 0.666453, 0.32523, 0.413939, 0.8};
  check_shape(y.shape, {2, 3, 4});
  check_data(reinterpret_cast<const float*>(y.Data()), result.data(),
             result.size());
}

} // namespace function
} // namespace fastdeploy
Loading

0 comments on commit 1af54c4

Please sign in to comment.