Add cumprod function
joey12300 committed Nov 24, 2022
1 parent 12af6b4 commit 6e2e646
Showing 3 changed files with 182 additions and 0 deletions.
78 changes: 78 additions & 0 deletions fastdeploy/function/cumprod.cc
@@ -0,0 +1,78 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/function/cumprod.h"

namespace fastdeploy {
namespace function {

void GetCumprodDimInfo(const std::vector<int64_t>& dim, int cumprod_dim,
                       size_t* outer_dim, size_t* mid_dim, size_t* inner_dim) {
  int dim_size = dim.size();
  FDASSERT(cumprod_dim >= -dim_size,
           "The input dim of CumprodOp should be larger than the opposite "
           "rank of input x which is %d. But received dim = %d",
           -dim_size, cumprod_dim);
  FDASSERT(cumprod_dim < dim_size,
           "The input dim of CumprodOp should be smaller than the "
           "rank of input x which is %d. But received dim = %d",
           dim_size, cumprod_dim);
  if (cumprod_dim < 0)
    cumprod_dim += dim_size;

  // Split the shape into three blocks around the cumprod axis:
  // outer_dim = product of dims before the axis, mid_dim = the axis itself,
  // inner_dim = product of dims after the axis.
  *outer_dim = 1;
  for (int i = 0; i < cumprod_dim; ++i) {
    *outer_dim *= dim[i];
  }
  *mid_dim = dim[cumprod_dim];
  *inner_dim = 1;
  for (int i = cumprod_dim + 1; i < dim_size; ++i) {
    *inner_dim *= dim[i];
  }
}

template <typename T>
void CumprodKernel(const FDTensor& x, FDTensor* out, int axis) {
  auto* x_data = reinterpret_cast<const T*>(x.Data());
  auto shape = x.Shape();

  size_t outer_dim = 1;
  size_t mid_dim = 1;
  size_t inner_dim = 1;
  GetCumprodDimInfo(shape, axis, &outer_dim, &mid_dim, &inner_dim);

  out->Allocate(x.Shape(), x.Dtype());
  auto* out_data = reinterpret_cast<T*>(out->Data());

  // Accumulate the running product along the middle (cumprod) dimension;
  // elements adjacent along that axis are inner_dim entries apart in memory.
  for (size_t i = 0; i < outer_dim; i++) {
    for (size_t j = 0; j < mid_dim; j++) {
      for (size_t k = 0; k < inner_dim; k++) {
        size_t pos = i * mid_dim * inner_dim + j * inner_dim + k;
        if (j == 0) {
          out_data[pos] = x_data[pos];
        } else {
          out_data[pos] = out_data[pos - inner_dim] * x_data[pos];
        }
      }
    }
  }
}

void Cumprod(const FDTensor& x, FDTensor* out, int axis) {
  FD_VISIT_INT_FLOAT_TYPES(x.dtype, "CumprodKernel",
                           ([&] { CumprodKernel<data_t>(x, out, axis); }));
}

} // namespace function
} // namespace fastdeploy
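
The kernel above views the input as an (outer_dim, mid_dim, inner_dim) block around the cumprod axis, so the recurrence out[pos] = out[pos - inner_dim] * x[pos] walks the middle dimension. The following standalone sketch (not part of this commit; plain C++ with no FastDeploy dependency) replays the same indexing on a 2x3 array with axis = 1:

// Standalone illustration of the outer/mid/inner indexing used by CumprodKernel.
#include <cstdio>
#include <vector>

int main() {
  std::vector<double> x = {1, 2, 3, 4, 5, 6};  // shape [2, 3], cumprod axis = 1
  size_t outer_dim = 2, mid_dim = 3, inner_dim = 1;
  std::vector<double> out(x.size());
  for (size_t i = 0; i < outer_dim; ++i) {
    for (size_t j = 0; j < mid_dim; ++j) {
      for (size_t k = 0; k < inner_dim; ++k) {
        size_t pos = i * mid_dim * inner_dim + j * inner_dim + k;
        out[pos] = (j == 0) ? x[pos] : out[pos - inner_dim] * x[pos];
      }
    }
  }
  for (double v : out) std::printf("%g ", v);  // prints: 1 2 6 4 20 120
  std::printf("\n");
  return 0;
}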
31 changes: 31 additions & 0 deletions fastdeploy/function/cumprod.h
@@ -0,0 +1,31 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "fastdeploy/core/fd_tensor.h"

namespace fastdeploy {
namespace function {

/** Execute the cumulative product operation for the input FDTensor along the given axis.
    @param x The input tensor.
    @param out The output tensor which stores the result.
    @param axis The axis along which the cumulative product is computed.
*/

FASTDEPLOY_DECL void Cumprod(const FDTensor& x, FDTensor* out, int axis = 0);

} // namespace function
} // namespace fastdeploy
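
A minimal usage sketch of the declared interface (not part of this commit; it assumes only the FDTensor calls exercised by the test file below): build a tensor over external data, call Cumprod, and read the result.

#include <vector>

#include "fastdeploy/core/fd_tensor.h"
#include "fastdeploy/function/cumprod.h"

int main() {
  // A [2, 3] float tensor; cumprod along axis 1 is a row-wise running product.
  std::vector<float> data = {1.f, 2.f, 3.f, 4.f, 5.f, 6.f};
  fastdeploy::FDTensor x, y;
  x.SetExternalData({2, 3}, fastdeploy::FDDataType::FP32, data.data());
  fastdeploy::function::Cumprod(x, &y, /*axis=*/1);
  // y now has shape [2, 3] and holds {1, 2, 6, 4, 20, 120}.
  return 0;
}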
73 changes: 73 additions & 0 deletions tests/function/test_cumprod.cc
@@ -0,0 +1,73 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/core/fd_tensor.h"
#include "fastdeploy/function/cumprod.h"
#include "glog/logging.h"
#include "gtest_utils.h"
#include "gtest/gtest.h"
#include <array>
#include <vector>

namespace fastdeploy {
namespace function {

std::vector<float> CreateTestData() {
  // Shape: [2, 3, 4]
  std::vector<float> x_data = {
      0.8428625,  0.6461913, 0.13740455, 0.11430702, 0.659926,  0.535816,
      0.7429162,  0.8456049, 0.21228176, 0.29970083, 0.8621713, 0.40894133,
      0.12684688, 0.1566195, 0.42884097, 0.8476526,  0.2458633, 0.669046,
      0.87888306, 0.6762589, 0.666453,   0.32523027, 0.4139388, 0.8341406};
  return x_data;
}

TEST(fastdeploy, cumprod) {
  CheckShape check_shape;
  CheckData check_data;
  FDTensor x, y;
  auto test_data = CreateTestData();
  x.SetExternalData({2, 3, 4}, FDDataType::FP32, test_data.data());

  std::vector<float> result = {
      0.842862, 0.646191, 0.137405, 0.114307, 0.659926, 0.535816,
      0.742916, 0.845605, 0.212282, 0.299701, 0.862171, 0.408941,
      0.106914, 0.101206, 0.058925, 0.096893, 0.162252, 0.358486,
      0.652937, 0.571848, 0.141476, 0.097472, 0.356886, 0.341115};
  Cumprod(x, &y, 0);
  check_shape(y.shape, {2, 3, 4});
  check_data(reinterpret_cast<const float*>(y.Data()), result.data(),
             result.size());

  result = {0.842862, 0.646191, 0.137405, 0.114307, 0.556227, 0.34624,
            0.10208,  0.096659, 0.118077, 0.103768, 0.088011, 0.039528,
            0.126847, 0.15662,  0.428841, 0.847653, 0.031187, 0.104786,
            0.376901, 0.573233, 0.020785, 0.034079, 0.156014, 0.478157};
  Cumprod(x, &y, 1);
  check_shape(y.shape, {2, 3, 4});
  check_data(reinterpret_cast<const float*>(y.Data()), result.data(),
             result.size());

  result = {0.842862, 0.54465,  0.074837, 0.008554, 0.659926, 0.353599,
            0.262694, 0.222136, 0.212282, 0.063621, 0.054852, 0.022431,
            0.126847, 0.019867, 0.00852,  0.007222, 0.245863, 0.164494,
            0.144571, 0.097767, 0.666453, 0.216751, 0.089722, 0.07484};
  Cumprod(x, &y, 2);
  check_shape(y.shape, {2, 3, 4});
  check_data(reinterpret_cast<const float*>(y.Data()), result.data(),
             result.size());
}

} // namespace function
} // namespace fastdeploy
