Skip to content

Commit

Permalink
[CVCUDA] Add CV-CUDA support in PaddleSeg (PaddlePaddle#1761)
Browse files Browse the repository at this point in the history
* add cvcuda support in ppseg

* python and pybind

* add resize op; remove concat, use std::move

* define resize op
  • Loading branch information
GodIsBoom authored Apr 9, 2023
1 parent c90aa7b commit ed19c75
Show file tree
Hide file tree
Showing 5 changed files with 128 additions and 96 deletions.
3 changes: 2 additions & 1 deletion fastdeploy/vision/segmentation/ppseg/model.cc
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,8 @@ bool PaddleSegModel::BatchPredict(const std::vector<cv::Mat>& imgs,
std::vector<FDMat> fd_images = WrapMat(imgs);
// Record the shape of input images
std::map<std::string, std::vector<std::array<int, 2>>> imgs_info;
if (!preprocessor_.Run(&fd_images, &reused_input_tensors_, &imgs_info)) {
preprocessor_.SetImgsInfo(&imgs_info);
if (!preprocessor_.Run(&fd_images, &reused_input_tensors_)) {
FDERROR << "Failed to preprocess input data while using model:"
<< ModelName() << "." << std::endl;
return false;
Expand Down
115 changes: 68 additions & 47 deletions fastdeploy/vision/segmentation/ppseg/ppseg_pybind.cc
Original file line number Diff line number Diff line change
Expand Up @@ -15,44 +15,52 @@

namespace fastdeploy {
void BindPPSeg(pybind11::module& m) {
pybind11::class_<vision::segmentation::PaddleSegPreprocessor>(
m, "PaddleSegPreprocessor")
pybind11::class_<vision::segmentation::PaddleSegPreprocessor,
vision::ProcessorManager>(m, "PaddleSegPreprocessor")
.def(pybind11::init<std::string>())
.def("run",
[](vision::segmentation::PaddleSegPreprocessor& self,
std::vector<pybind11::array>& im_list) {
std::vector<vision::FDMat> images;
for (size_t i = 0; i < im_list.size(); ++i) {
images.push_back(vision::WrapMat(PyArrayToCvMat(im_list[i])));
}
images.push_back(vision::WrapMat(PyArrayToCvMat(im_list[i])));
}
// Record the shape of input images
std::map<std::string, std::vector<std::array<int, 2>>> imgs_info;
std::vector<FDTensor> outputs;
if (!self.Run(&images, &outputs, &imgs_info)) {
throw std::runtime_error("Failed to preprocess the input data in PaddleSegPreprocessor.");
self.SetImgsInfo(&imgs_info);
if (!self.Run(&images, &outputs)) {
throw std::runtime_error(
"Failed to preprocess the input data in "
"PaddleSegPreprocessor.");
}
for (size_t i = 0; i < outputs.size(); ++i) {
outputs[i].StopSharing();
}
return make_pair(outputs, imgs_info);;
return make_pair(outputs, imgs_info);
;
})
.def("disable_normalize",
[](vision::segmentation::PaddleSegPreprocessor& self) {
self.DisableNormalize();
})
.def("disable_normalize", [](vision::segmentation::PaddleSegPreprocessor& self) {
self.DisableNormalize();
})
.def("disable_permute", [](vision::segmentation::PaddleSegPreprocessor& self) {
self.DisablePermute();
})
.def_property("is_vertical_screen",
&vision::segmentation::PaddleSegPreprocessor::GetIsVerticalScreen,
&vision::segmentation::PaddleSegPreprocessor::SetIsVerticalScreen);
.def("disable_permute",
[](vision::segmentation::PaddleSegPreprocessor& self) {
self.DisablePermute();
})
.def_property(
"is_vertical_screen",
&vision::segmentation::PaddleSegPreprocessor::GetIsVerticalScreen,
&vision::segmentation::PaddleSegPreprocessor::SetIsVerticalScreen);

pybind11::class_<vision::segmentation::PaddleSegModel, FastDeployModel>(
m, "PaddleSegModel")
.def(pybind11::init<std::string, std::string, std::string, RuntimeOption,
ModelFormat>())
.def("clone", [](vision::segmentation::PaddleSegModel& self) {
return self.Clone();
})
.def("clone",
[](vision::segmentation::PaddleSegModel& self) {
return self.Clone();
})
.def("predict",
[](vision::segmentation::PaddleSegModel& self,
pybind11::array& data) {
Expand All @@ -62,48 +70,61 @@ void BindPPSeg(pybind11::module& m) {
return res;
})
.def("batch_predict",
[](vision::segmentation::PaddleSegModel& self, std::vector<pybind11::array>& data) {
[](vision::segmentation::PaddleSegModel& self,
std::vector<pybind11::array>& data) {
std::vector<cv::Mat> images;
for (size_t i = 0; i < data.size(); ++i) {
images.push_back(PyArrayToCvMat(data[i]));
images.push_back(PyArrayToCvMat(data[i]));
}
std::vector<vision::SegmentationResult> results;
self.BatchPredict(images, &results);
return results;
})
.def_property_readonly("preprocessor", &vision::segmentation::PaddleSegModel::GetPreprocessor)
.def_property_readonly("postprocessor", &vision::segmentation::PaddleSegModel::GetPostprocessor);
.def_property_readonly(
"preprocessor",
&vision::segmentation::PaddleSegModel::GetPreprocessor)
.def_property_readonly(
"postprocessor",
&vision::segmentation::PaddleSegModel::GetPostprocessor);

pybind11::class_<vision::segmentation::PaddleSegPostprocessor>(
m, "PaddleSegPostprocessor")
.def(pybind11::init<std::string>())
.def("run",
[](vision::segmentation::PaddleSegPostprocessor& self,
.def("run",
[](vision::segmentation::PaddleSegPostprocessor& self,
std::vector<FDTensor>& inputs,
const std::map<std::string, std::vector<std::array<int, 2>>>& imgs_info) {
std::vector<vision::SegmentationResult> results;
if (!self.Run(inputs, &results, imgs_info)) {
throw std::runtime_error("Failed to postprocess the runtime result in PaddleSegPostprocessor.");
}
return results;
})
const std::map<std::string, std::vector<std::array<int, 2>>>&
imgs_info) {
std::vector<vision::SegmentationResult> results;
if (!self.Run(inputs, &results, imgs_info)) {
throw std::runtime_error(
"Failed to postprocess the runtime result in "
"PaddleSegPostprocessor.");
}
return results;
})
.def("run",
[](vision::segmentation::PaddleSegPostprocessor& self,
std::vector<pybind11::array>& input_array,
const std::map<std::string, std::vector<std::array<int, 2>>>& imgs_info) {
std::vector<vision::SegmentationResult> results;
std::vector<FDTensor> inputs;
PyArrayToTensorList(input_array, &inputs, /*share_buffer=*/true);
if (!self.Run(inputs, &results, imgs_info)) {
throw std::runtime_error("Failed to postprocess the runtime result in PaddleSegPostprocessor.");
}
return results;
})
.def_property("apply_softmax",
&vision::segmentation::PaddleSegPostprocessor::GetApplySoftmax,
&vision::segmentation::PaddleSegPostprocessor::SetApplySoftmax)
.def_property("store_score_map",
&vision::segmentation::PaddleSegPostprocessor::GetStoreScoreMap,
&vision::segmentation::PaddleSegPostprocessor::SetStoreScoreMap);
const std::map<std::string, std::vector<std::array<int, 2>>>&
imgs_info) {
std::vector<vision::SegmentationResult> results;
std::vector<FDTensor> inputs;
PyArrayToTensorList(input_array, &inputs, /*share_buffer=*/true);
if (!self.Run(inputs, &results, imgs_info)) {
throw std::runtime_error(
"Failed to postprocess the runtime result in "
"PaddleSegPostprocessor.");
}
return results;
})
.def_property(
"apply_softmax",
&vision::segmentation::PaddleSegPostprocessor::GetApplySoftmax,
&vision::segmentation::PaddleSegPostprocessor::SetApplySoftmax)
.def_property(
"store_score_map",
&vision::segmentation::PaddleSegPostprocessor::GetStoreScoreMap,
&vision::segmentation::PaddleSegPostprocessor::SetStoreScoreMap);
}
} // namespace fastdeploy
56 changes: 30 additions & 26 deletions fastdeploy/vision/segmentation/ppseg/preprocessor.cc
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,8 @@ namespace segmentation {

PaddleSegPreprocessor::PaddleSegPreprocessor(const std::string& config_file) {
  this->config_file_ = config_file;
  // Build the preprocessing pipeline from the deployment YAML exactly once;
  // FDASSERT aborts construction if the config cannot be parsed.
  // (Fix: the assertion was duplicated, causing the pipeline to be built twice.)
  FDASSERT(BuildPreprocessPipelineFromConfig(),
           "Failed to create PaddleSegPreprocessor.");
  initialized_ = true;
}

Expand All @@ -35,7 +36,7 @@ bool PaddleSegPreprocessor::BuildPreprocessPipelineFromConfig() {
FDERROR << "Failed to load yaml file " << config_file_
<< ", maybe you should check this file." << std::endl;
return false;
}
}

if (cfg["Deploy"]["transforms"]) {
auto preprocess_cfg = cfg["Deploy"]["transforms"];
Expand Down Expand Up @@ -76,7 +77,7 @@ bool PaddleSegPreprocessor::BuildPreprocessPipelineFromConfig() {
if (input_height != -1 && input_width != -1 && !is_contain_resize_op_) {
is_contain_resize_op_ = true;
processors_.insert(processors_.begin(),
std::make_shared<Resize>(input_width, input_height));
std::make_shared<Resize>(input_width, input_height));
}
}
if (!disable_permute_) {
Expand All @@ -88,22 +89,24 @@ bool PaddleSegPreprocessor::BuildPreprocessPipelineFromConfig() {
return true;
}

bool PaddleSegPreprocessor::Run(std::vector<FDMat>* images, std::vector<FDTensor>* outputs, std::map<std::string, std::vector<std::array<int, 2>>>* imgs_info) {

bool PaddleSegPreprocessor::Apply(FDMatBatch* image_batch,
std::vector<FDTensor>* outputs) {
std::vector<FDMat>* images = image_batch->mats;
if (!initialized_) {
FDERROR << "The preprocessor is not initialized." << std::endl;
return false;
}
if (images->size() == 0) {
FDERROR << "The size of input images should be greater than 0." << std::endl;
FDERROR << "The size of input images should be greater than 0."
<< std::endl;
return false;
}
std::vector<std::array<int, 2>> shape_info;
for (const auto& image : *images) {
shape_info.push_back({static_cast<int>(image.Height()),
static_cast<int>(image.Width())});
shape_info.push_back(
{static_cast<int>(image.Height()), static_cast<int>(image.Width())});
}
(*imgs_info)["shape_info"] = shape_info;
(*imgs_info_)["shape_info"] = shape_info;
for (size_t i = 0; i < processors_.size(); ++i) {
if (processors_[i]->Name() == "Resize") {
auto processor = dynamic_cast<Resize*>(processors_[i].get());
Expand All @@ -123,13 +126,17 @@ bool PaddleSegPreprocessor::Run(std::vector<FDMat>* images, std::vector<FDTensor
// Batch preprocess : resize all images to the largest image shape in batch
if (!is_contain_resize_op_ && img_num > 1) {
int max_width = 0;
int max_height = 0;
int max_height = 0;
for (size_t i = 0; i < img_num; ++i) {
max_width = std::max(max_width, ((*images)[i]).Width());
max_height = std::max(max_height, ((*images)[i]).Height());
}
pre_resize_op_->SetWidthAndHeight(max_width, max_height);
for (size_t i = 0; i < img_num; ++i) {
Resize::Run(&(*images)[i], max_width, max_height);
if (!(*pre_resize_op_)(&(*images)[i])) {
FDERROR << "Failed to batch resize max_width and max_height"
<< std::endl;
}
}
}
for (size_t i = 0; i < img_num; ++i) {
Expand All @@ -142,32 +149,29 @@ bool PaddleSegPreprocessor::Run(std::vector<FDMat>* images, std::vector<FDTensor
}
}
outputs->resize(1);
// Concat all the preprocessed data to a batch tensor
std::vector<FDTensor> tensors(img_num);
for (size_t i = 0; i < img_num; ++i) {
(*images)[i].ShareWithTensor(&(tensors[i]));
tensors[i].ExpandDim(0);
}
if (tensors.size() == 1) {
(*outputs)[0] = std::move(tensors[0]);
} else {
function::Concat(tensors, &((*outputs)[0]), 0);
}
FDTensor* tensor = image_batch->Tensor();
(*outputs)[0].SetExternalData(tensor->Shape(), tensor->Dtype(),
tensor->Data(), tensor->device,
tensor->device_id);
return true;
}

void PaddleSegPreprocessor::DisableNormalize() {
  this->disable_normalize_ = true;
  // DisableNormalize will be invalid if the configuration file is loaded
  // during preprocessing, so rebuild the pipeline here with the flag set.
  // (Fix: the comment and the FDERROR statement were each duplicated,
  // logging the failure twice.)
  if (!BuildPreprocessPipelineFromConfig()) {
    FDERROR << "Failed to build preprocess pipeline from configuration file."
            << std::endl;
  }
}
void PaddleSegPreprocessor::DisablePermute() {
  this->disable_permute_ = true;
  // DisablePermute will be invalid if the configuration file is loaded
  // during preprocessing, so rebuild the pipeline here with the flag set.
  // (Fix: the comment and the FDERROR statement were each duplicated,
  // logging the failure twice.)
  if (!BuildPreprocessPipelineFromConfig()) {
    FDERROR << "Failed to build preprocess pipeline from configuration file."
            << std::endl;
  }
}
} // namespace segmentation
Expand Down
29 changes: 21 additions & 8 deletions fastdeploy/vision/segmentation/ppseg/preprocessor.h
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once
#include "fastdeploy/vision/common/processors/manager.h"
#include "fastdeploy/vision/common/processors/transform.h"
#include "fastdeploy/vision/common/result.h"

Expand All @@ -20,25 +21,24 @@ namespace vision {
namespace segmentation {
/*! @brief Preprocessor object for PaddleSeg serials model.
*/
class FASTDEPLOY_DECL PaddleSegPreprocessor {
class FASTDEPLOY_DECL PaddleSegPreprocessor : public ProcessorManager {
public:
/** \brief Create a preprocessor instance for PaddleSeg serials model
*
* \param[in] config_file Path of configuration file for deployment, e.g ppliteseg/deploy.yaml
*/
explicit PaddleSegPreprocessor(const std::string& config_file);

/** \brief Process the input image and prepare input tensors for runtime
/** \brief Implement the virtual function of ProcessorManager, Apply() is the
* body of Run(). Apply() contains the main logic of preprocessing, Run() is
* called by users to execute preprocessing
*
* \param[in] images The input image data list, all the elements are returned by cv::imread()
* \param[in] image_batch The input image batch
* \param[in] outputs The output tensors which will feed in runtime
* \param[in] imgs_info The original input images shape info map, key is "shape_info", value is vector<array<int, 2>> a{{height, width}}
* \return true if the preprocess successed, otherwise false
*/
virtual bool Run(
std::vector<FDMat>* images,
std::vector<FDTensor>* outputs,
std::map<std::string, std::vector<std::array<int, 2>>>* imgs_info);
virtual bool Apply(FDMatBatch* image_batch,
std::vector<FDTensor>* outputs);

/// Get is_vertical_screen property of PP-HumanSeg model, default is false
bool GetIsVerticalScreen() const {
Expand All @@ -54,6 +54,15 @@ class FASTDEPLOY_DECL PaddleSegPreprocessor {
void DisableNormalize();
/// This function will disable hwc2chw in preprocessing step.
void DisablePermute();
  /// Store a non-owning pointer to the images' shape-info map; Apply() fills
  /// it under key "shape_info" with one {height, width} entry per input image.
  /// NOTE(review): the map is caller-owned and must outlive preprocessing —
  /// confirm callers keep it alive until the matching postprocess step.
  void SetImgsInfo(
      std::map<std::string, std::vector<std::array<int, 2>>>* imgs_info) {
    imgs_info_ = imgs_info;
  }
  /// Return the non-owning pointer to the shape-info map previously supplied
  /// via SetImgsInfo() (may be whatever it was last set to; not reset here).
  std::map<std::string, std::vector<std::array<int, 2>>>* GetImgsInfo() {
    return imgs_info_;
  }

private:
virtual bool BuildPreprocessPipelineFromConfig();
Expand All @@ -72,6 +81,10 @@ class FASTDEPLOY_DECL PaddleSegPreprocessor {
bool is_contain_resize_op_ = false;

bool initialized_ = false;

std::map<std::string, std::vector<std::array<int, 2>>>* imgs_info_;
std::shared_ptr<Resize> pre_resize_op_ =
std::make_shared<Resize>(0, 0);
};

} // namespace segmentation
Expand Down
Loading

0 comments on commit ed19c75

Please sign in to comment.