[Model] [Part 1] Refactor PaddleClas module (PaddlePaddle#508)
* Split PaddleClas Module refactor

* Split PaddleClas Module refactor

* fix bug
jiangjiajun authored Nov 7, 2022
1 parent 40b099a commit 6633fa3
Showing 17 changed files with 227 additions and 269 deletions.
1 change: 1 addition & 0 deletions fastdeploy/core/fd_tensor.cc
@@ -131,6 +131,7 @@ void FDTensor::Resize(const std::vector<int64_t>& new_shape,
const FDDataType& data_type,
const std::string& tensor_name,
const Device& new_device) {
external_data_ptr = nullptr;
name = tensor_name;
device = new_device;
dtype = data_type;
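The added external_data_ptr reset means Resize now detaches any buffer previously attached through SetExternalData, so the tensor falls back to memory it manages itself. A small sketch of the scenario this guards against (FDDataType::FP32 and Device::CPU are assumed enum values not shown in this diff, and the buffer contents are placeholders):

#include "fastdeploy/core/fd_tensor.h"
#include <vector>

int main() {
  std::vector<float> external(12, 1.0f);
  fastdeploy::FDTensor tensor;
  // Borrow the caller-owned buffer without copying.
  tensor.SetExternalData({3, 4}, fastdeploy::FDDataType::FP32,
                         external.data(), fastdeploy::Device::CPU);
  // With this fix, Resize clears external_data_ptr, so the tensor no longer
  // points at the borrowed buffer after being reshaped and re-typed.
  tensor.Resize({2, 6}, fastdeploy::FDDataType::FP32, "x",
                fastdeploy::Device::CPU);
  return 0;
}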
6 changes: 6 additions & 0 deletions fastdeploy/core/fd_tensor.h
@@ -93,6 +93,12 @@ struct FASTDEPLOY_DECL FDTensor {
// Total number of elements in this tensor
int Numel() const;

// Get shape of FDTensor
std::vector<int64_t> Shape() const { return shape; }

// Get dtype of FDTensor
FDDataType Dtype() const { return dtype; }

void Resize(size_t nbytes);

void Resize(const std::vector<int64_t>& new_shape);
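The new Shape() and Dtype() accessors expose tensor metadata without touching the underlying buffer. A minimal C++ sketch of how they might be used (FDDataType::FP32 and Device::CPU are assumed enum values; they are not part of this diff):

#include "fastdeploy/core/fd_tensor.h"
#include <iostream>

int main() {
  fastdeploy::FDTensor tensor;
  // Allocate a 1x3x224x224 float tensor via the four-argument Resize overload.
  tensor.Resize({1, 3, 224, 224}, fastdeploy::FDDataType::FP32, "images",
                fastdeploy::Device::CPU);
  for (auto dim : tensor.Shape()) {  // prints: 1 3 224 224
    std::cout << dim << " ";
  }
  std::cout << std::endl;
  std::cout << (tensor.Dtype() == fastdeploy::FDDataType::FP32) << std::endl;  // 1
  std::cout << tensor.Numel() << std::endl;  // 150528
  return 0;
}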
35 changes: 35 additions & 0 deletions fastdeploy/pybind/fd_tensor.cc
@@ -0,0 +1,35 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/fastdeploy_model.h"
#include "fastdeploy/pybind/main.h"

namespace fastdeploy {

void BindFDTensor(pybind11::module& m) {
pybind11::class_<FDTensor>(m, "FDTensor")
.def(pybind11::init<>(), "Default Constructor")
.def_readwrite("name", &FDTensor::name)
.def_readonly("shape", &FDTensor::shape)
.def_readonly("dtype", &FDTensor::dtype)
.def_readonly("device", &FDTensor::device)
.def("numpy", [](FDTensor& self) {
return TensorToPyArray(self);
})
.def("from_numpy", [](FDTensor& self, pybind11::array& pyarray, bool share_buffer = false) {
PyArrayToTensor(pyarray, &self, share_buffer);
});
}

} // namespace fastdeploy
7 changes: 5 additions & 2 deletions fastdeploy/pybind/main.cc.in
@@ -16,6 +16,7 @@

namespace fastdeploy {

void BindFDTensor(pybind11::module&);
void BindRuntime(pybind11::module&);
void BindFDModel(pybind11::module&);
void BindVision(pybind11::module&);
@@ -70,7 +71,7 @@ void PyArrayToTensor(pybind11::array& pyarray, FDTensor* tensor,
data_shape.insert(data_shape.begin(), pyarray.shape(),
pyarray.shape() + pyarray.ndim());
if (share_buffer) {
tensor-> SetExternalData(data_shape, dtype,
tensor->SetExternalData(data_shape, dtype,
pyarray.mutable_data());
} else {
tensor->Resize(data_shape, dtype);
@@ -80,6 +81,7 @@ void PyArrayToTensor(pybind11::array& pyarray, FDTensor* tensor,

void PyArrayToTensorList(std::vector<pybind11::array>& pyarrays, std::vector<FDTensor>* tensors,
bool share_buffer) {
tensors->resize(pyarrays.size());
for(auto i = 0; i < pyarrays.size(); ++i) {
PyArrayToTensor(pyarrays[i], &(*tensors)[i], share_buffer);
}
@@ -88,7 +90,7 @@ void PyArrayToTensorList(std::vector<pybind11::array>& pyarrays, std::vector<FDTensor>* tensors,
pybind11::array TensorToPyArray(const FDTensor& tensor) {
auto numpy_dtype = FDDataTypeToNumpyDataType(tensor.dtype);
auto out = pybind11::array(numpy_dtype, tensor.shape);
memcpy(out.mutable_data(), tensor.Data(), tensor.Numel() * FDDataTypeSize(tensor.dtype));
memcpy(out.mutable_data(), tensor.CpuData(), tensor.Nbytes());
return out;
}

@@ -149,6 +151,7 @@ PYBIND11_MODULE(@PY_LIBRARY_NAME@, m) {
"Make programer easier to deploy deeplearning model, save time to save "
"the world!";

BindFDTensor(m);
BindRuntime(m);
BindFDModel(m);
#ifdef ENABLE_VISION
1 change: 1 addition & 0 deletions fastdeploy/pybind/main.h
@@ -17,6 +17,7 @@
#include <pybind11/numpy.h>
#include <pybind11/pybind11.h>
#include <pybind11/stl.h>
#include <pybind11/eval.h>

#include <type_traits>

46 changes: 19 additions & 27 deletions fastdeploy/pybind/runtime.cc
@@ -162,6 +162,25 @@ void BindRuntime(pybind11::module& m) {
}
return results;
})
.def("infer", [](Runtime& self, std::map<std::string, FDTensor>& data) {
std::vector<FDTensor> inputs;
inputs.reserve(data.size());
for (auto iter = data.begin(); iter != data.end(); ++iter) {
FDTensor tensor;
tensor.SetExternalData(iter->second.Shape(), iter->second.Dtype(), iter->second.Data(), iter->second.device);
tensor.name = iter->first;
inputs.push_back(tensor);
}
std::vector<FDTensor> outputs;
if (!self.Infer(inputs, &outputs)) {
pybind11::eval("raise Exception('Failed to inference with Runtime.')");
}
return outputs;
})
.def("infer", [](Runtime& self, std::vector<FDTensor>& inputs) {
std::vector<FDTensor> outputs;
return self.Infer(inputs, &outputs);
})
.def("num_inputs", &Runtime::NumInputs)
.def("num_outputs", &Runtime::NumOutputs)
.def("get_input_info", &Runtime::GetInputInfo)
@@ -202,33 +221,6 @@ void BindRuntime(pybind11::module& m) {
.value("FP64", FDDataType::FP64)
.value("UINT8", FDDataType::UINT8);

pybind11::class_<FDTensor>(m, "FDTensor", pybind11::buffer_protocol())
.def(pybind11::init())
.def("cpu_data",
[](FDTensor& self) {
auto ptr = self.CpuData();
auto numel = self.Numel();
auto dtype = FDDataTypeToNumpyDataType(self.dtype);
auto base = pybind11::array(dtype, self.shape);
return pybind11::array(dtype, self.shape, ptr, base);
})
.def("resize", static_cast<void (FDTensor::*)(size_t)>(&FDTensor::Resize))
.def("resize",
static_cast<void (FDTensor::*)(const std::vector<int64_t>&)>(
&FDTensor::Resize))
.def(
"resize",
[](FDTensor& self, const std::vector<int64_t>& shape,
const FDDataType& dtype, const std::string& name,
const Device& device) { self.Resize(shape, dtype, name, device); })
.def("numel", &FDTensor::Numel)
.def("nbytes", &FDTensor::Nbytes)
.def_readwrite("name", &FDTensor::name)
.def_readwrite("is_pinned_memory", &FDTensor::is_pinned_memory)
.def_readonly("shape", &FDTensor::shape)
.def_readonly("dtype", &FDTensor::dtype)
.def_readonly("device", &FDTensor::device);

m.def("get_available_backends", []() { return GetAvailableBackends(); });
}

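The first new infer overload above avoids copying tensor data: each named input is wrapped with SetExternalData and the whole batch is handed to Runtime::Infer. A rough C++ equivalent of that path, assuming the runtime header lives at fastdeploy/runtime.h and that the Runtime has already been initialized elsewhere:

#include "fastdeploy/runtime.h"
#include <map>
#include <string>
#include <vector>

// Sketch of the map-based inference path mirrored by the new pybind overload.
std::vector<fastdeploy::FDTensor> RunNamed(
    fastdeploy::Runtime& runtime,
    std::map<std::string, fastdeploy::FDTensor>& named_inputs) {
  std::vector<fastdeploy::FDTensor> inputs;
  inputs.reserve(named_inputs.size());
  for (auto& item : named_inputs) {
    fastdeploy::FDTensor tensor;
    // Zero-copy view over the caller's buffer, exactly as the binding does.
    tensor.SetExternalData(item.second.Shape(), item.second.Dtype(),
                           item.second.Data(), item.second.device);
    tensor.name = item.first;
    inputs.push_back(tensor);
  }
  std::vector<fastdeploy::FDTensor> outputs;
  if (!runtime.Infer(inputs, &outputs)) {
    // The Python binding raises an exception here; a C++ caller could throw
    // or return an empty vector instead.
  }
  return outputs;
}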
88 changes: 0 additions & 88 deletions fastdeploy/vision/common/processors/limit_long.cc

This file was deleted.

51 changes: 0 additions & 51 deletions fastdeploy/vision/common/processors/limit_long.h

This file was deleted.

2 changes: 1 addition & 1 deletion fastdeploy/vision/common/processors/limit_short.cc
@@ -65,7 +65,7 @@ bool LimitShort::ImplByFlyCV(Mat* mat) {
} else if (interp_ == 2) {
interp_method = fcv::InterpolationType::INTER_CUBIC;
} else {
FDERROR << "LimitLong: Only support interp_ be 0/1/2 with FlyCV, but "
FDERROR << "LimitShort: Only support interp_ be 0/1/2 with FlyCV, but "
"now it's "
<< interp_ << "." << std::endl;
return false;
13 changes: 13 additions & 0 deletions fastdeploy/vision/common/processors/mat.cc
@@ -174,5 +174,18 @@ Mat Mat::Create(int height, int width, int channels,
return mat;
}

FDMat WrapMat(const cv::Mat& image) {
FDMat mat(image);
return mat;
}

std::vector<FDMat> WrapMat(const std::vector<cv::Mat>& images) {
std::vector<FDMat> mats;
for (size_t i = 0; i < images.size(); ++i) {
mats.emplace_back(FDMat(images[i]));
}
return mats;
}

} // namespace vision
} // namespace fastdeploy
10 changes: 10 additions & 0 deletions fastdeploy/vision/common/processors/mat.h
@@ -147,5 +147,15 @@ struct FASTDEPLOY_DECL Mat {
FDDataType type, void* data, ProcLib lib);
};

typedef Mat FDMat;
/*
* @brief Wrap a cv::Mat to FDMat, there's no memory copy, memory buffer is managed by user
*/
FASTDEPLOY_DECL FDMat WrapMat(const cv::Mat& image);
/*
* @brief Wrap a vector<cv::Mat> to vector<FDMat>, there's no memory copy, memory buffer is managed by user
*/
FASTDEPLOY_DECL std::vector<FDMat> WrapMat(const std::vector<cv::Mat>& images);

} // namespace vision
} // namespace fastdeploy
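WrapMat gives the vision preprocessing pipeline a view of an existing cv::Mat without copying pixels, so the cv::Mat must stay alive as long as the FDMat is used. A short usage sketch, assuming OpenCV is available and "test.jpg" is a placeholder image path:

#include "fastdeploy/vision/common/processors/mat.h"
#include <opencv2/opencv.hpp>
#include <vector>

int main() {
  cv::Mat image = cv::imread("test.jpg");  // placeholder path
  std::vector<cv::Mat> frames = {image, image.clone()};

  // Zero-copy wrappers; the pixel buffers remain owned by the cv::Mat objects.
  fastdeploy::vision::FDMat single = fastdeploy::vision::WrapMat(image);
  std::vector<fastdeploy::vision::FDMat> batch =
      fastdeploy::vision::WrapMat(frames);
  return 0;
}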
2 changes: 1 addition & 1 deletion fastdeploy/vision/common/processors/resize.cc
@@ -79,7 +79,7 @@ bool Resize::ImplByFlyCV(Mat* mat) {
} else if (interp_ == 2) {
interp_method = fcv::InterpolationType::INTER_CUBIC;
} else {
FDERROR << "LimitLong: Only support interp_ be 0/1/2 with FlyCV, but "
FDERROR << "Resize: Only support interp_ be 0/1/2 with FlyCV, but "
"now it's "
<< interp_ << "." << std::endl;
return false;