Skip to content

Commit

Permalink
[Hackathon 181] Add TVM support for FastDeploy on macOS (PaddlePaddle…
Browse files Browse the repository at this point in the history
…#1969)

* update for tvm backend

* update third_party

* update third_party

* update

* update

* update

* update

* update

* update

* update

* update

---------

Co-authored-by: DefTruth <[email protected]>
  • Loading branch information
Zheng-Bicheng and DefTruth authored May 25, 2023
1 parent 49c033a commit 643730b
Show file tree
Hide file tree
Showing 20 changed files with 658 additions and 31 deletions.
13 changes: 12 additions & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,7 @@ option(ENABLE_POROS_BACKEND "Whether to enable poros backend." OFF)
option(ENABLE_OPENVINO_BACKEND "Whether to enable openvino backend." OFF)
option(ENABLE_RKNPU2_BACKEND "Whether to enable RKNPU2 backend." OFF)
option(ENABLE_SOPHGO_BACKEND "Whether to enable SOPHON backend." OFF)
option(ENABLE_TVM_BACKEND "Whether to enable TVM backend." OFF)
option(ENABLE_LITE_BACKEND "Whether to enable paddle lite backend." OFF)
option(ENABLE_HORIZON_BACKEND "Whether to enable HORIZON backend." OFF)
option(ENABLE_VISION "Whether to enable vision models usage." OFF)
Expand Down Expand Up @@ -169,6 +170,7 @@ file(GLOB_RECURSE DEPLOY_OPENVINO_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/f
file(GLOB_RECURSE DEPLOY_RKNPU2_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/runtime/backends/rknpu2/*.cc)
file(GLOB_RECURSE DEPLOY_HORIZON_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/runtime/backends/horizon/*.cc)
file(GLOB_RECURSE DEPLOY_SOPHGO_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/runtime/backends/sophgo/*.cc)
file(GLOB_RECURSE DEPLOY_TVM_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/runtime/backends/tvm/*.cc)
file(GLOB_RECURSE DEPLOY_LITE_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/runtime/backends/lite/*.cc)
file(GLOB_RECURSE DEPLOY_ENCRYPTION_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/encryption/*.cc)
file(GLOB_RECURSE DEPLOY_PIPELINE_SRCS ${PROJECT_SOURCE_DIR}/${CSRCS_DIR_NAME}/fastdeploy/pipeline/*.cc)
Expand All @@ -188,7 +190,8 @@ list(REMOVE_ITEM ALL_DEPLOY_SRCS ${DEPLOY_ORT_SRCS} ${DEPLOY_PADDLE_SRCS}
${DEPLOY_OPENVINO_SRCS} ${DEPLOY_LITE_SRCS}
${DEPLOY_VISION_SRCS} ${DEPLOY_TEXT_SRCS}
${DEPLOY_PIPELINE_SRCS} ${DEPLOY_RKNPU2_SRCS}
${DEPLOY_SOPHGO_SRCS} ${DEPLOY_ENCRYPTION_SRCS} ${DEPLOY_HORIZON_SRCS})
${DEPLOY_SOPHGO_SRCS} ${DEPLOY_ENCRYPTION_SRCS}
${DEPLOY_HORIZON_SRCS} ${DEPLOY_TVM_SRCS})

set(DEPEND_LIBS "")

Expand Down Expand Up @@ -263,6 +266,14 @@ if(ENABLE_HORIZON_BACKEND)
list(APPEND DEPEND_LIBS ${BPU_libs})
endif()

# Wire in the TVM inference backend (download/setup logic lives in cmake/tvm.cmake).
if(ENABLE_TVM_BACKEND)
# NOTE(review): raising the global C++ standard here affects every target configured
# after this point, not just the TVM sources — confirm this is intended; a per-target
# target_compile_features(... cxx_std_17) would be safer.
set(CMAKE_CXX_STANDARD 17)
add_definitions(-DENABLE_TVM_BACKEND)
list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_TVM_SRCS})
# tvm.cmake downloads the prebuilt TVM runtime and defines TVM_RUNTIME_LIB.
include(${PROJECT_SOURCE_DIR}/cmake/tvm.cmake)
list(APPEND DEPEND_LIBS ${TVM_RUNTIME_LIB})
endif()

if(ENABLE_SOPHGO_BACKEND)
add_definitions(-DENABLE_SOPHGO_BACKEND)
list(APPEND ALL_DEPLOY_SRCS ${DEPLOY_SOPHGO_SRCS})
Expand Down
10 changes: 10 additions & 0 deletions FastDeploy.cmake.in
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@ set(RKNN2_TARGET_SOC "@RKNN2_TARGET_SOC@")
# Inference backend and FastDeploy Moudle
set(ENABLE_ORT_BACKEND @ENABLE_ORT_BACKEND@)
set(ENABLE_RKNPU2_BACKEND @ENABLE_RKNPU2_BACKEND@)
set(ENABLE_TVM_BACKEND @ENABLE_TVM_BACKEND@)
set(ENABLE_HORIZON_BACKEND @ENABLE_HORIZON_BACKEND@)
set(ENABLE_SOPHGO_BACKEND @ENABLE_SOPHGO_BACKEND@)
set(ENABLE_LITE_BACKEND @ENABLE_LITE_BACKEND@)
Expand Down Expand Up @@ -129,6 +130,15 @@ if(ENABLE_ORT_BACKEND)
list(APPEND FASTDEPLOY_LIBS ${ORT_LIB})
endif()

if(ENABLE_TVM_BACKEND)
  # Pick the platform-specific TVM runtime shared library shipped inside the SDK.
  if(APPLE)
    set(tvm_runtime_lib_ext "dylib")
  else()
    set(tvm_runtime_lib_ext "so")
  endif()
  set(TVM_RUNTIME_LIB "${CMAKE_CURRENT_LIST_DIR}/third_libs/install/tvm/lib/libtvm_runtime.${tvm_runtime_lib_ext}")
  list(APPEND FASTDEPLOY_LIBS ${TVM_RUNTIME_LIB})
endif()

if(ENABLE_PADDLE_BACKEND)
find_library(PADDLE_LIB paddle_inference ${CMAKE_CURRENT_LIST_DIR}/third_libs/install/paddle_inference/paddle/lib NO_DEFAULT_PATH)
if(WIN32)
Expand Down
1 change: 1 addition & 0 deletions cmake/summary.cmake
Original file line number Diff line number Diff line change
Expand Up @@ -40,6 +40,7 @@ function(fastdeploy_summary)
message(STATUS " ENABLE_POROS_BACKEND : ${ENABLE_POROS_BACKEND}")
message(STATUS " ENABLE_TRT_BACKEND : ${ENABLE_TRT_BACKEND}")
message(STATUS " ENABLE_OPENVINO_BACKEND : ${ENABLE_OPENVINO_BACKEND}")
message(STATUS " ENABLE_TVM_BACKEND : ${ENABLE_TVM_BACKEND}")
message(STATUS " ENABLE_BENCHMARK : ${ENABLE_BENCHMARK}")
message(STATUS " ENABLE_VISION : ${ENABLE_VISION}")
message(STATUS " ENABLE_TEXT : ${ENABLE_TEXT}")
Expand Down
55 changes: 55 additions & 0 deletions cmake/tvm.cmake
Original file line number Diff line number Diff line change
@@ -0,0 +1,55 @@
# Download and set up the prebuilt TVM runtime (plus dlpack / dmlc-core headers)
# as a FastDeploy inference backend. On success this defines TVM_RUNTIME_LIB for
# the including CMakeLists.txt to link against.

set(TVM_URL_BASE "https://bj.bcebos.com/fastdeploy/third_libs/")
set(TVM_VERSION "0.12.0")
set(TVM_SYSTEM "")

# Only macOS/arm64 and Linux/x86 prebuilt packages are published.
if(CMAKE_SYSTEM_NAME MATCHES "Darwin")
  if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "arm64")
    set(TVM_SYSTEM "macos-arm64")
  endif()
elseif(CMAKE_SYSTEM_NAME MATCHES "Linux")
  if(CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "x86")
    set(TVM_SYSTEM "linux-x86")
  endif()
endif()

# was: error(...) — not a CMake command; it aborted with "Unknown CMake command"
# instead of the intended message. Also catches unsupported arches (e.g. Linux/arm)
# that previously fell through with an empty TVM_SYSTEM and a garbage download URL.
if("${TVM_SYSTEM}" STREQUAL "")
  message(FATAL_ERROR "[tvm.cmake] TVM backend only supports macOS on arm64 or Linux on x86.")
endif()

set(TVM_FILE "tvm-${TVM_SYSTEM}-${TVM_VERSION}.tgz")
set(TVM_URL "${TVM_URL_BASE}${TVM_FILE}")

set(TVM_RUNTIME_PATH "${THIRD_PARTY_PATH}/install/tvm")
execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory "${TVM_RUNTIME_PATH}")
download_and_decompress(${TVM_URL}
                        "${CMAKE_CURRENT_BINARY_DIR}/${TVM_FILE}"
                        "${THIRD_PARTY_PATH}/install/")
include_directories(${TVM_RUNTIME_PATH}/include)

# Copy the in-tree dlpack headers next to the other third-party installs.
set(DLPACK_PATH "${THIRD_PARTY_PATH}/install/dlpack")
execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory "${DLPACK_PATH}")
# was: destination "${THIRD_PARTY_PATH}/install/" — that dumps dlpack's contents
# directly into install/, leaving ${DLPACK_PATH}/include (used below) empty.
execute_process(COMMAND ${CMAKE_COMMAND} -E copy_directory
        "${PROJECT_SOURCE_DIR}/third_party/dlpack"
        "${DLPACK_PATH}")
include_directories(${DLPACK_PATH}/include)

set(DMLC_CORE_PATH "${THIRD_PARTY_PATH}/install/dmlc-core")
execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory "${DMLC_CORE_PATH}")
set(DMLC_CORE_URL https://bj.bcebos.com/fastdeploy/third_libs/dmlc-core.tgz)
download_and_decompress(${DMLC_CORE_URL}
                        "${CMAKE_CURRENT_BINARY_DIR}/dmlc-core.tgz"
                        "${THIRD_PARTY_PATH}/install/")
include_directories(${DMLC_CORE_PATH}/include)

# Locate the runtime shared library inside the extracted package.
if(EXISTS ${TVM_RUNTIME_PATH})
  if(APPLE)
    set(TVM_RUNTIME_LIB ${TVM_RUNTIME_PATH}/lib/libtvm_runtime.dylib)
  else()
    set(TVM_RUNTIME_LIB ${TVM_RUNTIME_PATH}/lib/libtvm_runtime.so)
  endif()
  include(${TVM_RUNTIME_PATH}/lib/cmake/tvm/tvmConfig.cmake)
  # Route dmlc-core logging through TVM's own logging header.
  add_definitions(-DDMLC_USE_LOGGING_LIBRARY=<tvm/runtime/logging.h>)
else()
  # was: error(FATAL_ERROR ...) — same unknown-command bug as above.
  message(FATAL_ERROR "[tvm.cmake] TVM_RUNTIME_PATH does not exist.")
endif()
35 changes: 35 additions & 0 deletions examples/vision/detection/paddledetection/tvm/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
[English](README.md) | 简体中文

# PaddleDetection TVM部署示例

在TVM上已经通过测试的PaddleDetection模型如下:

* picodet
* PPYOLOE

### Paddle模型转换为TVM模型

由于TVM不支持NMS算子,因此在转换模型前我们需要对PaddleDetection模型进行裁剪,将模型的输出节点改为NMS节点的输入节点。
输入以下命令,你将得到一个裁剪后的PPYOLOE模型。

```bash
git clone https://github.com/PaddlePaddle/Paddle2ONNX.git
cd Paddle2ONNX/tools/paddle
wget https://bj.bcebos.com/fastdeploy/models/ppyoloe_plus_crn_m_80e_coco.tgz
tar xvf ppyoloe_plus_crn_m_80e_coco.tgz
python prune_paddle_model.py --model_dir ppyoloe_plus_crn_m_80e_coco \
--model_filename model.pdmodel \
--params_filename model.pdiparams \
--output_names tmp_17 concat_14.tmp_0 \
--save_dir ppyoloe_plus_crn_m_80e_coco
```

裁剪完模型后我们就可以通过tvm python库实现编译模型,这里为了方便大家使用,提供了转换脚本。
输入以下命令,你将得到转换过后的TVM模型。
注意,FastDeploy在推理PPYOLOE时不仅依赖模型,还依赖yml文件,因此你还需要将对应的yml文件拷贝到模型目录下。

```bash
python path/to/FastDeploy/tools/tvm/paddle2tvm.py --model_path=./ppyoloe_plus_crn_m_80e_coco/model \
--shape_dict="{'image': [1, 3, 640, 640], 'scale_factor': [1, 2]}"
cp ppyoloe_plus_crn_m_80e_coco/infer_cfg.yml tvm_save
```
13 changes: 13 additions & 0 deletions examples/vision/detection/paddledetection/tvm/cpp/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,13 @@
# cmake_minimum_required must come first: it sets the policy baseline that
# project() relies on (calling project() first is a long-standing CMake pitfall).
cmake_minimum_required(VERSION 3.10)
project(infer_demo C CXX)

# Path to the downloaded/extracted FastDeploy SDK.
# was: option(FASTDEPLOY_INSTALL_DIR ...) — option() is for booleans only;
# a path must be declared as a CACHE PATH variable.
set(FASTDEPLOY_INSTALL_DIR "" CACHE PATH "Path of downloaded fastdeploy sdk.")
if(NOT FASTDEPLOY_INSTALL_DIR)
  message(FATAL_ERROR "Please set -DFASTDEPLOY_INSTALL_DIR to the FastDeploy SDK directory.")
endif()

include(${FASTDEPLOY_INSTALL_DIR}/FastDeploy.cmake)

# FastDeploy public headers.
include_directories(${FASTDEPLOY_INCS})

add_executable(infer_ppyoloe_demo ${PROJECT_SOURCE_DIR}/infer_ppyoloe_demo.cc)
target_link_libraries(infer_ppyoloe_demo ${FASTDEPLOY_LIBS})
60 changes: 60 additions & 0 deletions examples/vision/detection/paddledetection/tvm/cpp/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
[English](README.md) | 简体中文

# PaddleDetection C++部署示例

本目录下提供`infer_ppyoloe_demo.cc`快速完成PPDetection模型使用TVM加速部署的示例。

## 转换模型并运行

```bash
# build example
mkdir build
cd build
cmake .. -DFASTDEPLOY_INSTALL_DIR=/path/to/fastdeploy-sdk
make -j
wget https://gitee.com/paddlepaddle/PaddleDetection/raw/release/2.4/demo/000000014439.jpg
./infer_ppyoloe_demo ../tvm_save 000000014439.jpg
```


## PaddleDetection C++接口

### 模型类

PaddleDetection目前支持多种模型系列,类名分别为`PPYOLOE`, `PicoDet`, `PaddleYOLOX`, `PPYOLO`, `FasterRCNN`,`SSD`,`PaddleYOLOv5`,`PaddleYOLOv6`,`PaddleYOLOv7`,`RTMDet`,`CascadeRCNN`,`PSSDet`,`RetinaNet`,`PPYOLOESOD`,`FCOS`,`TTFNet`,`TOOD`,`GFL`。所有类名的构造函数和预测函数在参数上完全一致,本文档以PPYOLOE为例讲解API
```c++
fastdeploy::vision::detection::PPYOLOE(
const string& model_file,
const string& params_file,
        const string& config_file,
const RuntimeOption& runtime_option = RuntimeOption(),
const ModelFormat& model_format = ModelFormat::PADDLE)
```
PaddleDetection PPYOLOE模型加载和初始化,其中model_file为编译后的TVM模型路径。
**参数**
> * **model_file**(str): 模型文件路径
> * **params_file**(str): 参数文件路径
> * **config_file**(str): 配置文件路径,即PaddleDetection导出的部署yaml文件
> * **runtime_option**(RuntimeOption): 后端推理配置,默认为None,即采用默认配置
> * **model_format**(ModelFormat): 模型格式,默认为PADDLE格式
#### Predict函数
> ```c++
> PPYOLOE::Predict(cv::Mat* im, DetectionResult* result)
> ```
>
> 模型预测接口,输入图像直接输出检测结果。
>
> **参数**
>
> > * **im**: 输入图像,注意需为HWC,BGR格式
> > * **result**: 检测结果,包括检测框,各个框的置信度, DetectionResult说明参考[视觉模型预测结果](../../../../../docs/api/vision_results/)
- [模型介绍](../../)
- [Python部署](../python)
- [视觉模型预测结果](../../../../../docs/api/vision_results/)
- [如何切换模型推理后端引擎](../../../../../docs/cn/faq/how_to_change_backend.md)
Original file line number Diff line number Diff line change
@@ -0,0 +1,57 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "fastdeploy/vision.h"

void TVMInfer(const std::string& model_dir, const std::string& image_file) {
auto model_file = model_dir + "/tvm_model";
auto params_file = "";
auto config_file = model_dir + "/infer_cfg.yml";

auto option = fastdeploy::RuntimeOption();
option.UseCpu();
option.UseTVMBackend();

auto format = fastdeploy::ModelFormat::TVMFormat;

auto model = fastdeploy::vision::detection::PPYOLOE(
model_file, params_file, config_file, option, format);
model.GetPostprocessor().ApplyNMS();

auto im = cv::imread(image_file);

fastdeploy::vision::DetectionResult res;
if (!model.Predict(&im, &res)) {
std::cerr << "Failed to predict." << std::endl;
return;
}

std::cout << res.Str() << std::endl;
auto vis_im = fastdeploy::vision::VisDetection(im, res, 0.5);
cv::imwrite("infer.jpg", vis_im);
std::cout << "Visualized result saved in ./infer.jpg" << std::endl;
}

// Entry point: expects <model_dir> <image> on the command line.
int main(int argc, char* argv[]) {
  constexpr int kRequiredArgs = 3;  // program name + model dir + image path
  if (argc >= kRequiredArgs) {
    TVMInfer(argv[1], argv[2]);
    return 0;
  }
  std::cout
      << "Usage: infer_demo path/to/model_dir path/to/image run_option, "
         "e.g ./infer_model ./picodet_model_dir ./test.jpeg"
      << std::endl;
  return -1;
}
6 changes: 5 additions & 1 deletion fastdeploy/core/config.h.in
Original file line number Diff line number Diff line change
Expand Up @@ -71,4 +71,8 @@

#ifndef ENABLE_HORIZON_BACKEND
#cmakedefine ENABLE_HORIZON_BACKEND
#endif
#endif

#ifndef ENABLE_TVM_BACKEND
#cmakedefine ENABLE_TVM_BACKEND
#endif
21 changes: 21 additions & 0 deletions fastdeploy/runtime/backends/tvm/option.h
Original file line number Diff line number Diff line change
@@ -0,0 +1,21 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#pragma once

namespace fastdeploy {
/// Configuration options for the TVM inference backend.
/// Currently empty; reserved for future TVM-specific settings.
struct TVMBackendOption {
  // was: TVMBackendOption() {} — a user-provided constructor makes the type
  // non-trivial and non-aggregate; = default preserves value semantics and idiom.
  TVMBackendOption() = default;
};

}  // namespace fastdeploy
Loading

0 comments on commit 643730b

Please sign in to comment.