Skip to content

Commit

Permalink
[Other] Add function alias (PaddlePaddle#571)
Browse files Browse the repository at this point in the history
Add function alias
  • Loading branch information
jiangjiajun authored Nov 12, 2022
1 parent 2693434 commit f2e492c
Show file tree
Hide file tree
Showing 2 changed files with 22 additions and 4 deletions.
10 changes: 10 additions & 0 deletions fastdeploy/runtime.h
Original file line number Diff line number Diff line change
Expand Up @@ -115,6 +115,11 @@ struct FASTDEPLOY_DECL RuntimeOption {
/// Set Paddle Inference as inference backend, support CPU/GPU
void UsePaddleBackend();

/// Alias of UsePaddleBackend(); selects Paddle Inference as the
/// inference backend (supports CPU/GPU).
void UsePaddleInferBackend() { UsePaddleBackend(); }

/// Set ONNX Runtime as inference backend, support CPU/GPU
void UseOrtBackend();

Expand All @@ -130,6 +135,11 @@ struct FASTDEPLOY_DECL RuntimeOption {
/// Set Paddle Lite as inference backend, only support arm cpu
void UseLiteBackend();

/// Alias of UseLiteBackend(); selects Paddle Lite as the
/// inference backend (ARM CPU only).
void UsePaddleLiteBackend() { UseLiteBackend(); }

/// Set mkldnn switch while using Paddle Inference as inference backend
void SetPaddleMKLDNN(bool pd_mkldnn = true);

Expand Down
16 changes: 12 additions & 4 deletions python/fastdeploy/runtime.py
Original file line number Diff line number Diff line change
Expand Up @@ -172,8 +172,7 @@ def unconst_ops_thres(self, value):
@long_to_int.setter
def long_to_int(self, value):
assert isinstance(
value,
bool), "The value to set `long_to_int` must be type of bool."
value, bool), "The value to set `long_to_int` must be type of bool."
self._option.long_to_int = value

@use_nvidia_tf32.setter
Expand Down Expand Up @@ -230,6 +229,11 @@ def use_paddle_backend(self):
"""
return self._option.use_paddle_backend()

def use_paddle_infer_backend(self):
    """Alias of use_paddle_backend().

    Selects the Paddle Inference backend, which supports running Paddle
    models on CPU / Nvidia GPU.
    """
    return self.use_paddle_backend()

def use_poros_backend(self):
"""Use Poros backend, support inference TorchScript model on CPU/Nvidia GPU.
"""
Expand All @@ -255,6 +259,11 @@ def use_lite_backend(self):
"""
return self._option.use_lite_backend()

def use_paddle_lite_backend(self):
    """Alias of use_lite_backend().

    Selects the Paddle Lite backend, which supports running Paddle
    models on ARM CPU.
    """
    return self.use_lite_backend()

def set_paddle_mkldnn(self, use_mkldnn=True):
"""Enable/Disable MKLDNN while using Paddle Inference backend, mkldnn is enabled by default.
"""
Expand Down Expand Up @@ -383,8 +392,7 @@ def __repr__(self):
continue
if hasattr(getattr(self._option, attr), "__call__"):
continue
message += " {} : {}\t\n".format(attr,
getattr(self._option, attr))
message += " {} : {}\t\n".format(attr, getattr(self._option, attr))
message.strip("\n")
message += ")"
return message

0 comments on commit f2e492c

Please sign in to comment.