#include "fastdeploy/utils/utils.h"

enum FASTDEPLOY_DECL Device {
  // ... (Device enumerators elided in this excerpt)
};

static std::map<ModelFormat, std::vector<Backend>>
    s_default_backends_by_format = {
        // ... (entries elided in this excerpt)
};

static std::map<Device, std::vector<Backend>>
    s_default_backends_by_device = {
        // ... (earlier entries elided in this excerpt)
        {Device::DIRECTML, {Backend::ORT}}};

inline bool Supported(ModelFormat format, Backend backend) {
  auto iter = s_default_backends_by_format.find(format);
  if (iter == s_default_backends_by_format.end()) {
    FDERROR << "Didn't find format is registered in "
            << "s_default_backends_by_format." << std::endl;
    return false;
  }
  for (size_t i = 0; i < iter->second.size(); ++i) {
    if (iter->second[i] == backend) {
      return true;
    }
  }
  std::string msg = Str(iter->second);
  FDERROR << backend << " only supports " << msg << ", but now it's "
          << format << "." << std::endl;
  return false;
}

inline bool Supported(Device device, Backend backend) {
  auto iter = s_default_backends_by_device.find(device);
  if (iter == s_default_backends_by_device.end()) {
    FDERROR << "Didn't find device is registered in "
            << "s_default_backends_by_device." << std::endl;
    return false;
  }
  for (size_t i = 0; i < iter->second.size(); ++i) {
    if (iter->second[i] == backend) {
      return true;
    }
  }
  std::string msg = Str(iter->second);
  FDERROR << backend << " only supports " << msg << ", but now it's "
          << device << "." << std::endl;
  return false;
}

FASTDEPLOY_DECL std::ostream& operator<<(std::ostream& o, const Backend& b);
FASTDEPLOY_DECL std::ostream& operator<<(std::ostream& o, const Device& d);
FASTDEPLOY_DECL std::ostream& operator<<(std::ostream& o, const ModelFormat& f);
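The two Supported() overloads above consult the static tables to check whether a backend can serve a given model format or device, and the streaming operators print readable enum names. A minimal usage sketch follows; the include path fastdeploy/runtime/enum_variables.h and the exact enum values used are assumptions about the build, not confirmed by this excerpt.

// Sketch (assumed include path): validate a configuration before building a runtime.
#include <iostream>

#include "fastdeploy/runtime/enum_variables.h"  // assumed location of this header

int main() {
  using namespace fastdeploy;

  // Supported(ModelFormat, Backend) looks the pair up in s_default_backends_by_format.
  if (!Supported(ModelFormat::ONNX, Backend::TRT)) {
    std::cerr << "TensorRT does not accept this model format." << std::endl;
  }

  // Supported(Device, Backend) looks the pair up in s_default_backends_by_device.
  if (!Supported(Device::CPU, Backend::OPENVINO)) {
    std::cerr << "OpenVINO is not registered as a CPU backend." << std::endl;
  }

  // The operator<< overloads declared above print readable names for the enums.
  std::cout << Backend::ORT << " / " << Device::GPU << " / "
            << ModelFormat::PADDLE << std::endl;
  return 0;
}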
namespace fastdeploy: All C++ FastDeploy APIs are defined inside this namespace. (option.h:16)

enum Backend (enum_variables.h:30):
- Backend::UNKNOWN: Unknown inference backend. (enum_variables.h:31)
- Backend::TRT: TensorRT, supports Paddle/ONNX format models, Nvidia GPU only. (enum_variables.h:34)
- Backend::PDINFER: Paddle Inference, supports Paddle format models, CPU / Nvidia GPU. (enum_variables.h:35)
- Backend::POROS: Poros, supports TorchScript format models, CPU / Nvidia GPU. (enum_variables.h:36)
- Backend::OPENVINO: Intel OpenVINO, supports Paddle/ONNX formats, CPU only. (enum_variables.h:37)
- Backend::LITE: Paddle Lite, supports Paddle format models, ARM CPU only. (enum_variables.h:38)
- Backend::RKNPU2: RKNPU2, supports RKNN format models, Rockchip NPU only. (enum_variables.h:39)
- Backend::SOPHGOTPU: SOPHGOTPU, supports SOPHGO format models, Sophgo TPU only. (enum_variables.h:40)

enum ModelFormat (enum_variables.h:67):
- ModelFormat::AUTOREC: Auto-recognize the model format from the model file name. (enum_variables.h:68)
- ModelFormat::PADDLE: Model in PaddlePaddle format. (enum_variables.h:69)
- ModelFormat::ONNX: Model in ONNX format. (enum_variables.h:70)
- ModelFormat::RKNN: Model in RKNN format. (enum_variables.h:71)
- ModelFormat::TORCHSCRIPT: Model in TorchScript format. (enum_variables.h:72)
- ModelFormat::SOPHGO: Model in SOPHGO format. (enum_variables.h:73)

Functions:
- bool IsBackendAvailable(const Backend& backend): Check whether the given inference backend is available. (enum_variables.cc:119)
- std::vector<Backend> GetAvailableBackends(): Get all the available inference backends in FastDeploy. (enum_variables.cc:90)
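Unlike the static Supported() tables, these two functions report what was actually compiled into the FastDeploy library in use. A short sketch under the same assumption about the include path:

// Sketch: list the backends available in this build and probe one of them.
#include <iostream>
#include <vector>

#include "fastdeploy/runtime/enum_variables.h"  // assumed location of this header

int main() {
  std::vector<fastdeploy::Backend> backends = fastdeploy::GetAvailableBackends();
  std::cout << "Available backends:";
  for (size_t i = 0; i < backends.size(); ++i) {
    std::cout << " " << backends[i];  // printed via the operator<< overload
  }
  std::cout << std::endl;

  // IsBackendAvailable() answers the same question for a single backend.
  if (fastdeploy::IsBackendAvailable(fastdeploy::Backend::ORT)) {
    std::cout << "ONNX Runtime backend is compiled in." << std::endl;
  }
  return 0;
}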