16 #include "fastdeploy/core/fd_type.h" 48 const std::vector<int32_t>& min,
49 const std::vector<int32_t>& opt,
50 const std::vector<int32_t>& max) {
51 min_shape[tensor_name].clear();
52 max_shape[tensor_name].clear();
53 opt_shape[tensor_name].clear();
54 min_shape[tensor_name].assign(min.begin(), min.end());
55 if (opt.size() == 0) {
56 opt_shape[tensor_name].assign(min.begin(), min.end());
58 opt_shape[tensor_name].assign(opt.begin(), opt.end());
60 if (max.size() == 0) {
61 max_shape[tensor_name].assign(min.begin(), min.end());
63 max_shape[tensor_name].assign(max.begin(), max.end());
71 std::map<std::string, std::vector<int32_t>> max_shape;
72 std::map<std::string, std::vector<int32_t>> min_shape;
73 std::map<std::string, std::vector<int32_t>> opt_shape;
74 bool enable_pinned_memory =
false;
75 void* external_stream_ =
nullptr;
77 std::string model_file =
"";
78 std::string params_file =
"";
bool enable_log_info
Enable logging while converting the ONNX model to TensorRT.
Definition: option.h:34
std::string serialize_file
Set cache file path while using the TensorRT backend. Loading a Paddle/ONNX model and initializing TensorRT ...
Definition: option.h:67
Auto recognize the model format by model file name.
Definition: enum_variables.h:68
void SetShape(const std::string &tensor_name, const std::vector< int32_t > &min, const std::vector< int32_t > &opt, const std::vector< int32_t > &max)
Set shape range of input tensor for the model that contain dynamic input shape while using TensorRT b...
Definition: option.h:47
ModelFormat
Definition: enum_variables.h:67
size_t max_workspace_size
max_workspace_size for TensorRT
Definition: option.h:31
size_t max_batch_size
max_batch_size, it's deprecated in TensorRT 8.x
Definition: option.h:28
Option object to configure TensorRT backend.
Definition: option.h:26
bool enable_fp16
Enable half-precision inference; on devices that do not support half precision, it will fall back to float3...
Definition: option.h:38
All C++ FastDeploy APIs are defined inside this namespace.
Definition: option.h:16