option.h
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#pragma once

#include "fastdeploy/core/fd_type.h"

#include <iostream>
#include <memory>
#include <string>
#include <vector>
#include <map>

/// All C++ FastDeploy APIs are defined inside this namespace.
namespace fastdeploy {

/*! @brief Option object to configure ONNX Runtime backend.
 */
struct OrtBackendOption {
  /// Level of graph optimization, -1: default
  int graph_optimization_level = -1;
  /// Number of threads to execute the operator, -1: default
  int intra_op_num_threads = -1;
  /// Number of threads to execute the graph, -1: default
  /// (only takes effect when execution_mode is 1)
  int inter_op_num_threads = -1;
  /// Execution mode of the graph, -1: default (sequential), 1: parallel
  int execution_mode = -1;
  /// Inference device, OrtBackend supports CPU/GPU.
  Device device = Device::CPU;
  /// Inference device id.
  int device_id = 0;
  void* external_stream_ = nullptr;
  /// Use fp16 to infer.
  bool enable_fp16 = false;
};

}  // namespace fastdeploy
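For context, here is a minimal usage sketch. In FastDeploy these options are normally set through fastdeploy::RuntimeOption rather than by constructing OrtBackendOption directly; the helpers below (UseOrtBackend, UseGpu, SetCpuThreadNum, SetModelPath) come from the public FastDeploy C++ runtime API, and "model.onnx" is a placeholder path.

// Minimal sketch: selecting the ONNX Runtime backend via RuntimeOption,
// assuming an ONNX model file at a placeholder path "model.onnx".
#include <iostream>

#include "fastdeploy/runtime.h"

int main() {
  fastdeploy::RuntimeOption option;
  option.UseOrtBackend();     // run inference with ONNX Runtime (OrtBackend)
  option.UseGpu(0);           // corresponds to device = Device::GPU, device_id = 0
  option.SetCpuThreadNum(8);  // thread count used when running on CPU
  option.SetModelPath("model.onnx", "", fastdeploy::ModelFormat::ONNX);

  fastdeploy::Runtime runtime;
  if (!runtime.Init(option)) {
    std::cerr << "Failed to initialize FastDeploy runtime." << std::endl;
    return -1;
  }
  return 0;
}

Calling UseGpu/UseCpu on RuntimeOption fills in the device and device_id fields shown in the struct above, so application code rarely needs to touch OrtBackendOption itself.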