Skip to content

Commit db9ee35

Browse files
authored
[TensorRT EP] c4996 suppression to build with trt10.2ga on Windows (microsoft#21358)
### Description <!-- Describe your changes. --> Suppress the C4996 deprecated-API warnings (treated as errors) as a workaround so ORT builds with TRT10.2GA on Windows ### Motivation and Context <!-- - Why is this change required? What problem does it solve? - If it fixes an open issue, please link to the issue here. --> Four APIs that are used by core TRT EP code were recently declared deprecated. Temporarily suppress the deprecated-API warnings until these call sites are updated to the replacement APIs.
1 parent e5f18ba commit db9ee35

File tree

1 file changed

+70
-0
lines changed

1 file changed

+70
-0
lines changed

onnxruntime/core/providers/tensorrt/tensorrt_execution_provider.cc

Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -70,7 +70,14 @@ bool SetDynamicRange(nvinfer1::INetworkDefinition& network, std::unordered_map<s
7070
const std::string tensor_name = network.getInput(i)->getName();
7171
auto dynamic_range_iter = dynamic_range_map.find(tensor_name);
7272
if (dynamic_range_iter != dynamic_range_map.end()) {
73+
#if defined(_MSC_VER)
74+
#pragma warning(push)
75+
#pragma warning(disable : 4996)
76+
#endif
7377
if (!network.getInput(i)->setDynamicRange(-dynamic_range_iter->second, dynamic_range_iter->second)) {
78+
#if defined(_MSC_VER)
79+
#pragma warning(pop)
80+
#endif
7481
LOGS_DEFAULT(ERROR) << "Failed to set dynamic range for network input " << tensor_name;
7582
return false;
7683
}
@@ -84,7 +91,14 @@ bool SetDynamicRange(nvinfer1::INetworkDefinition& network, std::unordered_map<s
8491
const std::string tensor_name = trt_layer->getOutput(j)->getName();
8592
auto dynamic_range_iter = dynamic_range_map.find(tensor_name);
8693
if (dynamic_range_iter != dynamic_range_map.end()) {
94+
#if defined(_MSC_VER)
95+
#pragma warning(push)
96+
#pragma warning(disable : 4996)
97+
#endif
8798
if (!trt_layer->getOutput(j)->setDynamicRange(-dynamic_range_iter->second, dynamic_range_iter->second)) {
99+
#if defined(_MSC_VER)
100+
#pragma warning(pop)
101+
#endif
88102
LOGS_DEFAULT(ERROR) << "Failed to set dynamic range for tensor " << tensor_name;
89103
return false;
90104
}
@@ -122,7 +136,14 @@ bool SetDynamicRange(nvinfer1::INetworkDefinition& network, std::unordered_map<s
122136
}
123137
max_weight = std::max(max_weight, std::abs(weight));
124138
}
139+
#if defined(_MSC_VER)
140+
#pragma warning(push)
141+
#pragma warning(disable : 4996)
142+
#endif
125143
if (!trt_layer->getOutput(j)->setDynamicRange(static_cast<float>(-max_weight), static_cast<float>(max_weight))) {
144+
#if defined(_MSC_VER)
145+
#pragma warning(pop)
146+
#endif
126147
LOGS_DEFAULT(ERROR) << "Failed to set dynamic range for layer " << const_layer_name;
127148
return false;
128149
}
@@ -2232,7 +2253,14 @@ SubGraphCollection_t TensorrtExecutionProvider::GetSupportedList(SubGraphCollect
22322253
auto trt_network = std::unique_ptr<nvinfer1::INetworkDefinition>(trt_builder->createNetworkV2(network_flags));
22332254

22342255
auto trt_parser = tensorrt_ptr::unique_pointer<nvonnxparser::IParser>(nvonnxparser::createParser(*trt_network, trt_logger));
2256+
#if defined(_MSC_VER)
2257+
#pragma warning(push)
2258+
#pragma warning(disable : 4996)
2259+
#endif
22352260
trt_parser->supportsModel(string_buf.data(), string_buf.size(), parser_nodes_list, model_path_);
2261+
#if defined(_MSC_VER)
2262+
#pragma warning(pop)
2263+
#endif
22362264

22372265
SubGraphCollection_t next_nodes_list;
22382266
const std::vector<NodeIndex>& subgraph_node_index = graph_viewer->GetNodesInTopologicalOrder(1 /*priority-based topological sort*/);
@@ -3074,7 +3102,14 @@ Status TensorrtExecutionProvider::CreateNodeComputeInfoFromGraph(const GraphView
30743102
} else {
30753103
// Set INT8 per tensor dynamic range
30763104
if (int8_enable_ && trt_builder->platformHasFastInt8() && int8_calibration_cache_available_) {
3105+
#if defined(_MSC_VER)
3106+
#pragma warning(push)
3107+
#pragma warning(disable : 4996)
3108+
#endif
30773109
trt_config->setInt8Calibrator(nullptr);
3110+
#if defined(_MSC_VER)
3111+
#pragma warning(pop)
3112+
#endif
30783113
if (!SetDynamicRange(*trt_network, dynamic_range_map)) {
30793114
return ORT_MAKE_STATUS(ONNXRUNTIME, EP_FAIL,
30803115
"TensorRT EP could not set INT8 dynamic range for fused node: " + fused_node.Name());
@@ -3193,7 +3228,14 @@ Status TensorrtExecutionProvider::CreateNodeComputeInfoFromGraph(const GraphView
31933228
// Note: Creating an execution context from an engine is thread safe per TRT doc
31943229
// https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#threading
31953230
if (context_memory_sharing_enable_) {
3231+
#if defined(_MSC_VER)
3232+
#pragma warning(push)
3233+
#pragma warning(disable : 4996)
3234+
#endif
31963235
size_t mem_size = trt_engine->getDeviceMemorySize();
3236+
#if defined(_MSC_VER)
3237+
#pragma warning(pop)
3238+
#endif
31973239
if (mem_size > max_ctx_mem_size_) {
31983240
max_ctx_mem_size_ = mem_size;
31993241
}
@@ -3466,7 +3508,14 @@ Status TensorrtExecutionProvider::CreateNodeComputeInfoFromGraph(const GraphView
34663508

34673509
// Set INT8 Per Tensor Dynamic range
34683510
if (trt_state->int8_enable && trt_builder->platformHasFastInt8() && trt_state->int8_calibration_cache_available) {
3511+
#if defined(_MSC_VER)
3512+
#pragma warning(push)
3513+
#pragma warning(disable : 4996)
3514+
#endif
34693515
trt_config->setInt8Calibrator(nullptr);
3516+
#if defined(_MSC_VER)
3517+
#pragma warning(pop)
3518+
#endif
34703519
if (!SetDynamicRange(*trt_state->network->get(), trt_state->dynamic_range_map)) {
34713520
return ORT_MAKE_STATUS(ONNXRUNTIME, EP_FAIL, "TensorRT EP failed to set INT8 dynamic range.");
34723521
}
@@ -3734,7 +3783,14 @@ Status TensorrtExecutionProvider::CreateNodeComputeInfoFromGraph(const GraphView
37343783

37353784
// Set execution context memory
37363785
if (trt_state->context_memory_sharing_enable) {
3786+
#if defined(_MSC_VER)
3787+
#pragma warning(push)
3788+
#pragma warning(disable : 4996)
3789+
#endif
37373790
size_t mem_size = trt_engine->getDeviceMemorySize();
3791+
#if defined(_MSC_VER)
3792+
#pragma warning(pop)
3793+
#endif
37383794
if (mem_size > *max_context_mem_size_ptr) {
37393795
*max_context_mem_size_ptr = mem_size;
37403796
}
@@ -3865,7 +3921,14 @@ Status TensorrtExecutionProvider::CreateNodeComputeInfoFromPrecompiledEngine(con
38653921
// Note: Creating an execution context from an engine is thread safe per TRT doc
38663922
// https://docs.nvidia.com/deeplearning/tensorrt/developer-guide/index.html#threading
38673923
if (context_memory_sharing_enable_) {
3924+
#if defined(_MSC_VER)
3925+
#pragma warning(push)
3926+
#pragma warning(disable : 4996)
3927+
#endif
38683928
size_t mem_size = trt_engine->getDeviceMemorySize();
3929+
#if defined(_MSC_VER)
3930+
#pragma warning(pop)
3931+
#endif
38693932
if (mem_size > max_ctx_mem_size_) {
38703933
max_ctx_mem_size_ = mem_size;
38713934
}
@@ -4038,7 +4101,14 @@ Status TensorrtExecutionProvider::CreateNodeComputeInfoFromPrecompiledEngine(con
40384101

40394102
// Set execution context memory
40404103
if (trt_state->context_memory_sharing_enable) {
4104+
#if defined(_MSC_VER)
4105+
#pragma warning(push)
4106+
#pragma warning(disable : 4996)
4107+
#endif
40414108
size_t mem_size = trt_engine->getDeviceMemorySize();
4109+
#if defined(_MSC_VER)
4110+
#pragma warning(pop)
4111+
#endif
40424112
if (mem_size > *max_context_mem_size_ptr) {
40434113
*max_context_mem_size_ptr = mem_size;
40444114
}

0 commit comments

Comments
 (0)