@@ -17,7 +17,7 @@ endif ()
 add_definitions("-DONNX_ML=1")
 add_definitions("-DONNX_NAMESPACE=onnx")
 set(CUDA_INCLUDE_DIRS ${CUDAToolkit_INCLUDE_DIRS})
-set(TENSORRT_ROOT ${onnxruntime_TENSORRT_HOME})
+set(TENSORRT_RTX_ROOT ${onnxruntime_TENSORRT_RTX_HOME})
 set(OLD_CMAKE_CXX_FLAGS ${CMAKE_CXX_FLAGS})
 set(PROTOBUF_LIBRARY ${PROTOBUF_LIB})
 if (WIN32)
@@ -34,12 +34,12 @@ endif ()
 endif ()
 set(CXX_VERSION_DEFINED TRUE)

-find_path(TENSORRT_INCLUDE_DIR NvInfer.h
-  HINTS ${TENSORRT_ROOT}
+find_path(TENSORRT_RTX_INCLUDE_DIR NvInfer.h
+  HINTS ${TENSORRT_RTX_ROOT}
   PATH_SUFFIXES include)


-file(READ ${TENSORRT_INCLUDE_DIR}/NvInferVersion.h NVINFER_VER_CONTENT)
+file(READ ${TENSORRT_RTX_INCLUDE_DIR}/NvInferVersion.h NVINFER_VER_CONTENT)
 string(REGEX MATCH "define TRT_MAJOR_RTX * +([0-9]+)" NV_TRT_MAJOR_RTX "${NVINFER_VER_CONTENT}")
 string(REGEX REPLACE "define TRT_MAJOR_RTX * +([0-9]+)" "\\1" NV_TRT_MAJOR_RTX "${NV_TRT_MAJOR_RTX}")
 string(REGEX MATCH "define TRT_MINOR_RTX * +([0-9]+)" NV_TRT_MINOR_RTX "${NVINFER_VER_CONTENT}")
@@ -54,37 +54,37 @@ endif ()
 endif ()

 if (WIN32)
-  set(NVINFER_LIB "tensorrt_rtx_${NV_TRT_MAJOR_RTX}_${NV_TRT_MINOR_RTX}")
-  set(PARSER_LIB "tensorrt_onnxparser_rtx_${NV_TRT_MAJOR_RTX}_${NV_TRT_MINOR_RTX}")
+  set(TRT_RTX_LIB "tensorrt_rtx_${NV_TRT_MAJOR_RTX}_${NV_TRT_MINOR_RTX}")
+  set(RTX_PARSER_LIB "tensorrt_onnxparser_rtx_${NV_TRT_MAJOR_RTX}_${NV_TRT_MINOR_RTX}")
 endif ()

-if (NOT NVINFER_LIB)
-  set(NVINFER_LIB "tensorrt_rtx")
+if (NOT TRT_RTX_LIB)
+  set(TRT_RTX_LIB "tensorrt_rtx")
 endif ()

-if (NOT PARSER_LIB)
-  set(PARSER_LIB "tensorrt_onnxparser_rtx")
+if (NOT RTX_PARSER_LIB)
+  set(RTX_PARSER_LIB "tensorrt_onnxparser_rtx")
 endif ()

-MESSAGE(STATUS "Looking for ${NVINFER_LIB}")
+MESSAGE(STATUS "Looking for ${TRT_RTX_LIB}")

-find_library(TENSORRT_LIBRARY_INFER ${NVINFER_LIB}
-  HINTS ${TENSORRT_ROOT}
+find_library(TENSORRT_LIBRARY_INFER ${TRT_RTX_LIB}
+  HINTS ${TENSORRT_RTX_ROOT}
   PATH_SUFFIXES lib lib64 lib/x64)

 if (NOT TENSORRT_LIBRARY_INFER)
-  MESSAGE(STATUS "Can't find ${NVINFER_LIB}")
+  MESSAGE(STATUS "Can't find ${TRT_RTX_LIB}")
 endif ()

 if (onnxruntime_USE_TENSORRT_BUILTIN_PARSER)
-  MESSAGE(STATUS "Looking for ${PARSER_LIB}")
+  MESSAGE(STATUS "Looking for ${RTX_PARSER_LIB}")

-  find_library(TENSORRT_LIBRARY_NVONNXPARSER ${PARSER_LIB}
-    HINTS ${TENSORRT_ROOT}
+  find_library(TENSORRT_LIBRARY_NVONNXPARSER ${RTX_PARSER_LIB}
+    HINTS ${TENSORRT_RTX_ROOT}
     PATH_SUFFIXES lib lib64 lib/x64)

 if (NOT TENSORRT_LIBRARY_NVONNXPARSER)
-  MESSAGE(STATUS "Can't find ${PARSER_LIB}")
+  MESSAGE(STATUS "Can't find ${RTX_PARSER_LIB}")
 endif ()

 set(TENSORRT_LIBRARY ${TENSORRT_LIBRARY_INFER} ${TENSORRT_LIBRARY_NVONNXPARSER})
@@ -104,7 +104,6 @@ endif ()
 # The onnx_tensorrt repo contains a test program, getSupportedAPITest, which doesn't support Windows. It uses
 # unistd.h. So we must exclude it from our build. onnxruntime_fetchcontent_makeavailable is for the purpose.
 onnxruntime_fetchcontent_makeavailable(onnx_tensorrt)
-include_directories(${onnx_tensorrt_SOURCE_DIR})
 set(CMAKE_CXX_FLAGS ${OLD_CMAKE_CXX_FLAGS})
 if (CMAKE_COMPILER_IS_GNUCC)
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-unused-parameter")
@@ -114,17 +113,16 @@ endif ()
 unset(PROTOBUF_LIBRARY)
 unset(OLD_CMAKE_CXX_FLAGS)
 unset(OLD_CMAKE_CUDA_FLAGS)
-set_target_properties(${PARSER_LIB} PROPERTIES LINK_FLAGS "/ignore:4199")
+set_target_properties(${RTX_PARSER_LIB} PROPERTIES LINK_FLAGS "/ignore:4199")
 target_compile_options(nvonnxparser_static PRIVATE /FIio.h /wd4100)
-target_compile_options(${PARSER_LIB} PRIVATE /FIio.h /wd4100)
+target_compile_options(${RTX_PARSER_LIB} PRIVATE /FIio.h /wd4100)
 endif ()
 # Static libraries are just nvonnxparser_static on all platforms
 set(onnxparser_link_libs nvonnxparser_static)
 set(TENSORRT_LIBRARY ${TENSORRT_LIBRARY_INFER})
 MESSAGE(STATUS "Find TensorRT libs at ${TENSORRT_LIBRARY}")
 endif ()

-include_directories(${TENSORRT_INCLUDE_DIR})
 # ${TENSORRT_LIBRARY} is empty if we link nvonnxparser_static.
 # nvonnxparser_static is linked against tensorrt libraries in onnx-tensorrt
 # See https://github.com/onnx/onnx-tensorrt/blob/8af13d1b106f58df1e98945a5e7c851ddb5f0791/CMakeLists.txt#L121
@@ -152,7 +150,7 @@ endif ()
 else ()
   target_link_libraries(onnxruntime_providers_nv_tensorrt_rtx PRIVATE ${onnxparser_link_libs} ${trt_link_libs} ${ONNXRUNTIME_PROVIDERS_SHARED} ${PROTOBUF_LIB} flatbuffers::flatbuffers ${ABSEIL_LIBS} PUBLIC CUDA::cudart)
 endif ()
-target_include_directories(onnxruntime_providers_nv_tensorrt_rtx PRIVATE ${ONNXRUNTIME_ROOT} ${CMAKE_CURRENT_BINARY_DIR}
+target_include_directories(onnxruntime_providers_nv_tensorrt_rtx PRIVATE ${ONNXRUNTIME_ROOT} ${CMAKE_CURRENT_BINARY_DIR} ${TENSORRT_RTX_INCLUDE_DIR} ${onnx_tensorrt_SOURCE_DIR}
   PUBLIC ${CUDAToolkit_INCLUDE_DIRS})

 # ${CMAKE_CURRENT_BINARY_DIR} is so that #include "onnxruntime_config.h" inside tensor_shape.h is found
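
Note on the version parsing above: the two-step REGEX MATCH / REGEX REPLACE first grabs the whole "define TRT_MAJOR_RTX <n>" fragment out of NvInferVersion.h, then reduces it to the captured number. A minimal standalone sketch, runnable with cmake -P; the header content is faked inline and the file name version_probe.cmake is a hypothetical stand-in:

    # version_probe.cmake -- illustration only, not part of the ORT build.
    # Stand-in for the text that file(READ ...) pulls out of NvInferVersion.h.
    set(NVINFER_VER_CONTENT "#define TRT_MAJOR_RTX 1\n#define TRT_MINOR_RTX 2\n")
    # Step 1: MATCH captures the whole 'define TRT_MAJOR_RTX 1' fragment.
    string(REGEX MATCH "define TRT_MAJOR_RTX * +([0-9]+)" NV_TRT_MAJOR_RTX "${NVINFER_VER_CONTENT}")
    # Step 2: REPLACE strips that fragment down to the captured digits.
    string(REGEX REPLACE "define TRT_MAJOR_RTX * +([0-9]+)" "\\1" NV_TRT_MAJOR_RTX "${NV_TRT_MAJOR_RTX}")
    message(STATUS "TRT_MAJOR_RTX = ${NV_TRT_MAJOR_RTX}")  # prints 1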
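
Note on the dropped include_directories calls: both removed lines are folded into the provider's target_include_directories, so the TensorRT-RTX and onnx-tensorrt headers are now scoped to the one target that needs them instead of leaking into every target in the directory. A hedged sketch of the general pattern, where my_tool and main.cc are hypothetical stand-ins:

    # Before: directory-scoped, visible to all targets defined below this point.
    #   include_directories(${TENSORRT_RTX_INCLUDE_DIR} ${onnx_tensorrt_SOURCE_DIR})
    # After: target-scoped, visible only to the consumer that asked for it.
    add_executable(my_tool main.cc)
    target_include_directories(my_tool PRIVATE ${TENSORRT_RTX_INCLUDE_DIR} ${onnx_tensorrt_SOURCE_DIR})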