Skip to content

Commit bda52f6

Browse files
committed
Merge branch 'master' into sync_msft_17_7_25
2 parents 87a7ac0 + 7fe617c commit bda52f6

File tree

217 files changed

+5655
-1777
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

217 files changed

+5655
-1777
lines changed

.github/workflows/linux-dnnl.yml

Lines changed: 0 additions & 40 deletions
This file was deleted.

.github/workflows/linux_migraphx_ci.yml

Lines changed: 0 additions & 40 deletions
This file was deleted.

.github/workflows/windows_webgpu.yml

Lines changed: 2 additions & 0 deletions
```diff
@@ -22,6 +22,7 @@ jobs:
   strategy:
     matrix:
       vcpkg_option: [novcpkg, vcpkg]
+      wgsl_template: [static, dynamic]
   env:
     OrtPackageId: Microsoft.ML.OnnxRuntime
     OnnxRuntimeBuildDirectory: ${{ github.workspace }}
@@ -123,6 +124,7 @@ jobs:
           --build_nodejs `
           --build_java `
           --use_webgpu `
+          --wgsl_template ${{ matrix.wgsl_template }} `
           ${{ matrix.vcpkg_option == 'vcpkg' && '--use_vcpkg' || '' }} `
           --cmake_extra_defines `
           onnxruntime_BUILD_UNIT_TESTS=ON `
```

.github/workflows/windows_x64_release_dnnl_build_x64_release.yml

Lines changed: 0 additions & 132 deletions
This file was deleted.

cmake/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
```diff
@@ -151,6 +151,7 @@ option(onnxruntime_DISABLE_SPARSE_TENSORS "Disable sparse tensors data types" OF
 option(onnxruntime_DISABLE_OPTIONAL_TYPE "Disable optional type" OFF)
 option(onnxruntime_DISABLE_FLOAT8_TYPES "Disable float 8 types" OFF)
 option(onnxruntime_MINIMAL_BUILD "Exclude as much as possible from the build. Support ORT format models. No support for ONNX format models." OFF)
+option(onnxruntime_CLIENT_PACKAGE_BUILD "Enables default settings that are more appropriate for client/on-device workloads." OFF)
 cmake_dependent_option(onnxruntime_DISABLE_RTTI "Disable RTTI" ON "NOT onnxruntime_ENABLE_PYTHON;NOT onnxruntime_USE_CUDA" OFF)
 # For now onnxruntime_DISABLE_EXCEPTIONS will only work with onnxruntime_MINIMAL_BUILD, more changes (ONNX, non-CPU EP, ...) are required to run this standalone
 cmake_dependent_option(onnxruntime_DISABLE_EXCEPTIONS "Disable exception handling. Requires onnxruntime_MINIMAL_BUILD currently." ON "onnxruntime_MINIMAL_BUILD;NOT onnxruntime_ENABLE_PYTHON" OFF)
```

cmake/adjust_global_compile_flags.cmake

Lines changed: 5 additions & 0 deletions
```diff
@@ -95,6 +95,11 @@ if (onnxruntime_MINIMAL_BUILD)
   endif()
 endif()
 
+# ORT build with default settings more appropriate for client/on-device workloads.
+if (onnxruntime_CLIENT_PACKAGE_BUILD)
+  add_compile_definitions(ORT_CLIENT_PACKAGE_BUILD)
+endif()
+
 if (onnxruntime_ENABLE_LTO)
   include(CheckIPOSupported)
   check_ipo_supported(RESULT ipo_enabled OUTPUT ipo_output)
```

cmake/external/onnxruntime_external_deps.cmake

Lines changed: 18 additions & 7 deletions
```diff
@@ -774,13 +774,24 @@ if (onnxruntime_USE_WEBGPU)
   endif()
 
   if (NOT CMAKE_SYSTEM_NAME STREQUAL "Emscripten" AND onnxruntime_WGSL_TEMPLATE STREQUAL "dynamic")
-    onnxruntime_fetchcontent_declare(
-      duktape
-      URL ${DEP_URL_duktape}
-      URL_HASH SHA1=${DEP_SHA1_duktape}
-      EXCLUDE_FROM_ALL
-    )
-    onnxruntime_fetchcontent_makeavailable(duktape)
+    if(onnxruntime_USE_VCPKG)
+      find_package(unofficial-duktape CONFIG REQUIRED)
+      add_library(duktape_static ALIAS unofficial::duktape::duktape)
+    else()
+      onnxruntime_fetchcontent_declare(
+        duktape
+        URL ${DEP_URL_duktape}
+        URL_HASH SHA1=${DEP_SHA1_duktape}
+        EXCLUDE_FROM_ALL
+      )
+      onnxruntime_fetchcontent_makeavailable(duktape)
+
+      if(NOT TARGET duktape_static)
+        add_library(duktape_static STATIC "${duktape_SOURCE_DIR}/src/duktape.c")
+        target_compile_features(duktape_static PRIVATE c_std_99)
+        target_include_directories(duktape_static INTERFACE $<BUILD_INTERFACE:${duktape_SOURCE_DIR}/src>)
+      endif()
+    endif()
   endif()
 endif()
 
```

cmake/onnxruntime_mlas.cmake

Lines changed: 1 addition & 0 deletions
```diff
@@ -31,6 +31,7 @@ onnxruntime_add_static_library(onnxruntime_mlas
   ${MLAS_SRC_DIR}/eltwise.cpp
   ${MLAS_SRC_DIR}/erf.cpp
   ${MLAS_SRC_DIR}/compute.cpp
+  ${MLAS_SRC_DIR}/dequantize.cpp
   ${MLAS_SRC_DIR}/quantize.cpp
   ${MLAS_SRC_DIR}/qgemm_kernel_default.cpp
   ${MLAS_SRC_DIR}/qladd.cpp
```

cmake/onnxruntime_providers_tensorrt.cmake

Lines changed: 5 additions & 18 deletions
```diff
@@ -72,26 +72,21 @@
   endif()
 
   # TensorRT 10 GA onwards, the TensorRT libraries will have major version appended to the end on Windows,
-  # for example, nvinfer_10.dll, nvinfer_plugin_10.dll, nvonnxparser_10.dll ...
+  # for example, nvinfer_10.dll, nvonnxparser_10.dll ...
   if (WIN32 AND TRT_GREATER_OR_EQUAL_TRT_10_GA)
     set(NVINFER_LIB "nvinfer_${NV_TENSORRT_MAJOR}")
-    set(NVINFER_PLUGIN_LIB "nvinfer_plugin_${NV_TENSORRT_MAJOR}")
     set(PARSER_LIB "nvonnxparser_${NV_TENSORRT_MAJOR}")
   endif()
 
   if (NOT NVINFER_LIB)
     set(NVINFER_LIB "nvinfer")
   endif()
 
-  if (NOT NVINFER_PLUGIN_LIB)
-    set(NVINFER_PLUGIN_LIB "nvinfer_plugin")
-  endif()
-
   if (NOT PARSER_LIB)
     set(PARSER_LIB "nvonnxparser")
   endif()
 
-  MESSAGE(STATUS "Looking for ${NVINFER_LIB} and ${NVINFER_PLUGIN_LIB}")
+  MESSAGE(STATUS "Looking for ${NVINFER_LIB}")
 
   find_library(TENSORRT_LIBRARY_INFER ${NVINFER_LIB}
     HINTS ${TENSORRT_ROOT}
@@ -101,14 +96,6 @@
     MESSAGE(STATUS "Can't find ${NVINFER_LIB}")
   endif()
 
-  find_library(TENSORRT_LIBRARY_INFER_PLUGIN ${NVINFER_PLUGIN_LIB}
-    HINTS ${TENSORRT_ROOT}
-    PATH_SUFFIXES lib lib64 lib/x64)
-
-  if (NOT TENSORRT_LIBRARY_INFER_PLUGIN)
-    MESSAGE(STATUS "Can't find ${NVINFER_PLUGIN_LIB}")
-  endif()
-
   if (onnxruntime_USE_TENSORRT_BUILTIN_PARSER)
     MESSAGE(STATUS "Looking for ${PARSER_LIB}")
 
@@ -120,7 +107,7 @@
       MESSAGE(STATUS "Can't find ${PARSER_LIB}")
     endif()
 
-    set(TENSORRT_LIBRARY ${TENSORRT_LIBRARY_INFER} ${TENSORRT_LIBRARY_INFER_PLUGIN} ${TENSORRT_LIBRARY_NVONNXPARSER})
+    set(TENSORRT_LIBRARY ${TENSORRT_LIBRARY_INFER} ${TENSORRT_LIBRARY_NVONNXPARSER})
     MESSAGE(STATUS "Find TensorRT libs at ${TENSORRT_LIBRARY}")
   else()
     if (TRT_GREATER_OR_EQUAL_TRT_10_GA)
@@ -153,15 +140,15 @@
     endif()
     # Static libraries are just nvonnxparser_static on all platforms
     set(onnxparser_link_libs nvonnxparser_static)
-    set(TENSORRT_LIBRARY ${TENSORRT_LIBRARY_INFER} ${TENSORRT_LIBRARY_INFER_PLUGIN})
+    set(TENSORRT_LIBRARY ${TENSORRT_LIBRARY_INFER})
     MESSAGE(STATUS "Find TensorRT libs at ${TENSORRT_LIBRARY}")
   endif()
 
   # ${TENSORRT_LIBRARY} is empty if we link nvonnxparser_static.
   # nvonnxparser_static is linked against tensorrt libraries in onnx-tensorrt
   # See https://github.com/onnx/onnx-tensorrt/blob/8af13d1b106f58df1e98945a5e7c851ddb5f0791/CMakeLists.txt#L121
   # However, starting from TRT 10 GA, nvonnxparser_static doesn't link against tensorrt libraries.
-  # Therefore, the above code finds ${TENSORRT_LIBRARY_INFER} and ${TENSORRT_LIBRARY_INFER_PLUGIN}.
+  # Therefore, the above code finds ${TENSORRT_LIBRARY_INFER}.
   if(onnxruntime_CUDA_MINIMAL)
     set(trt_link_libs ${CMAKE_DL_LIBS} ${TENSORRT_LIBRARY})
   else()
```

0 commit comments

Comments (0)