1- # Executes benchmarks implemented in this repository
2- # using scripts for benchmark results visualization,
3- # which are downloaded from Unified Runtime repository.
1+ # Executes benchmarks implemented in this repository using scripts
2+ # for results visualization from intel/llvm (unified-runtime dir).
43name : Benchmarks
54
65on :
@@ -98,23 +97,23 @@ jobs:
9897 - name : Build UMF
9998 run : cmake --build ${{env.BUILD_DIR}} -j $(nproc)
10099
101- # We are going to clone Unified Runtime repository in order to run
102- # the most up-to-date UR scripts for benchmark data visualization
103- - name : Checkout UR
100+ # Get scripts for benchmark data visualization.
101+ # Use specific tag, as the scripts or files' location may change.
102+ - name : Checkout SYCL
104103 uses : actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
105104 with :
106- repository : oneapi-src/unified-runtime
107- path : ur-repo
105+ repository : intel/llvm
106+ ref : nightly-2025-02-19
107+ path : sycl-repo
108108 fetch-depth : 1
109- fetch-tags : false
110109
111- - name : Install pip packages for benchmarking scripts from UR
110+ - name : Install benchmarking scripts deps
112111 run : |
113- pip install --force-reinstall -r ${{github.workspace}}/ur-repo/third_party/benchmark_requirements.txt
112+ pip install --force-reinstall -r ${{github.workspace}}/sycl-repo/unified-runtime/third_party/benchmark_requirements.txt
114113
115114 - name : Set core range and GPU mask
116115 run : |
117- # Compute the core range for the second NUMA node; first node is for UR jobs.
116+ # Compute the core range for the second NUMA node; first node is for SYCL/UR jobs.
118117 # Skip the first 4 cores - the kernel is likely to schedule more work on these.
119118 CORES=$(lscpu | awk '
120119 /NUMA node1 CPU|On-line CPU/ {line=$0}
@@ -130,18 +129,21 @@ jobs:
130129 ZE_AFFINITY_MASK=1
131130 echo "ZE_AFFINITY_MASK=$ZE_AFFINITY_MASK" >> $GITHUB_ENV
132131
133- - name : Run UMF benchmarks (using scripts from UR)
132+ - name : Run UMF benchmarks
134133 id : benchmarks
135134 working-directory : ${{env.BUILD_DIR}}
136135 run : >
137- taskset -c ${{ env.CORES }} ${{ github.workspace }}/ur-repo/scripts/benchmarks/main.py
136+ taskset -c ${{ env.CORES }} ${{ github.workspace }}/sycl-repo/unified-runtime/scripts/benchmarks/main.py
138137 ~/bench_workdir_umf
139138 --umf ${{env.BUILD_DIR}}
139+ --compare baseline
140140 ${{ inputs.upload_report && '--output-html' || '' }}
141+ ${{ inputs.pr_no != 0 && '--output-markdown' || '' }}
141142 ${{ inputs.bench_script_params }}
142143
144+ # In case it failed to add a comment, we can still print the results.
143145 - name : Print benchmark results
144- if : ${{ always() }}
146+ if : ${{ always() && inputs.pr_no != 0 }}
145147 run : cat ${{env.BUILD_DIR}}/benchmark_results.md
146148
147149 - name : Add comment to PR
0 commit comments