Commit 10d7f19

Auto trigger multi-gpu CI
Signed-off-by: Tailing Yuan <yuantailing@gmail.com>
1 parent 8334235 commit 10d7f19

File tree: 2 files changed (+10, -3 lines)


jenkins/L0_MergeRequest.groovy (8 additions, 1 deletion)
@@ -627,9 +627,16 @@ def getAutoTriggerTagList(pipeline, testFilter, globalVars) {
         return autoTriggerTagList
     }
     def specialFileToTagMap = [
-        "tensorrt_llm/_torch/models/modeling_deepseekv3.py": ["-DeepSeek-"],
         "cpp/kernels/fmha_v2/": ["-FMHA-"],
+        "examples/layer_wise_benchmarks/config_ctx.yaml": ["DGX_B200-4_GPUs-PyTorch-Post-Merge"],
+        "examples/layer_wise_benchmarks/config_gen.yaml": ["DGX_B200-4_GPUs-PyTorch-Post-Merge"],
+        "examples/layer_wise_benchmarks/mpi_launch.sh": ["DGX_B200-4_GPUs-PyTorch-Post-Merge"],
+        "examples/layer_wise_benchmarks/run_single.py": ["DGX_B200-4_GPUs-PyTorch-Post-Merge"],
+        "examples/layer_wise_benchmarks/run_single.sh": ["DGX_B200-4_GPUs-PyTorch-Post-Merge"],
+        "tensorrt_llm/_torch/models/modeling_deepseekv3.py": ["-DeepSeek-"],
         "tensorrt_llm/_torch/models/modeling_gpt_oss.py": ["-GptOss-"],
+        "tensorrt_llm/tools/layer_wise_benchmarks/": ["DGX_B200-4_GPUs-PyTorch-Post-Merge"],
+        "tests/unittest/tools/test_layer_wise_benchmarks.py": ["DGX_B200-4_GPUs-PyTorch-Post-Merge"],
     ]
     for (file in changedFileList) {
         for (String key : specialFileToTagMap.keySet()) {
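
The net effect of this hunk is that a change touching any of the layer-wise benchmark files or directories now auto-triggers the DGX_B200-4_GPUs-PyTorch-Post-Merge stage, alongside the existing -DeepSeek-, -FMHA-, and -GptOss- tags. A minimal Groovy sketch of the assumed matching step follows; only the map entries and the two loop headers are part of this diff, so the startsWith() prefix check and the unique() de-duplication are illustrative assumptions, not code from this commit.

// Sketch only: assumed prefix/exact-path matching between changed files and map keys.
def specialFileToTagMap = [
    "examples/layer_wise_benchmarks/run_single.py": ["DGX_B200-4_GPUs-PyTorch-Post-Merge"],
    "tensorrt_llm/_torch/models/modeling_deepseekv3.py": ["-DeepSeek-"],
]
def changedFileList = ["examples/layer_wise_benchmarks/run_single.py", "README.md"]
def autoTriggerTagList = []
for (file in changedFileList) {
    for (String key : specialFileToTagMap.keySet()) {
        if (file.startsWith(key)) {              // assumption: keys act as path prefixes
            autoTriggerTagList += specialFileToTagMap[key]
        }
    }
}
println autoTriggerTagList.unique()              // [DGX_B200-4_GPUs-PyTorch-Post-Merge]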

tests/integration/test_lists/test-db/l0_dgx_b200.yml (2 additions, 2 deletions)
@@ -18,8 +18,6 @@ l0_dgx_b200:
   - unittest/_torch/multi_gpu_modeling -k "deepseek"
   - unittest/_torch/modules/test_fused_moe.py::test_fused_moe_alltoall_fp4[DeepEPLowLatency]
   - unittest/_torch/modules/test_fused_moe.py::test_fused_moe_alltoall_fp4[MNNVL]
-  - unittest/tools/test_layer_wise_benchmarks.py::test_deepseek_r1_ctx_tep
-  - unittest/tools/test_layer_wise_benchmarks.py::test_deepseek_r1_gen_scaled_from_16_dep
   - accuracy/test_llm_api_pytorch.py::TestLlama3_1_8BInstruct::test_bfloat16_4gpus[pp4-attn_backend=TRTLLM-torch_compile=False]
   - accuracy/test_llm_api_pytorch.py::TestLlama3_1_8BInstruct::test_fp8_4gpus[tp4-fp8kv=True-attn_backend=TRTLLM-torch_compile=False]
   - accuracy/test_llm_api_pytorch.py::TestLlama3_1_8BInstruct::test_fp8_4gpus[pp4-fp8kv=True-attn_backend=TRTLLM-torch_compile=False]
@@ -144,6 +142,8 @@ l0_dgx_b200:
       orchestrator: mpi
   tests:
   - unittest/_torch/modules/test_fused_moe.py::test_fused_moe_alltoall_fp4[DeepEP]
+  - unittest/tools/test_layer_wise_benchmarks.py::test_deepseek_r1_ctx_tep
+  - unittest/tools/test_layer_wise_benchmarks.py::test_deepseek_r1_gen_scaled_from_16_dep
   - accuracy/test_llm_api_pytorch.py::TestLlama3_1_8BInstruct::test_bfloat16_4gpus[tp4-attn_backend=FLASHINFER-torch_compile=False]
   - accuracy/test_llm_api_pytorch.py::TestLlama3_1_8BInstruct::test_fp8_4gpus[tp4-fp8kv=False-attn_backend=FLASHINFER-torch_compile=False]
   - accuracy/test_llm_api_pytorch.py::TestLlama3_1_8BInstruct::test_fp8_4gpus[pp4-fp8kv=False-attn_backend=TRTLLM-torch_compile=False]