Skip to content

Commit 1053b65

Browse files
committed
bugfix: resolve compilation issues when building without NPU TORCH.
1 parent 6cac5ab commit 1053b65

File tree

3 files changed

+6
-3
lines changed

3 files changed

+6
-3
lines changed

CMakeLists.txt

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -330,7 +330,7 @@ else()
330330
endif()
331331

332332
if(USE_NPU)
333-
add_definitions(-DUSE_NPU_TORCH)
333+
# add_definitions(-DUSE_NPU_TORCH)
334334
add_definitions(-DUSE_NPU)
335335
add_definitions(-DBUILD_LIBTORCH)
336336
add_definitions(-DTORCH_SETCUSTOMHANDLER=ON)

xllm/models/llm/qwen3.h

Lines changed: 1 addition & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -45,7 +45,7 @@ class QWen3ModelImpl : public LlmModelImplBase<QWen3DecoderLayer> {
4545
xllm::layer::RmsNorm(
4646
model_args.hidden_size(), model_args.rms_norm_eps(), options));
4747
#else
48-
norm_ = register_module("norm", layer::RmsNorm(context));
48+
norm_ = register_module("norm", layer::NpuRmsNorm(context));
4949
#endif
5050
for (auto i = 0; i < FLAGS_micro_batch_num; i++) {
5151
#if defined(USE_NPU_TORCH)

xllm/models/llm/qwen3_moe.h

Lines changed: 4 additions & 1 deletion
Original file line number · Diff line number · Diff line change
@@ -367,10 +367,13 @@ class Qwen3MoeModelImpl : public torch::nn::Module {
367367
torch::Dtype dtype_;
368368
layer::WordEmbedding embed_tokens_{nullptr};
369369
layer::AttentionMask attn_mask_;
370-
layer::RmsNorm norm_{nullptr};
370+
371371
#if defined(USE_NPU)
372372
torch::Tensor cos_sin_;
373373
layer::PosEmbedding atb_pos_emb_{nullptr};
374+
layer::NpuRmsNorm norm_{nullptr};
375+
#else
376+
layer::RmsNorm norm_{nullptr};
374377
#endif
375378
std::vector<int64_t> mrope_section_;
376379
};

0 commit comments

Comments
 (0)