Commit 71acc8d

henryxuxu0716 and 刘哲续 authored
For nz unset in bf16&fp16 (#4495)
### What this PR does / why we need it?

Disable NZ format for the float (bf16/fp16) weight case. This is only a quick fix for the dev branch; for the main branch, we'll consider more cases to make the handling more general.

### Does this PR introduce _any_ user-facing change?

### How was this patch tested?

qwen2.5 32B

<img width="441" height="221" alt="image" src="https://github.com/user-attachments/assets/7ae18ffd-1ce2-43d9-9960-be45250ad0da" />

---------

Signed-off-by: 刘哲续 <liuzhexu1@huawei.com>
Co-authored-by: 刘哲续 <liuzhexu1@huawei.com>
1 parent 96c3623 commit 71acc8d
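
In short: weight-processing call sites now pass the weight's dtype into `is_enable_nz()`, which refuses NZ for bf16/fp16 so float weights stay in the default ND format. Below is a minimal sketch of the resulting call pattern; the helper `maybe_cast_to_nz` and the local `ACL_FORMAT_FRACTAL_NZ` constant are hypothetical, added here only for illustration, and the cache behind `is_enable_nz()` is assumed to have been initialized at worker start-up.

```python
import torch
import torch_npu  # Ascend PyTorch adapter; assumed available on an NPU host

from vllm_ascend.utils import is_enable_nz

# Assumed value, matching the literal 29 used for FRACTAL_NZ in torchair_sfa.py.
ACL_FORMAT_FRACTAL_NZ = 29


def maybe_cast_to_nz(weight: torch.Tensor) -> torch.Tensor:
    """Hypothetical helper: cast a weight to FRACTAL_NZ only when allowed."""
    # With this commit, is_enable_nz(torch.float16) and is_enable_nz(torch.bfloat16)
    # return False, so float weights are left in their original ND format.
    if is_enable_nz(weight.dtype):
        weight = torch_npu.npu_format_cast(weight, ACL_FORMAT_FRACTAL_NZ)
    return weight
```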

File tree

10 files changed (+16, -14 lines)


vllm_ascend/attention/mla_v1.py

Lines changed: 1 addition & 1 deletion
@@ -652,7 +652,7 @@ def get_and_maybe_dequant_weights(layer: LinearBase):
 
         # Function `get_and_maybe_dequant_weights` will cast the weights to
         # FRACTAL_AND. So we need to cast to FRACTAL_NZ again.
-        if is_enable_nz():
+        if is_enable_nz(self.kv_b_proj.weight.data.dtype):
             self.kv_b_proj.weight.data = torch_npu.npu_format_cast(
                 self.kv_b_proj.weight.data, ACL_FORMAT_FRACTAL_NZ)
 

vllm_ascend/models/qwen2_5_vl.py

Lines changed: 2 additions & 2 deletions
@@ -284,7 +284,7 @@ def pad_qkv_weight(self, data):
                                          dim=2)
         qkv_weight_final = qkv_weight_padded.reshape(-1, self.hidden_size)
 
-        if is_enable_nz():
+        if is_enable_nz(qkv_weight_final.dtype):
             qkv_weight_final_copy = torch.empty_like(qkv_weight_final).copy_(
                 qkv_weight_final)
             qkv_weight_final_copy = torch_npu.npu_format_cast(
@@ -300,7 +300,7 @@ def pad_proj_weight(self, data):
             (0, self.half_pad_hidden_size_per_attention_head, 0, 0)).reshape(
                 self.hidden_size, -1)
 
-        if is_enable_nz():
+        if is_enable_nz(out_weight.dtype):
             out_weight_copy = torch.empty_like(out_weight).copy_(out_weight)
             out_weight_copy = torch_npu.npu_format_cast(
                 out_weight_copy, ACL_FORMAT_FRACTAL_ND)

vllm_ascend/models/qwen2_vl.py

Lines changed: 2 additions & 2 deletions
@@ -268,7 +268,7 @@ def pad_qkv_weight(self, data):
                                          dim=2)
         qkv_weight_final = qkv_weight_padded.reshape(-1, self.hidden_size)
 
-        if is_enable_nz():
+        if is_enable_nz(qkv_weight_final.dtype):
             qkv_weight_final_copy = torch.empty_like(qkv_weight_final).copy_(
                 qkv_weight_final)
             qkv_weight_final_copy = torch_npu.npu_format_cast(
@@ -284,7 +284,7 @@ def pad_proj_weight(self, data):
             (0, self.half_pad_hidden_size_per_attention_head, 0, 0)).reshape(
                 self.hidden_size, -1)
 
-        if is_enable_nz():
+        if is_enable_nz(out_weight.dtype):
             out_weight_copy = torch.empty_like(out_weight).copy_(out_weight)
             out_weight_copy = torch_npu.npu_format_cast(
                 out_weight_copy, ACL_FORMAT_FRACTAL_ND)

vllm_ascend/ops/common_fused_moe.py

Lines changed: 1 addition & 1 deletion
@@ -76,7 +76,7 @@ def process_weights_after_loading(self, layer):
         w2_data = self._maybe_pad_weight(layer.w2_weight.data)
         layer.w2_weight = torch.nn.Parameter(w2_data, requires_grad=False)
 
-        if not is_310p() and is_enable_nz():
+        if not is_310p() and is_enable_nz(layer.w13_weight.data.dtype):
             layer.w13_weight.data = torch_npu.npu_format_cast(
                 layer.w13_weight.data, ACL_FORMAT_FRACTAL_NZ)
             layer.w2_weight.data = torch_npu.npu_format_cast(

vllm_ascend/ops/linear.py

Lines changed: 1 addition & 2 deletions
@@ -45,8 +45,7 @@ class AscendUnquantizedLinearMethod(UnquantizedLinearMethod):
 
     def process_weights_after_loading(self, layer: torch.nn.Module) -> None:
         super().process_weights_after_loading(layer)
-        if (is_enable_nz() and layer.weight.data.dtype
-                in [torch.float16, torch.bfloat16]):
+        if (is_enable_nz(layer.weight.data.dtype)):
             layer.weight.data = torch_npu.npu_format_cast(
                 layer.weight.data, ACL_FORMAT_FRACTAL_NZ)
 
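Worth noting: this hunk does not just move the dtype check, it flips it. Before, an unquantized linear weight was cast to NZ only when its dtype was fp16/bf16 (and NZ was enabled); after, `is_enable_nz()` returns False for exactly those dtypes (see the `vllm_ascend/utils.py` hunk further down), so these float weights are no longer cast at all. A small self-contained sketch of the two predicates, using a hypothetical `nz_globally_enabled` flag in place of the cached `_ENABLE_NZ`:

```python
import torch

# Hypothetical stand-in for the cached _ENABLE_NZ flag; illustration only.
nz_globally_enabled = True


def should_cast_before(dtype: torch.dtype) -> bool:
    # Pre-commit condition, reconstructed from the removed lines above.
    return nz_globally_enabled and dtype in [torch.float16, torch.bfloat16]


def should_cast_after(dtype: torch.dtype) -> bool:
    # Post-commit condition: the dtype guard now lives inside is_enable_nz()
    # and excludes fp16/bf16 instead of requiring them.
    if dtype in [torch.float16, torch.bfloat16]:
        return False
    return nz_globally_enabled


print(should_cast_before(torch.bfloat16), should_cast_after(torch.bfloat16))  # True False
```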

vllm_ascend/torchair/torchair_sfa.py

Lines changed: 2 additions & 2 deletions
@@ -842,7 +842,7 @@ def _process_weights_for_fused_mlapo(self, act_dtype: torch.dtype):
         wd_qkv = wd_qkv.t().contiguous()
         wd_qkv = transdata(wd_qkv,
                            block_size=(16, 32)).unsqueeze(0).contiguous()
-        if is_enable_nz():
+        if is_enable_nz(wd_qkv.dtype):
             self.wd_qkv = torch_npu.npu_format_cast(wd_qkv, 29)
 
         kv_a_proj_deq_scl = self.kv_a_proj_with_mqa.deq_scale.clone()
@@ -876,7 +876,7 @@ def _process_weights_for_fused_mlapo(self, act_dtype: torch.dtype):
             self.num_heads * (self.qk_nope_head_dim + self.qk_rope_head_dim),
             -1)
         wu_q = transdata(wu_q, block_size=(16, 32)).unsqueeze(0).contiguous()
-        if is_enable_nz():
+        if is_enable_nz(wu_q.dtype):
             self.wu_q = torch_npu.npu_format_cast(wu_q, 29)
 
         qb_deq_scl = self.q_proj.deq_scale.data.clone()

vllm_ascend/torchair/utils.py

Lines changed: 1 addition & 1 deletion
@@ -146,7 +146,7 @@ def converting_weight_acl_format(model, format):
         if torch_npu.get_npu_format(module.w13_weight.data) == format:
             return
         if format == ACL_FORMAT_FRACTAL_NZ \
-                and not is_enable_nz():
+                and not is_enable_nz(module.w13_weight.data.dtype):
             return
         module.w13_weight.data = torch_npu.npu_format_cast(
             module.w13_weight.data, format)

vllm_ascend/utils.py

Lines changed: 4 additions & 1 deletion
@@ -71,13 +71,16 @@ def is_310p():
     return _IS_310P
 
 
-def is_enable_nz(vllm_config: Optional[VllmConfig] = None) -> bool:
+def is_enable_nz(dtype: Optional[torch.dtype] = torch.int8,
+                 vllm_config: Optional[VllmConfig] = None) -> bool:
     global _ENABLE_NZ
     if _ENABLE_NZ is None:
         if not vllm_config:
             raise ValueError(
                 "vllm_config must be provided when _ENABLE_NZ is None")
         _ENABLE_NZ = envs_ascend.VLLM_ASCEND_ENABLE_NZ and vllm_config.model_config.hf_config.model_type != "qwen3_next"
+    if dtype in [torch.float16, torch.bfloat16]:
+        return False
     return _ENABLE_NZ
 
 
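
A minimal usage sketch of the updated helper, assuming `VLLM_ASCEND_ENABLE_NZ` is set and the model is not `qwen3_next`, so the cached `_ENABLE_NZ` resolves to True. The cache must be initialized once with a `vllm_config` (as the worker does below) before dtype-only calls are made; `vllm_config` here stands for the engine's `VllmConfig` instance and is not defined in this snippet:

```python
import torch

from vllm_ascend.utils import is_enable_nz

# One-time initialization at worker start-up (see worker_v1.py below).
is_enable_nz(vllm_config=vllm_config)

# Later, dtype-only queries reuse the cached _ENABLE_NZ value:
is_enable_nz(torch.float16)   # False: fp16 weights keep the default ND format
is_enable_nz(torch.bfloat16)  # False
is_enable_nz(torch.int8)      # True under the assumptions above
```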

vllm_ascend/worker/model_runner_v1.py

Lines changed: 1 addition & 1 deletion
@@ -2676,7 +2676,7 @@ def load_model(self) -> None:
 
     def _convert_torch_format(self, tensor):
         if ACL_FORMAT == ACL_FORMAT_FRACTAL_NZ \
-                and not is_enable_nz():
+                and not is_enable_nz(tensor.dtype):
             return tensor
         tensor = torch_npu.npu_format_cast(tensor, ACL_FORMAT)
         return tensor

vllm_ascend/worker/worker_v1.py

Lines changed: 1 addition & 1 deletion
@@ -81,7 +81,7 @@ def __init__(
         # register patch for vllm
         from vllm_ascend.utils import adapt_patch
         adapt_patch()
-        is_enable_nz(vllm_config)
+        is_enable_nz(vllm_config=vllm_config)
         # Register ops when worker init.
         from vllm_ascend import ops
         ops.register_dummy_fusion_op()
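
The switch to a keyword argument is needed because `dtype` is now the first positional parameter of `is_enable_nz()`; given the new signature shown in `vllm_ascend/utils.py` above, a positional call at this point would misbehave, roughly as sketched here:

```python
# is_enable_nz(vllm_config)              # vllm_config would bind to `dtype`; the cache
#                                        # stays uninitialized and a ValueError is raised
# is_enable_nz(vllm_config=vllm_config)  # correct: initializes the _ENABLE_NZ cache once
```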
