@@ -1360,7 +1360,7 @@ def _linear_f16_act_floatx_weight_impl(input_tensor, weight_tensor, bias):
1360 1360
1361 1361     return out.view(*act.shape[:-1], out_dim).to(act.dtype)
1362 1362
1363      - def _linear_fp_act_fp8_weight_check(
     1363 + def _linear_fp8_act_fp8_weight_check(
1364 1364     input_tensor: Union[torch.Tensor, AffineQuantizedTensor],
1365 1365     weight_tensor: Union[torch.Tensor, AffineQuantizedTensor],
1366 1366     bias: Optional[torch.Tensor],
@@ -1384,7 +1384,7 @@ def preprocess_scale(input_scale: torch.Tensor, input_shape: Tuple[int]):
1384 1384
1385 1385     return input_scale
1386 1386
1387      - def _linear_fp_act_fp8_weight_impl(
     1387 + def _linear_fp8_act_fp8_weight_impl(
1388 1388     input_tensor: AffineQuantizedTensor,
1389 1389     weight_tensor: AffineQuantizedTensor,
1390 1390     bias: Optional[torch.Tensor],
@@ -1473,7 +1473,7 @@ def _register_aqt_quantized_linear_dispatches():
1473 1473     for dispatch_condition, impl in [
1474 1474         (_linear_int8_act_int8_weight_check, _linear_int8_act_int8_weight_impl),
1475 1475         (_linear_int8_act_int8_weight_semi_structured_sparse_check, _linear_int8_act_int8_weight_semi_structured_sparse_impl),
1476      -         (_linear_fp_act_fp8_weight_check, _linear_fp_act_fp8_weight_impl),
     1476 +         (_linear_fp8_act_fp8_weight_check, _linear_fp8_act_fp8_weight_impl),
1477 1477         (_linear_bf16_act_uint4_weight_check, _linear_bf16_act_uint4_weight_impl),
1478 1478         (_linear_fp_act_int8_weight_check, _linear_fp_act_int8_weight_impl),
1479 1479         (_linear_f16_act_floatx_weight_check, _linear_f16_act_floatx_weight_impl),
0 commit comments