We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent e36230e · commit f1f3a90 (Copy full SHA for f1f3a90)
test/3x/torch/algorithms/fp8_quant/unit_tests/test_layers/test_linear.py
@@ -134,6 +134,8 @@ def test_linear_dynamic_quantization(
134
scale_format: ScaleFormat,
135
use_hpu_graphs: bool
136
):
137
+ if not use_hpu_graphs and (hp_dtype == torch.bfloat16) and device_type == GAUDI2:
138
+ pytest.xfail("[SW-242200] Temporary skip them since the time usage is more than expected.")
139
check_tests_to_skip(scale_method, scale_format, True)
140
N = 1
141
D_in = 8
0 commit comments