File tree (2 files changed: +11 −0 lines):
- test/prototype/mx_formats
- torchao/prototype/mx_formats
lines changed Original file line number Diff line number Diff line change @@ -191,6 +191,16 @@ def test_inference_workflow_nvfp4(
191191 f"Got a sqnr of { sqnr } for NVFP4 recipe with bias={ bias } , mm_config={ mm_config } "
192192 )
193193
194+ # serialization
195+ with tempfile .NamedTemporaryFile () as f :
196+ torch .save (m_mx .state_dict (), f )
197+ f .seek (0 )
198+
199+ # temporary workaround for https://github.com/pytorch/ao/issues/3077
200+ torch .serialization .add_safe_globals ([getattr ])
201+
202+ _ = torch .load (f , weights_only = True )
203+
194204
195205class VLLMIntegrationTestCase (TorchAOIntegrationTestCase ):
196206 @pytest .mark .skipif (not torch .cuda .is_available (), reason = "CUDA not available" )
Original file line number Diff line number Diff line change @@ -211,6 +211,7 @@ def _nvfp4_inference_linear_transform(
211211 NVFP4MMConfig ,
212212 MXGemmKernelChoice ,
213213 QuantizeTensorToMXKwargs ,
214+ QuantizeTensorToNVFP4Kwargs ,
214215 ScaleCalculationMode ,
215216 ]
216217)
You can’t perform that action at this time.
0 commit comments