Skip to content

Commit 450d4f6

Browse files
committed
Clean up comments
Signed-off-by: Benji Beck <benjibeck@meta.com>
1 parent 4199d06 commit 450d4f6

File tree

2 files changed

+0
-3
lines changed

2 files changed

+0
-3
lines changed

torchao/quantization/quant_api.py

Lines changed: 0 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1336,7 +1336,6 @@ def _int8_weight_only_quantize_tensor(weight, config):
13361336
if group_size is None:
13371337
group_size = weight.shape[-1]
13381338
block_size = tuple([1 for x in range(weight.dim() - 1)] + [group_size])
1339-
# todo: support fp8 semi-sparse
13401339
new_weight = to_affine_quantized_intx(
13411340
weight,
13421341
mapping_type,
@@ -1585,7 +1584,6 @@ class Float8WeightOnlyConfig(AOBaseConfig):
15851584
weight_dtype: torch.dtype = e4m3_dtype
15861585
set_inductor_config: bool = True
15871586
version: int = 2
1588-
# todo: add packing format
15891587

15901588
def __post_init__(self):
15911589
torch._C._log_api_usage_once("torchao.quantization.Float8WeightOnlyConfig")

torchao/quantization/quantize_/common/packing_format.py

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -32,4 +32,3 @@ class PackingFormat(str, Enum):
3232
needed for the rest of the system to understand the specific format that's adopted.
3333
"""
3434
OPAQUE = "opaque"
35-
# todo: add semi-sparse

0 commit comments

Comments (0)