|
3 | 3 | # |
4 | 4 | # This source code is licensed under the BSD 3-Clause license found in the |
5 | 5 | # LICENSE file in the root directory of this source tree. |
6 | | -import logging |
7 | | -from dataclasses import dataclass |
8 | | -from typing import Optional, Tuple |
9 | 6 |
10 | | -import torch |
11 | | -from torch.utils._python_dispatch import ( |
12 | | - return_and_correct_aliasing, |
13 | | -) |
| 7 | +# Backward compatibility stub - imports from the new location |
| 8 | +import warnings |
14 | 9 |
15 | | -from torchao.dtypes.affine_quantized_tensor import ( |
16 | | - AffineQuantizedTensor, |
17 | | - register_layout, |
18 | | -) |
19 | | -from torchao.dtypes.uintx.plain_layout import ( |
20 | | - PlainAQTTensorImpl, |
21 | | - _aqt_is_int8_reduced_range, |
22 | | -) |
23 | | -from torchao.dtypes.utils import ( |
24 | | - Layout, |
25 | | - PlainLayout, |
| 10 | +warnings.warn( |
| 11 | + "Importing BlockSparseLayout from torchao.dtypes is deprecated. " |
| 12 | + "Please use 'from torchao.prototype.dtypes import BlockSparseLayout' instead. " |
| 13 | + "This import path will be removed in a future torchao release. " |
| 14 | + "Please check issue: https://github.com/pytorch/ao/issues/2752 for more details.",
| 15 | + DeprecationWarning, |
| 16 | + stacklevel=2, |
26 | 17 | ) |
27 | 18 |
28 | | -logger = logging.getLogger(__name__) |
29 | | - |
30 | | -aten = torch.ops.aten |
31 | | - |
32 | | - |
33 | | -@dataclass(frozen=True) |
34 | | -class BlockSparseLayout(Layout): |
35 | | - """BlockSparseLayout is a data class that represents the layout of a block sparse matrix. |
36 | | -
37 | | - Attributes: |
38 | | - blocksize (int): The size of the blocks in the sparse matrix. Default is 64. |
39 | | - """ |
40 | | - |
41 | | - blocksize: int = 64 |
42 | | - |
43 | | - |
44 | | -@register_layout(BlockSparseLayout) |
45 | | -class BlockSparseAQTTensorImpl(PlainAQTTensorImpl): |
46 | | - bsr_crow_indices: Optional[torch.Tensor] |
47 | | - bsr_col_indices: Optional[torch.Tensor] |
48 | | - bsr_values: Optional[torch.Tensor] |
49 | | - scale: Optional[torch.Tensor] |
50 | | - zero_point: Optional[torch.Tensor] |
51 | | - |
52 | | - __slots__ = [ |
53 | | - "bsr_crow_indices", |
54 | | - "bsr_col_indices", |
55 | | - "bsr_values", |
56 | | - "scale", |
57 | | - "zero_point", |
58 | | - ] |
59 | | - |
60 | | - @staticmethod |
61 | | - def __new__( # noqa: PYI034 |
62 | | - cls, |
63 | | - shape: torch.Size, |
64 | | - bsr_crow_indices: Optional[torch.Tensor], |
65 | | - bsr_col_indices: Optional[torch.Tensor], |
66 | | - bsr_values: Optional[torch.Tensor], |
67 | | - scale: Optional[torch.Tensor], |
68 | | - zero_point: Optional[torch.Tensor], |
69 | | - _layout: Layout, |
70 | | - requires_grad: bool = False, |
71 | | - ): |
72 | | - if bsr_values is None: |
73 | | - raise ValueError("bsr values must be provided!") |
74 | | - else: |
75 | | - previous_tensor = bsr_values |
76 | | - |
77 | | - kwargs = { |
78 | | - "device": previous_tensor.device, |
79 | | - "dtype": previous_tensor.dtype, |
80 | | - "layout": previous_tensor.layout, |
81 | | - "requires_grad": requires_grad, |
82 | | - } |
83 | | - return torch.Tensor._make_wrapper_subclass(cls, shape, **kwargs) # type: ignore[attr-defined] |
84 | | - |
85 | | - def __init__( # noqa: PYI034 |
86 | | - self, |
87 | | - shape: torch.Size, |
88 | | - bsr_crow_indices: Optional[torch.Tensor], |
89 | | - bsr_col_indices: Optional[torch.Tensor], |
90 | | - bsr_values: Optional[torch.Tensor], |
91 | | - scale: Optional[torch.Tensor], |
92 | | - zero_point: Optional[torch.Tensor], |
93 | | - _layout: Layout, |
94 | | - requires_grad: bool = False, |
95 | | - ): |
96 | | - self.bsr_crow_indices = bsr_crow_indices |
97 | | - self.bsr_col_indices = bsr_col_indices |
98 | | - self.bsr_values = bsr_values |
99 | | - self.scale = scale |
100 | | - self.zero_point = zero_point |
101 | | - self._layout = _layout |
102 | | - |
103 | | - def __tensor_flatten__(self): |
104 | | - inner_tensors = list( |
105 | | - filter(lambda x: getattr(self, x) is not None, self.__slots__) |
106 | | - ) |
107 | | - tensor_meta = (self.shape, self._layout, self.requires_grad) |
108 | | - return inner_tensors, tensor_meta |
109 | | - |
110 | | - @classmethod |
111 | | - def __tensor_unflatten__( |
112 | | - cls, |
113 | | - inner_tensors, |
114 | | - tensor_meta: Tuple[torch.Size, bool], |
115 | | - outer_size, |
116 | | - outer_stride, |
117 | | - ) -> torch.Tensor: |
118 | | - shape, _layout, requires_grad = tensor_meta |
119 | | - return cls( |
120 | | - shape=shape, |
121 | | - bsr_crow_indices=inner_tensors.get("bsr_crow_indices", None), |
122 | | - bsr_col_indices=inner_tensors.get("bsr_col_indices", None), |
123 | | - bsr_values=inner_tensors.get("bsr_values", None), |
124 | | - scale=inner_tensors.get("scale", None), |
125 | | - zero_point=inner_tensors.get("zero_point", None), |
126 | | - _layout=_layout, |
127 | | - requires_grad=requires_grad, |
128 | | - ) |
129 | | - |
130 | | - @classmethod |
131 | | - def from_plain(cls, int_data, scale, zero_point, _layout): |
132 | | - bsr_tensor = int_data.to_sparse_bsr(_layout.blocksize) |
133 | | - return cls( |
134 | | - shape=int_data.shape, |
135 | | - bsr_crow_indices=bsr_tensor.crow_indices(), |
136 | | - bsr_col_indices=bsr_tensor.col_indices(), |
137 | | - bsr_values=bsr_tensor.values(), |
138 | | - scale=scale, |
139 | | - zero_point=zero_point, |
140 | | - _layout=_layout, |
141 | | - requires_grad=False, |
142 | | - ) |
143 | | - |
144 | | - def get_plain(self): |
145 | | - int_data_expanded = torch.ops.blocksparse.bsr_to_dense( |
146 | | - self.crow_indices(), |
147 | | - self.col_indices(), |
148 | | - self.values(), |
149 | | - self.shape[0], |
150 | | - self.shape[1], |
151 | | - ) |
152 | | - return int_data_expanded, self.scale, self.zero_point |
153 | | - |
154 | | - def _apply_fn_to_data(self, func): |
155 | | - return self.__class__( |
156 | | - shape=self.shape, |
157 | | - bsr_crow_indices=func(self.bsr_crow_indices), |
158 | | - bsr_col_indices=func(self.bsr_col_indices), |
159 | | - bsr_values=func(self.bsr_values), |
160 | | - scale=self.scale, |
161 | | - zero_point=self.zero_point, |
162 | | - _layout=self._layout, |
163 | | - requires_grad=self.requires_grad, |
164 | | - ) |
165 | | - |
166 | | - @classmethod |
167 | | - def __torch_dispatch__(cls, func, types, args, kwargs): |
168 | | - kwargs = {} if kwargs is None else kwargs |
169 | | - |
170 | | - if func is aten.detach.default: |
171 | | - return return_and_correct_aliasing( |
172 | | - func, args, kwargs, args[0]._apply_fn_to_data(torch.detach) |
173 | | - ) |
174 | | - if func is aten.clone.default: |
175 | | - return return_and_correct_aliasing( |
176 | | - func, args, kwargs, args[0]._apply_fn_to_data(torch.clone) |
177 | | - ) |
178 | | - |
179 | | - # Need the following for bsr specific functions |
180 | | - if func is aten.crow_indices.default: |
181 | | - return args[0].bsr_crow_indices.detach() |
182 | | - |
183 | | - if func is aten.col_indices.default: |
184 | | - return args[0].bsr_col_indices.detach() |
185 | | - |
186 | | - if func is aten.values.default: |
187 | | - return args[0].bsr_values.detach() |
188 | | - |
189 | | - if func is aten._nnz.default: |
190 | | - return args[0].bsr_values.shape[0] |
191 | | - |
192 | | - raise NotImplementedError( |
193 | | - f"BlockSparseAQTTensorImpl dispatch: attempting to run {func}, this is not supported" |
194 | | - ) |
195 | | - |
196 | | - |
197 | | -def _linear_int8_act_int8_weight_block_sparse_check(input_tensor, weight_tensor, bias): |
198 | | - return ( |
199 | | - isinstance(input_tensor, AffineQuantizedTensor) |
200 | | - and _aqt_is_int8_reduced_range(input_tensor) |
201 | | - and isinstance(weight_tensor, AffineQuantizedTensor) |
202 | | - and weight_tensor.is_cuda |
203 | | - and input_tensor.dtype == weight_tensor.dtype |
204 | | - and isinstance(input_tensor._layout, PlainLayout) |
205 | | - and isinstance(weight_tensor._layout, BlockSparseLayout) |
206 | | - ) |
207 | | - |
208 | | - |
209 | | -def _linear_int8_act_int8_weight_block_sparse_impl(input_tensor, weight_tensor, bias): |
210 | | - x_vals_int8 = input_tensor.tensor_impl.int_data |
211 | | - x_scales = input_tensor.tensor_impl.scale |
212 | | - w_vals = weight_tensor.tensor_impl |
213 | | - w_scales = weight_tensor.tensor_impl.scale |
214 | | - tmp = x_vals_int8.reshape(-1, x_vals_int8.shape[-1]) |
215 | | - tmp_t = tmp.t() |
216 | | - |
217 | | - y = torch.ops.blocksparse.int_addmm( |
218 | | - w_vals.crow_indices(), |
219 | | - w_vals.col_indices(), |
220 | | - w_vals.values(), |
221 | | - tmp_t, |
222 | | - w_scales, |
223 | | - x_scales.reshape(-1), |
224 | | - ) |
225 | | - y_shape = (*x_vals_int8.shape[:-1], w_scales.shape[-1]) |
226 | | - y = y.reshape(*y_shape) |
227 | | - |
228 | | - # can downcast only at the very end |
229 | | - output_dtype = input_tensor.dtype |
230 | | - y = y.to(output_dtype) |
231 | | - if bias is not None: |
232 | | - y += bias |
233 | | - return y |
| 19 | +from torchao.prototype.dtypes.uintx.block_sparse_layout import ( |
| 20 | + BlockSparseAQTTensorImpl, # noqa: F401 |
| 21 | + BlockSparseLayout, # noqa: F401 |
| 22 | + _linear_int8_act_int8_weight_block_sparse_check, # noqa: F401 |
| 23 | + _linear_int8_act_int8_weight_block_sparse_impl, # noqa: F401 |
| 24 | +) |
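
The stub above keeps the old import path working while steering users to the new one. Below is a minimal sketch of the resulting behavior, assuming torchao is installed and that `torchao.dtypes` re-exports this module's names (the exact re-export chain is not shown in this diff):

```python
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    # Old path: still works, but emits the DeprecationWarning added above.
    # Note: a module-level warning fires only on the *first* import of the
    # module in a process, so run this in a fresh interpreter.
    from torchao.dtypes import BlockSparseLayout  # noqa: F401

assert any(issubclass(w.category, DeprecationWarning) for w in caught)

# Recommended going forward, per the warning message:
from torchao.prototype.dtypes import BlockSparseLayout  # noqa: F811
```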
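For context on what moved: `BlockSparseLayout` is a frozen dataclass with a single `blocksize` field (default 64), and `BlockSparseAQTTensorImpl.from_plain()` converts dense int8 data to BSR at that blocksize via `Tensor.to_sparse_bsr`. A hedged sketch of that conversion follows; the shapes are illustrative and must be divisible by the blocksize:

```python
import torch

from torchao.prototype.dtypes import BlockSparseLayout

layout = BlockSparseLayout(blocksize=32)  # frozen dataclass; default is 64

# Mirrors what BlockSparseAQTTensorImpl.from_plain() does internally:
int_data = torch.randint(-128, 127, (128, 128), dtype=torch.int8)
bsr = int_data.to_sparse_bsr(layout.blocksize)

# The impl stores these three components plus scale/zero_point:
print(bsr.crow_indices().shape)  # (rows / blocksize + 1,)
print(bsr.col_indices().shape)   # (nnz_blocks,)
print(bsr.values().shape)        # (nnz_blocks, blocksize, blocksize)
```

This is also why the deleted `__torch_dispatch__` special-cases `aten.crow_indices`, `aten.col_indices`, and `aten.values`: the impl stores the BSR components as plain tensors in `__slots__` and reconstructs BSR semantics on demand.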