|
11 | 11 | import torch.nn.functional as F |
12 | 12 |
|
13 | 13 |
|
| 14 | +class ToySingleLinearModel(torch.nn.Module): |
| 15 | + def __init__( |
| 16 | + self, |
| 17 | + input_dim, |
| 18 | + output_dim, |
| 19 | + dtype, |
| 20 | + device, |
| 21 | + has_bias=False, |
| 22 | + ): |
| 23 | + super().__init__() |
| 24 | + self.dtype = dtype |
| 25 | + self.device = device |
| 26 | + self.linear1 = torch.nn.Linear( |
| 27 | + input_dim, output_dim, bias=has_bias, dtype=dtype, device=device |
| 28 | + ) |
| 29 | + |
| 30 | + def example_inputs(self, batch_size=1): |
| 31 | + return ( |
| 32 | + torch.randn( |
| 33 | + batch_size, |
| 34 | + self.linear1.in_features, |
| 35 | + dtype=self.dtype, |
| 36 | + device=self.device, |
| 37 | + ), |
| 38 | + ) |
| 39 | + |
| 40 | + def forward(self, x): |
| 41 | + x = self.linear1(x) |
| 42 | + return x |
| 43 | + |
| 44 | + |
14 | 45 | # TODO: Refactor torchao and tests to use these models |
15 | | -class ToyLinearModel(torch.nn.Module): |
16 | | - def __init__(self, k=64, n=32, dtype=torch.bfloat16): |
| 46 | +class ToyTwoLinearModel(torch.nn.Module): |
| 47 | + def __init__( |
| 48 | + self, |
| 49 | + input_dim, |
| 50 | + hidden_dim, |
| 51 | + output_dim, |
| 52 | + dtype, |
| 53 | + device, |
| 54 | + has_bias=False, |
| 55 | + ): |
17 | 56 | super().__init__() |
18 | | - self.linear1 = torch.nn.Linear(k, n, bias=False).to(dtype) |
| 57 | + self.dtype = dtype |
| 58 | + self.device = device |
| 59 | + self.linear1 = torch.nn.Linear( |
| 60 | + input_dim, hidden_dim, bias=has_bias, dtype=dtype, device=device |
| 61 | + ) |
| 62 | + self.linear2 = torch.nn.Linear( |
| 63 | + hidden_dim, output_dim, bias=has_bias, dtype=dtype, device=device |
| 64 | + ) |
| 65 | + |
| 66 | + # Note: the Tiny-GEMM kernel supports only BF16 inputs |
| 67 | + def example_inputs(self, batch_size=1): |
| 68 | + return ( |
| 69 | + torch.randn( |
| 70 | + batch_size, |
| 71 | + self.linear1.in_features, |
| 72 | + dtype=self.dtype, |
| 73 | + device=self.device, |
| 74 | + ), |
| 75 | + ) |
19 | 76 |
|
20 | 77 | def forward(self, x): |
21 | 78 | x = self.linear1(x) |
| 79 | + x = self.linear2(x) |
22 | 80 | return x |
23 | 81 |
|
24 | 82 |
|
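As a quick sanity check on the two models added above, here is a minimal usage sketch. It assumes only the code shown in this hunk; the dimensions, dtype, and device chosen below are illustrative, not values taken from the diff.

```python
import torch

# Illustrative instantiation of the new toy models; dims, dtype, and
# device here are arbitrary choices, not requirements from the diff.
single = ToySingleLinearModel(64, 32, dtype=torch.bfloat16, device="cpu")
two = ToyTwoLinearModel(64, 128, 32, dtype=torch.bfloat16, device="cpu")

# example_inputs returns a 1-tuple, so unpack before calling the model
(xs,) = single.example_inputs()           # xs: (1, 64), bfloat16
print(single(xs).shape)                   # torch.Size([1, 32])

(x,) = two.example_inputs(batch_size=4)   # x: (4, 64), bfloat16
y = two(x)                                # linear1 -> (4, 128), linear2 -> (4, 32)
print(y.shape)                            # torch.Size([4, 32])
```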
@@ -179,8 +237,8 @@ def create_model_and_input_data( |
179 | 237 | m, k, n (int): dimensions of the model and input data |
180 | 238 | """ |
181 | 239 | if model_type == "linear": |
182 | | - model = ToyLinearModel(k, n, high_precision_dtype).to(device) |
183 | | - input_data = torch.randn(m, k, device=device, dtype=high_precision_dtype) |
| 240 | + model = ToySingleLinearModel(k, n, device=device, dtype=high_precision_dtype) |
| 241 | + input_data = model.example_inputs(batch_size=m)[0] |
184 | 242 | elif "ln_linear" in model_type: |
185 | 243 | # Extract activation type from model_type string |
186 | 244 | match = re.search(r"ln_linear_?(\w+)?", model_type) |
|