""" NAdamW Optimizer

Based on simplified algorithm in https://github.com/mlcommons/algorithmic-efficiency/tree/main/baselines/nadamw

Added multi-tensor (foreach) path.
"""
import math
from typing import List, Optional

import torch
from torch import Tensor


# Modified from github.com/pytorch/pytorch/blob/v1.12.1/torch/optim/adamw.py.
class NAdamW(torch.optim.Optimizer):
    r"""Implements NAdamW algorithm.

    See Table 1 in https://arxiv.org/abs/1910.05446 for the implementation of
    the NAdam algorithm (there is also a comment in the code which highlights
    the only difference between NAdamW and AdamW).
    For further details regarding the algorithm we refer to
    `Decoupled Weight Decay Regularization`_.

    Args:
        params (iterable): iterable of parameters to optimize or dicts defining
            parameter groups
        lr (float, optional): learning rate (default: 1e-3)
        betas (Tuple[float, float], optional): coefficients used for computing
            running averages of gradient and its square (default: (0.9, 0.999))
        eps (float, optional): term added to the denominator to improve
            numerical stability (default: 1e-8)
        weight_decay (float, optional): weight decay coefficient (default: 1e-2)
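
    Example (an illustrative usage sketch; ``model``, ``input``, ``target`` and
    ``loss_fn`` are placeholder names, not part of this module)::

        >>> optimizer = NAdamW(model.parameters(), lr=1e-3, weight_decay=1e-2)
        >>> optimizer.zero_grad()
        >>> loss_fn(model(input), target).backward()
        >>> optimizer.step()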

    .. _Decoupled Weight Decay Regularization:
        https://arxiv.org/abs/1711.05101
    .. _On the Convergence of Adam and Beyond:
        https://openreview.net/forum?id=ryQu7f-RZ
    """

    def __init__(
            self,
            params,
            lr=1e-3,
            betas=(0.9, 0.999),
            eps=1e-8,
            weight_decay=1e-2,
            maximize: bool = False,
            foreach: Optional[bool] = None,
            capturable: bool = False,
    ):
        if not 0.0 <= lr:
            raise ValueError(f'Invalid learning rate: {lr}')
        if not 0.0 <= eps:
            raise ValueError(f'Invalid epsilon value: {eps}')
        if not 0.0 <= betas[0] < 1.0:
            raise ValueError(f'Invalid beta parameter at index 0: {betas[0]}')
        if not 0.0 <= betas[1] < 1.0:
            raise ValueError(f'Invalid beta parameter at index 1: {betas[1]}')
        if not 0.0 <= weight_decay:
            raise ValueError(f'Invalid weight_decay value: {weight_decay}')
        defaults = dict(
            lr=lr,
            betas=betas,
            eps=eps,
            weight_decay=weight_decay,
            foreach=foreach,
            maximize=maximize,
            capturable=capturable,
        )
        super().__init__(params, defaults)

    def __setstate__(self, state):
        super().__setstate__(state)
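        # Older checkpoints may have stored `step` as a plain Python number;
        # convert it to a tensor so the restored state matches what the current
        # implementation (including the capturable path) expects.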
        state_values = list(self.state.values())
        step_is_tensor = (len(state_values) != 0) and torch.is_tensor(
            state_values[0]['step'])
        if not step_is_tensor:
            for s in state_values:
                s['step'] = torch.tensor(float(s['step']))

    @torch.no_grad()
    def step(self, closure=None):
        """Performs a single optimization step.

        Args:
            closure (callable, optional): A closure that reevaluates the model
                and returns the loss.
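
        Example (an illustrative closure; ``model``, ``inputs``, ``targets`` and
        ``loss_fn`` are placeholder names)::

            >>> def closure():
            ...     optimizer.zero_grad()
            ...     loss = loss_fn(model(inputs), targets)
            ...     loss.backward()
            ...     return loss
            >>> optimizer.step(closure)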
        """
        self._cuda_graph_capture_health_check()

        loss = None
        if closure is not None:
            with torch.enable_grad():
                loss = closure()

        for group in self.param_groups:
            params_with_grad = []
            grads = []
            exp_avgs = []
            exp_avg_sqs = []
            state_steps = []
            beta1, beta2 = group['betas']

            for p in group['params']:
                if p.grad is None:
                    continue
                params_with_grad.append(p)
                if p.grad.is_sparse:
                    raise RuntimeError('NAdamW does not support sparse gradients')
                grads.append(p.grad)

                state = self.state[p]

                # State initialization
                if len(state) == 0:
                    state['step'] = torch.tensor(0.)
                    # Exponential moving average of gradient values
                    state['exp_avg'] = torch.zeros_like(p, memory_format=torch.preserve_format)
                    # Exponential moving average of squared gradient values
                    state['exp_avg_sq'] = torch.zeros_like(p, memory_format=torch.preserve_format)

                exp_avgs.append(state['exp_avg'])
                exp_avg_sqs.append(state['exp_avg_sq'])
                state_steps.append(state['step'])

            nadamw(
                params_with_grad,
                grads,
                exp_avgs,
                exp_avg_sqs,
                state_steps,
                beta1=beta1,
                beta2=beta2,
                lr=group['lr'],
                weight_decay=group['weight_decay'],
                eps=group['eps'],
                maximize=group['maximize'],
                foreach=group['foreach'],
                capturable=group['capturable'],
            )

        return loss


def nadamw(
        params: List[Tensor],
        grads: List[Tensor],
        exp_avgs: List[Tensor],
        exp_avg_sqs: List[Tensor],
        state_steps: List[Tensor],
        foreach: Optional[bool] = None,
        capturable: bool = False,
        *,
        beta1: float,
        beta2: float,
        lr: float,
        weight_decay: float,
        eps: float,
        maximize: bool,
) -> None:
    r"""Functional API that performs NAdamW algorithm computation.
    See NAdamW class for details.
    """
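    # The update applied below (a restatement of the code, with g_t the gradient at
    # step t, negated when maximize=True, and lambda the decoupled weight decay):
    #
    #   m_t     = beta1 * m_{t-1} + (1 - beta1) * g_t
    #   v_t     = beta2 * v_{t-1} + (1 - beta2) * g_t ** 2
    #   m_hat_t = beta1 * m_t + (1 - beta1) * g_t      # Nesterov look-ahead (NAdamW vs. AdamW)
    #   theta_t = theta_{t-1} * (1 - lr * lambda)
    #             - lr * (m_hat_t / (1 - beta1 ** t)) / (sqrt(v_t / (1 - beta2 ** t)) + eps)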

    if not all(isinstance(t, torch.Tensor) for t in state_steps):
        raise RuntimeError(
            'API has changed, `state_steps` argument must contain a list of singleton tensors')

    if foreach is None:
        foreach = True
    if foreach and not torch.jit.is_scripting():
        func = _multi_tensor_nadamw
    else:
        func = _single_tensor_nadamw

    func(
        params,
        grads,
        exp_avgs,
        exp_avg_sqs,
        state_steps,
        beta1=beta1,
        beta2=beta2,
        lr=lr,
        weight_decay=weight_decay,
        eps=eps,
        maximize=maximize,
        capturable=capturable,
    )


def _single_tensor_nadamw(
        params: List[Tensor],
        grads: List[Tensor],
        exp_avgs: List[Tensor],
        exp_avg_sqs: List[Tensor],
        state_steps: List[Tensor],
        *,
        beta1: float,
        beta2: float,
        lr: float,
        weight_decay: float,
        eps: float,
        maximize: bool,
        capturable: bool
):

    for i, param in enumerate(params):
        grad = grads[i] if not maximize else -grads[i]
        exp_avg = exp_avgs[i]
        exp_avg_sq = exp_avg_sqs[i]
        step_t = state_steps[i]

        # Update step.
        step_t += 1

        # Perform stepweight decay.
        param.mul_(1. - lr * weight_decay)

        # Decay the first and second moment running average coefficient.
        exp_avg.mul_(beta1).add_(grad, alpha=1 - beta1)
        exp_avg_sq.mul_(beta2).addcmul_(grad, grad, value=1 - beta2)

        if capturable:
            step = step_t

            # 1 - beta1 ** step can't be captured in a CUDA graph, even if step is a CUDA tensor
            # (incurs "RuntimeError: CUDA error: operation not permitted when stream is capturing")
            bias_correction1 = 1 - torch.pow(beta1, step)
            bias_correction2 = 1 - torch.pow(beta2, step)

            step_size = lr / bias_correction1
            step_size_neg = step_size.neg()

            bias_correction2_sqrt = bias_correction2.sqrt()

            # Only difference between NAdamW and AdamW in this implementation.
            # The official PyTorch implementation of NAdam uses a different algorithm.
            exp_avg = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1)

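            # `step_size` is a tensor here and `addcdiv_` only accepts a Python number
            # for `value`, so the (negative) step size is folded into `denom` below
            # instead of being passed as `value`.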
            denom = (exp_avg_sq.sqrt() / (bias_correction2_sqrt * step_size_neg)).add_(eps / step_size_neg)
            param.addcdiv_(exp_avg, denom)
        else:
            step = step_t.item()
            bias_correction1 = 1 - beta1 ** step
            bias_correction2 = 1 - beta2 ** step
            step_size = lr / bias_correction1
            bias_correction2_sqrt = math.sqrt(bias_correction2)

            # Only difference between NAdamW and AdamW in this implementation.
            # The official PyTorch implementation of NAdam uses a different algorithm.
            exp_avg = exp_avg.mul(beta1).add_(grad, alpha=1 - beta1)

            denom = (exp_avg_sq.sqrt() / bias_correction2_sqrt).add_(eps)
            param.addcdiv_(exp_avg, denom, value=-step_size)


def _multi_tensor_nadamw(
        params: List[Tensor],
        grads: List[Tensor],
        exp_avgs: List[Tensor],
        exp_avg_sqs: List[Tensor],
        state_steps: List[Tensor],
        *,
        beta1: float,
        beta2: float,
        lr: float,
        weight_decay: float,
        eps: float,
        maximize: bool,
        capturable: bool,
):
    if len(params) == 0:
        return

    if capturable:
        assert all(
            p.is_cuda and step.is_cuda for p, step in zip(params, state_steps)
        ), "If capturable=True, params and state_steps must be CUDA tensors."

    if maximize:
        grads = torch._foreach_neg(tuple(grads))  # type: ignore[assignment]

    grads = [torch.view_as_real(x) if torch.is_complex(x) else x for x in grads]
    exp_avgs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avgs]
    exp_avg_sqs = [torch.view_as_real(x) if torch.is_complex(x) else x for x in exp_avg_sqs]
    params = [torch.view_as_real(x) if torch.is_complex(x) else x for x in params]

    # update steps
    torch._foreach_add_(state_steps, 1)

    # Perform stepweight decay
    torch._foreach_mul_(params, 1 - lr * weight_decay)

    # Decay the first and second moment running average coefficient
    torch._foreach_mul_(exp_avgs, beta1)
    torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)

    torch._foreach_mul_(exp_avg_sqs, beta2)
    torch._foreach_addcmul_(exp_avg_sqs, grads, grads, 1 - beta2)

    if capturable:
        # TODO: use foreach_pow if/when foreach_pow is added
        bias_correction1 = [torch.pow(beta1, step) for step in state_steps]
        bias_correction2 = [torch.pow(beta2, step) for step in state_steps]
        # foreach_sub doesn't allow a scalar as the first arg
        torch._foreach_sub_(bias_correction1, 1)
        torch._foreach_sub_(bias_correction2, 1)
        torch._foreach_neg_(bias_correction1)
        torch._foreach_neg_(bias_correction2)

        # foreach_div doesn't allow a scalar as the first arg
        step_size = torch._foreach_div(bias_correction1, lr)
        torch._foreach_reciprocal_(step_size)
        torch._foreach_neg_(step_size)

        bias_correction2_sqrt = torch._foreach_sqrt(bias_correction2)

        # Only difference between NAdamW and AdamW in this implementation,
        # mirroring the single-tensor path above: Nesterov look-ahead on the
        # first moment (computed out of place so the stored state is unchanged).
        exp_avgs = torch._foreach_mul(exp_avgs, beta1)
        torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)

        exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs)
        torch._foreach_div_(
            exp_avg_sq_sqrt, torch._foreach_mul(bias_correction2_sqrt, step_size)
        )
        eps_over_step_size = torch._foreach_div(step_size, eps)
        torch._foreach_reciprocal_(eps_over_step_size)
        denom = torch._foreach_add(exp_avg_sq_sqrt, eps_over_step_size)

        torch._foreach_addcdiv_(params, exp_avgs, denom)
    else:
        bias_correction1 = [1 - beta1 ** step.item() for step in state_steps]
        bias_correction2 = [1 - beta2 ** step.item() for step in state_steps]

        step_size = [(lr / bc) * -1 for bc in bias_correction1]

        bias_correction2_sqrt = [math.sqrt(bc) for bc in bias_correction2]

        # Only difference between NAdamW and AdamW in this implementation.
        # The official PyTorch implementation of NAdam uses a different algorithm.
        exp_avgs = torch._foreach_mul(exp_avgs, beta1)
        torch._foreach_add_(exp_avgs, grads, alpha=1 - beta1)

        exp_avg_sq_sqrt = torch._foreach_sqrt(exp_avg_sqs)
        torch._foreach_div_(exp_avg_sq_sqrt, bias_correction2_sqrt)
        denom = torch._foreach_add(exp_avg_sq_sqrt, eps)

        torch._foreach_addcdiv_(params, exp_avgs, denom, step_size)