|
494 | 494 | "optimizer.zero_grad()\n", |
495 | 495 | "\n", |
496 | 496 | "# Forward pass, then backward pass, then update weights\n", |
497 | | - "output = model.forward(images)\n", |
| 497 | + "output = model(images)\n", |
498 | 498 | "loss = criterion(output, labels)\n", |
499 | 499 | "loss.backward()\n", |
500 | 500 | "print('Gradient -', model[0].weight.grad)" |
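This hunk replaces the explicit `model.forward(images)` with `model(images)`. Calling the module instance goes through `nn.Module.__call__`, which runs any registered hooks before dispatching to `forward`, so it is the recommended way to invoke a model. Below is a minimal, self-contained sketch of the same pattern (zero the gradients, forward, loss, backward, inspect a gradient); the model architecture, layer sizes, and fake batch are illustrative assumptions, not taken from the notebook.

```python
import torch
from torch import nn, optim

# Illustrative stand-ins for the notebook's model and MNIST batch (assumed shapes).
model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(),
                      nn.Linear(128, 10), nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

images = torch.randn(64, 784)          # fake batch of flattened 28x28 images
labels = torch.randint(0, 10, (64,))   # fake class labels

optimizer.zero_grad()                  # clear any accumulated gradients
output = model(images)                 # call the module, not model.forward(...)
loss = criterion(output, labels)
loss.backward()                        # populate .grad on each parameter
print('Gradient -', model[0].weight.grad)
```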
|
581 | 581 | " # TODO: Training pass\n", |
582 | 582 | " optimizer.zero_grad()\n", |
583 | 583 | " \n", |
584 | | - " output = model.forward(images)\n", |
| 584 | + " output = model(images)\n", |
585 | 585 | " loss = criterion(output, labels)\n", |
586 | 586 | " loss.backward()\n", |
587 | 587 | " optimizer.step()\n", |
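The second hunk makes the same substitution inside the training loop: zero the gradients, forward pass through the model instance, compute the loss, backpropagate, and step the optimizer. A sketch of the full loop follows, with the model, hyperparameters, and a synthetic data loader assumed for illustration (the notebook uses the real MNIST `trainloader` built in earlier cells).

```python
import torch
from torch import nn, optim
from torch.utils.data import DataLoader, TensorDataset

# Synthetic stand-in for the notebook's MNIST trainloader (assumed shapes).
fake_images = torch.randn(256, 1, 28, 28)
fake_labels = torch.randint(0, 10, (256,))
trainloader = DataLoader(TensorDataset(fake_images, fake_labels),
                         batch_size=64, shuffle=True)

# Assumed model, criterion, and optimizer mirroring the earlier sketch.
model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(),
                      nn.Linear(128, 10), nn.LogSoftmax(dim=1))
criterion = nn.NLLLoss()
optimizer = optim.SGD(model.parameters(), lr=0.003)

epochs = 5
for e in range(epochs):
    running_loss = 0
    for images, labels in trainloader:
        images = images.view(images.shape[0], -1)  # flatten 28x28 images to 784

        optimizer.zero_grad()           # clear gradients from the previous step
        output = model(images)          # forward pass via __call__
        loss = criterion(output, labels)
        loss.backward()                 # backward pass
        optimizer.step()                # update the weights

        running_loss += loss.item()
    print(f"Training loss: {running_loss / len(trainloader)}")
```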
|
625 | 625 | "img = images[0].view(1, 784)\n", |
626 | 626 | "# Turn off gradients to speed up this part\n", |
627 | 627 | "with torch.no_grad():\n", |
628 | | - " logps = model.forward(img)\n", |
| 628 | + " logps = model(img)\n", |
629 | 629 | "\n", |
630 | 630 | "# The network outputs log-probabilities, so take the exponential to get probabilities\n", |
631 | 631 | "ps = torch.exp(logps)\n", |
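The final hunk applies the same change to the inference cell: a single flattened image is passed through the model inside `torch.no_grad()` so no autograd graph is built, and the log-probabilities are exponentiated to recover class probabilities. A minimal sketch under the same assumed model as above:

```python
import torch
from torch import nn

# Assumed model ending in LogSoftmax, matching the earlier sketches.
model = nn.Sequential(nn.Linear(784, 128), nn.ReLU(),
                      nn.Linear(128, 10), nn.LogSoftmax(dim=1))

img = torch.randn(1, 784)        # stand-in for images[0].view(1, 784)

# Turn off gradient tracking to speed up inference.
with torch.no_grad():
    logps = model(img)           # log-probabilities from the LogSoftmax output

ps = torch.exp(logps)            # exponentiate to get probabilities
print(ps.sum())                  # sums to 1 across the 10 classes
```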
|