Commit 615085c

Update polynomial_autograd.py to use exp(x) instead of sin(x) as the function to be learned
1 parent 0b98a1c commit 615085c
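
Why a third-order polynomial can learn exp(x): on the new training interval [-1, 1], exp(x) is closely approximated by its cubic Taylor polynomial, so gradient descent has a good optimum nearby. A quick sanity check of that claim (illustrative only, not part of this commit; the least-squares fit is close to, though not identical to, the Taylor coefficients):

import math

# Cubic Taylor polynomial of exp(x) around 0: coefficients 1, 1, 1/2!, 1/3!.
def taylor3(x):
    return 1 + x + x**2 / 2 + x**3 / 6

# Worst-case gap on [-1, 1] is about 0.05 (at x = 1), so the learned weights
# a, b, c, d should end up near 1, 1, 0.5, 0.1667.
max_err = max(abs(math.exp(t / 1000) - taylor3(t / 1000)) for t in range(-1000, 1001))
print(f"max |exp(x) - taylor3(x)| on [-1, 1]: {max_err:.4f}")  # prints ~0.0516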

File tree: 1 file changed (+11, -6 lines)


beginner_source/examples_autograd/polynomial_autograd.py

Lines changed: 11 additions & 6 deletions
@@ -1,4 +1,4 @@
-"""
+r"""
 PyTorch: Tensors and autograd
 -------------------------------
 
@@ -27,8 +27,8 @@
 # Create Tensors to hold input and outputs.
 # By default, requires_grad=False, which indicates that we do not need to
 # compute gradients with respect to these Tensors during the backward pass.
-x = torch.linspace(-math.pi, math.pi, 2000, dtype=dtype)
-y = torch.sin(x)
+x = torch.linspace(-1, 1, 2000, dtype=dtype)
+y = torch.exp(x)  # A Taylor expansion would be 1 + x + (1/2!) x**2 + (1/3!) x**3 + ...
 
 # Create random Tensors for weights. For a third order polynomial, we need
 # 4 weights: y = a + b x + c x^2 + d x^3
@@ -39,17 +39,22 @@
 c = torch.randn((), dtype=dtype, requires_grad=True)
 d = torch.randn((), dtype=dtype, requires_grad=True)
 
-learning_rate = 1e-6
-for t in range(2000):
+learning_rate = 1e-5
+for t in range(5000):
     # Forward pass: compute predicted y using operations on Tensors.
     y_pred = a + b * x + c * x ** 2 + d * x ** 3
 
     # Compute and print loss using operations on Tensors.
     # Now loss is a Tensor of shape (1,)
     # loss.item() gets the scalar value held in the loss.
     loss = (y_pred - y).pow(2).sum()
+
+    # Calculate initial loss, so we can report loss relative to it.
+    if t == 0:
+        initial_loss = loss.item()
+
     if t % 100 == 99:
-        print(t, loss.item())
+        print(f'Iteration t = {t:4d} loss(t)/loss(0) = {round(loss.item()/initial_loss, 6):10.6f} a = {a.item():10.6f} b = {b.item():10.6f} c = {c.item():10.6f} d = {d.item():10.6f}')
 
     # Use autograd to compute the backward pass. This call will compute the
     # gradient of loss with respect to all Tensors with requires_grad=True.
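
For context, a minimal sketch of the training loop as it stands after this commit, assembled from the hunks above. The backward pass and manual weight update are not shown in this diff; the version below follows the surrounding tutorial file, so treat those lines as an assumption rather than part of the commit.

import torch

dtype = torch.float

# Fit exp(x) on [-1, 1] with a third-order polynomial.
x = torch.linspace(-1, 1, 2000, dtype=dtype)
y = torch.exp(x)

a = torch.randn((), dtype=dtype, requires_grad=True)
b = torch.randn((), dtype=dtype, requires_grad=True)
c = torch.randn((), dtype=dtype, requires_grad=True)
d = torch.randn((), dtype=dtype, requires_grad=True)

learning_rate = 1e-5
for t in range(5000):
    y_pred = a + b * x + c * x ** 2 + d * x ** 3
    loss = (y_pred - y).pow(2).sum()

    if t == 0:
        initial_loss = loss.item()
    if t % 100 == 99:
        print(f'Iteration t = {t:4d} loss(t)/loss(0) = {loss.item() / initial_loss:10.6f} '
              f'a = {a.item():10.6f} b = {b.item():10.6f} '
              f'c = {c.item():10.6f} d = {d.item():10.6f}')

    # Backward pass and SGD step (assumed unchanged by this commit, per the
    # upstream tutorial): compute grads, update weights, then reset grads.
    loss.backward()
    with torch.no_grad():
        a -= learning_rate * a.grad
        b -= learning_rate * b.grad
        c -= learning_rate * c.grad
        d -= learning_rate * d.grad
        a.grad = None
        b.grad = None
        c.grad = None
        d.grad = None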
