Trying to learn about PyTorch.

Here’s the code that was shown to me to work with. It threw a core dump each time I ran it, which is no good. As far as I can tell the culprits were torch.random.randn (the function is actually torch.randn) and the device being hard-coded to cuda:0, which fails on a machine without a GPU; both are fixed in the version below. If I can find the site that this came from I will of course point towards it.

import torch
import math

dtype = torch.float
# Use the GPU if one is available; otherwise fall back to the CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# Create random input and output data.
x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
y = torch.sin(x)

# Randomly initialize the four polynomial coefficients.
a = torch.randn((), device=device, dtype=dtype)
b = torch.randn((), device=device, dtype=dtype)
c = torch.randn((), device=device, dtype=dtype)
d = torch.randn((), device=device, dtype=dtype)

learning_rate = 1e-6
for t in range(2000):
    # Forward pass: predict y as a cubic polynomial of x.
    y_pred = a + b * x + c * x ** 2 + d * x ** 3

    # Compute and print loss (sum of squared errors).
    loss = (y_pred - y).pow(2).sum().item()
    if t % 100 == 99:
        print(t, loss)

    # Backprop by hand: gradients of the loss with respect to a, b, c, d.
    grad_y_pred = 2.0 * (y_pred - y)
    grad_a = grad_y_pred.sum()
    grad_b = (grad_y_pred * x).sum()
    grad_c = (grad_y_pred * x ** 2).sum()
    grad_d = (grad_y_pred * x ** 3).sum()

    # Update weights via gradient descent.
    a -= learning_rate * grad_a
    b -= learning_rate * grad_b
    c -= learning_rate * grad_c
    d -= learning_rate * grad_d

print(f"Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3")
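
For anyone else following along: the gradients in that loop are worked out by hand with the chain rule. With loss = sum((y_pred - y)**2), the gradient with respect to a is sum(2 * (y_pred - y)), and the gradients for b, c, and d each pick up an extra factor of x, x**2, and x**3. PyTorch can compute all of these automatically with autograd, so here is a minimal sketch of the same fit using requires_grad and loss.backward(). This is my own rough version, not the original tutorial code, so treat it as a sketch.

import torch
import math

dtype = torch.float
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

x = torch.linspace(-math.pi, math.pi, 2000, device=device, dtype=dtype)
y = torch.sin(x)

# requires_grad=True tells autograd to track operations on these tensors.
a = torch.randn((), device=device, dtype=dtype, requires_grad=True)
b = torch.randn((), device=device, dtype=dtype, requires_grad=True)
c = torch.randn((), device=device, dtype=dtype, requires_grad=True)
d = torch.randn((), device=device, dtype=dtype, requires_grad=True)

learning_rate = 1e-6
for t in range(2000):
    y_pred = a + b * x + c * x ** 2 + d * x ** 3
    loss = (y_pred - y).pow(2).sum()
    if t % 100 == 99:
        print(t, loss.item())

    # Autograd computes the gradients of loss with respect to
    # a, b, c, and d in a single call.
    loss.backward()

    # Update the weights manually; torch.no_grad() keeps the
    # updates themselves out of the autograd graph.
    with torch.no_grad():
        a -= learning_rate * a.grad
        b -= learning_rate * b.grad
        c -= learning_rate * c.grad
        d -= learning_rate * d.grad

        # Reset gradients so they don't accumulate across iterations.
        a.grad = None
        b.grad = None
        c.grad = None
        d.grad = None

print(f"Result: y = {a.item()} + {b.item()} x + {c.item()} x^2 + {d.item()} x^3")

If that runs, the printed coefficients should land close to the hand-rolled version's; the only real changes are requires_grad=True on the weights, the single loss.backward() call in place of the four grad_ lines, and the no_grad block around the update.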
