PyTorch拟合一元一次函数
1. 自定义网络
2. 使用卷积网络
拟合函数 $y = a \times x + b$,其中 $a = 1$,$b = 2$。
1. 自定义网络
import torch
import numpy as np
class Net:
    """Minimal hand-rolled linear model y = a * x + b (does NOT subclass nn.Module)."""

    def __init__(self):
        # Trainable scalar parameters, randomly initialised in [0, 1).
        self.a = torch.rand(1, requires_grad=True)
        self.b = torch.rand(1, requires_grad=True)
        # Registry so parameters() can hand the tensors to an optimizer.
        self.__parameters = dict(a=self.a, b=self.b)
        # Fix: original attribute was "___gpu" (triple-underscore typo).
        # Unused placeholder GPU flag, kept for compatibility.
        self.__gpu = False

    def forward(self, inputs):
        """Compute a * inputs + b. Must be called explicitly (no __call__)."""
        return self.a * inputs + self.b

    def parameters(self):
        """Yield the trainable tensors, mimicking nn.Module.parameters()."""
        # Iterate .values(): the original looped over .items() but never
        # used the name (PERF102).
        yield from self.__parameters.values()
def _train(epochs=20000):
    """Fit y = x + 2 with the hand-rolled Net.

    Returns (a, b) as numpy arrays once the loss drops below the
    convergence threshold, or None if it never converges within *epochs*
    steps. Extracted from the original inline __main__ body so the script
    is importable and the step budget is configurable.
    """
    x = np.linspace(1, 50, 50)
    y = x + 2  # ground truth: a = 1, b = 2
    x = torch.from_numpy(x.astype(np.float32))
    y = torch.from_numpy(y.astype(np.float32))

    net = Net()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=0.0005)
    loss_op = torch.nn.MSELoss(reduction='sum')  # summed L2 loss

    for i in range(1, epochs + 1):
        out = net.forward(x)  # explicit call: Net has no __call__
        loss = loss_op(y, out)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # 输出中间过程 (report progress every 1000 steps)
        loss_numpy = loss.cpu().detach().numpy()
        if i % 1000 == 0:
            print(i, loss_numpy)
        # Early stop once converged. Fix: was exit(), which kills the
        # whole interpreter; return keeps the module importable.
        if loss_numpy < 0.00001:
            a = net.a.cpu().detach().numpy()
            b = net.b.cpu().detach().numpy()
            print(a, b)
            return a, b
    return None


if __name__ == '__main__':
    _train()
这种方法定义网络时没有继承torch.nn.Module,完全自己写了一个网络,要显式调用Net的forward函数。损失函数使用
的是L2损失。
2. 使用卷积网络
import torch
import numpy as np
class Net(torch.nn.Module):
    """A 1x1 Conv2d used as a scalar affine map: y = w * x + b."""

    def __init__(self):
        super(Net, self).__init__()
        # Fix: the original fused "layers = []" and "layers.append(...)"
        # onto one line, which is a syntax error.
        layers = []
        # A single-channel 1x1 convolution is exactly the linear function
        # y = a * x + b we want to fit.
        layers.append(torch.nn.Conv2d(1, 1, kernel_size=1, stride=1, bias=True))
        self.net = torch.nn.ModuleList(layers)

    def forward(self, x):
        # x is expected to be 4-D: (batch, 1, H, W) — TODO confirm at call site.
        return self.net[0](x)
if __name__ == '__main__':
    # Same toy dataset as the first example: y = x + 2.
    x = np.linspace(1, 50, 50)
    y = x + 2  # a = 1, b = 2
    x = torch.from_numpy(x.astype(np.float32))
    y = torch.from_numpy(y.astype(np.float32))

    net = Net()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.001, weight_decay=0.0005)
    # Fix: reduce=True/size_average=True have been deprecated for years;
    # reduction='mean' is the exact modern equivalent (mean L1 loss).
    loss_op = torch.nn.L1Loss(reduction='mean')
评论0
最新资源