Following 唐宇迪's PyTorch course on Bilibili; reproducing the code from the videos for later reference.
import numpy as np
import torch
import torch.nn as nn
# Construct the input data x and its labels y; the underlying function is y = 2x + 1
x_values = [i for i in range(11)]
x_train = np.array(x_values, dtype=np.float32)
x_train = x_train.reshape(-1, 1)  # shape (11, 1): 11 samples, 1 feature
y_values = [2 * i + 1 for i in x_values]
y_train = np.array(y_values, dtype=np.float32)
y_train = y_train.reshape(-1, 1)
# Linear regression is really just a fully connected layer with no activation function (see the quick check after the model is instantiated below)
class LinearRegressionModel(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LinearRegressionModel, self).__init__()
        self.linear = nn.Linear(input_dim, output_dim)

    # Override the forward method inherited from nn.Module
    def forward(self, x):
        out = self.linear(x)
        return out
input_dim = 1
output_dim = 1
model = LinearRegressionModel(input_dim, output_dim)
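As a quick sanity check of the claim above (my addition, not part of the original video code): nn.Linear with no activation computes exactly x @ W.T + b, which is all linear regression is.

# A minimal sketch, assuming it runs right after model is created above:
with torch.no_grad():
    x_check = torch.tensor([[3.0]])
    manual = x_check @ model.linear.weight.T + model.linear.bias
    assert torch.allclose(model(x_check), manual)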
# Number of training epochs
epochs = 1000
# Learning rate
learning_rate = 0.01
# Use stochastic gradient descent (SGD) as the optimizer
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
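For intuition (a sketch of my own, not from the video): a vanilla SGD step is just parameter ← parameter − lr × gradient, so optimizer.step() in the loop below is equivalent to this manual update once loss.backward() has populated the gradients:

# Manual equivalent of optimizer.step() for plain SGD
# (assumes loss.backward() has already filled each p.grad):
with torch.no_grad():
    for p in model.parameters():
        p -= learning_rate * p.grad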
# Use mean squared error (MSE) as the loss function
criterion = nn.MSELoss()
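Likewise, nn.MSELoss with its default mean reduction is just the mean of squared differences; a quick check (my addition, with made-up example values):

pred = torch.tensor([[1.0], [2.0]])
target = torch.tensor([[0.0], [4.0]])
assert torch.allclose(criterion(pred, target), ((pred - target) ** 2).mean())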
for epoch in range(epochs):
    epoch += 1
    # Convert the numpy arrays to tensors (torch.from_numpy shares memory with
    # the numpy array; the conversion could also be hoisted out of the loop)
    inputs = torch.from_numpy(x_train)
    labels = torch.from_numpy(y_train)
    # Zero the gradients each iteration so they don't accumulate
    optimizer.zero_grad()
    # Forward pass
    outputs = model(inputs)
    # Compute the loss
    loss = criterion(outputs, labels)
    # Backward pass
    loss.backward()
    # Update the weight parameters
    optimizer.step()
    if epoch % 50 == 0:
        print('epoch {}, loss {}'.format(epoch, loss.item()))
The output is below; since the model is simple, it converges quickly:
epoch 50, loss 0.1350768655538559
epoch 100, loss 0.07704267650842667
epoch 150, loss 0.04394209757447243
epoch 200, loss 0.02506297081708908
epoch 250, loss 0.014295018278062344
epoch 300, loss 0.008153305388987064
epoch 350, loss 0.0046503255143761635
epoch 400, loss 0.0026523659471422434
epoch 450, loss 0.0015128119848668575
epoch 500, loss 0.0008628470241092145
epoch 550, loss 0.000492122198920697
epoch 600, loss 0.0002806899428833276
epoch 650, loss 0.00016008899547159672
epoch 700, loss 9.131137630902231e-05
epoch 750, loss 5.208040602155961e-05
epoch 800, loss 2.9707529392908327e-05
epoch 850, loss 1.694333514024038e-05
epoch 900, loss 9.664465324021876e-06
epoch 950, loss 5.5125256039900705e-06
epoch 1000, loss 3.1437150482815923e-06
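Since the data were generated from y = 2x + 1, the learned parameters should land near weight ≈ 2 and bias ≈ 1. A quick follow-up check (my addition, not from the video):

# Inspect the learned parameters and run a prediction
print(model.linear.weight.item(), model.linear.bias.item())  # expect ~2.0 and ~1.0
with torch.no_grad():
    predicted = model(torch.from_numpy(x_train)).numpy()
print(predicted[:3])  # should be close to [[1.], [3.], [5.]]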