Choosing an optimizer for deep learning (SGD, Momentum, RMSprop, Adam)

This post compares four common PyTorch optimizers by training four identical small networks on the same noisy regression task and plotting their loss curves.

import torch
import torch.utils.data as Data
import torch.nn.functional as F
import matplotlib.pyplot as plt
import torch.nn as nn

LR=0.01
BATCH_SIZE=32
EPOCH=5

x=torch.unsqueeze(torch.linspace(-1,1,1000),dim=1)  # unsqueeze turns the 1-D tensor into shape (1000, 1)
y=x.pow(2)+0.1*torch.normal(torch.zeros(*x.size()))  # noisy quadratic target: y = x^2 + Gaussian noise
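# Optional (an added sketch, not in the original post): uncomment to
# visualize the noisy dataset before training.
# plt.scatter(x.numpy(), y.numpy(), s=2)
# plt.show()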

torch_dataset=Data.TensorDataset(x,y)
loader=Data.DataLoader(dataset=torch_dataset,batch_size=BATCH_SIZE,shuffle=True)
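# Sanity check (an addition, not from the original post): draw one batch
# and confirm both tensors have shape (BATCH_SIZE, 1).
# batch_x, batch_y = next(iter(loader))
# print(batch_x.shape, batch_y.shape)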
# Build the network
class Net(nn.Module):
    def __init__(self):
        super(Net,self).__init__()
        self.hidden=nn.Linear(1,20)    # hidden layer: 1 input feature -> 20 units
        self.predict = nn.Linear(20,1) # output layer: 20 units -> 1 prediction
    def forward(self,x):
        x=F.relu(self.hidden(x))       # ReLU activation on the hidden layer
        x = self.predict(x)            # keep the output linear; a ReLU here would clip negative predictions
        return x

net_SGD=Net()
net_Momentum=Net()
net_RMSProp=Net()
net_Adam=Net()
nets=[net_SGD,net_Momentum,net_RMSProp,net_Adam]
opt_SGD=torch.optim.SGD(net_SGD.parameters(),lr=LR)
opt_Momentum=torch.optim.SGD(net_Momentum.parameters(),lr=LR,momentum=0.9)
opt_RMSProp=torch.optim.RMSprop(net_RMSProp.parameters(),lr=LR,alpha=0.9)
opt_Adam=torch.optim.Adam(net_Adam.parameters(),lr=LR,betas=(0.9,0.99))
optimizers=[opt_SGD,opt_Momentum,opt_RMSProp,opt_Adam]
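# Note (added): opt_Momentum is plain SGD plus a velocity buffer. With
# dampening=0, PyTorch's update is roughly
#     buf = momentum * buf + grad
#     param = param - LR * buf
# so momentum=0.9 lets past gradients keep pushing the parameters in a
# consistent direction.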

loss_func=torch.nn.MSELoss()
loss_his=[[],[],[],[]]  # one loss history per optimizer
for epoch in range(EPOCH):
    for step,(batch_x,batch_y) in enumerate(loader):
        for net,opt,l_his in zip(nets,optimizers,loss_his):
            output=net(batch_x)
            loss=loss_func(output,batch_y)
            opt.zero_grad()
            loss.backward()
            opt.step()
            l_his.append(loss.item())  # record the scalar loss for this optimizer
labels=['SGD','Momentum','RMSprop','Adam']

print(loss_his)
for i,l_his in enumerate(loss_his):
    plt.plot(l_his,label=labels[i])

plt.legend(loc='best')
plt.xlabel('Steps')
plt.ylabel('Loss')
plt.ylim((0,0.4))
plt.show()

The script produces the training comparison plot below (loss vs. training step for each optimizer), from which you can see how quickly each optimizer converges.
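As a follow-up (an addition, not in the original post), here is a minimal sketch that compares the trained networks numerically rather than visually, reusing the nets, labels, x, y, and loss_func defined above:

with torch.no_grad():  # gradients are not needed for evaluation
    for label, net in zip(labels, nets):
        print(label, loss_func(net(x), y).item())  # full-dataset MSE per optimizer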

