『PyTorch』Part 4: A First Look at PyTorch Neural Networks via LeNet (I)
# Author : Hellcat
# Time : 2018/2/11

import torch as t
import torch.nn as nn
import torch.nn.functional as F

class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # for 32x32 CIFAR-10 inputs: 32 -> 28 (conv1) -> 14 (pool) -> 10 (conv2) -> 5 (pool),
        # so the flattened feature size is 16*5*5
        self.fc1 = nn.Linear(16*5*5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
        x = x.view(x.size()[0], -1)  # flatten all dims except the batch dim
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

if __name__ == "__main__":
    net = LeNet()

    # ######### train the network #########
    from torch import optim

    # initialize the loss function & optimizer
    loss_fn = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)

    for epoch in range(2):
        running_loss = 0.0
        # step counts iterations; trainloader (built in the omitted
        # data-processing code) yields batches of inputs and labels
        for step, data in enumerate(trainloader, 0):
            inputs, labels = data
            inputs, labels = t.autograd.Variable(inputs), t.autograd.Variable(labels)

            # zero the gradients
            optimizer.zero_grad()

            # forward
            outputs = net(inputs)

            # backward
            loss = loss_fn(outputs, labels)
            loss.backward()

            # update
            optimizer.step()

            running_loss += loss.data[0]  # loss.item() in PyTorch >= 0.4
            if step % 2000 == 1999:
                print("[{0:d}, {1:5d}] loss: {2:.3f}".format(epoch+1, step+1, running_loss/2000))
                running_loss = 0.0
    print("Finished Training")
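The trainloader referenced above is created in the data-processing code that the post leaves out. As a rough idea of what it could look like, here is a minimal sketch using torchvision (the batch size and normalization constants are illustrative assumptions, not taken from the original):

import torch as t
import torchvision
import torchvision.transforms as transforms

# turn PIL images into tensors and scale each channel to roughly [-1, 1]
transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

# download CIFAR-10 and wrap the training split in a batching DataLoader
trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
                                        download=True, transform=transform)
trainloader = t.utils.data.DataLoader(trainset, batch_size=4,
                                      shuffle=True, num_workers=2)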
This is an example of classifying CIFAR-10 with LeNet. The data-processing code is omitted since it is not the focus; the point is to build an intuitive picture of classification with torch. The steps are as follows (a self-contained sketch of the loop body appears right after the list):
Initialize the network
Initialize the loss function & optimizer
Enter the step loop:
Zero the gradients
Forward pass
Compute this step's loss
Backward pass
Update the parameters
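To see the loop steps in isolation, here is a self-contained sketch with a toy nn.Linear model and a random batch (both stand-ins, not part of the original example):

import torch as t
import torch.nn as nn
from torch import optim

# toy stand-in model, loss, and optimizer
model = nn.Linear(10, 2)
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01)

# one random 4-sample batch with integer class labels
inputs = t.autograd.Variable(t.randn(4, 10))
labels = t.autograd.Variable(t.LongTensor([0, 1, 0, 1]))

optimizer.zero_grad()            # zero the gradients
outputs = model(inputs)          # forward pass
loss = loss_fn(outputs, labels)  # compute this step's loss
loss.backward()                  # backward pass
optimizer.step()                 # update the parameters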
Since a pytorch network is a class, the downstream processing is not hard (leaving persistence aside). Prediction deserves a mention: we can simply run test_out = net(Variable(test_data)); the output is a Variable of per-class scores (forward applies no softmax, so they are not yet probabilities), and all we need to call is:
_, predict = t.max(test_out, 1)
and that is it. This works because, when dim is specified, torch.max fuses the functionality of max and argmax, returning both the maximum values and their indices:
>>> a = torch.randn(4, 4)
>>> a

 0.0692  0.3142  1.2513 -0.5428
 0.9288  0.8552 -0.2073  0.6409
 1.0695 -0.0101 -2.4507 -1.2230
 0.7426 -0.7666  0.4862 -0.6628
[torch.FloatTensor of size 4x4]

>>> torch.max(a, 1)
(
 1.2513
 0.9288
 1.0695
 0.7426
[torch.FloatTensor of size 4]
,
 2
 0
 0
 0
[torch.LongTensor of size 4]
)
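Putting the prediction step to work, a test-accuracy loop could look like this sketch (testloader is an assumption, built the same way as trainloader but with train=False):

correct = 0
total = 0
for data in testloader:
    images, labels = data
    test_out = net(t.autograd.Variable(images))
    # .data pulls the tensor out of the Variable; max over dim 1
    # returns (values, indices), and the indices are the predicted classes
    _, predict = t.max(test_out.data, 1)
    total += labels.size(0)
    correct += (predict == labels).sum()
print("Accuracy on the test set: {0:.1f}%".format(100.0 * correct / total))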
None of torch's more advanced features are used here; the goal of this post is simply a basic understanding of how to work with neural networks in torch.