Code 1
Training code:
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import torchvision.transforms as transforms
trainset = torchvision.datasets.MNIST(root='./data', train=True,
                                      download=True, transform=transforms.ToTensor())
trainloader = torch.utils.data.DataLoader(trainset, batch_size=4,
                                          shuffle=True, num_workers=2)
testset = torchvision.datasets.MNIST(root='./data', train=False,
                                     download=True, transform=transforms.ToTensor())
testloader = torch.utils.data.DataLoader(testset, batch_size=4,
                                         shuffle=False, num_workers=2)
print("train_data:", trainset.data.size())
print("train_labels:", trainset.targets.size())
print("test_data:", testset.data.size())
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 6, 5, 1, 2)    # padding=2 keeps the 28x28 input size
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        x = x.view(x.size()[0], -1)              # flatten to (batch, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
net = Net()
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
print(device)
net = net.to(device)
for epoch in range(2):
    running_loss = 0.0
    for i, data in enumerate(trainloader, 0):
        inputs, labels = data[0].to(device), data[1].to(device)
        optimizer.zero_grad()
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        if i % 2000 == 1999:                     # print the average loss every 2000 mini-batches
            print('[%d, %5d] loss: %.3f' %
                  (epoch + 1, i + 1, running_loss / 2000))
            running_loss = 0.0
print('Finished Training')
Test code:
net.eval()  # switch to evaluation mode (good practice, though this net has no dropout/batch-norm)
correct = 0
total = 0
with torch.no_grad():
    for data in testloader:
        images, labels = data[0].to(device), data[1].to(device)
        outputs = net(images)
        _, predicted = torch.max(outputs, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
print('Accuracy of the network on the 10000 test images: %d %%' % (
    100 * correct / total))
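The fc1 input size of 16 * 5 * 5 follows from the layer geometry: padding=2 in conv1 keeps the 28x28 input size, the first pool halves it to 14x14, the 5x5 conv2 without padding shrinks it to 10x10, and the second pool halves it to 5x5 over 16 channels. A quick sanity check with a dummy batch (run after the Net above is instantiated):

x = torch.randn(1, 1, 28, 28)              # (batch, channels, height, width)
x = net.pool(F.relu(net.conv1(x)))         # padding=2 keeps 28x28; pooling -> (1, 6, 14, 14)
x = net.pool(F.relu(net.conv2(x)))         # 5x5 conv, no padding -> 10x10; pooling -> (1, 16, 5, 5)
print(x.shape)                             # flattening gives 16 * 5 * 5 = 400 features per image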
Code 2
Source: https://blog.csdn.net/u014453898/article/details/90707987
Training code:
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms

lr = 0.01                # learning rate
momentum = 0.5
log_interval = 10        # how many batches between log messages
epochs = 10
batch_size = 64
test_batch_size = 1000
class LeNet(nn.Module):
    def __init__(self):
        super(LeNet, self).__init__()
        self.conv1 = nn.Sequential(                # input size: (1, 28, 28)
            nn.Conv2d(1, 6, 5, 1, 2),              # padding=2 keeps input and output sizes equal
            nn.ReLU(),                             # output size: (6, 28, 28)
            nn.MaxPool2d(kernel_size=2, stride=2), # output size: (6, 14, 14)
        )
        self.conv2 = nn.Sequential(
            nn.Conv2d(6, 16, 5),
            nn.ReLU(),                             # output size: (16, 10, 10)
            nn.MaxPool2d(2, 2)                     # output size: (16, 5, 5)
        )
        self.fc1 = nn.Sequential(
            nn.Linear(16 * 5 * 5, 120),
            nn.ReLU()
        )
        self.fc2 = nn.Sequential(
            nn.Linear(120, 84),
            nn.ReLU()
        )
        self.fc3 = nn.Linear(84, 10)

    # Define the forward pass; the input is x
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        # nn.Linear expects a flat feature vector per sample, so flatten the
        # multi-dimensional tensor to (batch, features)
        x = x.view(x.size()[0], -1)
        x = self.fc1(x)
        x = self.fc2(x)
        x = self.fc3(x)
        # Return raw logits: F.cross_entropy applies log-softmax internally, so
        # apply F.softmax(x, dim=1) only when probabilities are actually needed,
        # as in the test script below.
        return x
def train(epoch):  # training details for one epoch
    model.train()  # switch to training mode
    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.to(device)
        target = target.to(device)
        optimizer.zero_grad()                    # reset the optimizer's gradients to zero
        output = model(data)                     # forward pass through the network
        loss = F.cross_entropy(output, target)   # cross-entropy loss
        loss.backward()                          # back-propagate the gradients
        optimizer.step()                         # update the parameters after forward + backward
        if batch_idx % log_interval == 0:        # print progress
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def test():
    model.eval()   # switch to evaluation mode
    test_loss = 0  # accumulated test loss
    correct = 0    # number of correct predictions
    with torch.no_grad():  # no gradients are needed for evaluation
        for data, target in test_loader:
            data = data.to(device)
            target = target.to(device)
            output = model(data)
            # sum up the per-sample batch loss
            test_loss += F.cross_entropy(output, target, reduction='sum').item()
            # take the index of the max logit as the prediction
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()
    # the losses were summed over all samples, so divide by the dataset size
    # to get the average loss
    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')  # use the GPU if available
    train_loader = torch.utils.data.DataLoader(  # load the training data
        datasets.MNIST('../data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           # dataset-specific mean and std, as published for MNIST
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=batch_size, shuffle=True)
    test_loader = torch.utils.data.DataLoader(   # load the test data
        datasets.MNIST('../data', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))  # same normalization as for training
        ])),
        batch_size=test_batch_size, shuffle=True)
    model = LeNet()                              # instantiate the network
    model = model.to(device)
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum)  # initialize the optimizer
    for epoch in range(1, epochs + 1):           # loop over the epochs
        train(epoch)
        test()
    torch.save(model, 'model.pth')               # save the whole model (class + weights, pickled)
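torch.save(model, 'model.pth') pickles the entire module, which is why the test script below must be able to import the LeNet class before torch.load can reconstruct it. A common alternative, shown here as a sketch (the file name model_weights.pth is illustrative), is to save only the parameters and rebuild the architecture explicitly:

torch.save(model.state_dict(), 'model_weights.pth')   # save only the learned parameters

model = LeNet()                                       # rebuild the architecture...
model.load_state_dict(torch.load('model_weights.pth', map_location=device))
model.eval()                                          # ...and load the weights into it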
Test code:
import torch
import cv2
import torch.nn.functional as F
# Important: this import looks unused, but torch.load unpickles the whole model
# and needs the LeNet class definition to be importable, or loading will fail.
from modela import LeNet
from torchvision import transforms
import numpy as np

if __name__ == '__main__':
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = torch.load('model.pth')              # load the pickled model
    model = model.to(device)
    model.eval()                                 # switch to evaluation mode
    img = cv2.imread("3.jpg")                    # read the image to classify (assumed to be 28x28)
    trans = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    # convert to grayscale, because the MNIST images are single-channel
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = trans(img)
    img = img.to(device)
    # add a batch dimension: the model expects 4-D input [batch, channel, H, W],
    # while a single image is only 3-D [channel, H, W]; this gives [1, 1, 28, 28]
    img = img.unsqueeze(0)
    with torch.no_grad():                        # no gradients are needed for inference
        output = model(img)
        prob = F.softmax(output, dim=1)          # probabilities over the 10 classes
    # GPU tensors must be moved to the CPU before converting to numpy
    prob = prob.cpu().numpy()
    print(prob)
    pred = np.argmax(prob)                       # pick the most probable class
    print(pred.item())
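The script above assumes 3.jpg is already a 28x28 image of a bright digit on a dark background, as in MNIST. For an arbitrary photo, a hedged preprocessing sketch (the resize target matches the training resolution; the inversion is only needed when the digit is dark on a light background):

img = cv2.imread("3.jpg", cv2.IMREAD_GRAYSCALE)  # load directly as grayscale
img = cv2.resize(img, (28, 28))                  # match MNIST's 28x28 resolution
img = 255 - img                                  # invert a dark-on-light digit to match MNIST
# then apply the same trans / unsqueeze / forward steps as above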
Code 3
Source: https://www.cnblogs.com/denny402/p/7506523.html
Training code:
import torch
import torchvision
import torch.utils.data.dataloader as Data

train_data = torchvision.datasets.MNIST(
    './mnist', train=True, transform=torchvision.transforms.ToTensor(), download=True
)
test_data = torchvision.datasets.MNIST(
    './mnist', train=False, transform=torchvision.transforms.ToTensor()
)
print("train_data:", train_data.data.size())
print("train_labels:", train_data.targets.size())
print("test_data:", test_data.data.size())
train_loader = Data.DataLoader(dataset=train_data, batch_size=64, shuffle=True)
test_loader = Data.DataLoader(dataset=test_data, batch_size=64)
class Net(torch.nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = torch.nn.Sequential(
            torch.nn.Conv2d(1, 32, 3, 1, 1),     # padding=1 keeps 28x28
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2))               # -> (32, 14, 14)
        self.conv2 = torch.nn.Sequential(
            torch.nn.Conv2d(32, 64, 3, 1, 1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2)                # -> (64, 7, 7)
        )
        self.conv3 = torch.nn.Sequential(
            torch.nn.Conv2d(64, 64, 3, 1, 1),
            torch.nn.ReLU(),
            torch.nn.MaxPool2d(2)                # -> (64, 3, 3)
        )
        self.dense = torch.nn.Sequential(
            torch.nn.Linear(64 * 3 * 3, 128),
            torch.nn.ReLU(),
            torch.nn.Linear(128, 10)
        )

    def forward(self, x):
        conv1_out = self.conv1(x)
        conv2_out = self.conv2(conv1_out)
        conv3_out = self.conv3(conv2_out)
        res = conv3_out.view(conv3_out.size(0), -1)  # flatten to (batch, 64 * 3 * 3)
        out = self.dense(res)
        return out
model = Net()
print(model)
optimizer = torch.optim.Adam(model.parameters())
loss_func = torch.nn.CrossEntropyLoss()

for epoch in range(10):
    print('epoch {}'.format(epoch + 1))
    # training-----------------------------
    train_loss = 0.
    train_acc = 0.
    for batch_x, batch_y in train_loader:
        out = model(batch_x)
        loss = loss_func(out, batch_y)
        # scale the batch-mean loss by the batch size so that dividing by the
        # dataset size below yields a true per-sample average
        train_loss += loss.item() * batch_x.size(0)
        pred = torch.max(out, 1)[1]
        train_correct = (pred == batch_y).sum()
        train_acc += train_correct.item()
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    print('Train Loss: {:.6f}, Acc: {:.6f}'.format(train_loss / (len(
        train_data)), train_acc / (len(train_data))))
Test code:
model.eval()
eval_loss = 0.
eval_acc = 0.
with torch.no_grad():  # disable gradient tracking during evaluation
    for batch_x, batch_y in test_loader:
        out = model(batch_x)
        loss = loss_func(out, batch_y)
        eval_loss += loss.item() * batch_x.size(0)  # per-sample average, as in training
        pred = torch.max(out, 1)[1]
        num_correct = (pred == batch_y).sum()
        eval_acc += num_correct.item()
print('Test Loss: {:.6f}, Acc: {:.6f}'.format(eval_loss / (len(
    test_data)), eval_acc / (len(test_data))))
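The 64 * 3 * 3 input to the dense block follows from three stride-2 pools on a 28x28 image: each 3x3 convolution with padding=1 preserves the spatial size, while each pool floors it in half, 28 -> 14 -> 7 -> 3, leaving 64 channels after conv3. A quick check with a dummy batch:

x = torch.randn(1, 1, 28, 28)
x = model.conv1(x)   # -> (1, 32, 14, 14)
x = model.conv2(x)   # -> (1, 64, 7, 7)
x = model.conv3(x)   # -> (1, 64, 3, 3), so flattening yields 64 * 3 * 3 = 576 features
print(x.shape)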