在上一篇博客中,我們快速搭建,訓練了一個小型的網絡,但是存在一下問題。
- 僅僅是使用了 CPU,並沒有使用 GPU 進行訓練;
- 學習率太高,導致最后數值提不上去;
針對這2個問題,我們進行統一的解決。
並最后寫一個 detect 模塊,將我們寫出的網絡進行應用。
pytorch 使用 GPU 進行訓練
在代碼中,修改訓練設備為 GPU 較為簡單,主要有兩種方法,而且主要是對 3 個對象進行使用
主要是 模型、數據和損失函數
使用 .cuda() 方法
簡述幾個修改的地方
啟動GPU
# Move the three GPU-relevant objects (data, model, loss) onto the GPU.
# NOTE: .cuda() is not in-place for tensors -- always reassign the result.
if torch.cuda.is_available():
    images = images.cuda()
    targets = targets.cuda()
    my_model = my_model.cuda()  # fixed: original read `my_model_cuda()`, a typo for `.cuda()`
    my_loss_fn = my_loss_fn.cuda()
修改學習率
learning_rate = 1e-3
修改之后的代碼
import torch
import torchvision
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import time
# prepare the data for training and testing (CIFAR-10, converted to tensors)
data_path = "../data_cifar10"
dataset_train = torchvision.datasets.CIFAR10(root=data_path, train=True, transform=torchvision.transforms.ToTensor(),
download=True)
dataset_test = torchvision.datasets.CIFAR10(root=data_path, train=False, transform=torchvision.transforms.ToTensor(),
download=True)
# dataset_train.cuda() does not exist -- Dataset objects cannot be moved to the GPU; calling it raises an error
dataloader_train = DataLoader(dataset_train, batch_size=64)
dataloader_test = DataLoader(dataset_test, batch_size=64)
# dataloader_train.cuda() does not exist either -- only tensors and nn.Modules have .cuda()
# create the module class
class MyModel(nn.Module):
    """CIFAR-10 classifier: three conv(5x5)+maxpool(2x2) stages, then two linear layers.

    Spatial flow for a 32x32x3 input: 32->16->8->4, ending at 64*4*4 = 1024 features.
    """

    def __init__(self):
        super(MyModel, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, padding=2),   # stride defaults to 1
            nn.MaxPool2d(2),                  # stride defaults to kernel_size
            nn.Conv2d(32, 32, 5, padding=2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, padding=2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(1024, 64),
            nn.Linear(64, 10),
        )

    def forward(self, x):
        """Map a batch of images (N, 3, 32, 32) to class logits (N, 10)."""
        return self.model(x)
# create the neural network, loss_function and optimization
logdir = "../logs"
writer = SummaryWriter(log_dir=logdir)
learning_rate = 1e-3
my_model = MyModel()
if torch.cuda.is_available():
    my_model = my_model.cuda()
my_loss_fn = torch.nn.CrossEntropyLoss()
if torch.cuda.is_available():
    my_loss_fn = my_loss_fn.cuda()
my_optimization = torch.optim.SGD(my_model.parameters(), lr=learning_rate)
# NOTE: optimizers have no .cuda(); they operate on whatever device the parameters live on.

max_epoch = 200
train_step = 0
test_step = 0
train_size = len(dataset_train)
test_size = len(dataset_test)
start_time = time.time()

for epoch in range(max_epoch):
    print("-------Epoch {}-------".format(epoch))
    # ---- train ----
    loss_sum = 0.0
    train_step = 0
    my_model.train()
    for images, targets in dataloader_train:
        if torch.cuda.is_available():
            images = images.cuda()
            targets = targets.cuda()
        output = my_model(images)
        cur_loss = my_loss_fn(output, targets)
        # .item() extracts a plain float; accumulating the tensor itself would
        # keep every batch's autograd graph alive for the whole epoch (memory leak).
        loss_sum += cur_loss.item()
        # optimize the model parameters
        my_optimization.zero_grad()
        cur_loss.backward()
        my_optimization.step()
        if train_step % 100 == 0:
            print(f"epoch:{epoch}, train_step:{train_step}, cur_loss:{cur_loss}")
        train_step += 1
    writer.add_scalar("epoch:train_loss", loss_sum, epoch)
    print(f"--epoch {epoch}:train_loss {loss_sum}")

    # ---- test (no gradients needed) ----
    my_model.eval()
    right_classify_cnt = 0
    loss_sum = 0.0
    with torch.no_grad():
        for images, targets in dataloader_test:
            if torch.cuda.is_available():
                images = images.cuda()
                targets = targets.cuda()
            output = my_model(images)
            cur_loss = my_loss_fn(output, targets)
            loss_sum += cur_loss.item()
            right_classify_cnt += (output.argmax(dim=1) == targets).sum().item()
    writer.add_scalar("epoch:test_loss", loss_sum, epoch)
    writer.add_scalar("epoch:test_accuracy", right_classify_cnt / test_size, epoch)
    print(f"--epoch {epoch}:test_loss {loss_sum}")
    print(f"--epoch {epoch}:test_accuracy {right_classify_cnt / test_size}")

    end_time = time.time()
    # fixed: the original merged this print with the following `if` on one line,
    # leaving an unterminated f-string (a SyntaxError).
    print(f"#### my_time:{end_time - start_time}")
    # checkpoint every 5 epochs
    if epoch % 5 == 0:
        torch.save(my_model.state_dict(), f="./epoch_1_{}.pth".format(epoch))

writer.close()
使用 .to() 方法
該方法是最為常用的方法,比起每次都要配合 if torch.cuda.is_available() 判斷來使用的 .cuda() 方法,
要方便多了
舉個例子
# Preferred approach: pick the device once, then .to(device) everywhere.
# The same code runs unchanged on machines with or without a GPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
images = images.to(device)
targets = targets.to(device)
my_model = my_model.to(device)
my_loss_fn = my_loss_fn.to(device)
修改之后的代碼是
import torch
import torchvision
import torch.nn as nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
import time
# prepare the data for training and testing (CIFAR-10, converted to tensors)
# Select the target device once; all later .to(device) calls reuse it.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
data_path = "../data_cifar10"
dataset_train = torchvision.datasets.CIFAR10(root=data_path, train=True, transform=torchvision.transforms.ToTensor(),
download=True)
dataset_test = torchvision.datasets.CIFAR10(root=data_path, train=False, transform=torchvision.transforms.ToTensor(),
download=True)
# dataset_train.cuda() does not exist -- Dataset objects cannot be moved to the GPU; calling it raises an error
dataloader_train = DataLoader(dataset_train, batch_size=64)
dataloader_test = DataLoader(dataset_test, batch_size=64)
# dataloader_train.cuda() does not exist either -- only tensors and nn.Modules have .cuda()
# create the module class
class MyModel(nn.Module):
    """Small CNN for CIFAR-10: three conv(5x5)+maxpool(2x2) stages, then two linear layers.

    Spatial flow for a 32x32x3 input: 32->16->8->4; 64 channels * 4 * 4 = 1024 features.
    """

    def __init__(self):
        super(MyModel, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(in_channels=3, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Conv2d(in_channels=32, out_channels=64, kernel_size=5, stride=1, padding=2),
            nn.MaxPool2d(kernel_size=2),
            nn.Flatten(),
            nn.Linear(in_features=1024, out_features=64),
            nn.Linear(in_features=64, out_features=10)
        )

    def forward(self, x):
        """Map a batch of images (N, 3, 32, 32) to class logits (N, 10)."""
        return self.model(x)
# create the neural network, loss_function and optimization
logdir = "../logs"
writer = SummaryWriter(log_dir=logdir)
learning_rate = 1e-3
my_model = MyModel().to(device)
my_loss_fn = torch.nn.CrossEntropyLoss().to(device)
my_optimization = torch.optim.SGD(my_model.parameters(), lr=learning_rate)

max_epoch = 200
train_step = 0
test_step = 0
train_size = len(dataset_train)
test_size = len(dataset_test)
start_time = time.time()

for epoch in range(max_epoch):
    print("-------Epoch {}-------".format(epoch))
    # ---- train ----
    loss_sum = 0.0
    train_step = 0
    my_model.train()
    for images, targets in dataloader_train:
        images = images.to(device)
        targets = targets.to(device)
        output = my_model(images)
        cur_loss = my_loss_fn(output, targets)
        # .item() extracts a plain float; accumulating the tensor itself would
        # keep every batch's autograd graph alive for the whole epoch (memory leak).
        loss_sum += cur_loss.item()
        # optimize the model parameters
        my_optimization.zero_grad()
        cur_loss.backward()
        my_optimization.step()
        if train_step % 100 == 0:
            print(f"epoch:{epoch}, train_step:{train_step}, cur_loss:{cur_loss}")
        train_step += 1
    writer.add_scalar("epoch:train_loss", loss_sum, epoch)
    print(f"--epoch {epoch}:train_loss {loss_sum}")

    # ---- test (no gradients needed) ----
    my_model.eval()
    right_classify_cnt = 0
    loss_sum = 0.0
    with torch.no_grad():
        for images, targets in dataloader_test:
            images = images.to(device)
            targets = targets.to(device)
            output = my_model(images)
            cur_loss = my_loss_fn(output, targets)
            loss_sum += cur_loss.item()
            right_classify_cnt += (output.argmax(dim=1) == targets).sum().item()
    writer.add_scalar("epoch:test_loss", loss_sum, epoch)
    writer.add_scalar("epoch:test_accuracy", right_classify_cnt / test_size, epoch)
    print(f"--epoch {epoch}:test_loss {loss_sum}")
    print(f"--epoch {epoch}:test_accuracy {right_classify_cnt / test_size}")

    end_time = time.time()
    # fixed: the original merged this print with the following `if` on one line,
    # leaving an unterminated f-string (a SyntaxError).
    print(f"#### my_time:{end_time - start_time}")
    # checkpoint every 5 epochs
    if epoch % 5 == 0:
        torch.save(my_model.state_dict(), f="./epoch_2_{}.pth".format(epoch))

writer.close()
對網絡進行應用
有條件的同學,可以考慮寫一個可視化的界面,這里我先寫一個沒有界面的 cmd 進行測試
無非就是加載模型,然后再進行應用。
不過這里,我們需要首先對輸入的圖片進行規范化
Image 讀取圖片,然后 ToTensor()轉化為tensor數據類型
然后 to(device) 放入 CPU 或者是 GPU
進行 convert格式轉化
然后進行 resize 裁剪圖片,送到我們的網絡中去。
最后網絡輸出結果->(可以考慮使用字典進行映射):
這里,可以查看下convert 和 resize 的作用
import torch
import torchvision
import torch.nn as nn
from mymodel import *
from torch.utils.tensorboard import SummaryWriter
from PIL import Image
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Load the trained network weights.
my_model = MyModel()
my_model.to(device)
# map_location lets a checkpoint saved on a GPU machine load on a CPU-only one.
my_model.load_state_dict(torch.load("./project_models/epoch_1_195.pth", map_location=torch.device('cpu')))
# Switch to inference mode (freezes dropout / batch-norm statistics).
my_model.eval()
# Ground-truth labels of the four local test images, in file order.
image_class_list = [
    'airplane', 'frog', 'dog', 'cat'
]
# CIFAR-10 index -> class-name mapping for readable output.
idx_to_class = {
    0:'airplane', 1:'automobile', 2:'bird', 3:'cat', 4:'deer', 5:'dog', 6:'frog',
    7:'horse', 8:'ship', 9:'truck'
}
with torch.no_grad():
    for i in range(0, 4):
        image = Image.open(f"../test_data/img_{i}.png")
        image = image.convert("RGB")    # PNG may carry an alpha channel; force 3 channels
        image = image.resize((32, 32))  # the network expects CIFAR-size 32x32 input
        trans_totensor = torchvision.transforms.ToTensor()
        image = trans_totensor(image)
        # fixed: Tensor.to() is NOT in-place -- the original discarded the result,
        # so the tensor never moved and inference crashed on a GPU device.
        image = image.to(device)
        image = torch.reshape(image, (1, 3, 32, 32))
        output = my_model(image)
        cur_class = output.argmax(dim=1)
        print(f"image:{i} ({image_class_list[i]})-> output:{cur_class}({idx_to_class[cur_class.item()]})")
關於硬件資源問題
很多同學可能像我一樣,電腦沒有GPU,或者是GPU不是NVIDIA、或者是驅動過於老舊並且無法更新,那么GPU如何獲取呢?
google colab
提供了免費的算力資源,可以進行使用,但是你需要google
賬號,可以去 某寶 進行獲取。