Neural Network Learning -- PyTorch Learning 06: Transfer Learning with VGG16


Training a network from scratch takes too long, so we use transfer learning instead: we take a model that has already been trained, fine-tune it, and retrain part of it, which gets us good results much faster.
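The recipe below follows the standard pattern: load a pretrained network, freeze its convolutional layers, and swap in a new trainable classifier head. As a minimal preview of that pattern (the concrete layer sizes appear in the full script below):

import torch
from torchvision import models

model = models.vgg16(pretrained=True)    # 1. load ImageNet-pretrained weights
for param in model.parameters():
    param.requires_grad = False          # 2. freeze every pretrained layer
model.classifier = torch.nn.Sequential(  # 3. replace the head; new layers are trainable
    torch.nn.Linear(25088, 2))           #    (a single layer here only to illustrate)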

import torch
import torchvision
from torchvision import datasets, models, transforms
import os
from torch.autograd import Variable
import matplotlib.pyplot as plt
import time

data_dir = "DogsVSCats"
data_transform = {x: transforms.Compose([transforms.Resize([224, 224]),  # 設置尺寸
                                        transforms.ToTensor(),  # 轉為Tensor
                                        transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5, 0.5, 0.5])])  # 標准化
                  for x in {"train", "valid"}}  # {"train":"訓練集數據格式","valid":"測試集數據格式"}
image_datasets = {x: datasets.ImageFolder(root=os.path.join(data_dir, x),  # 載入數據
                                         transform = data_transform[x])
                  for x in {"train", "valid"}}  # {"train":"訓練集","valid":"測試集"}
dataloader = {x: torch.utils.data.DataLoader(dataset=image_datasets[x],
                                            batch_size=16,
                                            shuffle=True)
              for x in {"train", "valid"}}  # {包裝16個為一個批次"train":"訓練集數據載入","valid":"測試集數據載入"}
X_example, y_example = next(iter(dataloader["train"]))  # 迭代得到一個批次的樣本
example_classes = image_datasets["train"].classes
index_classes = image_datasets["train"].class_to_idx
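# Optional sanity check: preview the batch that was just loaded. This sketch
# uses the matplotlib import from above; the un-normalization reverses the
# Normalize(mean=0.5, std=0.5) applied by the transform.
img = torchvision.utils.make_grid(X_example)  # stitch the batch into one image grid
img = img.numpy().transpose([1, 2, 0])        # CHW -> HWC layout for matplotlib
img = img * 0.5 + 0.5                         # undo the normalization for display
print([example_classes[int(i)] for i in y_example])
plt.imshow(img)
plt.show()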

model = models.vgg16(pretrained=True)  # load VGG16 with weights pretrained on ImageNet
for param in model.parameters():  # freeze the pretrained layers so they are not updated
    param.requires_grad = False
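# All pretrained convolutional weights are now frozen; only layers created
# after this point will receive gradient updates during training.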

model.classifier = torch.nn.Sequential(  # replace the fully connected head; new layers default to requires_grad=True
    torch.nn.Linear(25088, 4096),
    torch.nn.ReLU(),
    torch.nn.Dropout(p=0.5),
    torch.nn.Linear(4096, 4096),
    torch.nn.ReLU(),  # the stock VGG16 head has a ReLU here as well
    torch.nn.Dropout(p=0.5),
    torch.nn.Linear(4096, 2))  # two output classes: cat and dog
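# Optional sanity check: only the new classifier head should be trainable
# (about 119.6 million parameters for the layers above).
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print("Trainable parameters:", trainable)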
Use_gpu = torch.cuda.is_available()
if Use_gpu:  # move the model to the GPU if CUDA is available
    model = model.cuda()

loss_f = torch.nn.CrossEntropyLoss()  # cross-entropy loss for classification
optimizer = torch.optim.Adam(model.classifier.parameters(), lr=0.00001)  # Adam updates only the new classifier parameters

epoch_n = 5
time_open = time.time()

for epoch in range(epoch_n):
    print("Epoch {}/{}".format(epoch, epoch_n - 1))
    print("-" * 10)
    for phase in ["train", "valid"]:  # a list keeps the order deterministic: train first, then validate
        if phase == "train":
            print("Training...")
            model.train(True)  # enable Dropout during training
        else:
            print("Validating...")
            model.eval()  # disable Dropout during evaluation

        running_loss = 0.0
        running_corrects = 0
        for batch, data in enumerate(dataloader[phase], 1):  # enumerate from 1 to get batch index and data
            X, y = data
            if Use_gpu:
                X, y = Variable(X.cuda()), Variable(y.cuda())  # move the batch to the GPU
            else:
                X, y = Variable(X), Variable(y)  # Variable is a no-op wrapper in PyTorch >= 0.4
            y_pred = model(X)  # forward pass
            _, pred = torch.max(y_pred, 1)  # predicted class index per sample
            optimizer.zero_grad()  # reset accumulated gradients
            loss = loss_f(y_pred, y)  # compute the loss

            if phase == "train":
                loss.backward()  # backpropagation
                optimizer.step()  # update the classifier parameters
            running_loss += loss.item()
            running_corrects += torch.sum(pred == y.data).item()  # count correct predictions as a Python int

            if batch % 500 == 0 and phase == "train":
                print("Batch {}, Train Loss: {:.4f}, Train ACC: {:.4f}".format(
                    batch, running_loss / batch, 100 * running_corrects / (16 * batch)))
        epoch_loss = running_loss * 16 / len(image_datasets[phase])  # average loss per sample (assumes full batches of 16)
        epoch_acc = 100 * running_corrects / len(image_datasets[phase])
        print("{} Loss: {:.4f} Acc: {:.4f}%".format(phase, epoch_loss, epoch_acc))
time_end = time.time() - time_open  # total elapsed training time in seconds
print(time_end)
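After training, the fine-tuned model can classify a single image. A minimal sketch, assuming a test image at the hypothetical path "test.jpg" and reusing the validation transform defined above:

from PIL import Image

img = Image.open("test.jpg").convert("RGB")    # "test.jpg" is a placeholder path
x = data_transform["valid"](img).unsqueeze(0)  # add a batch dimension: [1, 3, 224, 224]
if Use_gpu:
    x = x.cuda()
model.eval()                                   # disable Dropout for inference
with torch.no_grad():                          # no gradients needed at inference time
    out = model(x)
_, pred = torch.max(out, 1)
print("Predicted class:", example_classes[pred.item()])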

 

