Understanding PyTorch Source Code: pytorch/examples/mnist


from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()  # call the parent class constructor
        self.conv1 = nn.Conv2d(1, 32, 3, 1)  # 1 input channel -> 32 output channels, kernel_size=3, stride=1
        self.conv2 = nn.Conv2d(32, 64, 3, 1)  # 32 -> 64 channels
        self.dropout1 = nn.Dropout2d(0.25)  # Dropout2d zeroes whole feature-map channels with probability 0.25
        self.dropout2 = nn.Dropout2d(0.5)
        self.fc1 = nn.Linear(9216, 128)  # fully connected: 9216 -> 128 (9216 = 64 channels * 12 * 12, see the check below)
        self.fc2 = nn.Linear(128, 10)
    # forward defines how an input flows through the layers declared above
    def forward(self, x):
        x = self.conv1(x)
        # ReLU (Rectified Linear Unit) is an activation function, applied here as if it were a layer.
        # Without a non-linearity, stacked convolutions collapse into one linear map and the network
        # could only fit linearly separable data, so a non-linear activation (sigmoid, tanh, ReLU, ...)
        # usually follows each convolution.
        x = F.relu(x)
        x = self.conv2(x)
        x = F.max_pool2d(x, 2)  # 2x2 max pooling halves the spatial size: 24x24 -> 12x12
        x = self.dropout1(x)
        x = torch.flatten(x, 1)  # flatten everything but the batch dim: (N, 64, 12, 12) -> (N, 9216)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout2(x)
        x = self.fc2(x)
        output = F.log_softmax(x, dim=1)  # log-probabilities over the 10 digit classes
        return output
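
Where does the 9216 in fc1 come from? A 28x28 MNIST image shrinks to 26x26 after conv1 (3x3 kernel, stride 1, no padding), to 24x24 after conv2, and the 2x2 max-pool in forward halves that to 12x12, giving 64 * 12 * 12 = 9216 flattened features. A quick sanity check (my own sketch, run below the class definition above; ReLU is omitted because it does not change shapes):

net = Net()
x = torch.randn(1, 1, 28, 28)      # a fake batch of one MNIST image: (N, C, H, W)
x = net.conv1(x)                   # -> (1, 32, 26, 26): 28 - 3 + 1 = 26
x = net.conv2(x)                   # -> (1, 64, 24, 24)
x = F.max_pool2d(x, 2)             # -> (1, 64, 12, 12)
print(torch.flatten(x, 1).shape)   # torch.Size([1, 9216]) = 64 * 12 * 12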


def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    # Two modes:
    # model.train(): enables BatchNorm and Dropout (training behavior)
    # model.eval(): disables them; BatchNorm switches to its learned running statistics and Dropout
    # becomes a no-op, instead of averaging anything at test time. Without eval(), a small test
    # batch size can make per-batch BN statistics wildly off (a classic symptom is heavily distorted
    # colors in generated images). Use eval() during testing; a concrete Dropout demo follows this function.
    # enumerate() attaches an index (batch_idx) to each batch from train_loader
    for batch_idx, (data, target) in enumerate(train_loader):
        # move the batch to the device; from here on we just use the returned tensors
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()  # clear the gradients accumulated by the previous step
        output = model(data)
        loss = F.nll_loss(output, target)  # negative log-likelihood loss (pairs with log_softmax)
        loss.backward()  # backward pass: accumulate gradients into each parameter's .grad
        optimizer.step()  # update the parameters using those gradients
        # these calls share state through the model's parameters, so nothing needs to be passed between them
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
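
The train()/eval() distinction in the comments above is easy to demonstrate (a small experiment of my own, not from the example): in train mode Dropout randomly zeroes activations and rescales the survivors, in eval mode it passes the input through unchanged.

drop = nn.Dropout(p=0.5)
x = torch.ones(8)
drop.train()    # training mode (the default for a fresh module)
print(drop(x))  # roughly half the entries zeroed, survivors scaled by 1 / (1 - p) = 2.0
drop.eval()     # evaluation mode
print(drop(x))  # identical to x: dropout is a no-op in eval mode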


def test(args, model, device, test_loader):
    model.eval()
    test_loss = 0
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.to(device), target.to(device)
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
            pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)

    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
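
One detail worth pinning down: F.log_softmax in the model plus F.nll_loss here is exactly F.cross_entropy computed on raw logits, which is why this pairing appears throughout PyTorch code. A quick check (my own sketch):

logits = torch.randn(4, 10)            # fake scores: 4 samples, 10 classes
target = torch.tensor([3, 1, 9, 0])    # fake labels
a = F.nll_loss(F.log_softmax(logits, dim=1), target)
b = F.cross_entropy(logits, target)
print(torch.allclose(a, b))            # True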


def main():
    # Training settings
    # all optional flags, exposed for hyper-parameter tuning
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')  # description shown by --help
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=14, metavar='N',
                        help='number of epochs to train (default: 14)')
    parser.add_argument('--lr', type=float, default=1.0, metavar='LR',
                        help='learning rate (default: 1.0)')
    parser.add_argument('--gamma', type=float, default=0.7, metavar='M',
                        help='Learning rate step gamma (default: 0.7)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')

    parser.add_argument('--save-model', action='store_true', default=False,
                        help='For Saving the current Model')
    args = parser.parse_args()  # parse the command line; the flags are attributes of args from here on
    # (a flag with neither a value nor a default parses to None, which is falsy in boolean tests)
    use_cuda = not args.no_cuda and torch.cuda.is_available()  # use CUDA only if available and not disabled

    torch.manual_seed(args.seed)  # fix the random seed for reproducible runs

    device = torch.device("cuda" if use_cuda else "cpu")  # run on the GPU when possible, otherwise the CPU

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}  # worker processes + pinned memory speed up host-to-GPU copies
    # load the training set
    train_loader = torch.utils.data.DataLoader(
        # the datasets module under torchvision; downloads MNIST if it is not found locally
        datasets.MNIST('../data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),  # PIL image -> FloatTensor scaled to [0, 1]
                           transforms.Normalize((0.1307,), (0.3081,))  # normalize with MNIST's mean and std
                       ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)  # samples per batch; reshuffled every epoch
    # load the test set
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False, transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)
    # move the model onto the device
    model = Net().to(device)
    # set up the optimizer; this example uses Adadelta (an adaptive learning-rate method)
    optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
    # StepLR decays the learning rate by a factor of gamma every step_size scheduler steps.
    # Here a "step" is one scheduler.step() call, issued once per epoch below -- do not confuse
    # it with one iteration/batch. (A short decay sketch follows after main().)
    scheduler = StepLR(optimizer, step_size=1, gamma=args.gamma)
    for epoch in range(1, args.epochs + 1):  # loop over epochs
        train(args, model, device, train_loader, optimizer, epoch)
        test(args, model, device, test_loader)
        scheduler.step()  # decay the learning rate once per epoch

    if args.save_model:  # save the trained weights (state_dict); a loading sketch closes this post
        torch.save(model.state_dict(), "mnist_cnn.pt")
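
The effect of StepLR above is easy to see in isolation: with step_size=1, the learning rate after epoch e is lr0 * gamma^e. A minimal sketch (my own, reusing the imports at the top; get_last_lr() assumes a reasonably recent PyTorch):

params = [torch.zeros(1, requires_grad=True)]   # dummy parameter, only to construct an optimizer
opt = optim.Adadelta(params, lr=1.0)
sched = StepLR(opt, step_size=1, gamma=0.7)
for epoch in range(1, 5):
    opt.step()        # training would happen here; stepping the optimizer first avoids a warning
    sched.step()
    print(epoch, sched.get_last_lr())   # [0.7], then [0.49], [0.343], [0.2401]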


if __name__ == '__main__':
    main()
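
Finally, a quick way to exercise the whole thing (assuming the script is saved as main.py, as in the upstream repo): python main.py --epochs 1 --save-model. The saved weights can then be loaded back into a fresh Net for inference; a minimal sketch of my own, where the file name matches the torch.save call above:

model = Net()
model.load_state_dict(torch.load("mnist_cnn.pt", map_location="cpu"))
model.eval()                                        # freeze Dropout for inference
with torch.no_grad():
    fake_digit = torch.randn(1, 1, 28, 28)          # stand-in for a real, normalized image
    print(model(fake_digit).argmax(dim=1).item())   # predicted class, 0-9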

