PyTorch Code Study: ImageNet Training
Note: these are my study notes on main() in pytorch/examples/imagenet (to be continued).
# -*- coding: utf-8 -*-
import argparse                          # command-line argument parsing
import os                                # operating-system / file utilities
import shutil                            # high-level file operations
import time                              # timing utilities

import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn     # cuDNN / GPU backend settings
import torch.distributed as dist         # distributed training (PyTorch 0.2)
import torch.optim                       # optimizers
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models

# Collect, in ascending order, every name in torchvision.models that is lowercase,
# does not start with "__", and is callable (callable() checks whether the object
# can be called, i.e. whether it is a model constructor).
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))

# Create the argparse.ArgumentParser object
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# Register the command-line arguments
parser.add_argument('data', metavar='DIR',
                    help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='resnet18',
                    choices=model_names,
                    help='model architecture: ' + ' | '.join(model_names) +
                         ' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=90, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N',
                    help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=10, type=int, metavar='N',
                    help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--world-size', default=1, type=int,
                    help='number of distributed processes')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='gloo', type=str,
                    help='distributed backend')

# Module-level variable: best top-1 accuracy seen so far
best_prec1 = 0


# Main function
def main():
    global args, best_prec1
    # parse_args() parses sys.argv[1:] by default and returns a Namespace whose
    # attributes hold the parsed values, e.g. args.arch or args.lr.
    args = parser.parse_args()

    ########## initialise the process group (the default dist-url is a multicast address)
    args.distributed = args.world_size > 1
    if args.distributed:
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size)

    ##### step1: create model and set GPU
    # Load a pre-trained model or create one from scratch
    if args.pretrained:
        # str.format fills the architecture name into the message; the default arch is resnet18
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    # Move the model to the GPU(s); single-node multi-GPU training uses DataParallel
    if not args.distributed:
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            # For AlexNet/VGG only the convolutional part is data-parallelised
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()
    else:
        # Wrap model in DistributedDataParallel (CUDA only for the moment)
        model.cuda()
        model = torch.nn.parallel.DistributedDataParallel(model)
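    # --- Added note (my summary, not part of the original script) ---------------
    # DataParallel splits each mini-batch across the GPUs visible to a single
    # process, while DistributedDataParallel is used when --world-size > 1: each
    # process wraps its own replica and gradients are synchronised through the
    # backend chosen with --dist-backend (gloo by default here). For AlexNet/VGG
    # only model.features is wrapped, the usual reasoning being that replicating
    # and gathering their very large fully connected layers on every GPU would
    # cost more than it saves.
    # -----------------------------------------------------------------------------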
    ##### step2: define loss function (criterion) and optimizer
    # Cross-entropy loss, computed on the GPU
    criterion = nn.CrossEntropyLoss().cuda()

    # Optimizer: SGD with momentum (default 0.9) and weight decay (default 1e-4)
    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    #### step3: optionally resume from a checkpoint (see the notes on saving and restoring models)
    if args.resume:
        if os.path.isfile(args.resume):                   # is the path an existing file?
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)          # load a previously saved object
            args.start_epoch = checkpoint['epoch']        # continue from the saved epoch
            best_prec1 = checkpoint['best_prec1']         # restore the best accuracy so far
            model.load_state_dict(checkpoint['state_dict'])     # load_state_dict restores the model weights
            optimizer.load_state_dict(checkpoint['optimizer'])  # restore the optimizer state
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    cudnn.benchmark = True   # let cuDNN pick the fastest convolution algorithms

    ##### step4: data loading (the dataset must already be downloaded) and normalisation
    # Read the data from the train/ and val/ folders
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    # Preprocessing: normalise each channel with (x - mean) / std
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    # ImageFolder is a generic data loader
    train_dataset = datasets.ImageFolder(
        traindir,
        # preprocessing pipeline; Compose chains several transforms together
        transforms.Compose([
            transforms.RandomSizedCrop(224),    # random crop, then resize to the given size
            transforms.RandomHorizontalFlip(),  # random horizontal flip with probability 0.5
            # ToTensor converts a numpy.ndarray / PIL image with values in [0, 255] and
            # shape (H, W, C) into a torch.FloatTensor of shape (C, H, W) with values in [0, 1.0]
            transforms.ToTensor(),
            normalize,
        ]))
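    # --- Added note (my summary, not part of the original script) ---------------
    # datasets.ImageFolder expects one sub-directory per class, for example
    #     <args.data>/train/n01440764/xxx.JPEG
    #     <args.data>/val/n01440764/yyy.JPEG
    # and derives the integer labels from the sorted sub-directory names, so the
    # train/ and val/ folders must share the same class-folder layout.
    # -----------------------------------------------------------------------------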
    if args.distributed:
        # Use a DistributedSampler to restrict each process to a distinct subset
        # of the dataset.
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None

    # Training data loader (shuffle only when no distributed sampler is used)
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            # Scale resizes so the smaller edge becomes `size`; if height > width the
            # result is (size * height / width, size)
            transforms.Scale(256),
            # CenterCrop cuts out the central patch of the given size
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)   # default workers = 4

    ##### step5: evaluation only
    if args.evaluate:
        validate(val_loader, model, criterion)   # custom validate function, see below
        return

    ##### step6: train the model
    for epoch in range(args.start_epoch, args.epochs):
        # Use .set_epoch() to reshuffle the dataset partition at every epoch
        if args.distributed:
            train_sampler.set_epoch(epoch)
        adjust_learning_rate(optimizer, epoch)   # custom function, see below

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch)

        # evaluate on validation set
        prec1 = validate(val_loader, model, criterion)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': args.arch,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
            'optimizer': optimizer.state_dict(),
        }, is_best)


# Helper functions
# train: one epoch of training
def train(train_loader, model, criterion, optimizer, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        target = target.cuda(async=True)
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)

        # compute output; criterion is the loss function defined above
        output = model(input_var)
        loss = criterion(output, target_var)

        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.data[0], input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        # print once every args.print_freq (default 10) iterations
        if i % args.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses, top1=top1, top5=top5))
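The code above also calls AverageMeter, accuracy, adjust_learning_rate, save_checkpoint and validate, which the comments mark as "see below" but which lie past the point where this note stops. The following is a minimal sketch of those helpers in the same PyTorch 0.2 style (Variable, loss.data[0], async=True); the 30-epoch learning-rate decay and the checkpoint.pth.tar / model_best.pth.tar filenames are common defaults I am assuming, not details quoted from the original main.py.

class AverageMeter(object):
    """Tracks the latest value, running sum, count and average of a metric."""
    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count


def accuracy(output, target, topk=(1,)):
    """Computes the precision@k of `output` logits against `target` labels."""
    maxk = max(topk)
    batch_size = target.size(0)

    # indices of the maxk largest logits per sample, transposed to (maxk, batch)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        correct_k = correct[:k].view(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res


def adjust_learning_rate(optimizer, epoch):
    """Step decay: divide the initial learning rate by 10 every 30 epochs (assumed schedule)."""
    lr = args.lr * (0.1 ** (epoch // 30))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr


def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Save the latest checkpoint and copy it when it is the best so far (assumed filenames)."""
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')


def validate(val_loader, model, criterion):
    """One pass over the validation set; returns the average top-1 precision."""
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    model.eval()   # switch to evaluation mode (affects dropout / batch norm)
    for i, (input, target) in enumerate(val_loader):
        target = target.cuda(async=True)
        input_var = torch.autograd.Variable(input, volatile=True)
        target_var = torch.autograd.Variable(target, volatile=True)

        output = model(input_var)
        loss = criterion(output, target_var)

        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        losses.update(loss.data[0], input.size(0))
        top1.update(prec1[0], input.size(0))
        top5.update(prec5[0], input.size(0))

    print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'.format(top1=top1, top5=top5))
    return top1.avg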