(Repost) PyTorch DDP Mode: Single-Machine Multi-GPU Training


一、The command to launch training

python -m torch.distributed.launch --nproc_per_node=NUM_GPUS_YOU_HAVE train.py

Here torch.distributed.launch starts the training script in distributed mode, and --nproc_per_node specifies how many processes to spawn on this node; it is usually set to the number of GPUs available.

二、After launch, each process automatically receives its arguments

import argparse
import torch
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP

parser = argparse.ArgumentParser()
parser.add_argument("--local_rank", type=int, default=-1)
opt = parser.parse_args()
local_rank = opt.local_rank
print("local rank {}".format(local_rank))

assert torch.cuda.device_count() > opt.local_rank
torch.cuda.set_device(opt.local_rank)
device = torch.device('cuda', opt.local_rank)
dist.init_process_group(backend='nccl', init_method='env://')  # distributed backend

opt.world_size = dist.get_world_size()
print("world size {}".format(opt.world_size))
print("get rank {}".format(dist.get_rank()))

Each process receives its own local_rank. local_rank is the index of the process on the local machine and is assigned automatically by the launcher. world_size is the total number of processes running, which corresponds to the value passed to --nproc_per_node.
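The launcher also exports the rendezvous information as environment variables, which is what init_method='env://' reads. A minimal sketch for inspecting them inside each worker (the exact set of variables depends on the PyTorch version, e.g. LOCAL_RANK is only exported by some launcher configurations, so treat this as an assumption):

import os

# printed by each worker process; values are set by torch.distributed.launch
for key in ("MASTER_ADDR", "MASTER_PORT", "RANK", "WORLD_SIZE", "LOCAL_RANK"):
    print("{} = {}".format(key, os.environ.get(key, "<not set>")))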


三、Setting up DDP training, step by step

1. Import packages

import torch
import torchvision
print("current torch version is {}".format(torch.__version__))
print("current torchvision version is {}".format(torchvision.__version__))
import sys
from models.resnet import *
from torchvision import datasets, transforms
import os
import torch.optim as optim
from torch.optim import lr_scheduler
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel as DDP
import time
import copy
from torch.nn import DataParallel
import argparse
2. Argument parsing
parser = argparse.ArgumentParser()
parser.add_argument("--image_folder", type=str, default='/home/jl/datasets/oilrecognition', help='train and val folder path')
parser.add_argument("--local_rank", type=int, default=-1, help='DDP parameter, do not modify')  # no need to set this manually; torch.distributed.launch fills it in automatically
parser.add_argument("--distribute", action='store_true', help='whether to use multi-GPU training')
parser.add_argument("--distribute_mode", type=str, default='DDP', help='which parallel mode to use')
parser.add_argument('--epochs', type=int, default=20)
parser.add_argument('--batch_size', type=int, default=64, help='total batch size for all GPUs')
parser.add_argument("--save_path", type=str, default="./save", help="the path used to save state_dict")
opt = parser.parse_args()
3. Initialization
if opt.distribute and opt.local_rank != -1:
    global device
    torch.cuda.set_device(opt.local_rank)
    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    device = torch.device('cuda', opt.local_rank)
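The block above only defines device when the run is distributed. If the same script should also run on a single GPU or CPU, a minimal sketch with a fallback branch (the fallback is an assumption; the original post does not show one) could look like this:

if opt.distribute and opt.local_rank != -1:
    # DDP: bind this process to its GPU and join the process group
    torch.cuda.set_device(opt.local_rank)
    torch.distributed.init_process_group(backend='nccl', init_method='env://')
    device = torch.device('cuda', opt.local_rank)
else:
    # assumed single-process fallback: first GPU if available, otherwise CPU
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')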
4. Data loading
data_dir = opt.image_folder
image_datasets = {}
image_datasets['train'] = datasets.ImageFolder(os.path.join(data_dir, 'train'), data_transforms['train'])
image_datasets['val'] = datasets.ImageFolder(os.path.join(data_dir, 'val'), data_transforms['val'])

if opt.distribute and opt.local_rank != -1:
    world_size = dist.get_world_size()
    # DistributedSampler splits the training set across processes
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        image_datasets['train'], num_replicas=world_size, rank=opt.local_rank)
else:
    train_sampler = None

print("batch size is : {}".format(opt.batch_size))
dataloaders = {}
dataloaders['train'] = torch.utils.data.DataLoader(
    image_datasets['train'], batch_size=opt.batch_size, shuffle=(train_sampler is None),
    num_workers=4, pin_memory=True, sampler=train_sampler)
dataloaders['val'] = torch.utils.data.DataLoader(
    image_datasets['val'], batch_size=opt.batch_size, shuffle=False, num_workers=4)
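The code above indexes a data_transforms dict that this excerpt never defines. A minimal sketch, assuming standard ImageNet-style preprocessing (the crop sizes and normalization values are assumptions, not from the original post):

from torchvision import transforms

data_transforms = {
    'train': transforms.Compose([
        transforms.RandomResizedCrop(224),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]),
    'val': transforms.Compose([
        transforms.Resize(256),
        transforms.CenterCrop(224),
        transforms.ToTensor(),
        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225]),
    ]),
}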
5. Model
if opt.distribute and opt.local_rank != -1:
    model.to(device)
    model = DDP(model, device_ids=[opt.local_rank])
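The excerpt does not show where model comes from (presumably models.resnet from the imports, which is not included here). A minimal sketch using a torchvision ResNet as a stand-in, reusing opt and device from the sections above (both the architecture and the class count are assumptions):

import torchvision

# stand-in for the post's models.resnet; the class count is an arbitrary example
model = torchvision.models.resnet18(num_classes=4)

if opt.distribute and opt.local_rank != -1:
    model.to(device)                                  # move parameters to this process's GPU first
    model = DDP(model, device_ids=[opt.local_rank])   # then wrap for gradient synchronization
else:
    model.to(device)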

6. Saving the model

# save only from the rank-0 process (or when not running distributed)
if not opt.distribute or dist.get_rank() == 0:
    save_path = './oil_net.pt'
    # under DDP, model.module is the original unwrapped model, so its keys have no 'module.' prefix
    state_dict = model.module.state_dict() if isinstance(model, DDP) else model.state_dict()
    torch.save(state_dict, save_path)
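One step the excerpt does not show: when training with a DistributedSampler, call set_epoch at the start of every epoch so each epoch is reshuffled differently across processes. A minimal sketch of the epoch loop, reusing the objects defined above (the loss function and optimizer hyperparameters are assumptions):

import torch.nn as nn

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)  # assumed hyperparameters

for epoch in range(opt.epochs):
    if train_sampler is not None:
        train_sampler.set_epoch(epoch)   # reshuffle the per-process shards each epoch
    model.train()
    for images, labels in dataloaders['train']:
        images, labels = images.to(device), labels.to(device)
        optimizer.zero_grad()
        loss = criterion(model(images), labels)
        loss.backward()                  # DDP all-reduces gradients during backward
        optimizer.step()

After training, calling dist.destroy_process_group() in the distributed case cleanly tears down the process group.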


Author: RunningJiang
Link: https://www.jianshu.com/p/7818b128b9cd
Source: Jianshu
Copyright belongs to the author. For commercial reproduction, please contact the author for authorization; for non-commercial reproduction, please credit the source.

