pytorch 計算圖像數據集的均值和標准差


在使用 torchvision.transforms進行數據處理時我們經常進行的操作是:

transforms.Normalize((0.485,0.456,0.406), (0.229,0.224,0.225))

前面的(0.485,0.456,0.406)表示均值,分別對應的是RGB三個通道;后面的(0.229,0.224,0.225)則表示的是標准差

這上面的均值和標准差的值是從ImageNet數據集上計算出來的,所以很多人都直接使用它們

但是如果你想要計算自己的數據集的均值和標准差,讓其作為你的transforms.Normalize函數的參數的話可以進行下面的操作

代碼get_mean_std.py:

# coding:utf-8
import os
import numpy as np
from torchvision.datasets import ImageFolder
import torchvision.transforms as transforms
from dataloader import Dataloader
from options import options
import pickle
"""
    在訓練前先運行該函數獲得數據的均值和標准差
"""

class Dataloader():
    def __init__(self, opt):
        """Build per-split ImageFolder datasets used to compute channel statistics.

        :param opt: options object; must provide ``dataroot`` (dataset root
                    directory) and ``isize`` (target image size).
        """
        self.opt = opt
        # Sub-directory name of each data split (train / validation / test).
        self.dirs = ['train', 'test', 'testing']

        # Final per-channel statistics; overwritten by get_mean_std().
        self.means = [0, 0, 0]
        self.stdevs = [0, 0, 0]

        # ToTensor() scales pixel values from [0, 255] to [0.0, 1.0], so the
        # statistics computed below are directly usable by transforms.Normalize.
        # Normalize itself is deliberately omitted: we are computing its inputs.
        self.transform = transforms.Compose([transforms.Resize(opt.isize),
                                        transforms.CenterCrop(opt.isize),
                                        transforms.ToTensor(),
                                        ])

        # ImageFolder labels samples by sub-folder automatically
        # (one folder = one class).
        self.dataset = {x: ImageFolder(os.path.join(opt.dataroot, x), self.transform) for x in self.dirs}


    def get_mean_std(self, type, mean_std_path):
        """
        Compute the per-channel mean and std of one dataset split and pickle them.

        NOTE: the "std" reported is the mean of per-image stds — a common
        approximation, not the exact std over all pixels of the split.

        :param type: which split to use: 'train', 'test' or 'testing'
        :param mean_std_path: path of the file to store the mean and std in
        :return: None (results are also left in self.means / self.stdevs)
        :raises ValueError: if the requested split contains no images
        """
        num_imgs = len(self.dataset[type])
        if num_imgs == 0:
            raise ValueError("dataset split '{}' is empty".format(type))

        # Accumulate in LOCALS: the original code added into self.means /
        # self.stdevs, so calling this method once per split (as __main__ does)
        # carried the previous split's sums into the next one, corrupting every
        # result after the first.
        means = [0.0, 0.0, 0.0]
        stdevs = [0.0, 0.0, 0.0]
        for data in self.dataset[type]:
            img = data[0]  # (C, H, W) tensor with values in [0, 1]
            for i in range(3):
                # Mean and std of one channel of one image.
                means[i] += img[i, :, :].mean()
                stdevs[i] += img[i, :, :].std()

        self.means = np.asarray(means) / num_imgs
        self.stdevs = np.asarray(stdevs) / num_imgs

        print("{} : normMean = {}".format(type, self.means))
        print("{} : normstdevs = {}".format(type, self.stdevs))

        # Persist so training can load the values instead of recomputing them.
        with open(mean_std_path, 'wb') as f:
            pickle.dump(self.means, f)
            pickle.dump(self.stdevs, f)
            print('pickle done')

if __name__ == '__main__':
    # Script entry point: compute and persist the mean/std of every split.
    opt = options().parse()
    loader = Dataloader(opt)
    for split in loader.dirs:
        loader.get_mean_std(split, 'mean_std_value_' + split + '.pkl')

然后再從相應的文件讀取均值和標准差放到dataloader.py的transforms.Normalize函數中即可:

# coding:utf-8
import os
import torch
import torchvision.transforms as transforms
from torchvision.datasets import ImageFolder
import numpy as np
import pickle


"""
    用於加載訓練train、驗證test和測試數據testing
"""

class Dataloader():
    def __init__(self, opt):
        """Load each split's pickled mean/std (falling back to zeros when the
        pickle file is absent) and build the matching transform pipeline.

        :param opt: options object; must provide ``isize`` (target image size).
        """
        self.opt = opt
        # Folder name of each data split: train / validation / test.
        self.dirs = ['train', 'test', 'testing']
        # Path of the pickle file holding each split's mean and std.
        self.mean_std_path = {}
        for split in self.dirs:
            self.mean_std_path[split] = 'mean_std_value_' + split + '.pkl'

        # Start from all-zero statistics; replaced below if a pickle exists.
        self.means = {}
        self.stdevs = {}
        for split in self.dirs:
            self.means[split] = [0, 0, 0]
            self.stdevs[split] = [0, 0, 0]
        print(type(self.means['train']))
        print(self.means)
        print(self.stdevs)

        for split in self.dirs:
            # An existing file means get_mean_std.py already computed the stats.
            if not os.path.exists(self.mean_std_path[split]):
                continue
            with open(self.mean_std_path[split], 'rb') as f:
                self.means[split] = pickle.load(f)
                self.stdevs[split] = pickle.load(f)
                print('pickle load done')

        print(self.means)
        print(self.stdevs)
        # Feed each split's statistics into its own Normalize step.
        self.transform = {}
        for split in self.dirs:
            self.transform[split] = transforms.Compose([
                transforms.Resize(opt.isize),
                transforms.CenterCrop(opt.isize),
                transforms.ToTensor(),
                transforms.Normalize(self.means[split], self.stdevs[split]),
            ])
...

 


免責聲明!

本站轉載的文章為個人學習借鑒使用,本站對版權不負任何法律責任。如果侵犯了您的隱私權益,請聯系本站郵箱yoyou2525@163.com刪除。



 
粵ICP備18138465號   © 2018-2025 CODEPRJ.COM