The material below assumes some background on histogram equalization and histogram specification:
Equalization: https://blog.csdn.net/macunshi/article/details/79815870
Specification: https://blog.csdn.net/macunshi/article/details/79819263
Applications of histogram equalization:
Histogram equalization stretches the gray levels of an image so that pixel values spread roughly uniformly over [0, 255]. The result looks neither too bright nor too dark, so it is commonly used for image enhancement; a minimal example follows.
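As a quick illustration (a sketch of my own, not part of the original post), OpenCV exposes this directly for single-channel images through cv2.equalizeHist; the file names below are placeholders:

```python
import cv2

# Minimal equalization sketch; 'example.jpg' is a hypothetical input path.
gray = cv2.imread('example.jpg', cv2.IMREAD_GRAYSCALE)
equalized = cv2.equalizeHist(gray)  # redistribute gray levels over 0..255
cv2.imwrite('example_equalized.jpg', equalized)
```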
Applications of histogram specification:
For example, when stitching several images together we want their brightness and saturation to be consistent, which in effect means making their histogram distributions match; this is exactly what histogram specification does.
Histogram specification follows the same idea as equalization: it boils down to finding a mapping between gray levels. In practice a reference image A is chosen, the histograms of both A and the target image are equalized, and composing the target's equalization map with the inverse of A's equalization map gives the mapping that moves the target image's pixels toward A's "look".
For the full derivation see the links above (they make everything click); a single-channel sketch of the idea follows.
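To make the mapping concrete, here is a rough single-channel sketch (my own illustration in NumPy, not the post's code; the full BGR implementation is given below). It equalizes both histograms and inverts the reference map by picking the nearest level, whereas the full code below instead falls back to the previous valid level:

```python
import numpy as np

def cdf_map(hist):
    # Gray level -> round(255 * CDF): the equalization mapping for one histogram.
    cdf = np.cumsum(hist) / hist.sum()
    return np.round(255 * cdf).astype(np.uint8)

def specify(src_hist, ref_hist):
    src_map = cdf_map(src_hist)   # source level -> equalized level
    ref_map = cdf_map(ref_hist)   # reference level -> equalized level
    # Invert the reference map: for each equalized level, find the closest reference level.
    inv_ref = np.array([np.abs(ref_map.astype(int) - s).argmin() for s in range(256)])
    return inv_ref[src_map]       # source level -> reference-style level (256-entry LUT)
```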
Unifying image style with histogram specification in Python
Reference image

Original images (first row) / processed images (second row)
Source code:
```python
import os

import cv2
import numpy as np


def get_map(Hist):
    # Normalize the histogram to a probability distribution Pr
    sum_Hist = float(Hist.sum())
    Pr = Hist.ravel() / sum_Hist
    # Accumulate Pr into the cumulative distribution Sk
    Sk = []
    temp_sum = 0
    for n in Pr:
        temp_sum = temp_sum + n
        Sk.append(temp_sum)
    Sk = np.array(Sk)
    # Equalization mapping: gray level m -> round(255 * Sk[m])
    img_map = []
    for m in range(256):
        temp_map = int(255 * Sk[m] + 0.5)
        img_map.append(temp_map)
    img_map = np.array(img_map)
    return img_map


def get_off_map(map_):  # Invert a mapping, looking for the smallest matching level
    map_2 = list(map_)
    off_map = []
    temp_pre = 0  # default to 0 if no mapping is found at the start of the loop
    for n in range(256):
        try:
            temp1 = map_2.index(n)
            temp_pre = temp1
        except ValueError:
            temp1 = temp_pre  # when level n has no pre-image, reuse the nearest previous valid mapping
        off_map.append(temp1)
    off_map = np.array(off_map)
    return off_map


def get_infer_map(infer_img):
    # Per-channel histograms of the reference image (OpenCV uses B, G, R order)
    infer_Hist_b = cv2.calcHist([infer_img], [0], None, [256], [0, 256])
    infer_Hist_g = cv2.calcHist([infer_img], [1], None, [256], [0, 256])
    infer_Hist_r = cv2.calcHist([infer_img], [2], None, [256], [0, 256])
    # Equalization maps of the reference image, then their inverses
    infer_b_map = get_map(infer_Hist_b)
    infer_g_map = get_map(infer_Hist_g)
    infer_r_map = get_map(infer_Hist_r)
    infer_b_off_map = get_off_map(infer_b_map)
    infer_g_off_map = get_off_map(infer_g_map)
    infer_r_off_map = get_off_map(infer_r_map)
    return [infer_b_off_map, infer_g_off_map, infer_r_off_map]


def get_finalmap(org_map, infer_off_map):  # Compose source->equalized with equalized->reference
    org_map = list(org_map)
    infer_off_map = list(infer_off_map)
    final_map = []
    for n in range(256):
        temp1 = org_map[n]
        temp2 = infer_off_map[temp1]
        final_map.append(temp2)
    final_map = np.array(final_map)
    return final_map


def get_newimg(img_org, org2infer_maps):
    # Apply the per-channel lookup tables pixel by pixel
    h, w, _ = img_org.shape
    b, g, r = cv2.split(img_org)
    for i in range(h):
        for j in range(w):
            b[i, j] = org2infer_maps[0][b[i, j]]
            g[i, j] = org2infer_maps[1][g[i, j]]
            r[i, j] = org2infer_maps[2][r[i, j]]
    newimg = cv2.merge([b, g, r])
    return newimg


def get_new_img(img_org, infer_map):
    # Equalization maps of the source image, one per channel
    org_Hist_b = cv2.calcHist([img_org], [0], None, [256], [0, 256])
    org_Hist_g = cv2.calcHist([img_org], [1], None, [256], [0, 256])
    org_Hist_r = cv2.calcHist([img_org], [2], None, [256], [0, 256])
    org_b_map = get_map(org_Hist_b)
    org_g_map = get_map(org_Hist_g)
    org_r_map = get_map(org_Hist_r)
    # Final source->reference mapping for each channel
    org2infer_map_b = get_finalmap(org_b_map, infer_map[0])
    org2infer_map_g = get_finalmap(org_g_map, infer_map[1])
    org2infer_map_r = get_finalmap(org_r_map, infer_map[2])
    return get_newimg(img_org, [org2infer_map_b, org2infer_map_g, org2infer_map_r])


if __name__ == "__main__":
    dstroot = './imgs'            # directory of images to be restyled
    infer_img_path = './abc.png'  # reference image
    outroot = './out1'            # output directory
    os.makedirs(outroot, exist_ok=True)  # make sure the output directory exists
    infer_img = cv2.imread(infer_img_path)
    infer_map = get_infer_map(infer_img)  # mapping derived from the reference image
    dstlist = os.listdir(dstroot)
    for n in dstlist:
        img_path = os.path.join(dstroot, n)
        print(img_path)
        img_org = cv2.imread(img_path)
        new_img = get_new_img(img_org, infer_map)  # remap the image using the reference mapping
        new_path = os.path.join(outroot, n)
        cv2.imwrite(new_path, new_img)
```
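A usage note (my own addition, not from the original post): the per-pixel loops in get_newimg are easy to follow but slow in Python. Since each channel is just passed through a 256-entry lookup table, cv2.LUT can apply the same mapping to a whole channel at once. A sketch of that variant, assuming the maps produced above are first clipped and cast to uint8:

```python
import cv2
import numpy as np

def get_newimg_lut(img_org, org2infer_maps):
    # Same result as get_newimg, but vectorized with cv2.LUT (one 256-entry table per channel).
    b, g, r = cv2.split(img_org)
    lut_b = np.clip(org2infer_maps[0], 0, 255).astype(np.uint8)
    lut_g = np.clip(org2infer_maps[1], 0, 255).astype(np.uint8)
    lut_r = np.clip(org2infer_maps[2], 0, 255).astype(np.uint8)
    return cv2.merge([cv2.LUT(b, lut_b), cv2.LUT(g, lut_g), cv2.LUT(r, lut_r)])
```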








