目前有個想法:將UI截圖與自動化測試截圖進行對比,在不一致的情況下提示錯誤。
截圖對比方法有:
https://www.cnblogs.com/dcb3688/p/4610660.html
import cv2
import numpy as np
# 均值哈希算法
def aHash(img):
# 縮放為8*8
img = cv2.resize(img, (8, 8))
# 轉換為灰度圖
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# s為像素和初值為0,hash_str為hash值初值為''
s = 0
hash_str = ''
# 遍歷累加求像素和
for i in range(8):
for j in range(8):
s = s + gray[i, j]
# 求平均灰度
avg = s / 64
# 灰度大於平均值為1相反為0生成圖片的hash值
for i in range(8):
for j in range(8):
if gray[i, j] > avg:
hash_str = hash_str + '1'
else:
hash_str = hash_str + '0'
return hash_str
# 差值感知算法
def dHash(img):
# 縮放8*8
img = cv2.resize(img, (9, 8))
# 轉換灰度圖
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
hash_str = ''
# 每行前一個像素大於后一個像素為1,相反為0,生成哈希
for i in range(8):
for j in range(8):
if gray[i, j] > gray[i, j + 1]:
hash_str = hash_str + '1'
else:
hash_str = hash_str + '0'
return hash_str
# 感知哈希算法(pHash)
def pHash(img):
# 縮放32*32
img = cv2.resize(img, (32, 32)) # , interpolation=cv2.INTER_CUBIC
# 轉換為灰度圖
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
# 將灰度圖轉為浮點型,再進行dct變換
dct = cv2.dct(np.float32(gray))
# opencv實現的掩碼操作
dct_roi = dct[0:8, 0:8]
hash = []
avreage = np.mean(dct_roi)
for i in range(dct_roi.shape[0]):
for j in range(dct_roi.shape[1]):
if dct_roi[i, j] > avreage:
hash.append(1)
else:
hash.append(0)
return hash
def classify_hist_with_split(image1, image2, size=(256, 256)):
    """Histogram similarity of two BGR images, averaged per channel.

    Both images are resized to *size*, split into their B/G/R planes,
    and the single-channel similarity (see ``calculate``) is averaged
    over the three planes.

    Args:
        image1: first BGR image array.
        image2: second BGR image array.
        size: (width, height) both images are resized to before comparison.

    Returns:
        Mean per-channel similarity in [0, 1].
    """
    resized_a = cv2.resize(image1, size)
    resized_b = cv2.resize(image2, size)
    channel_pairs = zip(cv2.split(resized_a), cv2.split(resized_b))
    total = sum(calculate(ch_a, ch_b) for ch_a, ch_b in channel_pairs)
    return total / 3
def calculate(image1, image2):
    """Overlap similarity of two single-channel images' histograms.

    Builds a 256-bin histogram for each image and accumulates the
    per-bin overlap ratio.

    Args:
        image1: single-channel (grayscale) image array.
        image2: single-channel (grayscale) image array.

    Returns:
        Similarity in [0, 1]; 1.0 for identical histograms.
    """
    # FIX: cv2.calcHist ranges are half-open [lo, hi), so the upper bound
    # must be 256 — the original [0.0, 255.0] silently excluded pixels
    # with value 255 and produced non-integer bin widths.
    hist1 = cv2.calcHist([image1], [0], None, [256], [0.0, 256.0])
    hist2 = cv2.calcHist([image2], [0], None, [256], [0.0, 256.0])
    # Accumulate the per-bin overlap.
    degree = 0
    for i in range(len(hist1)):
        if hist1[i] != hist2[i]:
            # Per-bin overlap ratio: 1 - |a - b| / max(a, b).
            degree = degree + (1 - abs(hist1[i] - hist2[i]) / max(hist1[i], hist2[i]))
        else:
            # Equal counts (including both zero) contribute full overlap.
            degree = degree + 1
    degree = degree / len(hist1)
    return degree
def cmpHash(hash1, hash2):
    """Hamming distance between two hash sequences.

    Counts the positions where the two hashes differ, so 0 means
    identical and a larger value means less similar.

    Args:
        hash1: bit sequence (string of '0'/'1' or list of 0/1 ints).
        hash2: bit sequence of the same length.

    Returns:
        int: number of differing positions, or -1 when the lengths
        differ (invalid arguments).
    """
    # Mismatched lengths signal a caller error.
    if len(hash1) != len(hash2):
        return -1
    # Count differing positions pairwise.
    return sum(1 for a, b in zip(hash1, hash2) if a != b)
# NOTE(review): each cv2.imread pair below overwrites the previous one,
# so only the LAST pair (t1.png vs t3.png) is actually compared.
# The trailing comments appear to record scores previously measured for
# each pair (aHash / dHash / pHash distance, histogram similarity) —
# presumably from earlier runs; verify against the blog post.
img1 = cv2.imread('openpic/x1y2.png') # 11--- 16 ----13 ---- 0.43
img2 = cv2.imread('openpic/x2y4.png')
img1 = cv2.imread('openpic/x3y5.png') # 10----11 ----8------0.25
img2 = cv2.imread('openpic/x9y1.png')
img1 = cv2.imread('openpic/x1y2.png') # 6------5 ----2--------0.84
img2 = cv2.imread('openpic/x2y6.png')
img1 = cv2.imread('openpic/t1.png') # 14------19---10--------0.70
img2 = cv2.imread('openpic/t2.png')
img1 = cv2.imread('openpic/t1.png') # 39------33---18--------0.58
img2 = cv2.imread('openpic/t3.png')
# Average hash: cmpHash returns the Hamming distance (lower = more similar).
hash1 = aHash(img1)
hash2 = aHash(img2)
n = cmpHash(hash1, hash2)
print('均值哈希算法相似度:', n)
# Difference hash, same distance-based comparison.
hash1 = dHash(img1)
hash2 = dHash(img2)
n = cmpHash(hash1, hash2)
print('差值哈希算法相似度:', n)
# Perceptual (DCT) hash, same distance-based comparison.
hash1 = pHash(img1)
hash2 = pHash(img2)
n = cmpHash(hash1, hash2)
print('感知哈希算法相似度:', n)
# Per-channel histogram similarity in [0, 1] (higher = more similar).
n = classify_hist_with_split(img1, img2)
print('三直方圖算法相似度:', n)
由於截圖對比要求較高,我選擇差值哈希算法。
具體截圖代碼如下

對比代碼

結果:

