Source: https://blog.csdn.net/zhuisui_woxin/article/details/84400439
Install the required versions of the libraries (note: the version matters — the SIFT/SURF code below relies on the xfeatures2d module shipped with the 3.4.2.16 contrib wheels, which newer pip wheels no longer include):
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple opencv-python==3.4.2.16
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple opencv-contrib-python==3.4.2.16
If you need to uninstall an existing (newer) version first:
pip uninstall opencv-python
pip uninstall opencv-contrib-python
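Because the code below relies on the xfeatures2d module shipped with these contrib wheels, it can help to verify the installation before running the examples. A minimal sketch of such a check (the printed messages are only illustrative):
# Quick check that the pinned OpenCV build exposes SIFT via xfeatures2d
import cv2
print(cv2.__version__)  # expected: 3.4.2 for the 3.4.2.16 wheels
try:
    cv2.xfeatures2d.SIFT_create()
    print("xfeatures2d.SIFT is available")
except (AttributeError, cv2.error):
    print("SIFT not available - check that opencv-contrib-python==3.4.2.16 is installed")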
Test images used: template_adjust.jpg (the template) and target.jpg (the target scene).
Code 1:
# OpenCV feature matching with BFMatcher (brute force) and ORB
import cv2
from matplotlib import pyplot as plt
# pip install -i https://pypi.tuna.tsinghua.edu.cn/simple opencv-python==3.4.2.16
# pip install -i https://pypi.tuna.tsinghua.edu.cn/simple opencv-contrib-python==3.4.2.16
# Read the two images to be matched, as grayscale.
template = cv2.imread("template_adjust.jpg", 0)
target = cv2.imread("target.jpg", 0)
orb = cv2.ORB_create()                                  # create the ORB feature detector
kp1, des1 = orb.detectAndCompute(template, None)        # keypoints and descriptors of the template
kp2, des2 = orb.detectAndCompute(target, None)          # keypoints and descriptors of the target
bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)   # brute-force matcher with cross-checking
matches = bf.match(des1, des2)                          # match the descriptors
matches = sorted(matches, key=lambda x: x.distance)     # sort matches by distance
result = cv2.drawMatches(template, kp1, target, kp2, matches[:40], None, flags=2)  # draw the 40 best matches
plt.imshow(result), plt.show()                          # display with matplotlib
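Instead of cross-checking, BFMatcher can also return the two nearest neighbours for each descriptor via knnMatch and filter them with Lowe's ratio test. A minimal sketch of that variant, assuming the same two test images (the 0.75 ratio is just an illustrative threshold):
# BFMatcher variant: knnMatch with Lowe's ratio test instead of cross-checking
import cv2
from matplotlib import pyplot as plt
template = cv2.imread("template_adjust.jpg", 0)
target = cv2.imread("target.jpg", 0)
orb = cv2.ORB_create()
kp1, des1 = orb.detectAndCompute(template, None)
kp2, des2 = orb.detectAndCompute(target, None)
bf = cv2.BFMatcher(cv2.NORM_HAMMING)             # crossCheck stays False so knnMatch can return k candidates
knn_matches = bf.knnMatch(des1, des2, k=2)       # two nearest neighbours per template descriptor
good = [[m] for m, n in knn_matches if m.distance < 0.75 * n.distance]  # keep unambiguous matches only
result = cv2.drawMatchesKnn(template, kp1, target, kp2, good, None, flags=2)
plt.imshow(result), plt.show()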
Code 2:
'''
FLANN based Matcher
1. FLANN stands for Fast Library for Approximate Nearest Neighbors. It contains a collection of
   algorithms optimized for fast nearest-neighbor search in large datasets and for high-dimensional features.
2. For large datasets it works faster than BFMatcher.
3. Two dictionaries have to be passed to specify the algorithm to use and its parameters.
For algorithms such as SIFT or SURF, use:
index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
For ORB, use:
index_params = dict(algorithm = FLANN_INDEX_LSH,
                    table_number = 6,       # 12
                    key_size = 12,          # 20
                    multi_probe_level = 1)  # 2
The second dictionary, search_params (e.g. dict(checks=50)), specifies how many times the trees in
the index should be recursively traversed; higher values give better precision but take more time.
'''
import cv2 as cv
from matplotlib import pyplot as plt
queryImage = cv.imread("template_adjust.jpg", 0)
trainingImage = cv.imread("target.jpg", 0)             # read the two grayscale images to match
sift = cv.xfeatures2d.SIFT_create()                    # create the SIFT detector
kp1, des1 = sift.detectAndCompute(queryImage, None)
kp2, des2 = sift.detectAndCompute(trainingImage, None)
# set the FLANN parameters
FLANN_INDEX_KDTREE = 0
indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
searchParams = dict(checks=50)
flann = cv.FlannBasedMatcher(indexParams, searchParams)
matches = flann.knnMatch(des1, des2, k=2)
# prepare an empty mask so that only the good matches are drawn
matchesMask = [[0, 0] for i in range(len(matches))]
for i, (m, n) in enumerate(matches):
    if m.distance < 0.5 * n.distance:                  # keep only matches passing the ratio test (0.5)
        matchesMask[i] = [1, 0]
drawParams = dict(matchColor=(0, 0, 255), singlePointColor=(255, 0, 0), matchesMask=matchesMask, flags=0)  # colors for the match lines and keypoints
resultimage = cv.drawMatchesKnn(queryImage, kp1, trainingImage, kp2, matches, None, **drawParams)  # draw the matching result
plt.imshow(resultimage), plt.show()
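The docstring above also lists the LSH index parameters FLANN expects for binary descriptors such as ORB. Below is a minimal sketch of that variant, assuming the same two test images; FLANN_INDEX_LSH = 6 is the FLANN constant for the LSH index, and with LSH the matcher may return fewer than k neighbours for some descriptors, so the pairs are filtered before drawing.
# FLANN matching with ORB descriptors using the LSH index
import cv2 as cv
from matplotlib import pyplot as plt
queryImage = cv.imread("template_adjust.jpg", 0)
trainingImage = cv.imread("target.jpg", 0)
orb = cv.ORB_create()
kp1, des1 = orb.detectAndCompute(queryImage, None)
kp2, des2 = orb.detectAndCompute(trainingImage, None)
FLANN_INDEX_LSH = 6
indexParams = dict(algorithm=FLANN_INDEX_LSH, table_number=6, key_size=12, multi_probe_level=1)
searchParams = dict(checks=50)
flann = cv.FlannBasedMatcher(indexParams, searchParams)
matches = flann.knnMatch(des1, des2, k=2)
good = []
for pair in matches:
    # some queries can come back with fewer than 2 neighbours, so guard the unpacking
    if len(pair) == 2 and pair[0].distance < 0.7 * pair[1].distance:
        good.append([pair[0]])
result = cv.drawMatchesKnn(queryImage, kp1, trainingImage, kp2, good, None, flags=2)
plt.imshow(result), plt.show()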
Code 3:
# Source: https://blog.csdn.net/zhuisui_woxin/article/details/84400439
# Locating an object with the FLANN based Matcher and a homography
import numpy as np
import cv2
from matplotlib import pyplot as plt
MIN_MATCH_COUNT = 10                              # minimum number of good matches required
template = cv2.imread('template_adjust.jpg', 0)   # queryImage
target = cv2.imread('target.jpg', 0)              # trainImage
# Initiate the SIFT detector
sift = cv2.xfeatures2d.SIFT_create()
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(template, None)
kp2, des2 = sift.detectAndCompute(target, None)
# create and configure the FLANN matcher
FLANN_INDEX_KDTREE = 0
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv2.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# store all the good matches as per Lowe's ratio test (ratio 0.7)
good = []
for m, n in matches:
    if m.distance < 0.7 * n.distance:
        good.append(m)
if len(good) > MIN_MATCH_COUNT:
    # collect the coordinates of the matched keypoints
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # compute the homography and the inlier mask with RANSAC
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    matchesMask = mask.ravel().tolist()
    h, w = template.shape
    # project the four corners of the template into the target image using the homography
    pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2)
    dst = cv2.perspectiveTransform(pts, M)
    cv2.polylines(target, [np.int32(dst)], True, 0, 2, cv2.LINE_AA)
else:
    print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
    matchesMask = None
draw_params = dict(matchColor=(0, 255, 0),
                   singlePointColor=None,
                   matchesMask=matchesMask,
                   flags=2)
result = cv2.drawMatches(template, kp1, target, kp2, good, None, **draw_params)
plt.imshow(result, 'gray')
plt.show()
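Beyond outlining the object, the homography M from Code 3 can also rectify the located region back into the template's coordinate frame. A minimal sketch of such a continuation (it reuses template, target, and M from Code 3; the output filename is just illustrative):
# Continuation of Code 3: assumes template, target, and the homography M are already defined.
# Warping the target with the inverse of M samples the detected region back into the
# template's frame, giving a rectified crop of the located object.
import cv2
h, w = template.shape
rectified = cv2.warpPerspective(target, M, (w, h), flags=cv2.WARP_INVERSE_MAP)
cv2.imwrite("located_region.jpg", rectified)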
Other variants will be filled in later.