Getting Started with OpenCV: Extracting an Object from a Video/Image


The original photo (image not included here):

After extraction (image not included here):

You could load a network webcam here and process the video frame by frame for live detection. Since I was in the library and the webcam app on my phone was not on the same hotspot as my laptop, I simply took a photo to test with (a sketch of the frame-by-frame loop is included after the full code at the end).

① The original picture is too large, so shrink it first

import cv2
import numpy as np

img = cv2.imread("C:/Users/31132/Desktop/mtest.jpg")

print(img.shape)                        # (height, width, channels)
frameWidth = img.shape[1] // 6          # shape[1] is the width
frameHeight = img.shape[0] // 6         # shape[0] is the height
imgRsize = cv2.resize(img, (frameWidth, frameHeight))   # cv2.resize takes (width, height)
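If you prefer not to compute the new size yourself, cv2.resize also accepts scale factors; a minimal equivalent sketch (the 1/6 factor matches the code above):

# shrink both dimensions to 1/6 of the original size
imgRsize = cv2.resize(img, (0, 0), fx=1/6, fy=1/6)

Note that getWarp below still needs frameWidth and frameHeight, so the explicit-size version is what the rest of the script uses.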

 

② Preprocess the image: convert to grayscale, apply Gaussian blur, edge detection, dilation and erosion, to get a reasonably clean outline of the object

def preProcessing(img):
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)        # grayscale
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)          # Gaussian blur
    imgCanny = cv2.Canny(imgBlur, 200, 200)                 # Canny edge detection
    kernel = np.ones((5, 5), np.uint8)
    imgDial = cv2.dilate(imgCanny, kernel, iterations=2)    # dilation to strengthen the edges
    imgThres = cv2.erode(imgDial, kernel, iterations=1)     # erosion to thin them back down

    return imgThres
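To see what each stage produces before looking at the final mask, you can display them separately; a small debugging sketch (showStages is a hypothetical helper, not part of the original script):

# hypothetical debugging helper: show each preprocessing stage in its own window
def showStages(img):
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)
    imgCanny = cv2.Canny(imgBlur, 200, 200)
    cv2.imshow("Gray", imgGray)
    cv2.imshow("Blur", imgBlur)
    cv2.imshow("Canny", imgCanny)
    cv2.waitKey(0)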

The result of the preprocessing (images not included here):

③ Find the contours, locate the object, and get its four corner points

def getContours(img):
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    biggest = np.array([])
    maxArea = 0
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 5000:                                 # ignore small contours (noise)
            # cv2.drawContours(imgcontours, cnt, -1, (0, 0, 255), 5)
            # perimeter of the closed contour
            peri = cv2.arcLength(cnt, True)
            # approximate the contour with fewer points
            approx = cv2.approxPolyDP(cnt, 0.03 * peri, True)
            if area > maxArea and len(approx) == 4:     # keep the largest quadrilateral
                maxArea = area
                biggest = approx
    print(biggest)
    if biggest.size != 0:                               # draw the corners only if a quadrilateral was found
        cv2.drawContours(imgcontours, biggest, -1, (255, 140, 0), 22)

    return biggest
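Note that the two-value unpacking of cv2.findContours above matches OpenCV 4.x (and 2.x); OpenCV 3.x returns three values, with the contours in the second position. If you need to support both, a small compatibility wrapper (a sketch, not part of the original script) would be:

# findContours returns (image, contours, hierarchy) in OpenCV 3.x
# and (contours, hierarchy) in OpenCV 2.x / 4.x; [-2] is the contours either way
def findContoursCompat(img, mode, method):
    result = cv2.findContours(img, mode, method)
    return result[-2], result[-1]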

 

④ Reorder the four corner points and apply the perspective transform

def getWarp(img, biggest):
    # reorder the corner points to match the destination points below;
    # this manual swap of points 0 and 1 only happens to be right for this photo
    newbig = np.zeros_like(biggest)
    newbig[0] = biggest[1]
    newbig[1] = biggest[0]
    newbig[2] = biggest[2]
    newbig[3] = biggest[3]
    print('new', newbig)
    srcPoints = np.float32(newbig)
    dstPoints = np.float32([[0, 0], [frameWidth, 0], [0, frameHeight], [frameWidth, frameHeight]])
    matrix = cv2.getPerspectiveTransform(srcPoints, dstPoints)
    imgout = cv2.warpPerspective(img, matrix, (frameWidth, frameHeight))

    return imgout
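The manual index swap only works because of how approxPolyDP happened to order the corners in this photo. A more general reordering sorts the four points by the sum and difference of their coordinates, so they always come out as top-left, top-right, bottom-left, bottom-right, matching the destination points in getWarp; a sketch (reorderCorners is a hypothetical helper, not part of the original script):

# hypothetical helper: order 4 corner points as [top-left, top-right, bottom-left, bottom-right]
def reorderCorners(points):
    points = points.reshape((4, 2))
    ordered = np.zeros((4, 1, 2), dtype=np.int32)
    s = points.sum(axis=1)             # x + y
    d = np.diff(points, axis=1)        # y - x
    ordered[0] = points[np.argmin(s)]  # top-left: smallest x + y
    ordered[3] = points[np.argmax(s)]  # bottom-right: largest x + y
    ordered[1] = points[np.argmin(d)]  # top-right: smallest y - x
    ordered[2] = points[np.argmax(d)]  # bottom-left: largest y - x
    return ordered

With it, getWarp could simply use newbig = reorderCorners(biggest) instead of the hand-written swap.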

 

 

The full code

import cv2
import numpy as np

# day05
# To load a video / network webcam instead of a still image:
# cap = cv2.VideoCapture("http://192.168.137.116:4747/video")

# cap.set(3, frameWidth)    # property 3: frame width
# cap.set(4, frameHeight)   # property 4: frame height
# cap.set(10, 150)          # property 10: brightness

def getContours(img):
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    biggest = np.array([])
    maxArea = 0
    for cnt in contours:
        area = cv2.contourArea(cnt)
        if area > 5000:                                 # ignore small contours (noise)
            # cv2.drawContours(imgcontours, cnt, -1, (0, 0, 255), 5)
            # perimeter of the closed contour
            peri = cv2.arcLength(cnt, True)
            # approximate the contour with fewer points
            approx = cv2.approxPolyDP(cnt, 0.03 * peri, True)
            if area > maxArea and len(approx) == 4:     # keep the largest quadrilateral
                maxArea = area
                biggest = approx
    print(biggest)
    if biggest.size != 0:                               # draw the corners only if a quadrilateral was found
        cv2.drawContours(imgcontours, biggest, -1, (255, 140, 0), 22)

    return biggest


def preProcessing(img):
    imgGray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)        # grayscale
    imgBlur = cv2.GaussianBlur(imgGray, (5, 5), 1)          # Gaussian blur
    imgCanny = cv2.Canny(imgBlur, 200, 200)                 # Canny edge detection
    kernel = np.ones((5, 5), np.uint8)
    imgDial = cv2.dilate(imgCanny, kernel, iterations=2)    # dilation
    imgThres = cv2.erode(imgDial, kernel, iterations=1)     # erosion

    return imgThres

def getWarp(img, biggest):
    # reorder the corner points to match the destination points below;
    # this manual swap of points 0 and 1 only happens to be right for this photo
    newbig = np.zeros_like(biggest)
    newbig[0] = biggest[1]
    newbig[1] = biggest[0]
    newbig[2] = biggest[2]
    newbig[3] = biggest[3]
    print('new', newbig)
    srcPoints = np.float32(newbig)
    dstPoints = np.float32([[0, 0], [frameWidth, 0], [0, frameHeight], [frameWidth, frameHeight]])
    matrix = cv2.getPerspectiveTransform(srcPoints, dstPoints)
    imgout = cv2.warpPerspective(img, matrix, (frameWidth, frameHeight))

    return imgout


# Main flow: read the image (with a webcam this would sit inside the frame loop)
# while True:
#     success, img = cap.read()
img = cv2.imread("C:/Users/31132/Desktop/mtest.jpg")

print(img.shape)                        # (height, width, channels)
frameWidth = img.shape[1] // 6          # shape[1] is the width
frameHeight = img.shape[0] // 6         # shape[0] is the height
imgRsize = cv2.resize(img, (frameWidth, frameHeight))
imgcontours = imgRsize.copy()
imgThres = preProcessing(imgRsize)
biggest = getContours(imgThres)

imgOut = getWarp(imgRsize, biggest)
cv2.imshow("Video", imgOut)
cv2.waitKey(0)
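As mentioned at the top, the same pipeline can also run frame by frame on a webcam stream. A sketch of that loop, reusing the functions above (the URL comes from the commented-out lines and will differ on your network; treat this as an untested outline):

# frame-by-frame version of the same pipeline
cap = cv2.VideoCapture("http://192.168.137.116:4747/video")
while True:
    success, frame = cap.read()
    if not success:
        break
    imgRsize = cv2.resize(frame, (frameWidth, frameHeight))
    imgcontours = imgRsize.copy()
    imgThres = preProcessing(imgRsize)
    biggest = getContours(imgThres)
    if biggest.size == 8:                         # a quadrilateral (4 points) was found
        imgOut = getWarp(imgRsize, biggest)
        cv2.imshow("Video", imgOut)
    if cv2.waitKey(1) & 0xFF == ord('q'):         # press q to quit
        break
cap.release()
cv2.destroyAllWindows()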

 

