相机标定原理、步骤


  • 常用术语
    内参矩阵: Intrinsic Matrix
    焦距: Focal Length
    主点: Principal Point
    径向畸变: Radial Distortion
    切向畸变: Tangential Distortion
    旋转矩阵: Rotation Matrices
    平移向量: Translation Vectors
    平均重投影误差: Mean Reprojection Error
    重投影误差: Reprojection Errors
    重投影点: Reprojected Points
  • 相机标定流程
    1.从标定板图片或者视频流中读取标定板信息
    2.提取标定板中的关键点,保存在数组中
    3.应用cv2.calibrateCamera()标定函数
    4.应用cv2.getOptimalNewCameraMatrix()函数
    5.保存摄像机内参信息
from pathlib import Path
import cv2
import numpy as np
from models.utils import VideoStreamer
import os.path as osp
import json
import datetime


class InitConfigs(object):
    """Lightweight configuration holder: keyword arguments become attributes."""

    def dict_to_object(self, **data):
        # Merge every keyword argument onto this instance's namespace.
        for key, value in data.items():
            setattr(self, key, value)


class CalibrateCamera(object):
    """Detect chessboard corners in an image/video stream and compute camera intrinsics.

    Pipeline:
      1. Read frames from the input source (webcam / IP camera / image dir / video).
      2. Detect chessboard keypoints and accumulate object/image point pairs.
      3. Run ``cv2.calibrateCamera()``.
      4. Refine with ``cv2.getOptimalNewCameraMatrix()``.
      5. Save the camera intrinsics as JSON in ``output_dir``.
    """

    def __init__(self, inputs):
        """Build the streamer and detect the source type.

        Args:
            inputs (dict): configuration, e.g. input=0, resize=[-1], skip=1,
                image_glob=['*.png', '*.jpg', '*.jpeg'], max_length=1000000,
                pattern_size=(11, 8), output_dir='output'.
        """
        configs = InitConfigs()
        configs.dict_to_object(**inputs)
        self.configs = configs
        self.vs = VideoStreamer(configs.input, configs.resize, configs.skip,
                                configs.image_glob, configs.max_length)
        self.calibrate = False    # toggled by pressing 'c' (live sources only)
        self.image_size = None    # (height, width) of the last processed gray frame
        self.quit = False         # set by pressing 'q'
        self.source_type = self.check_source_type()

    def check_source_type(self):
        """Classify the input as webcam / IP camera / image directory / video file.

        Returns:
            str: one of "webcam", "IPcam", "image", "video".

        Raises:
            ValueError: if the input matches none of the supported source types.
        """
        basedir = self.configs.input
        if isinstance(basedir, int) or basedir.isdigit():
            print('==> USB webcam input: {}'.format(basedir))
            source_type = "webcam"
        elif basedir.startswith(('http', 'rtsp')):
            print('==> IP camera input: {}'.format(basedir))
            source_type = "IPcam"
        elif Path(basedir).is_dir():
            print('==> Image directory input: {}'.format(basedir))
            source_type = "image"
        elif Path(basedir).exists():
            print('==> Video input: {}'.format(basedir))
            source_type = "video"
        else:
            raise ValueError('VideoStreamer input \"{}\" not recognized.'.format(basedir))
        return source_type

    def keyboard_response(self):
        """Poll the OpenCV window for a key press: 'q' quits, 'c' triggers a capture."""
        key = chr(cv2.waitKey(1) & 0xFF)
        if key == 'q':
            self.vs.cleanup()
            self.quit = True
            print('Exiting (via q).')
        elif key == 'c':  # request chessboard detection on the current frame
            self.calibrate = True
            print("self.calibrate", self.calibrate)

    def draw_chessboard_corners(self, frame, corners, color=(0, 255, 0), thickness=1, radius=3):
        """Draw detected corners on the frame; grayscale frames are promoted to 3-channel.

        Args:
            frame: input image (grayscale or BGR).
            corners: corner array as returned by cv2.findChessboardCorners (N, 1, 2).

        Returns:
            The annotated 3-channel image.
        """
        if len(frame.shape) < 3:
            frame = np.expand_dims(frame, axis=2)
            frame = np.repeat(frame, 3, axis=2)
        for i, corner in enumerate(corners):
            # Fixed: np.int was removed in NumPy 1.24; use the builtin int dtype.
            cv2.circle(frame, tuple(corner[0].astype(int)), radius, color, thickness)
        return frame

    def get_keypoints(self):
        """Collect object/image point pairs from the source.

        For image directories every frame is processed automatically; for live or
        video sources, detection is triggered per-frame by pressing 'c'.

        Returns:
            tuple[list, list]: (objps, imgps) — per-board 3D object points and
            their detected 2D image corners.
        """
        objps, imgps = [], []
        # Fixed: the original compared strings with "is" (identity), which only
        # works by accident of interning; "==" is the correct comparison.
        if self.source_type == "image":
            while True:
                self.keyboard_response()
                if self.quit:
                    break
                frame, ret = self.vs.next_frame()
                if not ret:
                    print('Finished detection.')
                    break
                objp, imgp, valid = self.detect_keypoints(frame)
                out = frame
                if valid:
                    objps.append(objp)
                    imgps.append(imgp)
                    out = self.draw_chessboard_corners(frame, imgp)
                cv2.imshow('frame', out)
                cv2.waitKey(500)
        else:
            frame, ret = self.vs.next_frame()
            assert ret, 'Error when reading the first frame (try different --input?)'

            while True:
                self.keyboard_response()
                if self.quit:
                    break
                frame, ret = self.vs.next_frame()
                if not ret:
                    print('Finished detection.')
                    break
                if self.calibrate:
                    objp, imgp, valid = self.detect_keypoints(frame)
                    if valid:
                        print("==> Chessboard detected, press any key to continue.")
                        objps.append(objp)
                        imgps.append(imgp)
                        out = self.draw_chessboard_corners(frame, imgp)
                        cv2.imshow('frame', out)
                        cv2.waitKey(0)
                        # Save the raw frame with a timestamped name for traceability.
                        st = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S.jpg")
                        filename = osp.join(self.configs.output_dir, st)
                        cv2.imwrite(filename, frame)
                    else:
                        print("==> Chessboard detection failed, check your chessboard.")
                    self.calibrate = False
                cv2.imshow('frame', frame)
        cv2.destroyAllWindows()
        self.vs.cleanup()
        return objps, imgps

    def detect_keypoints(self, img):
        """Find and refine chessboard corners in one frame.

        Args:
            img: input image (BGR or grayscale).

        Returns:
            tuple: (objp, corners, ret) — 3D board coordinates (or None when not
            found), the (refined) 2D corners, and the detection success flag.
        """
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        if len(img.shape) > 2:
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        else:
            gray = img
        self.image_size = gray.shape  # (height, width)
        ret, corners = cv2.findChessboardCorners(gray, self.configs.pattern_size, None)
        objp = None
        if ret:
            corners = cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), criteria)
            # Board coordinates in units of one square; Z = 0 for a planar target.
            objp = np.zeros((self.configs.pattern_size[0] * self.configs.pattern_size[1], 3), np.float32)
            # NOTE(review): the grid axes are built as (pattern_size[1], pattern_size[0]),
            # the transpose of the usual OpenCV tutorial ordering — verify this matches
            # the corner ordering returned by findChessboardCorners for your board.
            objp[:, :2] = np.mgrid[0:self.configs.pattern_size[1], 0:self.configs.pattern_size[0]].T.reshape(-1, 2)
        return objp, corners, ret

    def run(self):
        """Run the full calibration pipeline and write the intrinsics JSON."""
        objpoints, imgpoints = self.get_keypoints()
        print('==> Collected {} chessboard detections.'.format(len(objpoints)))
        if len(objpoints) > 1 and len(imgpoints) > 1:
            # ret is the RMS re-projection error (float); non-zero (truthy) on success.
            ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
                objpoints, imgpoints, self.image_size[::-1], None, None)
            if ret:
                h, w = self.image_size
                # alpha=1 keeps all source pixels in the undistorted image.
                cameramtx, _ = cv2.getOptimalNewCameraMatrix(mtx, dist, (w, h), 1, (w, h))
                if self.configs.output_dir is not None:
                    # Fixed: was 'intrinsics2.json', inconsistent with the documented
                    # output file name 'intrinsics.json'.
                    out_name = osp.join(self.configs.output_dir, 'intrinsics.json')
                    with open(out_name, 'w') as fp:
                        json.dump({"camera_matrix": cameramtx.reshape(1, -1).tolist(),
                                   "distortions": dist.reshape(1, -1).tolist()}, fp)
            else:
                print("==> Not enough corners detected. Please retry the chessboard detection.")


if __name__ == '__main__':
    # Fixed: the original used a C-style "//" comment inside the dict literal,
    # which is a Python SyntaxError; Python comments use "#".
    inputs = {
        "input": "output",  # directory holding the chessboard images
        "output_dir": 'output',
        "image_glob": ['*.png', '*.jpg', '*.jpeg'],
        "skip": 1,
        "max_length": 1000000,
        "resize": [-1],
        "pattern_size": (11, 8),  # inner corners per chessboard row/column
    }
    runner = CalibrateCamera(inputs)
    runner.run()
  • 读取视频流类
class VideoStreamer:
    """ Class to help process image streams. Four types of possible inputs:
        1.) USB Webcam.
        2.) An IP camera
        3.) A directory of images (files in directory matching 'image_glob').
        4.) A video file, such as an .mp4 or .avi file.
    """
    def __init__(self, basedir, resize, skip, image_glob, max_length=1000000):
        # IP-camera bookkeeping (consumed by the background grab thread).
        self._ip_grabbed = False
        self._ip_running = False
        self._ip_camera = False
        self._ip_image = None
        self._ip_index = 0
        # Shared stream state.
        self.cap = []
        self.camera = True
        self.video_file = False
        self.listing = []
        self.resize = resize
        self.interp = cv2.INTER_AREA
        self.i = 0
        self.skip = skip
        self.max_length = max_length

        webcam_input = isinstance(basedir, int) or basedir.isdigit()
        if webcam_input:
            print('==> Processing USB webcam input: {}'.format(basedir))
            self.cap = cv2.VideoCapture(int(basedir))
            self.listing = range(0, self.max_length)
        elif basedir.startswith(('http', 'rtsp')):
            print('==> Processing IP camera input: {}'.format(basedir))
            self.cap = cv2.VideoCapture(basedir)
            self.start_ip_camera_thread()
            self._ip_camera = True
            self.listing = range(0, self.max_length)
        elif Path(basedir).is_dir():
            print('==> Processing image directory input: {}'.format(basedir))
            # Gather every file matching any of the glob patterns.
            paths = []
            for pattern in image_glob:
                paths += list(Path(basedir).glob(pattern))
            paths.sort()
            paths = paths[::self.skip]
            self.max_length = np.min([self.max_length, len(paths)])
            if self.max_length == 0:
                raise IOError('No images found (maybe bad \'image_glob\' ?)')
            self.listing = paths[:self.max_length]
            self.camera = False
        elif Path(basedir).exists():
            print('==> Processing video input: {}'.format(basedir))
            self.cap = cv2.VideoCapture(basedir)
            self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 1)
            frame_total = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
            frames = range(0, frame_total)[::self.skip]
            self.video_file = True
            self.max_length = np.min([self.max_length, len(frames)])
            self.listing = frames[:self.max_length]
        else:
            raise ValueError('VideoStreamer input \"{}\" not recognized.'.format(basedir))

        if self.camera and not self.cap.isOpened():
            raise IOError('Could not read camera')
  • 标定板图片保存在output目录中,最终的摄像机内参信息也保存在该文件夹,文件名为intrinsics.json,目录如下图所示。
    image


免责声明!

本站转载的文章为个人学习借鉴使用,本站对版权不负任何法律责任。如果侵犯了您的隐私权益,请联系本站邮箱yoyou2525@163.com删除。



 
粤ICP备18138465号  © 2018-2025 CODEPRJ.COM