import cv2
import numpy as np
import glob
from scipy.io import savemat
import matplotlib.pyplot as plt


def calibrate_screen(screen_img_path, cam_mtx, cam_dist, chessboard_size, square_size, debug=False):
    """Estimate the screen's pose (and refined intrinsics) from chessboard images.

    Each image is decoded, undistorted with the supplied camera intrinsics,
    and searched for a chessboard pattern. Detected corners from all usable
    images are fed to ``cv2.calibrateCamera``; the rotation matrix and
    translation vector of the LAST processed image are returned as the
    screen pose.

    Parameters
    ----------
    screen_img_path : iterable of str
        Paths to the chessboard images shown on the screen.
    cam_mtx : np.ndarray
        3x3 camera intrinsic matrix, used to undistort the input images and
        passed on to ``cv2.calibrateCamera``.
    cam_dist : np.ndarray
        Camera distortion coefficients, used for undistortion.
    chessboard_size : tuple of int
        Inner-corner grid size (cols, rows) of the chessboard pattern.
    square_size : float
        Physical size of one chessboard square (world units).
    debug : bool, optional
        When True, display each detection with the origin and X/Y axes
        annotated (blocks on ``cv2.waitKey``).

    Returns
    -------
    dict
        Keys: ``'screen_matrix'``, ``'screen_distortion'``,
        ``'screen_rotation_matrix'``, ``'screen_translation_vector'``,
        ``'screen_error'``.

    Raises
    ------
    ValueError
        If no chessboard could be detected in any input image.
    """
    # Object points of the chessboard corners in world coordinates:
    # a regular (cols x rows) grid on the Z=0 plane, scaled by square_size.
    objp = np.zeros((chessboard_size[0] * chessboard_size[1], 3), np.float32)
    objp[:, :2] = np.mgrid[0:chessboard_size[0], 0:chessboard_size[1]].T.reshape(-1, 2)
    objp *= square_size

    objpoints = []  # 3-D points (world frame), one objp copy per detection
    imgpoints = []  # 2-D points (image frame), refined corner positions

    # Sub-pixel refinement termination criteria (loop-invariant, hoisted).
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)

    gray = None
    for img_path in screen_img_path:
        # np.fromfile + imdecode instead of cv2.imread so that paths with
        # non-ASCII characters are read correctly.
        image_data = np.fromfile(img_path, dtype=np.uint8)
        img = cv2.imdecode(image_data, cv2.IMREAD_COLOR)
        dst = cv2.undistort(img, cam_mtx, cam_dist, None, None)
        gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)

        ret, corners = cv2.findChessboardCorners(gray, chessboard_size, flags=cv2.CALIB_CB_FAST_CHECK)
        if not ret:
            continue
        print('screen img_path = ', img_path)
        objpoints.append(objp)
        corners_refined = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
        imgpoints.append(corners_refined)

        if debug:
            # Draw on the UNDISTORTED image: the corners were detected on
            # `dst`/`gray`, so drawing them on the raw `img` (as the old
            # code did) produces a misaligned overlay.
            vis = cv2.drawChessboardCorners(dst.copy(), chessboard_size, corners, ret)
            # Mark the coordinate origin (first detected corner).
            origin = tuple(corners[0].ravel().astype(int))
            cv2.putText(vis, '(0,0)', origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            # X-axis arrow: next corner along the row.
            end_x = tuple(corners[1].ravel().astype(int))
            cv2.arrowedLine(vis, origin, end_x, (255, 255, 255), 2, tipLength=0.2)
            cv2.putText(vis, 'X', (end_x[0] + 10, end_x[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            # Y-axis arrow: next corner along the column.
            end_y = tuple(corners[chessboard_size[0]].ravel().astype(int))
            cv2.arrowedLine(vis, origin, end_y, (255, 255, 255), 2, tipLength=0.2)
            cv2.putText(vis, 'Y', (end_y[0], end_y[1] + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            cv2.namedWindow('screen', 0)
            cv2.imshow('screen', vis)
            cv2.waitKey(0)
            cv2.destroyAllWindows()

    if not objpoints:
        # Without this guard the code below would fail with an opaque
        # NameError (gray unset) or ZeroDivisionError.
        raise ValueError('No chessboard corners detected in any screen image; cannot calibrate.')

    objpoints = np.array(objpoints)
    imgpoints = np.array(imgpoints)

    # NOTE(review): cv2.CALIB_USE_INTRINSIC_GUESS is NOT set, so OpenCV
    # ignores cam_mtx/cam_dist as an initial guess and estimates the
    # intrinsics from scratch -- confirm whether seeding was intended.
    # gray.shape[::-1] is (width, height) of the last processed image;
    # assumes all input images share one resolution -- TODO confirm.
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], cam_mtx, cam_dist)

    # Pose of the LAST image: Rodrigues rotation vector -> 3x3 matrix.
    R = cv2.Rodrigues(rvecs[-1])[0]
    T = tvecs[-1]

    # NOTE(review): `ret` from calibrateCamera is already the overall RMS
    # reprojection error; dividing by the view count is kept for backward
    # compatibility with downstream consumers but is not a true per-image RMS.
    mat_data = {
        'screen_matrix': mtx,
        'screen_distortion': dist,
        'screen_rotation_matrix': R,
        'screen_translation_vector': T,
        'screen_error': ret / len(objpoints)
    }
    return mat_data