import cv2
import numpy as np
import glob
import scipy.io as sio
import matplotlib.pyplot as plt
from aprilgrid import Detector

from .calibrate import generate_3d_points, show_detect_result


def calibrate_world_aprilgrid(calibration_images_path,
                              world_chessboard_size=(6, 6),
                              world_square_size=35.2,
                              world_square_spacing=10.56,
                              debug=False):
    # Initialize the AprilTag detector
    detector = Detector('t36h11')

    # Board layout and tag size, kept for reference
    # grid_size = (5, 6)   # rows and columns of the board
    # tag_size = 25.3125   # side length of each tag (millimeters)
    object_point_single = generate_3d_points(world_chessboard_size, world_square_size, world_square_spacing)

    if False:  # manual toggle: visualize the generated 3D points
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        # Plot the four corners of every tag
        for tag in object_point_single:
            x_vals = tag[:, 0]
            y_vals = tag[:, 1]
            z_vals = tag[:, 2]
            # Connect the four corners of each tag into a closed square
            ax.plot(x_vals[[0, 1, 2, 3, 0]], y_vals[[0, 1, 2, 3, 0]], z_vals[[0, 1, 2, 3, 0]], marker='o')
        # Axis labels and title
        ax.set_xlabel('X (mm)')
        ax.set_ylabel('Y (mm)')
        ax.set_zlabel('Z (mm)')
        ax.set_title('3D Visualization of AprilGrid Points')
        plt.show()

    # Detected 2D corners and matching 3D points, accumulated over all images
    all_corners = []
    all_object_points = []
    image_size = None
    img_lst = []

    # Process every calibration image
    for img_file in calibration_images_path:
        print('Processing:', img_file)
        # Read and decode the image (np.fromfile + imdecode also handles non-ASCII paths)
        image_data = np.fromfile(img_file, dtype=np.uint8)
        img = cv2.imdecode(image_data, cv2.IMREAD_COLOR)
        img_lst.append(img)
        if image_size is None:
            image_size = (img.shape[1], img.shape[0])  # remember the image size

        # Convert to grayscale
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        # Detect AprilTags
        detections = detector.detect(gray)
        if debug:
            show_detect_result(img, detections)

        # If any tags were detected
        if detections:
            image_points = []
            object_points = []
            # Walk through every detected tag
            for detection in detections:
                corners = detection.corners  # detected corners, shape (4, 2)
                tag_id = detection.tag_id    # tag ID
                print(tag_id, corners)
                # Store the detected corners as np.float32
                image_points.append(corners.astype(np.float32))
                # Look up the matching 3D coordinates by tag ID
                if tag_id < len(object_point_single):
                    object_points.append(object_point_single[tag_id])
                else:
                    print(f"Warning: Tag ID {tag_id} is out of range.")

            if object_points and image_points:
                # 2D corners of the current image, reshaped to (N, 2)
                all_corners.append(np.array(image_points, dtype=np.float32).reshape(-1, 2))
                # Matching 3D coordinates, reshaped to (N, 3)
                all_object_points.append(np.array(object_points, dtype=np.float32).reshape(-1, 3))
            else:
                print("No valid detections in this image.")

    # Run the camera calibration
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        all_object_points,  # 3D object points
        all_corners,        # 2D image points
        image_size,         # image size
        None,               # initial camera matrix
        None                # initial distortion coefficients
    )

    # Compute the re-projection error
    total_error = 0
    for i in range(len(all_object_points)):
        imgpoints2, _ = cv2.projectPoints(all_object_points[i], rvecs[i], tvecs[i], mtx, dist)
        # Reshape imgpoints2 from (N, 1, 2) to (N, 2) so it matches all_corners[i];
        # both arrays must share the same dtype for cv2.norm
        imgpoints2 = np.squeeze(imgpoints2)
        error = cv2.norm(all_corners[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
        total_error += error
        if debug:
            for (x, y), (px, py) in zip(all_corners[i], imgpoints2):
                cv2.circle(img_lst[i], (int(x), int(y)), 5, (0, 255, 0), -1)    # detected points in green
                cv2.circle(img_lst[i], (int(px), int(py)), 5, (0, 0, 255), -1)  # reprojected points in red
            cv2.namedWindow('Error Visualization', 0)
            cv2.imshow('Error Visualization', img_lst[i])
            cv2.waitKey(0)
    mean_error = total_error / len(all_object_points)
    # print(f"Mean re-projection error: {mean_error}")
    print('rvecs[0] = ', rvecs[0])

    # Rotation matrix and translation vector of the first image
    R = cv2.Rodrigues(rvecs[0])[0]  # convert the rotation vector to a rotation matrix
    T = tvecs[0]
    # Build a dictionary holding all calibration results
    calibration_data = {
        'camera_matrix': mtx,
        'distortion_coefficients': dist,
        'rotation_matrix': R,
        'translation_vector': T,
        'error': ret / len(all_object_points)  # overall RMS error from calibrateCamera, averaged over views
    }
    # Alternative payload kept for reference: per-view extrinsics plus the mean re-projection error
    # calibration_data = {
    #     'camera_matrix': mtx,
    #     'distortion_coefficients': dist,
    #     'rotation_matrix': rvecs,
    #     'translation_vector': tvecs,
    #     'mean_reprojection_error': mean_error,
    #     'error': ret / len(all_object_points)
    # }
    print(calibration_data)
    return calibration_data


def calibrate_world(world_img_path, chessboard_size, square_size, debug=False):
    # Prepare the world coordinates of the chessboard corners
    objp = np.zeros((chessboard_size[0] * chessboard_size[1], 3), np.float32)
    objp[:, :2] = np.mgrid[0:chessboard_size[0], 0:chessboard_size[1]].T.reshape(-1, 2)
    objp *= square_size

    # Store the world and image coordinates of the chessboard corners
    objpoints = []  # 3D points in the world coordinate system
    imgpoints = []  # 2D points in the image plane

    for fname in world_img_path:
        img = cv2.imread(fname)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

        # Find the chessboard corners
        # ret, corners = cv2.findChessboardCorners(gray, chessboard_size, None)
        flag = cv2.CALIB_CB_EXHAUSTIVE | cv2.CALIB_CB_ACCURACY
        ret, corners = cv2.findChessboardCornersSB(gray, chessboard_size, flags=flag)

        # If enough corners were found, add them to the lists
        if ret:
            objpoints.append(objp)
            criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
            corners_refined = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
            imgpoints.append(corners_refined)

            if debug:
                img = cv2.drawChessboardCorners(img, chessboard_size, corners, ret)
                # Mark the coordinate origin (the first corner) on the image
                origin = tuple(corners[0].ravel().astype(int))
                cv2.putText(img, '(0,0)', origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
                # Draw the x-axis arrow (to the right): the next corner along the row
                end_x = tuple(corners[1].ravel().astype(int))
                cv2.arrowedLine(img, origin, end_x, (255, 255, 255), 2, tipLength=0.2)
                cv2.putText(img, 'X', (end_x[0] + 10, end_x[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
                # Draw the y-axis arrow (downwards): the next corner along the column
                end_y = tuple(corners[chessboard_size[0]].ravel().astype(int))
                cv2.arrowedLine(img, origin, end_y, (255, 255, 255), 2, tipLength=0.2)
                cv2.putText(img, 'Y', (end_y[0], end_y[1] + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
                cv2.namedWindow('img', 0)
                cv2.imshow('img', img)
                cv2.waitKey(100)

    cv2.destroyAllWindows()

    objpoints = np.array(objpoints)
    imgpoints = np.array(imgpoints)

    if debug:
        fig, ax = plt.subplots(1, 2, figsize=(12, 6))
        # First subplot: object points (objp)
        ax[0].scatter(objpoints[-1, :, 0], objpoints[-1, :, 1], color='blue', label='objp (Object Points)')
        for i in range(len(objpoints[0])):
            ax[0].text(objpoints[-1, i, 0], objpoints[-1, i, 1], f'{i}', color='blue', fontsize=12)
        ax[0].set_title('Object Points (objp)')
        ax[0].set_xlabel('X Axis')
        ax[0].set_ylabel('Y Axis')
        ax[0].grid(True)
        ax[0].axis('equal')
        # Second subplot: detected image corners
        ax[1].scatter(imgpoints[-1, :, 0, 0], imgpoints[-1, :, 0, 1], color='red', label='corners (Image Points)')
        for i in range(len(imgpoints[0])):
            ax[1].text(imgpoints[-1, i, 0, 0], imgpoints[-1, i, 0, 1], f'{i}', color='red', fontsize=12)
        ax[1].set_title('Camera Image Points (corners)')
        ax[1].set_xlabel('X')
        ax[1].set_ylabel('Y')
        ax[1].grid(True)
        ax[1].axis('equal')
        # plt.show()

    # Camera calibration
    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)

    # Rotation matrix and translation vector of the last image
    R_vector = rvecs[-1]
    R = cv2.Rodrigues(R_vector)[0]  # convert the rotation vector to a rotation matrix
    T = tvecs[-1]

    # Build a dictionary holding all calibration results
    calibration_data = {
        'camera_matrix': mtx,
        'distortion_coefficients': dist,
        'rotation_matrix': R,
        'rotation_vector': R_vector,
        'translation_vector': T,
        'error': ret / len(objpoints)  # overall RMS error from calibrateCamera, averaged over views
    }

    return calibration_data
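

# --- Usage sketch (illustrative, not part of the original pipeline) ---
# A minimal example of how the two calibration entry points above might be
# called. The glob patterns and the chessboard geometry below are placeholder
# assumptions; adjust them to the actual capture layout. Because this module
# uses a relative import, run it via `python -m <package>.<module>` rather
# than as a plain script.
if __name__ == '__main__':
    # AprilGrid calibration with the default 6x6 grid, 35.2 mm tags, 10.56 mm spacing
    aprilgrid_images = sorted(glob.glob('./data/aprilgrid/*.png'))  # hypothetical path
    if aprilgrid_images:
        april_result = calibrate_world_aprilgrid(aprilgrid_images, debug=False)
        print('AprilGrid camera matrix:\n', april_result['camera_matrix'])

    # Chessboard calibration: 11x8 inner corners, 20 mm squares (placeholder values)
    chessboard_images = sorted(glob.glob('./data/chessboard/*.png'))  # hypothetical path
    if chessboard_images:
        chess_result = calibrate_world(chessboard_images, chessboard_size=(11, 8),
                                       square_size=20.0, debug=False)
        print('Chessboard camera matrix:\n', chess_result['camera_matrix'])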