Browse Source

0108update

zhangkang 6 months ago
parent
commit
5a2672b962
54 changed files with 2587 additions and 633 deletions
  1. BIN
      config/cam_matlab.pkl
  2. BIN
      config/cam_params4.pkl
  3. BIN
      config/cam_params_1 - 副本.pkl
  4. BIN
      config/cam_params_1.pkl
  5. BIN
      config/cam_params_1115.pkl
  6. BIN
      config/cam_params_1119.pkl
  7. BIN
      config/cam_params_1125.pkl
  8. BIN
      config/cam_params_4_refined.pkl
  9. 0 0
      config/camera_params_1129.json
  10. BIN
      config/camera_params_1129.pkl
  11. BIN
      config/camera_params_1203.pkl
  12. BIN
      config/camera_params_1216_4_1.pkl
  13. BIN
      config/camera_params_1226_1.pkl
  14. BIN
      config/camera_params_1227cam0.pkl
  15. BIN
      config/camera_params_1227cam1.pkl
  16. BIN
      config/camera_params_1227cam2.pkl
  17. BIN
      config/camera_params_1227cam3.pkl
  18. 13 9
      config/cfg_3freq_wafer_1.json
  19. 25 0
      config/cfg_3freq_wafer_1226.json
  20. 20 0
      config/cfg_3freq_wafer_1_test.json
  21. 20 0
      config/cfg_3freq_wafer_1_test_chess.json
  22. 6 5
      config/cfg_3freq_wafer_4.json
  23. 3 3
      config/cfg_3freq_wafer_matlab.json
  24. 20 0
      config/cfg_3freq_wafer_pad1115.json
  25. 21 0
      config/cfg_3freq_wafer_pad1119.json
  26. 21 0
      config/cfg_3freq_wafer_pad1125.json
  27. 25 0
      config/cfg_3freq_wafer_pad1203.json
  28. 26 0
      config/cfg_3freq_wafer_pad1219-4-1.json
  29. BIN
      config/screen_matlab.pkl
  30. BIN
      config/screen_params4.pkl
  31. BIN
      config/screen_params_1 - 副本.pkl
  32. BIN
      config/screen_params_1.pkl
  33. BIN
      config/screen_params_1115.pkl
  34. BIN
      config/screen_params_1119.pkl
  35. BIN
      config/screen_params_1125.pkl
  36. BIN
      config/screen_params_1129.pkl
  37. BIN
      config/screen_params_1203.pkl
  38. BIN
      config/screen_params_1216_4_1.pkl
  39. BIN
      config/screen_params_1226_1.pkl
  40. 497 283
      pmd.py
  41. 4 3
      src/__init__.py
  42. 4 3
      src/calibration/__init__.py
  43. 166 30
      src/calibration/calibrate.py
  44. 105 142
      src/calibration/get_camera_params.py
  45. 257 9
      src/calibration/get_screen_params.py
  46. 1 1
      src/eval.py
  47. 9 6
      src/phase.py
  48. 20 19
      src/recons.py
  49. 103 56
      src/utils.py
  50. 3 1
      src/vis.py
  51. 118 0
      tools/0_generate_fringe.py
  52. 97 49
      tools/chessboard.py
  53. 2 2
      tools/generate_fringe.py
  54. 1001 12
      unit_test.py

BIN
config/cam_matlab.pkl


BIN
config/cam_params4.pkl


BIN
config/cam_params_1 - 副本.pkl


BIN
config/cam_params_1.pkl


BIN
config/cam_params_1115.pkl


BIN
config/cam_params_1119.pkl


BIN
config/cam_params_1125.pkl


BIN
config/cam_params_4_refined.pkl


+ 0 - 0
config/camera_params_1129.json


BIN
config/camera_params_1129.pkl


BIN
config/camera_params_1203.pkl


BIN
config/camera_params_1216_4_1.pkl


BIN
config/camera_params_1226_1.pkl


BIN
config/camera_params_1227cam0.pkl


BIN
config/camera_params_1227cam1.pkl


BIN
config/camera_params_1227cam2.pkl


BIN
config/camera_params_1227cam3.pkl


+ 13 - 9
config/cfg_3freq_wafer_1.json

@@ -1,16 +1,20 @@
1 1
 {
2 2
     "cam_num":1,
3
-    "screen_square_size": 29,
4
-    "screen_chessboard_size": [10, 7],
5
-    "world_chessboard_size": [8, 8],
6
-    "d": -20.7,
3
+    "screen_square_size": 29.0960,
4
+    "screen_chessboard_size": [9, 16],
5
+    "screen_square_spacing":14.5498,
6
+    "world_chessboard_size": [6, 6],
7
+    "world_square_size": 35.2,
8
+    "world_square_spacing":10.56,
9
+    "d": 11.22,
7 10
     "num_freq": 3,
8 11
     "screenPixelSize": [1920, 1080],
9
-    "screenorigin_point": [1320, 300],
10
-    "Axis_conversion": [[-1, 1]],
12
+    "screenorigin_point": [610, 791],
13
+    "Axis_conversion": [[1, -1]],
11 14
     "k": 32.0,
12 15
     "pixelSize_mm": 0.363745,
13 16
     "grid_spacing": 1,
14
-    "cam_params": "cam_params_1.pkl",
15
-    "screen_params":"screen_params_1.pkl"
16
-}
17
+    "cam_params": "camera_params_1226_1.pkl",
18
+    "screen_params":"screen_params_1226_1.pkl"
19
+}
20
+

+ 25 - 0
config/cfg_3freq_wafer_1226.json

@@ -0,0 +1,25 @@
1
+{
2
+    "cam_num":1,
3
+    "screen_square_size": 29.0960,
4
+    "screen_chessboard_size": [9, 16],
5
+    "screen_square_spacing":14.5498,
6
+    "world_chessboard_size": [6, 6],
7
+    "world_square_size": 35.2,
8
+    "world_square_spacing":10.56,
9
+    "d":5.7,
10
+    "d3":4.18,
11
+    "d2":5.7,
12
+    "dright": 5.78,
13
+    "ddapingjing": 5.48,
14
+    "dwafer":-8.78,
15
+    "num_freq": 3,
16
+    "screenPixelSize": [1920, 1080],
17
+    "screenorigin_point": [610, 791],
18
+    "Axis_conversion": [[1, -1]],
19
+    "k": 32.0,
20
+    "pixelSize_mm": 0.363745,
21
+    "grid_spacing": 1,
22
+    "cam_params": "camera_params_1226_1.pkl",
23
+    "screen_params":"screen_params_1226_1.pkl"
24
+}
25
+

+ 20 - 0
config/cfg_3freq_wafer_1_test.json

@@ -0,0 +1,20 @@
1
+{
2
+    "cam_num":1,
3
+    "screen_square_size": 6.596,  
4
+    "screen_chessboard_size": [9, 16],
5
+    "screen_square_spacing":3.4,
6
+    "world_chessboard_size": [8, 8],
7
+    "world_square_size": 12,
8
+    "world_square_spacing":10.56,
9
+    "d": -1.7,
10
+    "num_freq": 3,
11
+    "screenPixelSize": [2360, 1640],
12
+    "screenorigin_point": [25, 1457],
13
+    "Axis_conversion": [[1, 1]],
14
+    "k": 32.0,
15
+    "pixelSize_mm": 0.068,
16
+    "grid_spacing": 1,
17
+    "cam_params": "cam_params_1.pkl",
18
+    "screen_params":"screen_params_1.pkl"
19
+}
20
+

+ 20 - 0
config/cfg_3freq_wafer_1_test_chess.json

@@ -0,0 +1,20 @@
1
+{
2
+    "cam_num":1,
3
+    "screen_square_size": 3.846,  
4
+    "screen_chessboard_size": [24, 15],
5
+    "screen_square_spacing":3.4,
6
+    "world_chessboard_size": [8, 8],
7
+    "world_square_size": 12,
8
+    "world_square_spacing":10.56,
9
+    "d": 0.07,
10
+    "num_freq": 3,
11
+    "screenPixelSize": [2360, 1640],
12
+    "screenorigin_point": [719, 1100],
13
+    "Axis_conversion": [[1, -1]],
14
+    "k": 32.0,
15
+    "pixelSize_mm": 0.096,
16
+    "grid_spacing": 1,
17
+    "cam_params": "cam_params_1.pkl",
18
+    "screen_params":"screen_params_1.pkl"
19
+}
20
+

+ 6 - 5
config/cfg_3freq_wafer_4.json

@@ -4,14 +4,15 @@
4 4
     "screen_chessboard_size": [10, 6],
5 5
     "world_chessboard_size": [6, 6],
6 6
     "world_square_size": 35.2,
7
-    "d": -2.5,
7
+    "world_square_spacing":10.56,
8
+    "d": 12.5,
8 9
     "num_freq": 3,
9 10
     "screenPixelSize": [1600, 1200],
10
-    "screenorigin_point": [649, 550],
11
-    "Axis_conversion": [[-1, -1]],
11
+    "screenorigin_point": [379, 298],
12
+    "Axis_conversion": [[-1, -1],[-1, -1], [-1, -1],[-1, -1]],
12 13
     "k": 32.0,
13
-    "pixelSize_mm": 0.253125,
14
+    "pixelSize_mm": 0.254,
14 15
     "grid_spacing":1,
15 16
     "cam_params": "cam_params_4.pkl",
16
-    "screen_params":"screen_params_4.pkl"
17
+    "screen_params":"screen_params_1216_4_1.pkl"
17 18
 }

+ 3 - 3
config/cfg_3freq_wafer_matlab.json

@@ -1,13 +1,13 @@
1 1
 {
2 2
     "cam_num":1,
3 3
     "screen_square_size": 9,
4
-    "screen_chessboard_size": [10, 7],
5
-    "world_chessboard_size": [8, 8],
4
+    "screen_chessboard_size": [9, 9],
5
+    "world_chessboard_size": [41, 40],
6 6
     "d": 10.04,  
7 7
     "num_freq": 3,
8 8
     "screenPixelSize": [2160, 1215],
9 9
     "screenorigin_point": [1512.5,121.5],
10
-    "Axis_conversion": [[-1, 1]],
10
+    "Axis_conversion": [[1, -1]],
11 11
     "k": 32.0,
12 12
     "pixelSize_mm": 0.095955,
13 13
     "grid_spacing": 1,

+ 20 - 0
config/cfg_3freq_wafer_pad1115.json

@@ -0,0 +1,20 @@
1
+{
2
+    "cam_num":1,
3
+    "screen_square_size": 3.84,  
4
+    "screen_chessboard_size": [24, 15],
5
+    "screen_square_spacing":0,
6
+    "world_chessboard_size": [8, 8],
7
+    "world_square_size": 12,
8
+    "world_square_spacing":0,
9
+    "d": 0.5,
10
+    "num_freq": 3,
11
+    "screenPixelSize": [2048, 1536],
12
+    "screenorigin_point": [563, 1047],
13
+    "Axis_conversion": [[1, -1]],
14
+    "k": 32.0,
15
+    "pixelSize_mm": 0.09,
16
+    "grid_spacing": 1,
17
+    "cam_params": "cam_params_1115.pkl",
18
+    "screen_params":"screen_params_1115.pkl"
19
+}
20
+

+ 21 - 0
config/cfg_3freq_wafer_pad1119.json

@@ -0,0 +1,21 @@
1
+{
2
+    "cam_num":1,
3
+    "screen_square_size": 3.702,  
4
+    "screen_chessboard_size": [24, 15],
5
+    "screen_square_spacing":0,
6
+    "world_chessboard_size": [8, 8],
7
+    "world_square_size": 12,
8
+    "world_square_spacing":0,
9
+    "d": -0.96,
10
+    "num_freq": 3,
11
+    "screenPixelSize": [1920, 1200],
12
+    "screenorigin_point_python": [614, 809], 
13
+    "screenorigin_point": [614, 809],
14
+    "screenorigin_point_matlab": [1305, 389], 
15
+    "Axis_conversion": [[1, -1]],
16
+    "k": 32.0,
17
+    "pixelSize_mm": 0.1234,
18
+    "grid_spacing": 1,
19
+    "cam_params": "cam_params_1119.pkl",
20
+    "screen_params":"screen_params_1119.pkl"
21
+}

+ 21 - 0
config/cfg_3freq_wafer_pad1125.json

@@ -0,0 +1,21 @@
1
+{
2
+    "cam_num":1,
3
+    "screen_square_size": 3.702,  
4
+    "screen_chessboard_size": [15, 24],
5
+    "screen_square_spacing":0,
6
+    "world_chessboard_size": [8, 8],
7
+    "world_square_size": 12,
8
+    "world_square_spacing":0,
9
+    "d": -2.96,
10
+    "num_freq": 3,
11
+    "screenPixelSize": [1920, 1200],
12
+    "screenorigin_point_python": [614, 809], 
13
+    "screenorigin_point": [510, 850],
14
+    "screenorigin_point_matlab": [1305, 389], 
15
+    "Axis_conversion": [[1, -1]],
16
+    "k": 32.0,
17
+    "pixelSize_mm": 0.1234,
18
+    "grid_spacing": 1,
19
+    "cam_params": "camera_params_1129.pkl",
20
+    "screen_params":"screen_params_1129.pkl"
21
+}

+ 25 - 0
config/cfg_3freq_wafer_pad1203.json

@@ -0,0 +1,25 @@
1
+{
2
+    "cam_num":1,
3
+    "screen_square_size": 12.34,  
4
+    "screen_chessboard_size": [10, 6],
5
+    "screen_square_spacing":0,
6
+    "world_chessboard_size": [8, 8],
7
+    "world_square_size": 12,
8
+    "world_square_spacing":0,
9
+    "d": -2.31,
10
+    "d_ao":-2.31,
11
+    "d_tu":-2.31,
12
+    "d_mirror":0.18,
13
+    "d_pingjing":-2.96,
14
+    "num_freq": 3,
15
+    "screenPixelSize": [1920, 1200],
16
+    "screenorigin_point_python": [614, 809], 
17
+    "screenorigin_point": [255, 189],
18
+    "screenorigin_point_matlab": [1305, 389], 
19
+    "Axis_conversion": [[1, -1]],
20
+    "k": 32.0,
21
+    "pixelSize_mm": 0.1234,
22
+    "grid_spacing": 1,
23
+    "cam_params": "camera_params_1203.pkl",
24
+    "screen_params":"screen_params_1203.pkl"
25
+}

+ 26 - 0
config/cfg_3freq_wafer_pad1219-4-1.json

@@ -0,0 +1,26 @@
1
+{
2
+    "cam_num":4,
3
+    "screen_square_size": 12.34,  
4
+    "screen_chessboard_size": [10, 6],
5
+    "screen_square_spacing":0,
6
+    "world_chessboard_size": [8, 8],
7
+    "world_square_size": 12,
8
+    "world_square_spacing":0,
9
+    "d": -5.18,
10
+    "d_ao":-2.31,
11
+    "d_tu":-2.31,
12
+    "d_mirror":0.18,
13
+    "d_pingjing":12.96,
14
+    "num_freq": 3,
15
+    "screenPixelSize": [1600, 1200],
16
+    "screenorigin_point_python": [0, 0], 
17
+    "screenorigin_point": [379, 298],
18
+    "screenorigin_point_matlab": [1220, 900], 
19
+    "Axis_conversion": [[-1, -1],[-1, -1],[-1, -1],[-1, -1]],
20
+    "k": 32.0,
21
+    "pixelSize_mm-old": 0.253125,
22
+    "pixelSize_mm": 0.253125,
23
+    "grid_spacing": 1,
24
+    "cam_params": "cam_params_4.pkl",
25
+    "screen_params":"screen_params_1216_4_1.pkl"
26
+}

BIN
config/screen_matlab.pkl


BIN
config/screen_params4.pkl


BIN
config/screen_params_1 - 副本.pkl


BIN
config/screen_params_1.pkl


BIN
config/screen_params_1115.pkl


BIN
config/screen_params_1119.pkl


BIN
config/screen_params_1125.pkl


BIN
config/screen_params_1129.pkl


BIN
config/screen_params_1203.pkl


BIN
config/screen_params_1216_4_1.pkl


BIN
config/screen_params_1226_1.pkl


+ 497 - 283
pmd.py

@@ -17,7 +17,8 @@ from src.phase import extract_phase, unwrap_phase
17 17
 from src.recons import reconstruction_cumsum, poisson_recons_with_smoothed_gradients
18 18
 from src.pcl_postproc import smooth_pcl, align2ref
19 19
 import matplotlib.pyplot as plt
20
-from src.calibration import calibrate_world, calibrate_screen, map_screen_to_world
20
+from src.calibration import calibrate_world, calibrate_screen_chessboard, calibrate_screen_circlegrid, map_screen_to_world
21
+from src.calibration.get_camera_params import calibrate_world_aprilgrid
21 22
 import argparse
22 23
 from src.vis import plot_coords
23 24
 import cv2
@@ -25,6 +26,8 @@ from src.eval import get_eval_result
25 26
 import pickle
26 27
 from collections import defaultdict
27 28
 from scipy.io import loadmat, savemat
29
+from scipy.optimize import minimize 
30
+import copy
28 31
 
29 32
 
30 33
 
@@ -40,359 +43,570 @@ def pmdstart(config_path, img_folder):
40 43
     return True
41 44
 
42 45
 
43
-def main(config_path, img_folder):
44
-    current_dir = os.path.dirname(os.path.abspath(__file__))
45
-    os.chdir(current_dir)
46
+def optimize_calibration_with_gradient_constraint(cam_params, screen_params, point_cloud, gradient_data, config_path):
47
+    # 只使用屏幕参数(旋转矩阵和平移向量)
48
+    initial_params = np.concatenate([
49
+        screen_params['screen_rotation_matrix'].flatten(),
50
+        screen_params['screen_translation_vector'].flatten()
51
+    ])
46 52
 
47
-    cfg = json.load(open(config_path, 'r'))
48
-    n_cam = cfg['cam_num']
49
-    num_freq = cfg['num_freq']
50
-    save_path = 'debug'
51
-    debug = False
52
-    grid_spacing = cfg['grid_spacing']
53
-    num_freq = cfg['num_freq']
54
-    smooth = False
55
-    align = False
56
-    denoise = False
57
-    #cammera_img_path = 'D:\\data\\four_cam\\calibrate\\calibrate-1008'
58
-    screen_img_path = 'D:\\data\\four_cam\\calibrate\\cam3-screen-1008'
59
-    cammera_img_path = 'D:\\data\\four_cam\\calibrate\\calibrate-1016'
60
-    #screen_img_path = 'D:\\data\\four_cam\\calibrate\\screen0920'
61
-
62
-    print(f"开始执行时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
63
-    print("\n1. 相机标定")
64
-    preprocess_start = time.time()
53
+    # 定义屏幕参数的尺度因子
54
+    scale_factors = np.ones_like(initial_params)
55
+    scale_factors[0:9] = 1.0    # 旋转矩阵
56
+    scale_factors[9:] = 100.0   # 平移向量(假设单位是毫米)
65 57
 
66
-    cam_para_path = os.path.join(current_dir, 'config', cfg['cam_params'])
67
-    print('cam_para_path = ', cam_para_path)
68
-    print('current_dir = ', current_dir)
69
-    #cam_para_path = 'D:\\code\\pmd-python\\config\\cam_params.pkl'
70
-    if os.path.exists(cam_para_path):
71
-    #if False:
72
-        with open(cam_para_path, 'rb') as pkl_file:
73
-            cam_params = pickle.load(pkl_file)
74
-    else:
75
-        cam_params = []
76
-        camera_subdir = [item for item in os.listdir(cammera_img_path) if os.path.isdir(os.path.join(cammera_img_path, item))]
77
-        camera_subdir.sort()
78
-        assert len(camera_subdir) == 4, f"found {len(camera_subdir)} cameras, should be 4"
79
-        for i in range(n_cam):
80
-            cam_img_path = glob.glob(os.path.join(cammera_img_path, camera_subdir[i], "*.bmp"))
81
-            cam_img_path.sort()
82
-            #print('cam_img_path = ', cam_img_path)
83
-            cam_param_raw = calibrate_world(cam_img_path, i, cfg['world_chessboard_size'], cfg['world_square_size'], debug=0)
84
-            cam_params.append(cam_param_raw)
85
-        # with open(cam_para_path, 'wb') as pkl_file:
86
-        #    pickle.dump(cam_params, pkl_file) 
58
+    # 归一化初始参数
59
+    normalized_params = initial_params / scale_factors
60
+    iteration_count = [0]
87 61
 
88
-    print("\n2. 屏幕标定")
89
-    screen_cal_start = time.time()
62
+    print("\nInitial screen parameters:")
63
+    print(f"Rotation matrix:\n{screen_params['screen_rotation_matrix']}")
64
+    print(f"Translation vector:\n{screen_params['screen_translation_vector']}")
90 65
 
91
-    screen_img_path = glob.glob(os.path.join(screen_img_path, "*.bmp"))
92
-    screen_para_path = os.path.join('config', cfg['screen_params'])   
93
-    if os.path.exists(screen_para_path):
94
-    #if False:
95
-        with open(screen_para_path, 'rb') as pkl_file:
96
-            screen_params = pickle.load(pkl_file)[0]
97
-    else:
98
-        screen_params = calibrate_screen(screen_img_path, cam_params[3]['camera_matrix'], cam_params[3]['distortion_coefficients'], cfg['screen_chessboard_size'], cfg['screen_square_size'], debug=0)
99
-        # with open(screen_para_path, 'wb') as pkl_file:
100
-        #     pickle.dump([screen_params], pkl_file)
66
+    def objective_function(normalized_params):
67
+        try:
68
+            # 反归一化参数
69
+            params = normalized_params * scale_factors
70
+            
71
+            iteration_count[0] += 1
72
+            print(f"\nIteration {iteration_count[0]}:")
73
+            
74
+            # 解析屏幕参数
75
+            screen_R = params[:9].reshape(3, 3)
76
+            screen_T = params[9:].reshape(3, 1)
77
+            
78
+            # 构建参数字典
79
+            temp_cam_params = [{
80
+                'camera_matrix': cam_params['camera_matrix'],
81
+                'distortion_coefficients': cam_params['distortion_coefficients'],
82
+                'rotation_matrix': cam_params['rotation_matrix'],
83
+                'rotation_vector': cam_params['rotation_vector'],
84
+                'translation_vector': cam_params['translation_vector']
85
+            }]
86
+            
87
+            temp_screen_params = {
88
+                'screen_matrix': screen_params['screen_matrix'],
89
+                'screen_distortion': screen_params['screen_distortion'],
90
+                'screen_rotation_matrix': screen_R,
91
+                'screen_rotation_vector': cv2.Rodrigues(screen_R)[0],
92
+                'screen_translation_vector': screen_T
93
+            }
94
+
95
+            if iteration_count[0] % 5 == 0:  # 每5次迭代打印一次详细信息
96
+                print("\nCurrent screen parameters:")
97
+                print(f"Rotation matrix:\n{screen_R}")
98
+                print(f"Translation vector:\n{screen_T.flatten()}")
99
+
100
+            try:
101
+                reconstructed_points, reconstructed_gradients = reconstruct_surface(
102
+                    temp_cam_params,
103
+                    temp_screen_params,
104
+                    config_path,
105
+                    img_folder
106
+                )
107
+                
108
+                # 只计算平面度误差
109
+                planarity_error = compute_planarity_error(reconstructed_points)
110
+                total_error = planarity_error
111
+                
112
+                print(f"Planarity error: {planarity_error:.6f}")
113
+                
114
+                # 监控点云变化
115
+                if hasattr(objective_function, 'prev_points'):
116
+                    point_changes = reconstructed_points - objective_function.prev_points
117
+                    print(f"Max point change: {np.max(np.abs(point_changes)):.8f}")
118
+                    print(f"Mean point change: {np.mean(np.abs(point_changes)):.8f}")
119
+                objective_function.prev_points = reconstructed_points.copy()
120
+                
121
+                return total_error
122
+                
123
+            except Exception as e:
124
+                print(f"Error in reconstruction: {str(e)}")
125
+                import traceback
126
+                traceback.print_exc()
127
+                return 1e10
128
+                
129
+        except Exception as e:
130
+            print(f"Error in parameter processing: {str(e)}")
131
+            import traceback
132
+            traceback.print_exc()
133
+            return 1e10
134
+
135
+    # 设置边界(只针对屏幕参数)
136
+    bounds = []
137
+    # 旋转矩阵边界
138
+    for i in range(9):
139
+        bounds.append((normalized_params[i]-0.5, normalized_params[i]+0.5))
140
+    # 平移向量边界
141
+    for i in range(3):
142
+        bounds.append((normalized_params[i+9]-0.5, normalized_params[i+9]+0.5))
143
+
144
+    # 优化
145
+    result = minimize(
146
+        objective_function,
147
+        normalized_params,
148
+        method='L-BFGS-B',
149
+        bounds=bounds,
150
+        options={
151
+            'maxiter': 100,
152
+            'ftol': 1e-8,
153
+            'gtol': 1e-8,
154
+            'disp': True,
155
+            'eps': 1e-3
156
+        }
157
+    )
158
+
159
+    # 反归一化获取最终结果
160
+    final_params = result.x * scale_factors
101 161
     
162
+    # 构建最终的参数字典
163
+    optimized_screen_params = {
164
+        'screen_matrix': screen_params['screen_matrix'],
165
+        'screen_distortion': screen_params['screen_distortion'],
166
+        'screen_rotation_matrix': final_params[:9].reshape(3, 3),
167
+        'screen_rotation_vector': cv2.Rodrigues(final_params[:9].reshape(3, 3))[0],
168
+        'screen_translation_vector': final_params[9:].reshape(3, 1)
169
+    }
102 170
     
103
-    screen_cal_end = time.time()
104
-    print(f"   完成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
105
-    print(f"   耗时: {screen_cal_end - screen_cal_start:.2f} 秒")
106
-   
107
-    print("\n3. 相位提取,相位展开")
108
-    phase_start = time.time()
109
-    x_uns, y_uns = [], []
110
-    binary_masks = []
111
-    for cam_id in range(n_cam):
112
-        print('cam_id = ', cam_id)
113
-        #white_path = os.path.join(img_folder, f'{cam_id}_frame_24.bmp') 
114
-        white_path = 'D:\\code\\code of PMD\\code of PMD\\picture\\white.bmp'
115
-        if n_cam == 3:
116
-            binary = get_white_mask(white_path, bin_thresh=12, debug=0)
117
-            
118
-        elif n_cam == 1:
119
-            #angle_rad, binary = find_notch(white_path, n_cam, debug=0)
120
-            binary = get_white_mask(white_path, bin_thresh=12, debug=0)
121
-
122
-        binary_masks.append(binary)
171
+    print("\nOptimization completed!")
172
+    print("Final screen parameters:")
173
+    print(f"Rotation matrix:\n{optimized_screen_params['screen_rotation_matrix']}")
174
+    print(f"Translation vector:\n{optimized_screen_params['screen_translation_vector']}")
175
+    
176
+    return cam_params, optimized_screen_params
123 177
 
124
-        #phases = extract_phase(img_folder, cam_id, binary, cam_params[cam_id]['camera_matrix'], cam_params[cam_id]['distortion_coefficients'], num_freq=num_freq)
125
-        #x_un, y_un = unwrap_phase(phases, save_path, num_freq, debug=0)
178
+def compute_planarity_error(points):
179
+    """计算点云的平面度误差"""
180
+    # 移除中心
181
+    centered_points = points - np.mean(points, axis=0)
182
+    
183
+    # 使用SVD找到最佳拟合平面
184
+    _, s, vh = np.linalg.svd(centered_points)
185
+    
186
+    # 最奇异值表示点到平面的平均距离
187
+    planarity_error = s[2] / len(points)
188
+    
189
+    return planarity_error
190
+
191
+# 在主程序中使用
192
+def optimize_parameters(cam_params, screen_params, point_cloud, gradient_data):
193
+    """
194
+    优化参数的包装函数
195
+    """
196
+    print("开始优化标定参数...")
197
+    
198
+    # 保存原始参数
199
+    original_cam_params = copy.deepcopy(cam_params)
200
+    original_screen_params = copy.deepcopy(screen_params)
201
+    
202
+    try:
203
+        # 执行优化
204
+        new_cam_params, new_screen_params = optimize_calibration_with_gradient_constraint(
205
+            cam_params,
206
+            screen_params,
207
+            point_cloud,
208
+            gradient_data,
209
+            config_path
210
+        )
126 211
         
127
-        #x_uns.append(x_un)
128
-        #y_uns.append(y_un)
129
-
130
-    phase_end = time.time()
131
-    print(f"   完成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
132
-    print(f"   耗时: {phase_end - phase_start:.2f} 秒")
212
+        # 打印优化前后的参数变化
213
+        print("\n参数优化结果:")
214
+        print("相机内参变化:")
215
+        print("原始值:\n", original_cam_params['camera_matrix'])
216
+        print("优化后:\n", new_cam_params['camera_matrix'])
217
+        
218
+        print("\n屏幕姿态变化:")
219
+        print("原始平移向量:", original_screen_params['screen_translation_vector'])
220
+        print("优化后平移向量:", new_screen_params['screen_translation_vector'])
221
+        
222
+        return new_cam_params, new_screen_params
223
+        
224
+    except Exception as e:
225
+        print("优化过程出错:")
226
+        print(f"错误类型: {type(e).__name__}")
227
+        print(f"错误信息: {str(e)}")
228
+        print("错误详细信息:")
229
+        import traceback
230
+        traceback.print_exc()
231
+        return original_cam_params, original_screen_params
232
+
233
+
234
+def optimization_callback(xk):
235
+    """
236
+    优化过程的回调函数,用于监控优化进度
237
+    """
238
+    global iteration_count
239
+    iteration_count += 1
133 240
     
134
-    # matlab align
135
-    #cam_params = loadmat('D:\\code\\code of PMD\\code of PMD\\calibration\\calibrationSessionworld.mat')
136
-    #screen_params = loadmat('D:\\code\\code of PMD\\code of PMD\\calibration\\calibrationSessionscreen.mat')
137
-   
138
-    #print('cam_params = ', cam_params)
139
-    #print('screen_params = ', screen_params)
241
+    # 每10次迭代打印一次当前误差
242
+    if iteration_count % 10 == 0:
243
+        error = objective_function(xk)
244
+        print(f"Iteration {iteration_count}, Error: {error:.6f}")
245
+        
246
+        # 可以保存中间结果
247
+        points, grads = reconstruct_surface(...)
248
+        np.save(f'intermediate_points_{iteration_count}.npy', points)
249
+        np.save(f'intermediate_grads_{iteration_count}.npy', grads)
140 250
 
141
-    cam_params = []
142
-    screen_params = []
143
-    mtx = np.array([[6944.89018564351, 0, 2709.44699784446], [0, 6942.91962497637, 1882.05677580185], [0,0,1]])
144
-    R = cv2.Rodrigues(np.array([0.0732148059333282,-0.265812028310130,-0.0532640604086260]).reshape(3,1))[0]
145
-    T = np.array([-15.179977,-42.247126,246.92182]).reshape(3,1)
146
-    
147
-    cam_calibration_data = {
148
-        'camera_matrix': mtx,
149
-        'distortion_coefficients': 0,
150
-        'rotation_matrix': R,
151
-        'translation_vector': T,
152
-        'error': 0
153
-    }
154 251
 
155
-    mtx = np.array([[6944.89018564351, 0, 2709.44699784446], [0, 6942.91962497637, 1882.05677580185], [0,0,1]])
156
-    R = np.array([[0.96514996,-0.042578806,0.25821037],[0.029285983,0.99805061,0.055111798],[-0.26005361,-0.045629206,0.96451547]])
157
-    T = np.array([30.1970,-49.3108,507.4424]).reshape(3,1)
158
-    
159
-    screen_calibration_data = {
160
-        'screen_matrix': mtx,
161
-        'screen_distortion': 0,
162
-        'screen_rotation_matrix': R,
163
-        'screen_translation_vector': T,
164
-        'screen_error': 0
165
-    }
166 252
 
253
+def validate_optimization(original_points, original_grads, 
254
+                        optimized_points, optimized_grads):
255
+    """
256
+    验证优化结果
257
+    """
258
+    # 计算平面度改善
259
+    original_planarity = compute_planarity_error(original_points)
260
+    optimized_planarity = compute_planarity_error(optimized_points)
261
+    
262
+    # 计算梯度改善 - 使用法向量梯度
263
+    original_gradient_error = np.sum(np.abs(original_grads[:, 2:])) # gx, gy
264
+    optimized_gradient_error = np.sum(np.abs(optimized_grads[:, 2:]))
265
+    
266
+    print("\n优化结果验证:")
267
+    print(f"平面度误差: {original_planarity:.6f} -> {optimized_planarity:.6f}")
268
+    print(f"梯度误差: {original_gradient_error:.6f} -> {optimized_gradient_error:.6f}")
167 269
     
168
-    cam_params.append(cam_calibration_data)
169
-    #screen_params.append(screen_calibration_data)
170
-    screen_params = screen_calibration_data
270
+    # 可视化结果
271
+    fig = plt.figure(figsize=(15, 5))
272
+    
273
+    # 原始点云
274
+    ax1 = fig.add_subplot(131, projection='3d')
275
+    ax1.scatter(original_points[:, 0], original_points[:, 1], original_points[:, 2], 
276
+                c=original_grads[:, 2], cmap='viridis')
277
+    ax1.set_title('Original Surface')
278
+    
279
+    # 优化后点云
280
+    ax2 = fig.add_subplot(132, projection='3d')
281
+    ax2.scatter(optimized_points[:, 0], optimized_points[:, 1], optimized_points[:, 2], 
282
+                c=optimized_grads[:, 2], cmap='viridis')
283
+    ax2.set_title('Optimized Surface')
284
+    
285
+    # 梯度分布对比
286
+    ax3 = fig.add_subplot(133)
287
+    ax3.hist([original_grads[:, 2], optimized_grads[:, 2]], 
288
+             label=['Original', 'Optimized'], bins=50, alpha=0.7)
289
+    ax3.set_title('Gradient Distribution')
290
+    ax3.legend()
291
+    
292
+    plt.tight_layout()
293
+    plt.show()
171 294
 
172
-    x_un = loadmat('D:\\code\\code of PMD\\code of PMD\\phase_information\\x_phase_unwrapped.mat')
173
-    y_un = loadmat('D:\\code\\code of PMD\\code of PMD\\phase_information\\y_phase_unwrapped.mat')
174 295
 
175
-    x_un = x_un['x_phase_unwrapped']
176
-    y_un = y_un['y_phase_unwrapped']
177 296
 
178
-    x_uns, y_uns = [], []
179
-    x_uns.append(x_un)
180
-    y_uns.append(y_un)
181
-   
182
-    #fig, axes = plt.subplots(1, 2, figsize=(12, 6))
183 297
 
298
+def reconstruct_surface(cam_params, screen_params, config_path, img_folder):
299
+    # 添加参数验证
300
+    # print("\nDebug - reconstruct_surface input parameters:")
301
+    # print("Camera parameters:")
302
+    # print(f"Matrix:\n{cam_params[0]['camera_matrix']}")
303
+    # print(f"Translation:\n{cam_params[0]['translation_vector']}")
304
+    
305
+    cfg = json.load(open(config_path, 'r'))
306
+    n_cam = cfg['cam_num']
307
+    grid_spacing = cfg['grid_spacing']
308
+    num_freq = cfg['num_freq']
309
+    
310
+    x_uns, y_uns = [], []
311
+    binary_masks = []
312
+    
313
+    for cam_id in range(n_cam):
314
+        print('cam_id = ', cam_id)
315
+        # 验证使用的是传入的参数而不是其他来源
316
+        #print(f"\nUsing camera parameters for camera {cam_id}:")
317
+        #print(f"Matrix:\n{cam_params[cam_id]['camera_matrix']}")
318
+        
319
+        white_path = os.path.join(img_folder, f'{cam_id}_frame_24.bmp')
320
+        #binary = get_white_mask(white_path, bin_thresh=82, size_thresh=0.5, debug=0)  #凹 凸 平晶 平面镜
184 321
 
185
-        # 第一个子图
186
-    # cax0 = axes[0].imshow(x_un)
187
-    # axes[0].set_title('x_phase_unwrapped')
188
-    # axes[0].set_xlabel('X Axis')
189
-    # axes[0].set_ylabel('Y Axis')
190
-    # fig.colorbar(cax0, ax=axes[0])
322
+        binary = get_white_mask(white_path, bin_thresh=40, size_thresh=0.8, debug=0)
191 323
 
192
-    #     # 第二个子图
193
-    # cax1 = axes[1].imshow(y_un)
194
-    # axes[1].set_title('y_phase_unwrapped')
195
-    # axes[1].set_xlabel('X Axis')
196
-    # axes[1].set_ylabel('Y Axis')
197
-    # fig.colorbar(cax0, ax=axes[1])
324
+        #binary = get_white_mask(white_path, bin_thresh=30, size_thresh=0.8, debug=0) #四相机
198 325
 
199
-    #     # 调整子图之间的间距
200
-    # plt.tight_layout()
201
-        #plt.savefig(os.path.join(save_path, "phase_unwrapped.png"))
202
-    #plt.show()
326
+        binary_masks.append(binary)
327
+        mtx = cam_params[cam_id]['camera_matrix']
328
+        dist_coeffs = cam_params[cam_id]['distortion_coefficients']
329
+        phases = extract_phase(img_folder, cam_id, binary, mtx, dist_coeffs, num_freq)
330
+        x_un, y_un = unwrap_phase(phases, num_freq, debug=0)
331
+        x_uns.append(x_un)
332
+        y_uns.append(y_un)
333
+            
334
+        np.save('./x_phase_unwrapped_python.npy', x_un)
335
+        np.save('./y_phase_unwrapped_python.npy', y_un)
203 336
     
204
-    #return 0
205
-    print('screen_params = ', screen_params)
206
-
207 337
     if n_cam == 1:
208
-        screen_to_world = map_screen_to_world(screen_params, cam_params[0]) 
338
+        #screen_to_world = map_screen_to_world(screen_params, cam_params[0], -30, 10, 80) 
339
+        #screen_to_world = map_screen_to_world(screen_params, cam_params[0], -10, -10, 20) 
340
+        screen_to_world = map_screen_to_world(screen_params, cam_params[0], 0, 0, 0)  
209 341
     elif n_cam == 4:
210
-        screen_to_world = map_screen_to_world(screen_params, cam_params[3])  
342
+        screen_to_world = map_screen_to_world(screen_params, cam_params[3], 0, 0, 0)  
211 343
     else:
212 344
         print('camera number should be 1 or 4')
213 345
         return 0
214 346
     
215
-
216 347
     print("\n4. 获得不同坐标系下点的位置")
217
-    get_point_start = time.time()
218 348
     total_cloud_point = np.empty((0, 3))
219 349
     total_gradient = np.empty((0, 4))
220
-    total_boundary_point = np.empty((0, 3))
350
+    
221 351
     for i in range(n_cam):
222 352
         print('cam_id = ', i)
223
-        contours_point = get_meshgrid_contour(binary_masks[i], save_path, debug=False)
224
-        #world_points, world_points_boundary, world_points_boundary_3 = get_world_points(contours_point, cam_params[i], i, grid_spacing, cfg['d'], erosion_pixels=2, debug=0)
225
-        #world_points_x, world_points_y = np.meshgrid(np.linspace(31.8020, 77.9135, 1), np.linspace(33.5894,79.9621,1))
226
-        min_x, max_x, min_y, max_y = 31.8020, 77.9135-1, 33.5894, 79.9621-1
227
-        meshwidth = 1
228
-        world_points_x, world_points_y = np.meshgrid(np.arange(min_x, max_x + meshwidth, meshwidth), np.arange(min_y, max_y + meshwidth, meshwidth))
229
-        world_points_z = np.zeros_like(world_points_x)-cfg['d']
230
-        #print()
231
-        print('world_points_z = ', world_points_x.reshape(-1, 1).shape, world_points_y.reshape(-1, 1).shape, world_points_z.reshape(-1, 1).shape)
232
-
233
-        world_points = np.hstack((world_points_x.reshape(-1, 1), world_points_y.reshape(-1, 1),  world_points_z.reshape(-1, 1)))
234
-        camera_points, u_p, v_p = get_camera_points(world_points, cam_params[i], save_path, i, debug=0)
353
+        contours_point = get_meshgrid_contour(binary_masks[i], debug=0)
354
+        world_points, world_points_boundary, world_points_boundary_3 = get_world_points(contours_point, cam_params[i], i, grid_spacing, cfg['d'], erosion_pixels=0, debug=0)
355
+        camera_points, u_p, v_p = get_camera_points(world_points, cam_params[i], i, debug=0)
235 356
         point_data = {'x_w': world_points[:, 0], 'y_w': world_points[:, 1], 'z_w': world_points[:, 2], 
236 357
                     'x_c': camera_points[:, 0], 'y_c': camera_points[:, 1], 'z_c': camera_points[:, 2],  
237 358
                     'u_p': u_p, 'v_p': v_p}
238
-        screen_points = get_screen_points(point_data, x_uns[i], y_uns[i], screen_params, screen_to_world, cfg, save_path, i, debug=debug)
239
-        #plot_coords(world_points, camera_points, screen_points)
240
-        z_raw,  gradient_xy = reconstruction_cumsum(world_points, camera_points, screen_points, debug=0, smooth=smooth, align=align, denoise=denoise)
359
+        screen_points = get_screen_points(point_data, x_uns[i], y_uns[i], screen_to_world, cfg, i, debug=0)
241 360
         
242
-        z_raw_xy = np.round(z_raw[:, :2]).astype(int)
243
-
244
-        print('world_points =', world_points)
361
+        # 添加调试信息
362
+        # print(f"\nDebug - Reconstruction:")
245 363
         
246
-        # # 创建布尔掩码,初始为 True
247
-        # mask = np.ones(len(z_raw_xy), dtype=bool)
248
-
249
-        # # 遍历每个边界点,标记它们在 aligned 中的位置
250
-        # for boundary_point in world_points_boundary:
251
-        #     # 标记与当前边界点相同 xy 坐标的行
252
-        #     mask &= ~np.all(z_raw_xy == boundary_point[:2], axis=1)
253 364
         
254
-        # # 使用掩码过滤出非边界点
255
-        # non_boundary_points = z_raw[mask]
256
-        # non_boundary_aligned, rotation_matrix = align2ref(non_boundary_points)
365
+        # print(f"Camera points shape: {camera_points.shape}")
366
+        # print(f"Screen points shape: {screen_points.shape}")
367
+        #ds(world_points, camera_points, screen_points)
368
+        #plot_coords(world_points, camera_points, screen_points)
257 369
         
258
-        # 创建布尔掩码,初始为 True
259
-        mask = np.ones(len(z_raw_xy), dtype=bool)
260
-
261
-        # 遍历每个边界点,标记它们在 aligned 中的位置
262
-        # for boundary_point in world_points_boundary_3:
263
-        #     # 标记与当前边界点相同 xy 坐标的行
264
-        #     mask &= ~np.all(z_raw_xy == boundary_point[:2], axis=1)
370
+        z1_cumsum, gradient_xy = reconstruction_cumsum(world_points, camera_points, screen_points, debug=0)
371
+        write_point_cloud(os.path.join(img_folder, str(i) + '_cloudpoint.txt'), np.round(z1_cumsum[:, 0]), np.round(z1_cumsum[:, 1]),  z1_cumsum[:, 2])
372
+        np.savetxt(os.path.join(img_folder, str(i) + '_gradient.txt'), gradient_xy, fmt='%.10f', delimiter=',')
373
+        total_cloud_point = np.vstack([total_cloud_point, np.column_stack((z1_cumsum[:, 0], z1_cumsum[:, 1], z1_cumsum[:, 2]))])
374
+        total_gradient = np.vstack([total_gradient, np.column_stack((gradient_xy[:, 0], gradient_xy[:, 1], gradient_xy[:, 2], gradient_xy[:, 3]))])
375
+        # 检查返回值
376
+        # print(f"z1_cumsum shape: {z1_cumsum.shape}")
377
+        # print(f"gradient_xy shape: {gradient_xy.shape}")
378
+        # print(f"Sample z1_cumsum values:\n{z1_cumsum[:3]}")
265 379
         
266
-        # 使用掩码过滤出非边界点
267
-        non_boundary_points = z_raw[mask] 
268
-        # z_raw_aligned = non_boundary_points @ rotation_matrix.T
269
-        # z_raw_aligned[:,2] = z_raw_aligned[:,2] - np.mean(z_raw_aligned[:, 2])
380
+    return total_cloud_point, total_gradient
270 381
 
271
-        #z_raw_aligned = non_boundary_points
272
-        z_raw_aligned, _  = align2ref(non_boundary_points)
382
+   
273 383
 
274
-        #non_boundary_points = smoothed
275
-        write_point_cloud(os.path.join(img_folder, str(i) + '_cloudpoint.txt'), np.round(z_raw_aligned[:, 0]), np.round(z_raw_aligned[:, 1]),  z_raw_aligned[:, 2])
276
-        np.savetxt(os.path.join(img_folder, str(i) + '_gradient.txt'), gradient_xy, fmt='%.10f', delimiter=',')
277
-        total_cloud_point = np.vstack([total_cloud_point, np.column_stack((z_raw_aligned[:, 0], z_raw_aligned[:, 1], z_raw_aligned[:, 2]))])
278
-        total_gradient = np.vstack([total_gradient, np.column_stack((gradient_xy[:, 0], gradient_xy[:, 1], gradient_xy[:, 2], gradient_xy[:, 3]))])
384
+def main(config_path, img_folder):
385
+    current_dir = os.path.dirname(os.path.abspath(__file__))
386
+    os.chdir(current_dir)
387
+
388
+    cfg = json.load(open(config_path, 'r'))
389
+    n_cam = cfg['cam_num']
390
+    
279 391
     
280
-    if 0:
281
-        fig = plt.figure()
282
-        ax = fig.add_subplot(111, projection='3d')
283
-
284
-        # 提取 x, y, z 坐标
285
-        x_vals = total_cloud_point[:, 0]
286
-        y_vals = total_cloud_point[:, 1]
287
-        z_vals = total_cloud_point[:, 2]
288
-
289
-        # 绘制3D点云
290
-        ax.scatter(x_vals, y_vals, z_vals, c=z_vals, cmap='viridis', marker='o')
291
-
292
-        # 设置轴标签和标题
293
-        ax.set_xlabel('X (mm)')
294
-        ax.set_ylabel('Y (mm)')
295
-        ax.set_zlabel('Z (mm)')
296
-        ax.set_title('z_raw 3D Point Cloud Visualization gradient')
297
-        plt.show()
298
-
299
-        # fig = plt.figure()
300
-        # ax = fig.add_subplot(111, projection='3d')
301
-        # smoothed_total = smooth_pcl(total_cloud_point, 3)
302
-        #  # 提取 x, y, z 坐标
303
-        # x_vals = smoothed_total[:, 0]
304
-        # y_vals = smoothed_total[:, 1] 
305
-        # z_vals = smoothed_total[:, 2]
306
-
307
-        # # 绘制3D点云
308
-        # ax.scatter(x_vals, y_vals, z_vals, c=z_vals, cmap='viridis', marker='o')
309
-
310
-        # # 设置轴标签和标题
311
-        # ax.set_xlabel('X (mm)')
312
-        # ax.set_ylabel('Y (mm)')
313
-        # ax.set_zlabel('Z (mm)')
314
-        # ax.set_title('smoothed 3D Point Cloud Visualization')
315
-       
316
-
317
-    get_point_end = time.time()
392
+    matlab_align = 0
393
+
394
+    if n_cam == 4:
395
+        screen_img_path = 'D:\\data\\four_cam\\calibrate\\cam3-screen-1008'
396
+        camera_img_path = 'D:\\data\\four_cam\\calibrate\\calibrate-1016'
397
+    elif n_cam == 1:
398
+        camera_img_path = 'D:\\data\\one_cam\\padtest1125\\test1\\'
399
+        screen_img_path = 'D:\\data\\one_cam\\pad-test-1125\\test4\\calibration\\screen\\'
400
+
401
+    print(f"开始执行时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
402
+    print("\n1. 机标定")
403
+    preprocess_start = time.time()
404
+
405
+    cam_para_path = os.path.join(current_dir, 'config', cfg['cam_params'])
406
+    print('cam_para_path = ', cam_para_path)
407
+    if os.path.exists(cam_para_path):
408
+    #if False:
409
+        with open(cam_para_path, 'rb') as pkl_file:
410
+            cam_params = pickle.load(pkl_file)
411
+    else:
412
+        if n_cam == 4:
413
+            cam_params = []
414
+            camera_subdir = [item for item in os.listdir(camera_img_path) if os.path.isdir(os.path.join(camera_img_path, item))]
415
+            camera_subdir.sort()
416
+            assert len(camera_subdir) == 4, f"found {len(camera_subdir)} cameras, should be 4"
417
+            for i in range(n_cam):
418
+                cam_img_path = glob.glob(os.path.join(camera_img_path, camera_subdir[i], "*.bmp"))
419
+                cam_img_path.sort()
420
+                cam_param_raw = calibrate_world_aprilgrid(cam_img_path, cfg['world_chessboard_size'], cfg['world_square_size'], cfg['world_square_spacing'], debug=1)
421
+                cam_params.append(cam_param_raw)
422
+            with open(cam_para_path, 'wb') as pkl_file:
423
+               pickle.dump(cam_params, pkl_file)
424
+        elif n_cam == 1:
425
+            cam_params = []
426
+            cam_img_path = glob.glob(os.path.join(camera_img_path,  "*.bmp"))
427
+            cam_img_path.sort()
428
+            if matlab_align:
429
+                cfg['world_chessboard_size'] = [41, 40]
430
+                cfg['world_square_size'] = 2
431
+            cam_param_raw = calibrate_world(cam_img_path, cfg['world_chessboard_size'], cfg['world_square_size'], debug=1)
432
+            cam_params.append(cam_param_raw)
433
+            with open(cam_para_path, 'wb') as pkl_file:
434
+               pickle.dump(cam_params, pkl_file)
435
+    print('raw cam_param = ', cam_params)
436
+    print("\n2. 屏幕标定")
437
+    screen_cal_start = time.time() 
438
+
439
+    screen_img_path = glob.glob(os.path.join(screen_img_path, "*.bmp"))
440
+    screen_para_path = os.path.join('config', cfg['screen_params'])   
441
+    if os.path.exists(screen_para_path):
442
+    #if False:
443
+        with open(screen_para_path, 'rb') as pkl_file:
444
+            screen_params = pickle.load(pkl_file)
445
+    else:
446
+        if n_cam == 3:
447
+            screen_params = calibrate_screen_chessboard(screen_img_path, cam_params[3]['camera_matrix'], cam_params[3]['distortion_coefficients'], cfg['screen_chessboard_size'], cfg['screen_square_size'], debug=0)
448
+        elif n_cam == 1:
449
+            raw_cam_mtx = cam_params[0]['camera_matrix']
450
+            screen_params = calibrate_screen_circlegrid(screen_img_path, raw_cam_mtx, cam_params[0]['distortion_coefficients'], cfg['screen_chessboard_size'], cfg['screen_square_size'], debug=1)
451
+        with open(screen_para_path, 'wb') as pkl_file:
452
+            pickle.dump(screen_params, pkl_file)
453
+    
454
+    screen_cal_end = time.time()
318 455
     print(f"   完成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
319
-    print(f"   耗时: {get_point_end - get_point_start:.2f} 秒")
456
+    print(f"   耗时: {screen_cal_end - screen_cal_start:.2f} s")
457
+    
458
+
459
+    total_cloud_point, total_gradient = reconstruct_surface(cam_params, screen_params, config_path, img_folder)
320 460
     
321 461
     print("\n5. 后处理")
322 462
     post_process_start = time.time()
323 463
     total_cloud_point[:,0] = np.round(total_cloud_point[:,0])
324 464
     total_cloud_point[:,1] = np.round(total_cloud_point[:,1])
325 465
     #fitted_points = post_process(total_cloud_point, debug=0)
326
-    test = post_process_with_grad(img_folder, n_cam, 1)
466
+    clout_points = post_process_with_grad(img_folder, n_cam, 1)
327 467
     fitted_points = total_cloud_point
328
-    #fitted_points = total_cloud_point
329
-    #align_fitted, _ = align2ref(fitted_points)
330 468
     align_fitted = fitted_points
331 469
     
332
-    write_point_cloud(os.path.join(img_folder, 'cloudpoint.txt'), np.round(align_fitted[:, 0]-np.mean(align_fitted[:, 0])), np.round(align_fitted[:, 1]-np.mean(align_fitted[:, 1])), align_fitted[:,2]-np.min(align_fitted[:,2]))
333
-    if 0:
334
-        fig = plt.figure()
335
-        ax = fig.add_subplot(111, projection='3d')
336
-
337
-        # 提取 x, y, z 坐标
338
-        x_vals = np.round(fitted_points[:, 0]-np.mean(fitted_points[:, 0]))
339
-        y_vals = np.round(fitted_points[:, 1]-np.mean(fitted_points[:, 1]))
340
-        z_vals = align_fitted[:,2]-np.min(align_fitted[:,2])
341
-
342
-        x_vals = fitted_points[:, 0]
343
-        y_vals = fitted_points[:, 1]
344
-        z_vals = fitted_points[:, 2]
345
-
346
-        # 绘制3D点云
347
-        ax.scatter(x_vals, y_vals, z_vals, c=z_vals, cmap='viridis', marker='o')
348
-
349
-        # 设置轴标签和标题
350
-        ax.set_xlabel('X (mm)')
351
-        ax.set_ylabel('Y (mm)')
352
-        ax.set_zlabel('Z (mm)')
353
-        ax.set_title('post 3D Point Cloud Visualization')
354
-        plt.show()
470
+    write_point_cloud(os.path.join(img_folder, 'cloudpoint.txt'), np.round(clout_points[:, 0]-np.mean(clout_points[:, 0])), np.round(clout_points[:, 1]-np.mean(clout_points[:, 1])), 1000 * clout_points[:,2])
355 471
     
356
-    post_process_end = time.time()
357
-    print(f"   完成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
358
-    print(f"   耗时: {post_process_end - post_process_start:.2f} 秒")
359 472
 
360 473
     print("\n6. 评估")
361 474
     eval_start = time.time()
362 475
     point_cloud_path = os.path.join(img_folder, 'cloudpoint.txt')
363 476
     json_path = os.path.join(img_folder, 'result.json')
364 477
     theta_notch = 0
365
-    #get_eval_result(point_cloud_path, json_path, theta_notch, 0)
478
+    get_eval_result(point_cloud_path, json_path, theta_notch, 1)
366 479
     eval_end = time.time()
367 480
     print(f"   完成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
368 481
     print(f"   耗时: {eval_end - eval_start:.2f} 秒")
482
+    # new_cam_params, new_screen_params = optimize_parameters(
483
+    #     cam_params[0],  # 假设单相系统
484
+    #     screen_params,
485
+    #     total_cloud_point,
486
+    #     total_gradient
487
+    # )
488
+
489
+    #print('after optimazation:', new_cam_params, new_screen_params)
490
+
491
+
492
+    #total_cloud_point, total_gradient = reconstruct_surface(cam_params, screen_params, config_path)
493
+
494
+    #print('cam_params = ', cam_params)
495
+    #print('screen_params = ', screen_params)
496
+   
497
+    # print("\n3. 相位提取,相位展开") 
498
+    # phase_start = time.time()                                                                                      
499
+    
500
+    # scales = [0.995291, 0.993975, 0.993085, 0.994463]
501
+    # scales = [1,1,1,1]
502
+    
503
+
504
+        
505
+
506
+    # phase_end = time.time()
507
+    # print(f"   完成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
508
+    # print(f"   耗时: {phase_end - phase_start:.2f} 秒")
509
+
510
+
511
+
512
+
513
+    #     z_raw_xy = np.round(z_poisson[:, :2]).astype(int)
514
+        
515
+    #     #创建布尔掩码,初始为 True
516
+    #     mask = np.ones(len(z_raw_xy), dtype=bool)
517
+
518
+    #     # 使用掩码过滤出非边界点
519
+    #     non_boundary_points = z_poisson[mask] 
520
+        
521
+    #     z_raw_aligned, _  = align2ref(non_boundary_points)
522
+
523
+    #     #non_boundary_points = smoothed
524
+    #     write_point_cloud(os.path.join(img_folder, str(i) + '_cloudpoint.txt'), np.round(z_raw_aligned[:, 0]), np.round(z_raw_aligned[:, 1]),  z_raw_aligned[:, 2])
525
+    #     np.savetxt(os.path.join(img_folder, str(i) + '_gradient.txt'), gradient_xy, fmt='%.10f', delimiter=',')
526
+    #     total_cloud_point = np.vstack([total_cloud_point, np.column_stack((z_raw_aligned[:, 0], z_raw_aligned[:, 1], z_raw_aligned[:, 2]))])
527
+    #     total_gradient = np.vstack([total_gradient, np.column_stack((gradient_xy[:, 0], gradient_xy[:, 1], gradient_xy[:, 2], gradient_xy[:, 3]))])
528
+    
529
+
530
+    # get_point_end = time.time()
531
+    # print(f"   完成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
532
+    # print(f"   耗时: {get_point_end - get_point_start:.2f} 秒")
533
+    
534
+    
535
+    # if 0:
536
+    #     fig = plt.figure()
537
+    #     ax = fig.add_subplot(111, projection='3d')
538
+
539
+    #     # 提取 x, y, z 坐标
540
+    #     x_vals = np.round(fitted_points[:, 0]-np.mean(fitted_points[:, 0]))
541
+    #     y_vals = np.round(fitted_points[:, 1]-np.mean(fitted_points[:, 1]))
542
+    #     z_vals = align_fitted[:,2]-np.min(align_fitted[:,2])
543
+
544
+    #     x_vals = fitted_points[:, 0]
545
+    #     y_vals = fitted_points[:, 1]
546
+    #     z_vals = fitted_points[:, 2]
547
+
548
+    #     # 绘制3D点云
549
+    #     ax.scatter(x_vals, y_vals, z_vals, c=z_vals, cmap='viridis', marker='o')
550
+
551
+    #     # 设置轴标签和标题
552
+    #     ax.set_xlabel('X (mm)')
553
+    #     ax.set_ylabel('Y (mm)')
554
+    #     ax.set_zlabel('Z (mm)')
555
+    #     ax.set_title('post 3D Point Cloud Visualization')
556
+    #     plt.show()
557
+    
558
+    # post_process_end = time.time()
559
+    # print(f"   完成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
560
+    # print(f"   耗时: {post_process_end - post_process_start:.2f} 秒")
561
+
562
+   
369 563
 
370 564
     return True
371 565
 
372 566
 
373 567
 if __name__ == '__main__':
374
-    config_path = 'config\\cfg_3freq_wafer_matlab.json'
375
-    #img_folder = 'D:\\data\\four_cam\\1008_storage\\pingjing_20241017092315228\\'   #'D:\\data\\four_cam\\betone_1011\\20241011142348762-1'
376
-   # img_folder = 'D:\\huchao\\inspect_server_202409241013_py\\storage\\20241023193453708\\'
377
-    img_folder = 'D:\\data\\one_cam\\pingjing_20241024154405250\\'
378
-    #img_folder = 'D:\\data\\one_cam\\betone-20241025095352783\\'
379
-    #img_folder = 'D:\\data\\one_cam\\20241025113857041-neg\\'
380
-    #'
381
-    json_path = os.path.join(img_folder, 'result.json')
568
+    #config_path = 'config\\cfg_3freq_wafer_1226.json'
569
+    config_path = 'config\\cfg_3freq_wafer_1226.json'
570
+    
571
+    #config_path = 'config\\cfg_3freq_wafer_matlab.json'
572
+    img_folder = 'D:\\data\\one_cam\\pad-test-1125\\test4\\pingjing\\'
573
+    img_folder = 'D:\\data\\one_cam\\pad-test-1106\\1104-test-other\\'
574
+    #img_folder = 'D:\\data\\one_cam\\pad-test-1125\\test7-4\\20241213183308114\\' #凸
575
+    #img_folder = 'D:\\data\\one_cam\\pad-test-1125\\test7-4\\20241213183451799\\'  #凹
576
+    img_folder = 'D:\\data\\one_cam\\pad-test-1125\\test7-4\\20241213182959977\\' #平面镜
577
+    img_folder = 'D:\\data\\one_cam\\pad-test-1125\\test7-4\\20241219180143344\\'  #平晶
578
+    #img_folder = 'D:\\data\\four_cam\\1223\\20241223134629640-0\\'  #晶圆
579
+    #img_folder = 'D:\\data\\four_cam\\1223\\20241223135156305-1\\'  #晶圆
580
+    #img_folder = 'D:\\data\\four_cam\\1223\\20241223135626457-2-0\\'  #晶圆
581
+    img_folder = 'D:\\data\\four_cam\\1223\\20241223135935517-2-1\\'  #晶圆
582
+
583
+    #img_folder = 'D:\\data\\four_cam\\1223\\20241223172437775-2-0\\'  #晶圆
584
+    #img_folder = 'D:\\data\\four_cam\\1223\\20241223172712226-2-1\\'  #晶圆
585
+   # img_folder = 'D:\\data\\four_cam\\1223\\20241223172931654-1\\'  #晶圆
586
+    img_folder = 'D:\\data\\four_cam\\1223\\20241223173521117-0\\'  #晶圆
587
+    #img_folder = 'D:\\data\\four_cam\\betone_1011\\20241011142901251-2\\'
588
+
589
+    # img_folder = 'D:\\data\\one_cam\\1226image\\20241226142937478\\'  #凹
590
+    # #img_folder = 'D:\\data\\one_cam\\1226image\\20241226143014962\\'  #凸  
591
+    # #img_folder = 'D:\\data\\one_cam\\1226image\\20241226143043070\\'  #平面镜
592
+    # #img_folder = 'D:\\data\\one_cam\\1226image\\20241226142826690\\'  #平晶
593
+    # img_folder = 'D:\\data\\one_cam\\1226image\\20241226143916513\\'  #晶圆
594
+    # #img_folder = 'D:\\data\\one_cam\\1226image\\20241226144357273\\'
595
+    # img_folder = 'D:\\data\\one_cam\\1226image\\20241226144616239\\'
596
+    img_folder = 'D:\\data\\one_cam\\betone1230\\20241230110516029\\'
597
+    img_folder = 'D:\\data\\one_cam\\betone1230\\20241230110826833\\'  #2
598
+    #img_folder = 'D:\\data\\one_cam\\betone1230\\20241230111002224\\'   
599
+     
600
+    #img_folder = 'D:\\data\\one_cam\\betone1230\\20250103151342402-pingjing\\'
382 601
 
383
-    # 20241011142348762-1  20241011142901251-2   20241011143925746-3   20241011144821292-4 
384
-    #     0.60                0.295                0.221                  0.346
602
+    
603
+   # img_folder = 'D:\\data\\one_cam\\betone1230\\20250102181845157-1\\'
604
+    #img_folder = 'D:\\data\\one_cam\\betone1230\\20250102181648331-2\\'
605
+    #img_folder = 'D:\\data\\one_cam\\betone1230\\20250102182008126-3\\'
385 606
 
386
-    # x max =  653.5925
387
-    # y max =  692.1735648
388
-    # x max =  251.0775
389
-    # y max =  293.05856482
390
-    # x max =  184.7275
391
-    # y max =  239.00919669
392
-    # x max =  342.2525
393
-    # y max =  386.92087185
607
+
608
+    json_path = os.path.join(img_folder, 'result.json')
394 609
 
395 610
     pmdstart(config_path, img_folder)
396
-    #fitted_points = post_process_with_grad(img_folder, 1)
397 611
     
398 612
     

+ 4 - 3
src/__init__.py

@@ -1,4 +1,5 @@
1
-from .calibration import calibrate_world, calibrate_screen, map_screen_to_world  
1
+from .calibration import calibrate_world, calibrate_screen_chessboard, calibrate_screen_aprilgrid, calibrate_screen_circlegrid
2
+from .calibration.get_camera_params import calibrate_world_aprilgrid
2 3
 from .phase import compute_phase, extract_phase, unwrap_phase
3 4
 from .utils import get_meshgrid, get_world_points, get_camera_points, get_screen_points, get_baseline_params, \
4 5
                    write_point_cloud, read_and_undis, binarize, get_white_mask, get_world_points_from_mask, find_notch
@@ -9,8 +10,8 @@ from .vis import plot_coords
9 10
 from .eval import get_eval_result
10 11
 
11 12
 __all__ = ['compute_phase', 'extract_phase', 'unwrap_phase', 
12
-           'calibrate_world', 'calibrate_screen','map_screen_to_world',
13
+           'calibrate_world', 'calibrate_screen_chessboard', 'calibrate_screen_aprilgrid','map_screen_to_world',
13 14
            'get_meshgrid', 'get_world_points', 'get_camera_points', 'get_screen_points',
14 15
            'reconstruction_cumsum', 'get_baseline_params', 'write_point_cloud', 
15 16
            'read_and_undis', 'smooth_pcl', 'generate_ROI', 'binarize', 'plot_coords',
16
-           'align2ref','get_white_mask', 'get_world_points_from_mask','get_eval_result', 'find_notch']
17
+           'align2ref','get_white_mask', 'get_world_points_from_mask','get_eval_result', 'find_notch','calibrate_world_aprilgrid','calibrate_screen_circlegrid']

+ 4 - 3
src/calibration/__init__.py

@@ -1,5 +1,6 @@
1 1
 from .get_camera_params import calibrate_world
2
-from .get_screen_params import calibrate_screen
3
-from .calibrate import map_screen_to_world
2
+from .get_screen_params import calibrate_screen_chessboard, calibrate_screen_aprilgrid, calibrate_screen_circlegrid
3
+from .calibrate import map_screen_to_world, show_detect_result_new, show_detect_result
4 4
 
5
-__all__ = ['calibrate_world', 'calibrate_screen', 'map_screen_to_world']
5
+
6
+__all__ = ['calibrate_world', 'calibrate_screen_chessboard', 'calibrate_screen_aprilgrid', 'map_screen_to_world','show_detect_result_new','show_detect_result','calibrate_screen_circlegrid']

+ 166 - 30
src/calibration/calibrate.py

@@ -1,48 +1,60 @@
1 1
 import numpy as np
2
+import cv2
3
+import matplotlib.pyplot as plt
2 4
 
3
-def map_screen_to_world(data_screen, date_world):
4
-    # 提取旋转矩阵和平移向量
5
-    Rv2c = data_screen['screen_rotation_matrix']
6
-    Tv2c = data_screen['screen_translation_vector']
7
-    A = date_world['camera_matrix']
8
-    Rw2c = date_world['rotation_matrix']
9
-    Tw2c = date_world['translation_vector']
10
-    # print('Rv2c = ', Rv2c)
11
-    # print('Tv2c = ', Tv2c)
5
+def map_screen_to_world(screen_params, cam_params, x_offset=0, y_offset=0, z_offset=0):
6
+    """
7
+    将屏幕坐标映射到世界坐标
8
+    """
9
+    # 确保平移向量是一维数组
10
+    Tv2c = screen_params['screen_translation_vector'].reshape(-1)
11
+    nc = cam_params['translation_vector'].reshape(-1)
12 12
 
13
-    # print(Rv2c.shape, Tv2c.shape, A.shape, Rw2c.shape, Tw2c.shape)
14
-    # Rv2c = np.array([[0.965149963314468, -0.0425788059963296, 0.258210366937518],
15
-    #                  [0.0292859832567332, 0.998050610379376, 0.0551117982472064],
16
-    #                  [-0.260053608893949, -0.0456292055736385, 0.964515472193138]])
13
+    # print("Debug - Translation vectors:")
14
+    # print(f"Screen translation vector: {Tv2c}")
15
+    # print(f"Camera translation vector: {nc}")
16
+    # print(f"Shapes: Tv2c {Tv2c.shape}, nc {nc.shape}")
17
+    
18
+    assert Tv2c.shape == nc.shape, f"Shape mismatch: Tv2c {Tv2c.shape} != nc {nc.shape}"
17 19
     
18
-    # Tv2c = np.array([[30.1969654856753], [-49.3107982399140], [507.442359986000]])
20
+    # 提取旋转矩阵和平移向量
21
+    #print('data_screen = ', screen_params)
22
+    
23
+    # Rv2c = data_screen[0]['screen_rotation_matrix']
24
+    # Tv2c = data_screen[0]['screen_translation_vector']
25
+    Rv2c = screen_params['screen_rotation_matrix']
26
+    Tv2c = screen_params['screen_translation_vector']
27
+    A = cam_params['camera_matrix']
28
+    Rw2c = cam_params['rotation_matrix']
29
+    Tw2c = cam_params['translation_vector']
19 30
 
20
-    # Rw2c = np.array([[0.963494285110906, 0.0428998912293359, -0.264269487249541],
21
-    #                  [-0.0622337198197605, 0.995928128631295, -0.0652236668576798],
22
-    #                  [0.260395327677015, 0.0792891034977518, 0.962240880128517]])
31
+    #screen_r_vector = data_screen['screen_rotation_vector']
32
+    #print('screen_r_vector = ', screen_r_vector)
33
+    #screen_r_vector[0] = screen_r_vector[0] + 0.18
23 34
     
24
-    # Tw2c = np.array([[-15.1799773887131], [-42.2471261981148], [246.921818304047]]) 
25
-     
26
-    # A = np.array([[6944.89018564351,	0,	2709.44699784446],
27
-    #                     [0,	6942.91962497637,	1882.05677580185],
28
-    #                     [0,	0,	1]])
29
-    # print(Rv2c.shape, Tv2c.shape, A.shape, Rw2c.shape, Tw2c.shape)
30
-    # print(np.mean(Rv2c))
31
-    # print(np.mean(Tv2c))
32
-    # print(np.mean(Rw2c))
33
-    # print(np.mean(Tw2c))
34
-    # print(np.mean(A))
35
+    # Rv2c = cv2.Rodrigues(np.array(screen_r_vector).reshape(3,1))[0]
36
+    # print('Rv2c = ', Rv2c)
37
+    # print('Tv2c = ', Tv2c)
38
+    # Tv2c[0] = Tv2c[0] + x_shift
39
+    # Tv2c[1] = Tv2c[1] + y_shift
40
+    # Tv2c[2] = Tv2c[2] + z_shift
35 41
 
42
+    # Rv2c = [[ 0.96953516 , 0.00218731, -0.24494243],
43
+    #     [-0.0080033 ,  0.99970912 ,-0.02275152],
44
+    #     [ 0.24482142 , 0.02401875,  0.96927064]]
45
+    # Tw2c = [[ -1.84854824],[-92.88120666],[424.69051313]]
46
+   
36 47
     # 计算 nc 和 dw2c
37 48
     nc = Rw2c[:, 2]
38 49
     nc = nc.reshape(-1, 1)
50
+    Tv2c = Tv2c.reshape(-1, 1)
39 51
     if nc[2] < 0:
40 52
         nc = -nc
41 53
     dw2c = np.abs(nc.T @ Tw2c)
42 54
 
43 55
     # 计算 Rs2c 和 Ts2c
44 56
     Rs2c = np.linalg.inv(np.eye(3) - 2 * np.outer(nc, nc)) @ Rv2c
45
-    assert Tv2c.shape == nc.shape
57
+    assert Tv2c.shape == nc.shape, f"Shape mismatch: Tv2c {Tv2c.shape} != nc {nc.shape}"
46 58
     Ts2c = np.linalg.inv(np.eye(3) - 2 * np.outer(nc, nc)) @ (Tv2c - 2 * dw2c * nc)
47 59
 
48 60
     # 计算 Rc2w 、Tc2w 、Rs2w 、Ts2w
@@ -66,4 +78,128 @@ def map_screen_to_world(data_screen, date_world):
66 78
         'screen_to_world_rotation': Rs2w,
67 79
         'screen_to_world_translation': Ts2w
68 80
     }
69
-    return data_to_save
81
+    return data_to_save
82
+
83
+
84
+
85
+
86
+
87
+def validate_corners(corners, image_shape):
88
+    """确保角点在图像有效范围内"""
89
+    h, w = image_shape
90
+    for corner in corners:
91
+        x, y = corner.ravel()
92
+        if not (0 <= x < w and 0 <= y < h):
93
+            print(f"Invalid corner: ({x}, {y}) is out of image bounds.")
94
+            return False
95
+    return True
96
+
97
+def show_detect_result(img, detections):
98
+    colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0)]
99
+    for detection in detections:
100
+        corners = detection.corners
101
+        tag_id = detection.tag_id
102
+        center = np.mean(corners, axis=0).astype(int)[0]
103
+        for i, corner in enumerate(corners):
104
+            color = colors[i % len(colors)]
105
+            cv2.circle(img, tuple(int(x) for x in corner[0]), 5, color, -1)
106
+        cv2.circle(img, center, 5, (255, 255, 0), -1)
107
+        cv2.putText(img, str(tag_id), (center[0]-40, center[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
108
+        cv2.polylines(img, [np.int32(corners)], isClosed=True, color=(255, 255, 0), thickness=2)
109
+
110
+    cv2.namedWindow('AprilGrid Detection', 0)
111
+    cv2.imshow('AprilGrid Detection', img)
112
+    cv2.waitKey(0)
113
+    cv2.destroyAllWindows()
114
+
115
+
116
+def show_detect_result_new(img, detections):
117
+    colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0)]
118
+    for detection in detections:
119
+        corners = detection.corners
120
+        
121
+        tag_id = detection.tag_id
122
+        print(tag_id)
123
+        print(corners)
124
+        center = np.mean(corners, axis=0).astype(int)
125
+        for i, corner in enumerate(corners):
126
+            color = colors[i % len(colors)]
127
+            cv2.circle(img, tuple(int(x) for x in corner), 5, color, -1)
128
+        cv2.circle(img, center, 5, (255, 255, 0), -1)
129
+        cv2.putText(img, str(tag_id), (center[0]-40, center[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
130
+        cv2.polylines(img, [np.int32(corners)], isClosed=True, color=(255, 255, 0), thickness=2)
131
+
132
+    cv2.namedWindow('AprilGrid Detection', 0)
133
+    cv2.imshow('AprilGrid Detection', img)
134
+    cv2.waitKey(100)
135
+    
136
+
137
+# 生成 AprilGrid 标定板的 3D 物体坐标,并考虑标签之间的间隙
138
+def generate_3d_points(grid_size, tag_size, tag_spacing):
139
+    """
140
+    grid_size: (rows, cols) 标定板上标签的行数和列数
141
+    tag_size: 每个标签的物理大小(单位:mm)
142
+    tag_spacing: 标签之间的间隙(单位:mm),表示两个标签之间的间隔
143
+    """
144
+    obj_points = []
145
+    
146
+    # 遍历标定板的每一行和每一列
147
+    for i in range(grid_size[0]):  # 行
148
+        for j in range(grid_size[1]):  # 列
149
+            # 每个标签的四个角点坐标
150
+            # 注意:在计算标签位置时,要包括标签之间的间隙
151
+            x_offset = j * (tag_size + tag_spacing)
152
+            y_offset = i * (tag_size + tag_spacing)
153
+            
154
+            obj_points.append([
155
+                [x_offset, y_offset, 0],                         # 左上角
156
+                [x_offset + tag_size, y_offset, 0],              # 右上角
157
+                [x_offset + tag_size, y_offset + tag_size, 0],   # 右下角
158
+                [x_offset, y_offset + tag_size, 0]               # 左下角
159
+            ])
160
+    
161
+    # 将生成的3D点转换为 NumPy 数组并返回
162
+    return np.array(obj_points, dtype=np.float32)
163
+
164
+
165
+import numpy as np
166
+import matplotlib.pyplot as plt
167
+from mpl_toolkits.mplot3d import Axes3D
168
+
169
+def generate_3d_points(grid_size, tag_size, tag_spacing):
170
+    """生成标定板上每个标签的四个角点的3D坐标"""
171
+    obj_points = []
172
+    for i in range(grid_size[0]):  # 行
173
+        for j in range(grid_size[1]):  # 列
174
+            x_offset = j * (tag_size + tag_spacing)
175
+            y_offset = i * (tag_size + tag_spacing)
176
+            obj_points.append([
177
+                [x_offset, y_offset, 0],  # 左上角
178
+                [x_offset + tag_size, y_offset, 0],  # 右上角
179
+                [x_offset + tag_size, y_offset + tag_size, 0],  # 右下角
180
+                [x_offset, y_offset + tag_size, 0]  # 左下角
181
+            ])
182
+    return np.array(obj_points, dtype=np.float32)
183
+
184
+def visualize_3d_points(obj_points):
185
+    fig = plt.figure()
186
+    ax = fig.add_subplot(111, projection='3d')
187
+
188
+    for idx, tag in enumerate(obj_points):
189
+        xs = [point[0] for point in tag]
190
+        ys = [point[1] for point in tag]
191
+        zs = [point[2] for point in tag]
192
+        ax.plot(xs + [xs[0]], ys + [ys[0]], zs + [zs[0]], marker='o')
193
+        
194
+        # 计算中心点
195
+        center_x = np.mean(xs)
196
+        center_y = np.mean(ys)
197
+        center_z = np.mean(zs)
198
+        
199
+        # 标注ID
200
+        ax.text(center_x, center_y, center_z, str(idx), color='red')
201
+
202
+    ax.set_xlabel('X axis')
203
+    ax.set_ylabel('Y axis')
204
+    ax.set_zlabel('Z axis')
205
+    plt.show()

+ 105 - 142
src/calibration/get_camera_params.py

@@ -4,67 +4,10 @@ import glob
4 4
 import scipy.io as sio
5 5
 import matplotlib.pyplot as plt
6 6
 from aprilgrid import Detector
7
-# 读取文件夹中的所有棋盘格图像
8
-# images = glob.glob('calibration/world_calibration/*.bmp')
9
-
10
-def show_detect_result(img, detections):
11
-    colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0)]
12
-    for detection in detections:
13
-        corners = detection.corners
14
-        tag_id = detection.tag_id
15
-        center = np.mean(corners, axis=0).astype(int)[0]
16
-        for i, corner in enumerate(corners):
17
-            color = colors[i % len(colors)]
18
-            cv2.circle(img, tuple(int(x) for x in corner[0]), 25, color, -1)
19
-        cv2.circle(img, center, 5, (255, 255, 0), -1)
20
-        cv2.putText(img, str(tag_id), (center[0]-40, center[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
21
-        cv2.polylines(img, [np.int32(corners)], isClosed=True, color=(0, 255, 255), thickness=2)
22
-
23
-    cv2.namedWindow('AprilGrid Detection', 0)
24
-    cv2.imshow('AprilGrid Detection', img)
25
-    cv2.waitKey(0)
26
-    cv2.destroyAllWindows()
27
-
7
+from .calibrate import generate_3d_points, show_detect_result
28 8
 
29
-def validate_corners(corners, image_shape):
30
-    """确保角点在图像有效范围内"""
31
-    h, w = image_shape
32
-    for corner in corners:
33
-        x, y = corner.ravel()
34
-        if not (0 <= x < w and 0 <= y < h):
35
-            print(f"Invalid corner: ({x}, {y}) is out of image bounds.")
36
-            return False
37
-    return True
38
-
39
-
40
-# 生成 AprilGrid 标定板的 3D 物体坐标,并考虑标签之间的间隙
41
-def generate_3d_points(grid_size, tag_size, tag_spacing):
42
-    """
43
-    grid_size: (rows, cols) 标定板上标签的行数和列数
44
-    tag_size: 每个标签的物理大小(单位:mm)
45
-    tag_spacing: 标签之间的间隙(单位:mm),表示两个标签之间的间隔
46
-    """
47
-    obj_points = []
48
-    
49
-    # 遍历标定板的每一行和每一列
50
-    for i in range(grid_size[0]):  # 行
51
-        for j in range(grid_size[1]):  # 列
52
-            # 每个标签的四个角点坐标
53
-            # 注意:在计算标签位置时,要包括标签之间的间隙
54
-            x_offset = j * (tag_size + tag_spacing)
55
-            y_offset = i * (tag_size + tag_spacing)
56
-            
57
-            obj_points.append([
58
-                [x_offset, y_offset, 0],                         # 左上角
59
-                [x_offset + tag_size, y_offset, 0],              # 右上角
60
-                [x_offset + tag_size, y_offset + tag_size, 0],   # 右下角
61
-                [x_offset, y_offset + tag_size, 0]               # 左下角
62
-            ])
63
-    
64
-    # 将生成的3D点转换为 NumPy 数组并返回
65
-    return np.array(obj_points, dtype=np.float32)
66 9
 
67
-def calibrate_world(calibration_images_path, cam_id, world_chessboard_size=(6,6), world_square_size=35.2, world_square_spacing=10.56, debug=False):
10
+def calibrate_world_aprilgrid(calibration_images_path, world_chessboard_size=(6,6), world_square_size=35.2, world_square_spacing=10.56, debug=False):
68 11
     
69 12
     # 初始化 AprilTag 检测器
70 13
     detector = Detector('t36h11')
@@ -97,6 +40,7 @@ def calibrate_world(calibration_images_path, cam_id, world_chessboard_size=(6,6)
97 40
     all_corners = []
98 41
     all_object_points = []
99 42
     image_size = None
43
+    img_lst = []
100 44
 
101 45
     # 遍历每张图像
102 46
     for img_file in calibration_images_path:
@@ -105,6 +49,7 @@ def calibrate_world(calibration_images_path, cam_id, world_chessboard_size=(6,6)
105 49
         # 从文件读取并解码图像
106 50
         image_data = np.fromfile(img_file, dtype=np.uint8)
107 51
         img = cv2.imdecode(image_data, cv2.IMREAD_COLOR)
52
+        img_lst.append(img)
108 53
 
109 54
         if image_size is None:
110 55
             image_size = (img.shape[1], img.shape[0])  # 保存图像尺寸
@@ -114,7 +59,11 @@ def calibrate_world(calibration_images_path, cam_id, world_chessboard_size=(6,6)
114 59
 
115 60
         # 检测 AprilTags
116 61
         detections = detector.detect(gray)
117
-        if 0:
62
+        corners = detections[0].corners  # 检测到的角点 (4, 2)
63
+        tag_id = detections[0].tag_id    # 标签 ID
64
+        print(tag_id, corners)
65
+
66
+        if debug:
118 67
             show_detect_result(img, detections)
119 68
 
120 69
         # 如果检测到标签
@@ -126,6 +75,7 @@ def calibrate_world(calibration_images_path, cam_id, world_chessboard_size=(6,6)
126 75
             for detection in detections:
127 76
                 corners = detection.corners  # 检测到的角点 (4, 2)
128 77
                 tag_id = detection.tag_id    # 标签 ID
78
+                print(tag_id, corners)
129 79
 
130 80
                 # 将检测到的角点存储为 np.float32 格式
131 81
                 image_points.append(corners.astype(np.float32))
@@ -158,13 +108,19 @@ def calibrate_world(calibration_images_path, cam_id, world_chessboard_size=(6,6)
158 108
     total_error = 0
159 109
     for i in range(len(all_object_points)):
160 110
         imgpoints2, _ = cv2.projectPoints(all_object_points[i], rvecs[i], tvecs[i], mtx, dist)
161
-
162 111
         # 将 imgpoints2 从 (N, 1, 2) 转换为 (N, 2)
163 112
         imgpoints2 = np.squeeze(imgpoints2)
164 113
 
165 114
         # 确保 all_corners[i] 和 imgpoints2 类型匹配
166 115
         error = cv2.norm(all_corners[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
167 116
         total_error += error
117
+        if debug:
118
+            for (x, y), (px, py) in zip(all_corners[i], np.squeeze(imgpoints2)):
119
+                cv2.circle(img_lst[i], (int(x), int(y)), 5, (0, 255, 0), -1)  # Original points in green
120
+                cv2.circle(img_lst[i], (int(px), int(py)), 5, (0, 0, 255), -1)  # Reprojected points in red
121
+            cv2.namedWindow('Error Visualization', 0)
122
+            cv2.imshow('Error Visualization', img_lst[i])
123
+            cv2.waitKey(0)
168 124
 
169 125
     mean_error = total_error / len(all_object_points)
170 126
     #print(f"Mean re-projection error: {mean_error}")
@@ -200,89 +156,96 @@ def calibrate_world(calibration_images_path, cam_id, world_chessboard_size=(6,6)
200 156
 
201 157
 
202 158
 
203
-# def calibrate_world(world_img_path, chessboard_size, square_size, debug=False):
159
+def calibrate_world(world_img_path, chessboard_size, square_size, debug=False):
204 160
 
205
-#     # 准备棋盘格的世界坐标
206
-#     objp = np.zeros((chessboard_size[0] * chessboard_size[1], 3), np.float32)
207
-#     objp[:, :2] = np.mgrid[0:chessboard_size[0], 0:chessboard_size[1]].T.reshape(-1, 2)
208
-#     objp *= square_size
161
+    # 准备棋盘格的世界坐标
162
+    objp = np.zeros((chessboard_size[0] * chessboard_size[1], 3), np.float32)
163
+    objp[:, :2] = np.mgrid[0:chessboard_size[0], 0:chessboard_size[1]].T.reshape(-1, 2)
164
+    objp *= square_size
209 165
 
210
-#     # 储存棋盘格角点的世界坐标和图像坐标
211
-#     objpoints = []  # 世界坐标系中的三维点
212
-#     imgpoints = []  # 图像平面的二维点
166
+    # 储存棋盘格角点的世界坐标和图像坐标
167
+    objpoints = []  # 世界坐标系中的三维点
168
+    imgpoints = []  # 图像平面的二维点
213 169
 
214
-#     for fname in world_img_path:
215
-#         img = cv2.imread(fname)
216
-#         gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
170
+    for fname in world_img_path:
171
+        img = cv2.imread(fname)
172
+        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
173
+        # 寻找棋盘格角点
174
+        #ret, corners = cv2.findChessboardCorners(gray, chessboard_size, None)
175
+        flag = cv2.CALIB_CB_EXHAUSTIVE | cv2.CALIB_CB_ACCURACY
176
+        ret, corners = cv2.findChessboardCornersSB(gray, chessboard_size, None)
217 177
         
218
-#         # 寻找棋盘格角点
219
-#         ret, corners = cv2.findChessboardCorners(gray, chessboard_size, None)
178
+        # 如果找到足够的角点,添加到列表中
179
+        if ret:
180
+            objpoints.append(objp)
181
+           
182
+            criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
183
+            corners_refined = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
184
+            imgpoints.append(corners_refined)
220 185
         
221
-#         # 如果找到足够的角点,添加到列表中
222
-#         if ret:
223
-#             objpoints.append(objp)
224
-#             imgpoints.append(corners)
225
-            
226
-#             if debug:
227
-#                 img = cv2.drawChessboardCorners(img, chessboard_size, corners, ret)
228
-#                 # 在角点图像上标记坐标原点(第一个角点)
229
-#                 origin = tuple(corners[0].ravel().astype(int))
230
-#                 cv2.putText(img, '(0,0)', origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
231
-
232
-#                 # 添加 x 轴箭头 (右方向)
233
-#                 end_x = tuple(corners[1].ravel().astype(int))  # 选择横向的下一个角点
234
-#                 cv2.arrowedLine(img, origin, end_x, (255, 255, 255), 2, tipLength=0.2)
235
-#                 cv2.putText(img, 'X', (end_x[0] + 10, end_x[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
236
-
237
-#                 # 添加 y 轴箭头 (下方向)
238
-#                 end_y = tuple(corners[chessboard_size[0]].ravel().astype(int))  # 选择纵向的下一个角点
239
-#                 cv2.arrowedLine(img, origin, end_y, (255, 255, 255), 2, tipLength=0.2)
240
-#                 cv2.putText(img, 'Y', (end_y[0], end_y[1] + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
241
-#                 cv2.imshow('img', img)
242
-#                 cv2.waitKey(500)
243
-
244
-
245
-#     cv2.destroyAllWindows()
246
-
247
-#     objpoints = np.array(objpoints)
248
-#     imgpoints = np.array(imgpoints)
249
-
250
-#     if debug:
251
-#         fig, ax = plt.subplots(1, 2, figsize=(12, 6))
252
-
253
-#         # 第一个子图:objp 点
254
-#         ax[0].scatter(objpoints[-1, :, 0], objpoints[-1, :, 1], color='blue', label='objp (Object Points)')
255
-#         for i in range(len(objpoints[0])):
256
-#             ax[0].text(objpoints[-1, i, 0], objpoints[-1, i, 1], f'{i}', color='blue', fontsize=12)
257
-#         ax[0].set_title('Object Points (objp)')
258
-#         ax[0].set_xlabel('X Axis')
259
-#         ax[0].set_ylabel('Y Axis')
260
-#         ax[0].grid(True)
261
-#         ax[0].axis('equal')
262
-
263
-#         # 第二个子图:corners 点
264
-#         ax[1].scatter(imgpoints[-1, :, 0, 0], imgpoints[-1, :, 0, 1], color='red', label='corners (Image Points)')
265
-#         for i in range(len(imgpoints[0])):
266
-#             ax[1].text(imgpoints[-1, i, 0, 0], imgpoints[-1, i, 0, 1], f'{i}', color='red', fontsize=12)
267
-#         ax[1].set_title('Camera Image Points (corners)')
268
-#         ax[1].set_xlabel('X')
269
-#         ax[1].set_ylabel('Y')
270
-#         ax[1].grid(True)
271
-#         ax[1].axis('equal')
272
-#         plt.show()
273
-#     # 相机标定
274
-#     ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
275
-
276
-#     # 获得最后一张图片的旋转矩阵和平移向量
277
-#     R = cv2.Rodrigues(rvecs[-1])[0]  # 转换为旋转矩阵
278
-#     T = tvecs[-1]
279
-
280
-#     # 创建一个字典来保存所有的标定数据
281
-#     calibration_data = {
282
-#         'camera_matrix': mtx,
283
-#         'distortion_coefficients': dist,
284
-#         'rotation_matrix': R,
285
-#         'translation_vector': T,
286
-#         'error': ret/len(objpoints)
287
-#     }
288
-#     return calibration_data
186
+            if debug:
187
+                img = cv2.drawChessboardCorners(img, chessboard_size, corners, ret)
188
+                # 在角点图像上标记坐标原点(第一个角点)
189
+                origin = tuple(corners[0].ravel().astype(int))
190
+                cv2.putText(img, '(0,0)', origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
191
+
192
+                # 添加 x 轴箭头 (右方向)
193
+                end_x = tuple(corners[1].ravel().astype(int))  # 选择横向的下一个角点
194
+                cv2.arrowedLine(img, origin, end_x, (255, 255, 255), 2, tipLength=0.2)
195
+                cv2.putText(img, 'X', (end_x[0] + 10, end_x[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
196
+
197
+                # 添加 y 轴箭头 (下方向)
198
+                end_y = tuple(corners[chessboard_size[0]].ravel().astype(int))  # 选择纵向的下一个角点
199
+                cv2.arrowedLine(img, origin, end_y, (255, 255, 255), 2, tipLength=0.2)
200
+                cv2.putText(img, 'Y', (end_y[0], end_y[1] + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
201
+                cv2.namedWindow('img', 0)
202
+                cv2.imshow('img', img)
203
+                cv2.waitKey(100)
204
+
205
+
206
+    cv2.destroyAllWindows()
207
+
208
+    objpoints = np.array(objpoints)
209
+    imgpoints = np.array(imgpoints)
210
+
211
+    if debug:
212
+        fig, ax = plt.subplots(1, 2, figsize=(12, 6))
213
+
214
+        # 第一个子图:objp 点
215
+        ax[0].scatter(objpoints[-1, :, 0], objpoints[-1, :, 1], color='blue', label='objp (Object Points)')
216
+        for i in range(len(objpoints[0])):
217
+            ax[0].text(objpoints[-1, i, 0], objpoints[-1, i, 1], f'{i}', color='blue', fontsize=12)
218
+        ax[0].set_title('Object Points (objp)')
219
+        ax[0].set_xlabel('X Axis')
220
+        ax[0].set_ylabel('Y Axis')
221
+        ax[0].grid(True)
222
+        ax[0].axis('equal')
223
+
224
+        # 第二个子图:corners 点
225
+        ax[1].scatter(imgpoints[-1, :, 0, 0], imgpoints[-1, :, 0, 1], color='red', label='corners (Image Points)')
226
+        for i in range(len(imgpoints[0])):
227
+            ax[1].text(imgpoints[-1, i, 0, 0], imgpoints[-1, i, 0, 1], f'{i}', color='red', fontsize=12)
228
+        ax[1].set_title('Camera Image Points (corners)')
229
+        ax[1].set_xlabel('X')
230
+        ax[1].set_ylabel('Y')
231
+        ax[1].grid(True)
232
+        ax[1].axis('equal')
233
+        #plt.show()
234
+    # 相机标定
235
+    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], None, None)
236
+
237
+    # 获得最后一张图片的旋转矩阵和平移向量
238
+    R_vector = rvecs[-1]
239
+    R = cv2.Rodrigues(R_vector)[0]  # 转换为旋转矩阵
240
+    T = tvecs[-1]
241
+
242
+    # 创建一个字典来保存所有的标定数据
243
+    calibration_data = {
244
+        'camera_matrix': mtx,
245
+        'distortion_coefficients': dist,
246
+        'rotation_matrix': R,
247
+        'rotation_vector': R_vector,
248
+        'translation_vector': T,
249
+        'error': ret/len(objpoints)
250
+    }
251
+    return calibration_data

+ 257 - 9
src/calibration/get_screen_params.py

@@ -3,11 +3,17 @@ import numpy as np
3 3
 import glob
4 4
 from scipy.io import savemat
5 5
 import matplotlib.pyplot as plt
6
+import os
7
+import ctypes
8
+import pupil_apriltags as apriltag  # 确保使用正确的库
9
+from aprilgrid import Detector
10
+from .calibrate import generate_3d_points, show_detect_result_new, show_detect_result, visualize_3d_points
6 11
 
7 12
 # 读取文件夹中的所有棋盘格图像
8 13
 # images = glob.glob('calibration/screen_calibration/*.bmp')
9 14
 
10
-def calibrate_screen(screen_img_path, cam_mtx, cam_dist, chessboard_size, square_size, debug=False):
15
+
16
+def calibrate_screen_chessboard(screen_img_path, cam_mtx, cam_dist, chessboard_size, square_size, debug=False):
11 17
     # Prepare object points for chessboard corners in world coordinates
12 18
     objp = np.zeros((chessboard_size[0] * chessboard_size[1], 3), np.float32)
13 19
     objp[:, :2] = np.mgrid[0:chessboard_size[0], 0:chessboard_size[1]].T.reshape(-1, 2)
@@ -16,7 +22,8 @@ def calibrate_screen(screen_img_path, cam_mtx, cam_dist, chessboard_size, square
16 22
 
17 23
     objpoints = []  # 三维点
18 24
     imgpoints = []  # 二维点
19
-    
25
+
26
+    #print('cam_mtx = ', cam_mtx)
20 27
 
21 28
     for img_path in screen_img_path:
22 29
         
@@ -35,9 +42,11 @@ def calibrate_screen(screen_img_path, cam_mtx, cam_dist, chessboard_size, square
35 42
         # cv2.imshow('gray', gray)
36 43
         # cv2.waitKey(0)
37 44
         # cv2.destroyAllWindows()
38
-        ret, corners = cv2.findChessboardCorners(gray, chessboard_size, flags=cv2.CALIB_CB_FAST_CHECK)
45
+        flag = cv2.CALIB_CB_EXHAUSTIVE | cv2.CALIB_CB_ACCURACY
46
+        ret, corners = cv2.findChessboardCornersSB(gray, chessboard_size, flag)
47
+        #ret, corners = cv2.findChessboardCorners(gray, chessboard_size, flags=cv2.CALIB_CB_FAST_CHECK)
39 48
         if ret:
40
-            print('screen img_path = ',img_path)
49
+           # print('screen img_path = ',img_path)
41 50
             objpoints.append(objp)
42 51
             criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
43 52
             corners_refined = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
@@ -60,9 +69,10 @@ def calibrate_screen(screen_img_path, cam_mtx, cam_dist, chessboard_size, square
60 69
                 cv2.putText(img, 'Y', (end_y[0], end_y[1] + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
61 70
                 cv2.namedWindow('screen', 0)
62 71
                 cv2.imshow('screen', img)
63
-                cv2.waitKey(0)
64
-                cv2.destroyAllWindows()
72
+                cv2.waitKey(100)
73
+                
65 74
     # import pdb; pdb.set_trace()
75
+    cv2.destroyAllWindows()
66 76
 
67 77
     objpoints = np.array(objpoints)
68 78
     imgpoints = np.array(imgpoints)
@@ -102,9 +112,10 @@ def calibrate_screen(screen_img_path, cam_mtx, cam_dist, chessboard_size, square
102 112
     ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], cam_mtx, cam_dist)
103 113
     # 获得最后一张图片的旋转矩阵和平移向量
104 114
     # import pdb; pdb.set_trace()
105
-    R = cv2.Rodrigues(rvecs[-1])[0]  # 转换为旋转矩阵
115
+    R_vector = rvecs[0]
116
+    R = cv2.Rodrigues(R_vector)[0]  # 转换为旋转矩阵
106 117
     # R, _ = cv2.Rodrigues(np.array([[0.024044506712974],[0.002032279577832],[-3.138030042895759]]))
107
-    T = tvecs[-1]
118
+    T = tvecs[0]
108 119
     # print("*"*100)
109 120
     # print("Screen Calibration mtx:", mtx)
110 121
     # print("Screen Calibration dist:", dist)
@@ -117,7 +128,244 @@ def calibrate_screen(screen_img_path, cam_mtx, cam_dist, chessboard_size, square
117 128
         'screen_matrix': mtx,
118 129
         'screen_distortion': dist,
119 130
         'screen_rotation_matrix': R,
131
+        'screen_rotation_vector': R_vector,
120 132
         'screen_translation_vector': T,
121 133
         'screen_error': ret/len(objpoints)
122 134
     }
123
-    return mat_data
135
+    print('screen mat data = ', mat_data)
136
+    return mat_data
137
+
138
+
139
+
140
+def calibrate_screen_circlegrid(screen_img_path, cam_mtx, cam_dist, chessboard_size, square_size, debug=False):
141
+    # Prepare object points for chessboard corners in world coordinates
142
+    objp = np.zeros((chessboard_size[0] * chessboard_size[1], 3), np.float32)
143
+    objp[:, :2] = np.mgrid[0:chessboard_size[0], 0:chessboard_size[1]].T.reshape(-1, 2)
144
+    objp *= square_size
145
+    # import pdb; pdb.set_trace()
146
+
147
+    objpoints = []  # 三维点
148
+    imgpoints = []  # 二维点
149
+
150
+    #print('cam_mtx = ', cam_mtx)
151
+
152
+    for img_path in screen_img_path:
153
+        
154
+        image_data = np.fromfile(img_path, dtype=np.uint8)
155
+        img = cv2.imdecode(image_data, cv2.IMREAD_COLOR)
156
+       
157
+        # import pdb; pdb.set_trace()
158
+        dst = cv2.undistort(img, cam_mtx, cam_dist, None, None)
159
+        gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
160
+        
161
+        circle_w, circle_h = chessboard_size
162
+        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
163
+        params = cv2.SimpleBlobDetector_Params()
164
+        params.maxArea = 10e4
165
+        params.minArea = 10
166
+        params.minDistBetweenBlobs = 5
167
+        blobDetector = cv2.SimpleBlobDetector_create(params)
168
+        ret, corners = cv2.findCirclesGrid(gray, (circle_w, circle_h), cv2.CALIB_CB_SYMMETRIC_GRID, blobDetector, None)
169
+        if ret:
170
+            objpoints.append(objp)
171
+            corners_refined = cv2.cornerSubPix(gray, corners, (circle_w, circle_h), (-1, -1), criteria)
172
+            #cv2.drawChessboardCorners(dst, (circle_w, circle_h), corners_refined, ret)
173
+            imgpoints.append(corners_refined)
174
+
175
+            if debug:
176
+                img = cv2.drawChessboardCorners(img, chessboard_size, corners, ret)
177
+                # 在角点图像上标记坐标原点(第一个角点)
178
+                origin = tuple(corners[0].ravel().astype(int))
179
+                cv2.putText(img, '(0,0)', origin, cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
180
+
181
+                # 添加 x 轴箭头 (右方向)
182
+                end_x = tuple(corners[1].ravel().astype(int))  # 选择横向的下一个角点
183
+                cv2.arrowedLine(img, origin, end_x, (255, 255, 255), 2, tipLength=0.2)
184
+                cv2.putText(img, 'X', (end_x[0] + 10, end_x[1]), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
185
+
186
+                # 添加 y 轴箭头 (下方向)
187
+                end_y = tuple(corners[chessboard_size[0]].ravel().astype(int))  # 选择纵向的下一个角点
188
+                cv2.arrowedLine(img, origin, end_y, (255, 255, 255), 2, tipLength=0.2)
189
+                cv2.putText(img, 'Y', (end_y[0], end_y[1] + 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
190
+                cv2.namedWindow('screen', 0)
191
+                cv2.imshow('screen', img)
192
+                cv2.waitKey(100)
193
+                
194
+    # import pdb; pdb.set_trace()
195
+    cv2.destroyAllWindows()
196
+
197
+    objpoints = np.array(objpoints)
198
+    imgpoints = np.array(imgpoints)
199
+
200
+    print('objpoints = ', objpoints)
201
+    print('imgpoints = ', imgpoints)
202
+    
203
+   
204
+    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1], cam_mtx, cam_dist)
205
+    # 获得最后一张图片的旋转矩阵和平移向量
206
+    # import pdb; pdb.set_trace()
207
+    R_vector = rvecs[-1]
208
+    R = cv2.Rodrigues(R_vector)[0]  # 转换为旋转矩阵
209
+   
210
+    T = tvecs[-1]
211
+  
212
+    mat_data = {
213
+        'screen_matrix': mtx,
214
+        'screen_distortion': dist,
215
+        'screen_rotation_matrix': R,
216
+        'screen_rotation_vector': R_vector,
217
+        'screen_translation_vector': T,
218
+        'screen_error': ret/len(objpoints)
219
+    }
220
+    print('screen mat data = ', mat_data)
221
+    return mat_data
222
+
223
+
224
+
225
+def calibrate_screen_aprilgrid(screen_img_path, cam_mtx, cam_dist, chessboard_size, square_size, square_spacing, debug=False):
226
+    # Prepare object points for chessboard corners in world coordinates
227
+    # objp = np.zeros((chessboard_size[0] * chessboard_size[1], 3), np.float32)
228
+    # objp[:, :2] = np.mgrid[0:chessboard_size[0], 0:chessboard_size[1]].T.reshape(-1, 2)
229
+    # objp *= square_size
230
+    # import pdb; pdb.set_trace()
231
+    # 初始化 AprilTag 检测器
232
+    #detector = Detector('t36h11')
233
+    dll_path = "C:\\Users\\zhang\\AppData\\Local\\Programs\\Python\\Python312\\Lib\\site-packages\\pupil_apriltags\\lib\\apriltag.dll"
234
+    if not os.path.exists(dll_path):
235
+        raise FileNotFoundError(f"{dll_path} 不存在")
236
+    else:
237
+        print('file exists')
238
+
239
+    ctypes.WinDLL(dll_path, winmode=0)
240
+    detector = apriltag.Detector(
241
+        families="tag36h11",  # 确保与标签类型一致
242
+        nthreads=4,           # 多线程加速
243
+        quad_decimate=0.5,    # 降采样因子,调小提高检测精度
244
+        quad_sigma=0.8,       # 高斯模糊,处理噪声图像
245
+        refine_edges=True     # 边缘细化
246
+    )
247
+    #detector = Detector('t36h11')
248
+
249
+    object_point_single = generate_3d_points(chessboard_size, square_size, square_spacing)  
250
+
251
+    #visualize_3d_points(object_point_single)
252
+
253
+    # 用于保存所有图像的检测到的角点和 ID
254
+    all_corners = []
255
+    all_object_points = []
256
+    image_size = None
257
+    img_lst = []
258
+
259
+    # 遍历每张图像
260
+    for img_file in screen_img_path:
261
+        print('Processing:', img_file)
262
+        
263
+        # 从文件读取并解码图像
264
+        image_data = np.fromfile(img_file, dtype=np.uint8)
265
+        img = cv2.imdecode(image_data, cv2.IMREAD_COLOR)
266
+        img_lst.append(img)
267
+        #screen mirror flip
268
+        #img = cv2.flip(img, 0)
269
+
270
+        if image_size is None:
271
+            image_size = (img.shape[1], img.shape[0])  # 保存图像尺寸
272
+        
273
+        # 转换为灰度图像
274
+        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
275
+        gray = cv2.undistort(gray, cam_mtx, cam_dist, None, cam_mtx)
276
+        
277
+        # 检测 AprilTags
278
+        detections = detector.detect(gray)
279
+        if debug:
280
+            show_detect_result_new(img, detections)
281
+       
282
+        # 如果检测到标签
283
+        if detections:
284
+            image_points = []
285
+            object_points = []
286
+
287
+            # 遍历每个检测到的标签
288
+            for detection in detections:
289
+                corners = detection.corners  # 检测到的角点 (4, 2)
290
+                tag_id = detection.tag_id    # 标签 ID
291
+
292
+                # 将检测到的角点存储为 np.float32 格式
293
+                image_points.append(corners.astype(np.float32))
294
+
295
+                # 根据标签 ID 提取相应的 3D 坐标
296
+                if tag_id < len(object_point_single):
297
+                    object_points.append(object_point_single[tag_id])
298
+                else:
299
+                    print(f"Warning: Tag ID {tag_id} is out of range.")
300
+
301
+            if object_points and image_points:
302
+                # 保存当前图像的 2D 角点
303
+                all_corners.append(np.array(image_points, dtype=np.float32).reshape(-1, 2))  # 转为 (N, 2) 形状
304
+
305
+                # 保存对应的 3D 坐标
306
+                all_object_points.append(np.array(object_points, dtype=np.float32).reshape(-1, 3))  # 转为 (N, 3) 形状
307
+            else:
308
+                print("No valid detections in this image.")
309
+
310
+    # 调用相机标定
311
+    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
312
+        all_object_points,   # 3D 物体点
313
+        all_corners,         # 2D 图像点
314
+        image_size,          # 图像大小
315
+        None,                # 相机内参初始值
316
+        None                 # 畸变系数初始值
317
+    )
318
+
319
+   # 计算重投影误差
320
+    total_error = 0
321
+    for i in range(len(all_object_points)):
322
+        imgpoints2, _ = cv2.projectPoints(all_object_points[i], rvecs[i], tvecs[i], mtx, dist)
323
+
324
+        # 将 imgpoints2 从 (N, 1, 2) 转换为 (N, 2)
325
+        imgpoints2 = np.squeeze(imgpoints2)
326
+
327
+        # 确保 all_corners[i] 和 imgpoints2 类型匹配
328
+        error = cv2.norm(all_corners[i], imgpoints2, cv2.NORM_L2) / len(imgpoints2)
329
+        total_error += error
330
+        if debug:
331
+            for (x, y), (px, py) in zip(all_corners[i], np.squeeze(imgpoints2)):
332
+                cv2.circle(img_lst[i], (int(x), int(y)), 5, (0, 255, 0), -1)  # Original points in green
333
+                cv2.circle(img_lst[i], (int(px), int(py)), 5, (0, 0, 255), -1)  # Reprojected points in red
334
+            cv2.namedWindow('Error Visualization', 0)
335
+            cv2.imshow('Error Visualization', img_lst[i])
336
+            cv2.waitKey(100)
337
+
338
+    mean_error = total_error / len(all_object_points)
339
+    print(f"Mean re-projection error: {mean_error}")
340
+
341
+    print('ret/len(all_object_points) = ', ret/len(all_object_points))
342
+    #print('rvecs[0] = ', (rvecs[0]))
343
+
344
+    # 获得第一张图片的旋转矩阵和平移向量
345
+    R = cv2.Rodrigues(rvecs[-2])[0]  # 转换为旋转矩阵
346
+    T = tvecs[-2]
347
+
348
+    # 创建一个字典来保存所有的标定数据
349
+    calibration_data = {
350
+        'screen_matrix': mtx,
351
+        'screen_distortion': dist,
352
+        'screen_rotation_matrix': R,
353
+        'screen_translation_vector': T,
354
+        'screen_error': ret/len(all_object_points)
355
+    }
356
+
357
+   
358
+
359
+    # # 创建一个字典来保存所有的标定数据
360
+    # calibration_data = {
361
+    #     'camera_matrix': mtx,
362
+    #     'distortion_coefficients': dist,
363
+    #     'rotation_matrix': rvecs,
364
+    #     'translation_vector': tvecs,
365
+    #     'mean_reprojection_error': mean_error,
366
+    #     'error':ret/len(all_object_points)
367
+    # }
368
+    print(calibration_data)
369
+
370
+    return calibration_data
371
+        

+ 1 - 1
src/eval.py

@@ -207,7 +207,7 @@ def get_eval_result(pointcloud_path, json_path, theta_notch, debug):
207 207
     
208 208
     x = data[:, 0]
209 209
     y = data[:, 1]
210
-    z = 1000 * data[:, 2]
210
+    z =  data[:, 2]
211 211
     
212 212
     max_z = max(z)
213 213
     min_z = min(z)

+ 9 - 6
src/phase.py

@@ -34,11 +34,12 @@ def read_image_data(image_folder: str, cam_id: int, mtx, dst, img_type: str = 'b
34 34
     Returns:
35 35
         Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: 低频x, 低频y, 高频x, 高频y图像数组
36 36
     """
37
+    #cam_id = 3
37 38
     filenames = glob.glob(os.path.join(image_folder, f'*{str(cam_id)}*_*.{img_type}'))
38
-    #print('filenames = ',filenames)
39
+    
39 40
     filenames = sort_filenames(filenames)
40
-    # print(filenames)
41
-    # import pdb; pdb.set_trace()
41
+    #print('filenames = ', filenames)
42
+    
42 43
     assert len(filenames) >= 2*num_freq*num_shift, f'There should be 2*{num_freq}*{num_shift} images in the folder, but got {len(filenames)}'
43 44
     
44 45
     img_data = []
@@ -61,6 +62,7 @@ def read_image_data(image_folder: str, cam_id: int, mtx, dst, img_type: str = 'b
61 62
     
62 63
     return output_img
63 64
 
65
+
64 66
 def compute_phase(img_data: np.ndarray, mask: np.ndarray) -> np.ndarray:
65 67
     """
66 68
     计算相位信息。
@@ -73,6 +75,7 @@ def compute_phase(img_data: np.ndarray, mask: np.ndarray) -> np.ndarray:
73 75
         np.ndarray: 计算得到的相位数组
74 76
     """
75 77
     I1, I2, I3, I4 = [np.ma.masked_where(mask == 0, img_data[:, :, i]) for i in range(4)]
78
+    
76 79
     phase = np.arctan2(I3 - I1, I4 - I2) + np.pi
77 80
     return phase.filled(fill_value=np.nan)
78 81
 
@@ -89,8 +92,8 @@ def extract_phase_2(img_folder: str, cam_id: int, mask, mtx, dst, num_shift: int
89 92
     Returns:
90 93
         Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]: 低频x相位, 低频y相位, 高频x相位, 高频y相位
91 94
     """
92
-    print(img_folder)
93
-    high_freq_x, high_freq_y, low_freq_x, low_freq_y = read_image_data(img_folder, mtx, dst,num_freq=2, num_shift=num_shift)
95
+   
96
+    high_freq_x, high_freq_y, low_freq_x, low_freq_y = read_image_data(img_folder, cam_id, mtx, dst, num_freq=2, num_shift=num_shift)
94 97
 
95 98
     return (
96 99
         compute_phase(high_freq_x, mask),
@@ -126,7 +129,7 @@ def extract_phase(img_folder: str, cam_id: int, mask, mtx, dst, num_freq: int =
126 129
     else:
127 130
         raise ValueError(f"num_freq should be 2 or 3, but got {num_freq}")
128 131
 
129
-def unwrap_phase(phases, save_path, num_freq: int = 2, k: int = 32, debug: bool = False) -> Tuple[np.ndarray, np.ndarray]:
132
+def unwrap_phase(phases, num_freq: int = 2, k: int = 32, debug: bool = False) -> Tuple[np.ndarray, np.ndarray]:
130 133
     """
131 134
     解包裹相位。
132 135
 

+ 20 - 19
src/recons.py

@@ -12,6 +12,7 @@ import plotly.io as pio
12 12
 from scipy.spatial import Delaunay
13 13
 from src.pcl_postproc import smooth_pcl, align2ref
14 14
 from src.eval import fit_plane, remove_noise
15
+from src.utils import expand_to_rectangle_with_unit_spacing
15 16
 from scipy.ndimage import gaussian_filter
16 17
 from scipy.optimize import least_squares
17 18
 import scipy.sparse as sp
@@ -299,8 +300,8 @@ def poisson_recons(x, y, gx, gy, debug=False):
299 300
     #     div_g[i] = gx[i] + gy[i]  # Simple approximation
300 301
     
301 302
     # Calculate divergence from synthetic gradients (use improved method)
302
-    gx_smooth, gy_smooth = smooth_gradients(gx, gy, 0.02)
303
-    div_g = compute_divergence(points, triangles, gx_smooth, gy_smooth)
303
+    #gx_smooth, gy_smooth = smooth_gradients(gx, gy, 0.02)
304
+    div_g = compute_divergence(points, triangles, gx, gy)
304 305
 
305 306
     # Solve the Poisson equation using the sparse matrix
306 307
     z_reconstructed = spsolve(L, div_g)
@@ -343,33 +344,33 @@ def reconstruction_cumsum(world_points, camera_points, screen_points, debug=Fals
343 344
 
344 345
     # 计算法向量在Y方向的梯度
345 346
     gradient_y = normal_vector[1]
346
-    #gradient_x = np.zeros(gradient_x.shape)
347
-    #gradient_y = np.zeros(gradient_x.shape)
348 347
 
349
-    print('abs mean (gradient_x) = ', np.mean(abs(gradient_x)))
350
-    print('abs mean (gradient_y) = ', np.mean(abs(gradient_y)))
348
+    gradient_x1 = gradient_x - np.mean(gradient_x)
349
+    gradient_y1 = gradient_y - np.mean(gradient_y)
351 350
 
352
-    #gradient_x = gradient_x - np.mean(gradient_x)
353
-    #gradient_y = gradient_y - np.mean(gradient_y)
354
-    # import pdb
355
-    # pdb.set_trace()
356 351
     # 计算z值,假设初始z值为0
357 352
     z = np.zeros(len(x))
358 353
     z[0] = 0  # 初始点的z
359
-    
360
-    z_raw = poisson_recons(x, y, gradient_x, gradient_y)
361
-    #z_raw = poisson_recons_with_smoothed_gradients(x, y, gradient_x, gradient_y)
362
-    #z_raw = z
363 354
 
364
-    #print('gradient_x  = ', x.shape, y.shape, gradient_x.shape, gradient_y.shape)
355
+    gradient_x1 = gradient_x - np.mean(gradient_x)
356
+    gradient_y1 = gradient_y - np.mean(gradient_y)
357
+    
358
+    z_raw = poisson_recons(x, y, gradient_x1, gradient_y1)
365 359
     gradient_xy = np.column_stack((x, y, gradient_x, gradient_y))
360
+
361
+    x_grad_expand, y_grad_expand, x_expand, y_expand = expand_to_rectangle_with_unit_spacing(x, y, gradient_x1, gradient_y1)   
362
+       
363
+    Z_reconstructed = np.cumsum(x_grad_expand, 1)*1 + np.cumsum(y_grad_expand, 0)*1
364
+    z1_lookup = {(x1, y1): z1 for x1, y1, z1 in np.column_stack((x_expand.reshape(-1,1), y_expand.reshape(-1,1), Z_reconstructed.reshape(-1,1)))}
365
+
366
+    z1_values = np.array([z1_lookup.get((x, y), np.nan) for x, y in np.column_stack((x, y))])
366 367
     
367 368
     if align:
368 369
         #points_aligned, rotation_matrix = align2ref(smoothed_points)
369 370
         points_aligned, rotation_matrix = align2ref(np.column_stack((x, y, z_raw)))
370 371
         raw_data = np.column_stack((x, y, z_raw))
371 372
         
372
-        points_aligned[:,2] = points_aligned[:,2] - np.mean(points_aligned[:, 2])
373
+        #points_aligned[:,2] = points_aligned[:,2] - np.mean(points_aligned[:, 2])
373 374
         #print('-- nanmean after alignment: mean - {}; std - {}'.format(np.nanmean(points_aligned[:, 2]), np.nanstd(points_aligned[:, 2])))
374 375
     if smooth:
375 376
         #print("-- point cloud is being smoothed.")
@@ -392,10 +393,10 @@ def reconstruction_cumsum(world_points, camera_points, screen_points, debug=Fals
392 393
         # 提取 x, y, z 坐标
393 394
         x_vals = x
394 395
         y_vals = y
395
-        z_vals = z_raw
396
+        z_vals = z1_values
396 397
 
397 398
         # 绘制3D点云
398
-        ax.scatter(x_vals, y_vals, z_vals, c=z_vals, cmap='viridis', marker='o' ,s=1)
399
+        ax.scatter(x_vals, y_vals, z_vals, c=z_vals, cmap='viridis', marker='o' ,s=5)
399 400
 
400 401
         # 设置轴标签和标题
401 402
         ax.set_xlabel('X (mm)')
@@ -405,7 +406,7 @@ def reconstruction_cumsum(world_points, camera_points, screen_points, debug=Fals
405 406
 
406 407
         plt.show()
407 408
  
408
-    return np.column_stack((x, y, z_raw)), gradient_xy
409
+    return np.column_stack((x, y, z1_values)), gradient_xy
409 410
 
410 411
 
411 412
 

+ 103 - 56
src/utils.py

@@ -25,7 +25,10 @@ from scipy.spatial import Delaunay
25 25
 from scipy.sparse.linalg import lsqr
26 26
 from collections import defaultdict
27 27
 from scipy.sparse import lil_matrix
28
-
28
+from PIL import Image
29
+import pupil_apriltags as apriltag  # 确保使用正确的库
30
+from ctypes import CDLL
31
+import ctypes
29 32
 
30 33
 
31 34
 
@@ -33,13 +36,13 @@ def read_and_undis(img_path, mtx, dist):
33 36
 
34 37
     img = cv2.imread(img_path)
35 38
     dst = cv2.undistort(img, mtx, dist, None, mtx)
39
+    #dst = img
36 40
     gray = cv2.cvtColor(dst, cv2.COLOR_BGR2GRAY)
37 41
     return gray
38 42
 
39 43
 def write_point_cloud(file_path, x, y, z):
40 44
     # 将 x, y, z 列组合成一个二维数组
41 45
     point_cloud = np.column_stack((x, y, z))
42
-    # import pdb; pdb.set_trace()
43 46
     # 将数组写入文本文件,使用空格作为分隔符
44 47
     np.savetxt(file_path, point_cloud, fmt='%.10f', delimiter=',')
45 48
     print(f"Point cloud saved to {file_path}")
@@ -165,7 +168,6 @@ def fit_sector(contour_points, grid_spacing, erosion_pixels):
165 168
    
166 169
     z_col = np.full((len(grid_points_inside_sector), 1), z_contour_points[0])
167 170
 
168
-    
169 171
     # 处理负坐标,将所有坐标平移至非负区域
170 172
     min_x, min_y = np.min(contour_xy, axis=0) - 10
171 173
     offset = np.array([abs(min(min_x, 0)), abs(min(min_y, 0))])
@@ -184,7 +186,6 @@ def fit_sector(contour_points, grid_spacing, erosion_pixels):
184 186
     kernel = np.ones((erosion_pixels, erosion_pixels), np.uint8)
185 187
     eroded_img = cv2.erode(img, kernel, iterations=1)
186 188
 
187
-
188 189
     # 提取腐蚀后的轮廓
189 190
     contours, _ = cv2.findContours(eroded_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
190 191
     if len(contours) == 0:
@@ -210,7 +211,7 @@ def fit_sector(contour_points, grid_spacing, erosion_pixels):
210 211
     return np.column_stack((grid_points_inside_sector, z_col)), np.column_stack((between_points, np.full((len(between_points), 1), z_contour_points[0])))
211 212
 
212 213
 
213
-def get_meshgrid_contour(binary, save_path, debug=False):
214
+def get_meshgrid_contour(binary, debug=False):
214 215
 
215 216
     contours, _ = cv2.findContours(binary, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
216 217
     max_index, max_contour = max(enumerate(contours), key=lambda x: cv2.boundingRect(x[1])[2] * cv2.boundingRect(x[1])[3])
@@ -368,19 +369,14 @@ def get_world_points(contour_points, world_mat_contents, cam_id, grid_spacing, d
368 369
     Tw2c = world_mat_contents['translation_vector']
369 370
     Tc2w = -np.dot(np.linalg.inv(Rw2c), Tw2c)
370 371
     world_points_contours = affine_img2world(contour_points, A, Rc2w, Tc2w, d)
371
-    
372
-    
372
+
373 373
     world_points_fit,  world_points_boundary = fit_sector(world_points_contours, grid_spacing, erosion_pixels)
374 374
     _,  world_points_boundary_3 = fit_sector(world_points_contours, grid_spacing, 3)
375
-    # assert np.abs(world_contour_points[:, 0].max()-world_contour_points[:, 0].min()-(world_contour_points[:, 1].max()-world_contour_points[:, 1].min())) < 1
376
-    # import pdb; pdb.set_trace()
377
-    if debug:
378
-        # print('world x:', world_points_fit[:, 0].max()-world_points_fit[:, 0].min())
379
-        # print('world y:', world_points_fit[:, 1].max()-world_points_fit[:, 1].min())
380
-
381
-        #print('world contour x:', world_contour_points[:, 0].max()-world_contour_points[:, 0].min())
382
-        #print('world contour y:', world_contour_points[:, 1].max()-world_contour_points[:, 1].min())
383 375
 
376
+    print('x range = ',np.max(world_points_contours[:, 0]) - np.min(world_points_contours[:, 0]))
377
+    print('y range = ',np.max(world_points_contours[:, 1]) - np.min(world_points_contours[:, 1]))
378
+    print('z range = ',np.max(world_points_contours[:, 2]) - np.min(world_points_contours[:, 2]))
379
+    if debug:
384 380
         plt.figure()
385 381
         
386 382
         plt.scatter(world_points_fit[:, 0], world_points_fit[:, 1],  color='red', alpha=0.5)
@@ -399,7 +395,7 @@ def get_world_points(contour_points, world_mat_contents, cam_id, grid_spacing, d
399 395
 
400 396
 
401 397
 
402
-def get_camera_points(world_points, world_mat_contents, save_path, cam_id, debug=False):
398
+def get_camera_points(world_points, world_mat_contents, cam_id, debug=False):
403 399
     
404 400
     x_w_data, y_w_data, z_w_data = world_points[:, 0], world_points[:, 1], world_points[:, 2]
405 401
     A = world_mat_contents['camera_matrix']
@@ -457,7 +453,7 @@ def get_camera_points(world_points, world_mat_contents, save_path, cam_id, debug
457 453
 
458 454
     return camera_points, u_p_array, v_p_array
459 455
 
460
-def get_screen_points(point_data, x_phase_unwrapped, y_phase_unwrapped, screen_params, screen_to_world, cfg, save_path, cam_id, debug=False):
456
+def get_screen_points(point_data, x_phase_unwrapped, y_phase_unwrapped, screen_to_world, cfg, cam_id, debug=False):
461 457
     
462 458
     # x_phase_unwrapped = x_phase_unwrapped.T
463 459
     # y_phase_unwrapped = y_phase_unwrapped.T
@@ -488,7 +484,6 @@ def get_screen_points(point_data, x_phase_unwrapped, y_phase_unwrapped, screen_p
488 484
     value_map = np.abs(x_phase_unwrapped - average_x_phase_unwrapped) + np.abs(y_phase_unwrapped - average_y_phase_unwrapped)
489 485
     min_index = np.unravel_index(np.nanargmin(value_map), value_map.shape)
490 486
     
491
-    
492 487
     # import pdb; pdb.set_trace()
493 488
     # 选定一个中心相位值作为绝对相位值
494 489
     # abs_phasepoint_picture = np.array(min_index)
@@ -508,10 +503,10 @@ def get_screen_points(point_data, x_phase_unwrapped, y_phase_unwrapped, screen_p
508 503
     abs_phasepoint_world = np.dot(Rs2w, arrow_abs_mm) + Ts2w.T
509 504
 
510 505
     #checked
511
-    print('abs_phasepoint 绝对相位值:')
512
-    print(abs_phasepoint)
513
-    print('abs_phasepoint_world 绝对相位值对应的世界坐标系的位置:')
514
-    print(abs_phasepoint_world)
506
+    # print('abs_phasepoint 绝对相位值:')
507
+    # print(abs_phasepoint)
508
+    # print('abs_phasepoint_world 绝对相位值对应的世界坐标系的位置:')
509
+    # print(abs_phasepoint_world)
515 510
 
516 511
     # print('----------------------------------------')
517 512
 
@@ -553,8 +548,7 @@ def get_screen_points(point_data, x_phase_unwrapped, y_phase_unwrapped, screen_p
553 548
     x_data = np.array(x_data)
554 549
     y_data = np.array(y_data)
555 550
     z_data = np.array(z_data)   
556
-    print('u_p size = ', x_data.shape)
557
-    print('screen_points = ', x_data, np.nanmean(x_data),np.nanmean(y_data),np.nanmean(z_data))
551
+   
558 552
     screen_points = np.column_stack((x_data, y_data, z_data))
559 553
 
560 554
     if debug:
@@ -572,13 +566,14 @@ def get_screen_points(point_data, x_phase_unwrapped, y_phase_unwrapped, screen_p
572 566
 
573 567
         # 调整子图之间的间距
574 568
         plt.tight_layout()
575
-        plt.savefig(os.path.join(save_path, str(cam_id) + "_screen_points.png"))
569
+        
576 570
         # 显示图像
577 571
         plt.show()
578 572
     # import pdb; pdb.set_trace()
579 573
     return screen_points
580 574
 
581
-def get_white_mask(white_path, bin_thresh=15, debug=False):  
575
+def get_white_mask(white_path, bin_thresh=15, size_thresh=0.8, debug=False):  
576
+    #print('white path = ', white_path)
582 577
     gray = cv2.imread(white_path, 0)
583 578
    
584 579
     mask = np.zeros_like(gray)
@@ -588,11 +583,10 @@ def get_white_mask(white_path, bin_thresh=15, debug=False):
588 583
     erode_thresh_img = cv2.erode(thresh_img, kernel, iterations=1)
589 584
     img_height, img_width = erode_thresh_img.shape[:2]
590 585
     
591
- 
592 586
     contours, _ = cv2.findContours(erode_thresh_img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
593 587
     valid_contours = [
594 588
         contour for contour in contours 
595
-        if cv2.boundingRect(contour)[2] < 0.8*img_width and cv2.boundingRect(contour)[3] < 0.8 * img_height
589
+        if cv2.boundingRect(contour)[2] < size_thresh*img_width and cv2.boundingRect(contour)[3] < size_thresh * img_height
596 590
     ]
597 591
     # 确保有有效的轮廓
598 592
     if valid_contours:
@@ -600,13 +594,16 @@ def get_white_mask(white_path, bin_thresh=15, debug=False):
600 594
         cv2.drawContours(mask, [max_contour], -1, (255, 255, 255), thickness=cv2.FILLED)
601 595
     if debug:
602 596
         
597
+        cv2.namedWindow('thresh_img',0)
598
+        cv2.imshow('thresh_img', thresh_img)
603 599
         cv2.namedWindow('gray',0)
604 600
         cv2.imshow('gray', gray)
605
-        cv2.waitKey(0)
601
+        
606 602
    
607 603
         cv2.namedWindow('mask',0)
608 604
         cv2.imshow('mask', mask)
609 605
         cv2.waitKey(0)
606
+    cv2.destroyAllWindows()
610 607
     return mask
611 608
 
612 609
 # def get_world_points_from_mask(binary_image, world_mat_contents, d, save_path, debug=False):
@@ -1190,7 +1187,7 @@ def southwell_integrate_smooth(x_grad_expand, y_grad_expand, x_expand, y_expand,
1190 1187
 
1191 1188
 def compute_divergence(x_grad_expand, y_grad_expand):
1192 1189
     """
1193
-    计算梯度场的散度(div_G),用于泊松重建。
1190
+    计算梯度场的散度(div_G),用于泊松重建。
1194 1191
     
1195 1192
     Parameters:
1196 1193
     x_grad_expand: numpy.ndarray
@@ -1259,12 +1256,47 @@ def poisson_reconstruct(x_grad_expand, y_grad_expand):
1259 1256
     return Z_reconstructed
1260 1257
 
1261 1258
 
1259
+def adjust_point_clouds(points):
1260
+    print('x mean = ', np.mean(points[:, 0]))
1261
+    print('y mean = ', np.mean(points[:, 1]))
1262
+    points[:, 0] = np.round(points[:, 0]-np.mean(points[:, 0]))
1263
+    points[:, 1] = np.round(points[:, 1]-np.mean(points[:, 1])) 
1264
+    #points[:, 2] = 1000 * points[:,2]
1265
+    z_pingjing = np.loadtxt('D:\\data\\one_cam\\betone1230\\20250103151342402-pingjing\\cloudpoint-bak.txt', delimiter=',')
1266
+
1267
+    # 创建z_pingjing的查找字典,使用整数xy坐标作为键
1268
+    z_pingjing_lookup = {(int(x), int(y)): z for x, y, z in z_pingjing}
1269
+    
1270
+    # 提取points的xy坐标并转为整数
1271
+    xy_coords = np.column_stack((points[:, 0], points[:, 1])).astype(int)
1272
+    z_values = points[:, 2]
1273
+
1274
+    print('z_values = ', z_values)
1275
+    
1276
+    # 创建结果数组
1277
+    adjusted_z = np.full_like(z_values, np.nan)
1278
+    
1279
+    # 对每个点进行处理
1280
+    for i, (x, y) in enumerate(xy_coords):
1281
+        # 直接查找对应的z值
1282
+        if (x, y) in z_pingjing_lookup and not np.isnan(z_values[i]):
1283
+            adjusted_z[i] = 1000 * z_values[i] - z_pingjing_lookup[(x, y)]
1284
+    
1285
+    adjusted_z = adjusted_z / 1000
1286
+    
1287
+    return adjusted_z
1288
+    
1289
+        
1290
+
1262 1291
 
1263 1292
 def post_process_with_grad(img_folder, n_cam, debug=False):
1264 1293
     total_point_grad = np.empty((0, 4))
1265 1294
     for i in range(n_cam):
1266 1295
         txt_file = img_folder + str(i) + '_gradient.txt'
1267 1296
         total_point_grad = np.vstack([total_point_grad, np.loadtxt(os.path.join(txt_file), delimiter=',')])
1297
+        #sub_point_grad = np.loadtxt(os.path.join(txt_file), delimiter=',')
1298
+
1299
+        #total_point_grad = sub_point_grad
1268 1300
      
1269 1301
     print(total_point_grad.shape)
1270 1302
 
@@ -1284,18 +1316,14 @@ def post_process_with_grad(img_folder, n_cam, debug=False):
1284 1316
     sum_x = np.sum(abs(x_gradient))
1285 1317
     sum_y = np.sum(abs(y_gradient))
1286 1318
 
1287
-    print('sum x grad = ', sum_x)
1288
-    print('sum y grad = ', sum_y)
1289
-
1290
-    print('mean x grad = ', mean_x_grad)
1291
-    print('mean y grad = ', mean_y_grad)
1292
-
1293 1319
     x_gradient =  (x_gradient - mean_x_grad)
1294 1320
     y_gradient =  (y_gradient - mean_y_grad)
1295 1321
     
1296 1322
     print('max min')
1297 1323
     print(max(x_gradient), min(x_gradient))
1298 1324
     print(max(y_gradient), min(y_gradient))
1325
+    print('mean x grad = ', np.mean(x_gradient))
1326
+    print('mean y grad = ', np.mean(y_gradient))
1299 1327
     print('max min')
1300 1328
     
1301 1329
     # 使用最小外接矩形,并填充梯度
@@ -1303,24 +1331,26 @@ def post_process_with_grad(img_folder, n_cam, debug=False):
1303 1331
     #print(x_grad_expand.shape, y_grad_expand.shape, x_expand.shape, y_expand.shape)
1304 1332
     # 调用 Southwell 重建函数
1305 1333
     #Z_reconstructed = southwell_integrate_smooth(x_grad_expand, y_grad_expand, x_expand, y_expand, (0, 0))
1306
-    print('x_gradient shape = ', x_gradient.shape)
1307
-    x_gradient = x_gradient.reshape(47, -1)
1308
-    y_gradient = y_gradient.reshape(47, -1)
1309
-    Z_reconstructed = np.cumsum(x_gradient, 1)*1 + np.cumsum(y_gradient, 0)*1
1310 1334
 
1311
-   
1335
+    #x_gradient = x_gradient.reshape(47, -1)
1336
+    #y_gradient = y_gradient.reshape(47, -1)
1337
+    Z_reconstructed = np.cumsum(x_grad_expand, 1)*1 + np.cumsum(y_grad_expand, 0)*1
1338
+    
1312 1339
     #print('y_grad_expand =', np.cumsum(y_grad_expand, 0))
1313 1340
     #print('max_z =', np.max(Z_reconstructed))
1314 1341
 
1315
-    #Z_reconstructed = poisson_reconstruct(x_gradient, y_gradient)
1342
+    #Z_reconstructed = poisson_reconstruct(x_grad_expand, y_grad_expand)
1316 1343
     print('min_z =', np.min(Z_reconstructed))
1317 1344
     print('max_z =', np.max(Z_reconstructed))
1318
-    print('x_grad_expand =', np.cumsum(x_grad_expand, 1))
1319 1345
     
1320
-    #z1_lookup = {(x1, y1): z1 for x1, y1, z1 in np.column_stack((x_expand.reshape(-1,1), y_expand.reshape(-1,1), Z_reconstructed.reshape(-1,1)))}
1346
+    z1_lookup = {(x1, y1): z1 for x1, y1, z1 in np.column_stack((x_expand.reshape(-1,1), y_expand.reshape(-1,1), Z_reconstructed.reshape(-1,1)))}
1347
+
1348
+    z1_values = np.array([z1_lookup.get((x, y), np.nan) for x, y in np.column_stack((x, y))])
1321 1349
 
1322
-    #z1_values = np.array([z1_lookup.get((x, y), np.nan) for x, y in np.column_stack((x, y))])
1350
+    #z1_values = adjust_point_clouds(np.column_stack((x,y,z1_values)))
1323 1351
     #fitted_points = post_process(np.column_stack((x,y,z1_values)), debug=0)
1352
+    
1353
+
1324 1354
 
1325 1355
 
1326 1356
     # 可视化梯度场
@@ -1328,13 +1358,9 @@ def post_process_with_grad(img_folder, n_cam, debug=False):
1328 1358
         fig = plt.figure()
1329 1359
         ax = fig.add_subplot(111, projection='3d')
1330 1360
 
1331
-        #提取 x, y, z 坐标
1332
-        # x_vals = fitted_points[:,0]
1333
-        # y_vals = fitted_points[:,1]
1334
-        # z_vals = fitted_points[:,2]
1335
-        x_vals = x.reshape(47, -1)
1336
-        y_vals = y.reshape(47, -1)
1337
-        z_vals = Z_reconstructed
1361
+        x_vals = x
1362
+        y_vals = y
1363
+        z_vals = z1_values
1338 1364
         # 绘制3D点云
1339 1365
         ax.scatter(x_vals, y_vals, z_vals, c=z_vals, cmap='viridis', marker='o')
1340 1366
 
@@ -1344,10 +1370,10 @@ def post_process_with_grad(img_folder, n_cam, debug=False):
1344 1370
         ax.set_zlabel('Z (mm)')
1345 1371
         ax.set_title('post_process 3D Point Cloud Visualization')
1346 1372
         plt.show()
1347
-    
1373
+        
1348 1374
     #return fitted_points
1349
-    #return np.column_stack((x,y,Z_reconstructed))
1350
-    return 0
1375
+    return np.column_stack((x,y,z1_values))
1376
+    #return 0
1351 1377
 
1352 1378
 
1353 1379
 
@@ -1931,6 +1957,23 @@ def mat2pkl():
1931 1957
 
1932 1958
 
1933 1959
 
1960
+
1961
+def rename_img():
1962
+    input_dir = 'D:\\data\\one_cam\\pad-test-1125\\test7-4\\20241213182756424\\'
1963
+    output_dir = 'D:\\data\\one_cam\\pad-test-1125\\test7-4\\20241213182756424\\'
1964
+   
1965
+    img_paths = glob.glob(os.path.join(input_dir,  "*.bmp"))
1966
+    img_paths.sort()
1967
+    os.makedirs(output_dir, exist_ok=True)
1968
+    i = 0
1969
+    for img in img_paths:
1970
+        shutil.move(img, output_dir + '0_frame_' + str(i) + '.bmp')
1971
+        i += 1
1972
+
1973
+    print(img_paths)
1974
+
1975
+
1976
+
1934 1977
 if __name__ == '__main__':
1935 1978
 
1936 1979
     # white_path = 'D:\\data\\four_cam\\1008_storage\\'
@@ -1942,5 +1985,9 @@ if __name__ == '__main__':
1942 1985
     # img_folder = 'D:\\data\\four_cam\\1008_storage\\20241008210412543'
1943 1986
     # point_cloud_path = os.path.join(img_folder, 'cloudpoint.txt')
1944 1987
     # show_cloud_point(point_cloud_path)
1945
-    img_diff()
1988
+    #img_diff()
1989
+    rename_img()
1990
+   
1991
+
1946 1992
 
1993
+    

+ 3 - 1
src/vis.py

@@ -10,7 +10,7 @@ from scipy.sparse.linalg import spsolve
10 10
 def plot_coords(world_points, camera_points, screen_points):
11 11
 
12 12
     # 选择每隔n个点进行采样
13
-    n = 1000
13
+    n = 200
14 14
     screen_points_sampled = screen_points[::n]
15 15
     camera_points_sampled = camera_points[::n]
16 16
     world_points_sampled = world_points[::n]
@@ -18,6 +18,8 @@ def plot_coords(world_points, camera_points, screen_points):
18 18
     fig = plt.figure()
19 19
     ax = fig.add_subplot(111, projection='3d')
20 20
 
21
+    
22
+
21 23
     # 绘制点
22 24
     ax.scatter(screen_points_sampled[:, 0], screen_points_sampled[:, 1], screen_points_sampled[:, 2], c='blue', label='Screen')
23 25
     ax.scatter(camera_points_sampled[:, 0], camera_points_sampled[:, 1], camera_points_sampled[:, 2], c='yellow', label='Camera')

+ 118 - 0
tools/0_generate_fringe.py

@@ -0,0 +1,118 @@
1
+import numpy as np
2
+import matplotlib.pyplot as plt
3
+import os
4
+
5
+def calculate_phase(images): 
6
+    images = np.array(images) 
7
+    shifts = len(images) 
8
+    phase = np.arctan2(np.sum(images * np.sin(np.linspace(0, 2 * np.pi, shifts)[:, np.newaxis, np.newaxis]), axis=0), 
9
+                       np.sum(images * np.cos(np.linspace(0, 2 * np.pi, shifts)[:, np.newaxis, np.newaxis]), axis=0)) 
10
+    return phase 
11
+def unwrap_phase(phase): 
12
+    unwrapped_phase = np.unwrap(phase, axis=0) 
13
+    unwrapped_phase = np.unwrap(unwrapped_phase, axis=1) 
14
+    return unwrapped_phase 
15
+
16
+def find_subpixel_extrema(phase_map): 
17
+    # 找到最大值和最小值的索引 
18
+    min_pos = np.unravel_index(np.argmin(phase_map), phase_map.shape) 
19
+    max_pos = np.unravel_index(np.argmax(phase_map), phase_map.shape) 
20
+    # 返回亚像素坐标 
21
+    return np.array(min_pos), np.array(max_pos)
22
+
23
+
24
+def generate_phase_shifted_patterns(width, height, shifts, orientation='horizontal', frequency=1):
25
+    """
26
+    Generates multiple phase-shifted fringe patterns, either horizontal or vertical, with phase shifting along the stripes.
27
+    :param width: image width
28
+    :param height: image height
29
+    :param shifts: number of phase shifts
30
+    :param orientation: 'horizontal' or 'vertical' indicating the direction of the stripes
31
+    :param frequency: number of full wave cycles across the dimension
32
+    :return: array of generated fringe patterns
33
+    """
34
+    patterns = []
35
+    for i in range(shifts):
36
+        shift = (i * 2 * np.pi) / shifts  # Calculating the phase shift for each pattern
37
+        if orientation == 'horizontal':
38
+            x = np.linspace(0, 2 * np.pi * frequency, width) + shift  # Apply frequency scaling on horizontal stripes
39
+            print('frequency, horizontal x = ', x, frequency)
40
+            print('step = ', 2 * np.pi * frequency )
41
+            pattern = np.sin(x)
42
+            print('pattern = ', pattern)
43
+            pattern = np.tile(pattern, (height, 1))  # Extend the pattern vertically across the height
44
+        elif orientation == 'vertical':
45
+            y = np.linspace(0, 2 * np.pi * frequency, height) + shift  # Apply frequency scaling on vertical stripes
46
+            pattern = np.sin(y)
47
+            pattern = np.tile(pattern.reshape(-1, 1), (1, width))  # Extend the pattern horizontally across the width
48
+        patterns.append(pattern)
49
+    return patterns
50
+
51
+# Image size parameters
52
+width = 1920
53
+height = 1200
54
+width = 800
55
+height = 400
56
+shifts = 4  # Number of phase shifts
57
+frequencies = [32, 8 ,1]  # Two different frequencies
58
+#frequencies = [32, 1]  # Two different frequencies
59
+
60
+# Ensure the directory exists
61
+directory = 'patterns3_honor_800_400'
62
+os.makedirs(directory, exist_ok=True)
63
+
64
+# Pattern naming and saving
65
+pattern_index = 0  # Start indexing from 0
66
+
67
+for freq in frequencies:
68
+    # Generate horizontal patterns first, then vertical for each frequency
69
+    horizontal_patterns = generate_phase_shifted_patterns(width, height, shifts, 'horizontal', freq)
70
+    vertical_patterns = generate_phase_shifted_patterns(width, height, shifts, 'vertical', freq)
71
+    all_patterns = horizontal_patterns + vertical_patterns  # Combine lists to maintain the order
72
+
73
+    # phase = np.arctan2(
74
+    #     np.sum(horizontal_patterns * np.sin(np.linspace(0, 2 * np.pi, shifts)[:, np.newaxis, np.newaxis]), axis=0),
75
+    #     np.sum(horizontal_patterns * np.cos(np.linspace(0, 2 * np.pi, shifts)[:, np.newaxis, np.newaxis]), axis=0)
76
+    #     )
77
+    # print('截断相位 = ', phase)
78
+    # y_coords, x_coords = np.meshgrid(np.arange(height), np.arange(width), indexing='ij')
79
+
80
+    # # 提取所有像素坐标和截断相位值
81
+    # coordinates = np.stack((y_coords, x_coords), axis=-1)  # 生成每个像素的坐标
82
+    # target_mask = (phase >= -1) & (phase <= 1)
83
+    # target_coordinates = coordinates[target_mask]  # 提取目标范围内的像素坐标
84
+
85
+    # print("对应的像素坐标:", target_coordinates)
86
+    # # 可视化截断相位
87
+    # plt.imshow(phase, cmap='jet')
88
+    # plt.colorbar(label='Phase (wrapped)')
89
+    # plt.scatter(target_coordinates[:, 1], target_coordinates[:, 0], color='red', s=1, label='Target Pixels')
90
+    # plt.title("Wrapped Phase with Target Pixels")
91
+    # plt.legend()
92
+    # plt.show()
93
+    # 计算相位图 
94
+    # phase_map = calculate_phase(horizontal_patterns) 
95
+    # # 展开相位图 
96
+    # unwrapped_phase_map = unwrap_phase(phase_map) 
97
+    # print('unwrapped_phase_map = ', unwrapped_phase_map.shape)
98
+    # # 找到亚像素级别的极值点坐标 
99
+    # min_subpixel, max_subpixel = find_subpixel_extrema(unwrapped_phase_map) 
100
+    # # 可视化相位图并标注极值点 
101
+    # plt.imshow(unwrapped_phase_map, cmap='jet') 
102
+    # plt.colorbar() 
103
+    # plt.scatter([min_subpixel[1]], [min_subpixel[0]], color='red', label='Min') 
104
+    # plt.scatter([max_subpixel[1]], [max_subpixel[0]], color='blue', label='Max') 
105
+    # plt.legend() 
106
+    # plt.title("Unwrapped Phase Map with Extremes") 
107
+    # plt.show() 
108
+    # print(f"Min subpixel position: {min_subpixel}") 
109
+    # print(f"Max subpixel position: {max_subpixel}")
110
+
111
+    # Save the patterns with the new naming convention in the specified directory
112
+    for pattern in all_patterns:
113
+        file_path = os.path.join(directory, f'pat{pattern_index:02}.bmp')
114
+        print('pattern shape =',pattern.shape)
115
+        print(pattern)
116
+        plt.imsave(file_path, pattern, cmap='gray')
117
+        pattern_index += 1
118
+

+ 97 - 49
tools/chessboard.py

@@ -1,59 +1,107 @@
1 1
 import numpy as np
2 2
 import cv2
3 3
 
4
-# Define the size of the image
5
-width, height = 1920, 1080
6
-
7
-# Define the size of the squares
8
-square_size = 80
9
-
10
-# Calculate the total size of the checkerboard
11
-checkerboard_width = 11 * square_size
12
-checkerboard_height = 8 * square_size
13
-
14
-# Create an empty image with white background
15
-image = np.ones((height, width, 3), dtype=np.uint8) * 255
16
-
17
-# Calculate the offsets to center the checkerboard
18
-x_offset = (width - checkerboard_width) // 2
19
-y_offset = (height - checkerboard_height) // 2
20
-
21
-# Create the checkerboard pattern within the offsets
22
-for y in range(8):
23
-    for x in range(11):
24
-        if (x + y) % 2 == 0:
25
-            top_left_y = y_offset + y * square_size
26
-            top_left_x = x_offset + x * square_size
27
-            bottom_right_y = y_offset + (y + 1) * square_size
28
-            bottom_right_x = x_offset + (x + 1) * square_size
29
-            cv2.rectangle(image, (top_left_x, top_left_y), (bottom_right_x, bottom_right_y), (0, 0, 0), -1)
4
+# # Define the size of the image
5
+# width, height = 2360, 1640
6
+
7
+# # Define the size of the squares
8
+# square_size = 40
9
+
10
+# square_cols = 25
11
+# square_rows = 16
12
+
13
+# # Calculate the total size of the checkerboard
14
+# checkerboard_width = square_cols * square_size
15
+# checkerboard_height = square_rows * square_size
16
+
17
+# # Create an empty image with white background
18
+# image = np.ones((height, width, 3), dtype=np.uint8) * 255
19
+
20
+# # Calculate the offsets to center the checkerboard
21
+# x_offset = (width - checkerboard_width) // 2
22
+# y_offset = (height - checkerboard_height) // 2
23
+
24
+# # Create the checkerboard pattern within the offsets
25
+# for y in range(square_rows):
26
+#     for x in range(square_cols):
27
+#         if (x + y) % 2 == 0:
28
+#             top_left_y = y_offset + y * square_size
29
+#             top_left_x = x_offset + x * square_size
30
+#             bottom_right_y = y_offset + (y + 1) * square_size
31
+#             bottom_right_x = x_offset + (x + 1) * square_size
32
+#             cv2.rectangle(image, (top_left_x, top_left_y), (bottom_right_x, bottom_right_y), (0, 0, 0), -1)
33
+
34
+# # Determine the coordinates for the circle just right of the bottom-right white square
35
+# right_most_white_x = x_offset + square_cols * square_size  # Rightmost column
36
+# right_most_white_y = y_offset + square_rows * square_size  # Bottommost row
37
+
38
+# # Check if the bottom-right square is white, if not adjust the x coordinate
39
+# if (square_cols + square_rows - 2) % 2 == 1:  # If it's a black square, move one square to the left
40
+#     right_most_white_x -= square_size
30 41
 
31
-# Determine the coordinates for the circle just right of the bottom-right white square
32
-right_most_white_x = x_offset + 10 * square_size  # Rightmost column
33
-right_most_white_y = y_offset + 7 * square_size  # Bottommost row
42
+# # Draw a circle to the right of the bottom-right white square
43
+# cv2.circle(image, (right_most_white_x + square_size + square_size // 2, right_most_white_y + square_size // 2), 20, (255, 0, 0), -1)
34 44
 
35
-# Check if the bottom-right square is white, if not adjust the x coordinate
36
-if (10 + 7) % 2 == 1:  # If it's a black square, move one square to the left
37
-    right_most_white_x -= square_size
45
+# # Save the image with the circle but without coordinates
46
+# cv2.imwrite('checkerboard_with_circle_no_coords.png', image)
38 47
 
39
-# Draw a circle to the right of the bottom-right white square
40
-cv2.circle(image, (right_most_white_x + square_size + square_size // 2, right_most_white_y + square_size // 2), 20, (255, 0, 0), -1)
48
+# # Create a copy of the image to draw the coordinates
49
+# # image_with_coords = image.copy()
41 50
 
42
-# Save the image with the circle but without coordinates
43
-cv2.imwrite('checkerboard_with_circle_no_coords.png', image)
51
+# # # Calculate and mark all the corners
52
+# # for y in range(9):  # Include one extra row for bottom edge points
53
+# #     for x in range(12):  # Include one extra column for right edge points
54
+# #         corner_x = x_offset + x * square_size
55
+# #         corner_y = y_offset + y * square_size
56
+# #         if corner_x < width and corner_y < height:  # Ensure points are within image bounds
57
+# #             cv2.circle(image_with_coords, (corner_x, corner_y), 5, (0, 255, 0), -1)  # Green dot
58
+# #             cv2.putText(image_with_coords, f"({corner_x}, {corner_y})", (corner_x + 5, corner_y - 10),
59
+# #                         cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)  # Red color, thickness = 2 for bold
60
+
61
+# # # Save the image with both the circle and coordinates
62
+# # cv2.imwrite('checkerboard_with_circle_and_coords.png', image_with_coords)
63
+
64
+
65
+# 图像大小
66
+image_width = 1920
67
+image_height = 1200
68
+
69
+# 棋盘格大小和数量
70
+grid_size = 30
71
+num_cols = 25
72
+num_rows = 16
73
+
74
+# 计算棋盘格区域的总大小
75
+board_width = grid_size * num_cols
76
+board_height = grid_size * num_rows
77
+
78
+# 计算棋盘格左上角的起始坐标,使其居中
79
+start_x = (image_width - board_width) // 2
80
+start_y = (image_height - board_height) // 2
81
+
82
+# 创建白色背景的图像
83
+image = np.ones((image_height, image_width, 3), dtype=np.uint8) * 255  # 白色背景
84
+
85
+# 绘制棋盘格
86
+for row in range(num_rows):
87
+    for col in range(num_cols):
88
+        # 计算每个格子的左上角和右下角坐标
89
+        top_left_x = start_x + col * grid_size
90
+        top_left_y = start_y + row * grid_size
91
+        bottom_right_x = top_left_x + grid_size - 1 
92
+        bottom_right_y = top_left_y + grid_size - 1
93
+        
94
+        # 棋盘格的颜色交替为黑白
95
+        if (row + col) % 2 == 0:
96
+            cv2.rectangle(image, (top_left_x, top_left_y), (bottom_right_x, bottom_right_y), (0, 0, 0), -1)
44 97
 
45
-# Create a copy of the image to draw the coordinates
46
-image_with_coords = image.copy()
98
+cv2.circle(image, (top_left_x + 2 * grid_size, top_left_y + 2 * grid_size), 20, (255, 0, 0), -1)
99
+            
47 100
 
48
-# Calculate and mark all the corners
49
-for y in range(9):  # Include one extra row for bottom edge points
50
-    for x in range(12):  # Include one extra column for right edge points
51
-        corner_x = x_offset + x * square_size
52
-        corner_y = y_offset + y * square_size
53
-        if corner_x < width and corner_y < height:  # Ensure points are within image bounds
54
-            cv2.circle(image_with_coords, (corner_x, corner_y), 5, (0, 255, 0), -1)  # Green dot
55
-            cv2.putText(image_with_coords, f"({corner_x}, {corner_y})", (corner_x + 5, corner_y - 10),
56
-                        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)  # Red color, thickness = 2 for bold
101
+# 显示图像
102
+cv2.imshow("Chessboard", image)
103
+cv2.waitKey(0)
104
+cv2.destroyAllWindows()
57 105
 
58
-# Save the image with both the circle and coordinates
59
-cv2.imwrite('checkerboard_with_circle_and_coords.png', image_with_coords)
106
+# 或者保存图像
107
+cv2.imwrite('chessboard_' + str(image_width) + '_' + str(image_height) + '.png', image)

+ 2 - 2
tools/generate_fringe.py

@@ -7,8 +7,8 @@ import matplotlib.pyplot as plt
7 7
 def parse_args():
8 8
     parser = argparse.ArgumentParser(description="")
9 9
     parser.add_argument("--shifts", type=int, default=4, help="相移步数")
10
-    parser.add_argument("--img_w", type=int, default=1920, help="图片宽度")
11
-    parser.add_argument("--img_h", type=int, default=1080, help="图片高度")
10
+    parser.add_argument("--img_w", type=int, default=2360, help="图片宽度")
11
+    parser.add_argument("--img_h", type=int, default=1640, help="图片高度")
12 12
     parser.add_argument("--freq_x", type=int, default=32, help="x轴相移频率")
13 13
     parser.add_argument("--freq_y", type=int, default=1, help="y轴相移频率")
14 14
     args = parser.parse_args()

File diff suppressed because it is too large
+ 1001 - 12
unit_test.py