# pmd.py
#encoding=utf-8
import time
import os
import sys
import pandas as pd
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
import glob
import numpy as np
np.random.seed(42)
from datetime import datetime
import json
from src.utils import get_meshgrid, get_world_points, get_camera_points, get_screen_points, write_point_cloud, get_white_mask, get_meshgrid_contour, post_process, find_notch, post_process_with_grad
from src.phase import extract_phase, unwrap_phase
from src.recons import reconstruction_cumsum, poisson_recons_with_smoothed_gradients
from src.pcl_postproc import smooth_pcl, align2ref
import matplotlib.pyplot as plt
from src.calibration import calibrate_world, calibrate_screen, map_screen_to_world
import argparse
from src.vis import plot_coords
import cv2
from src.eval import get_eval_result
import pickle
from collections import defaultdict
from scipy.io import loadmat, savemat
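
# Pipeline overview (the numbered stages below mirror the section prints in main()):
#   1. Camera calibration       -> calibrate_world
#   2. Screen calibration       -> calibrate_screen, map_screen_to_world
#   3. Phase extraction/unwrap  -> extract_phase, unwrap_phase (here replaced by MATLAB-exported .mat files)
#   4. Point positions + recons -> get_world_points / get_camera_points / get_screen_points, reconstruction_cumsum
#   5. Post-processing          -> post_process_with_grad, align2ref
#   6. Evaluation               -> get_eval_result (currently commented out)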


def pmdstart(config_path, img_folder):
    start_time = time.time()
    print(f"config_path: {config_path}")
    #time.sleep(15)
    main(config_path, img_folder)
    print(f"img_folder: {img_folder}")
    print('test pass')
    end_time = time.time()
    print(f"Time taken: {end_time - start_time} seconds")
    return True


def main(config_path, img_folder):
    current_dir = os.path.dirname(os.path.abspath(__file__))
    os.chdir(current_dir)
    cfg = json.load(open(config_path, 'r'))
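    # Keys read from the JSON config in this function: cam_num, num_freq, grid_spacing, d,
    # cam_params, screen_params, world_chessboard_size, world_square_size,
    # screen_chessboard_size, screen_square_size.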
    n_cam = cfg['cam_num']
    num_freq = cfg['num_freq']
    save_path = 'debug'
    debug = False
    grid_spacing = cfg['grid_spacing']
    smooth = False
    align = False
    denoise = False
    #cammera_img_path = 'D:\\data\\four_cam\\calibrate\\calibrate-1008'
    screen_img_path = 'D:\\data\\four_cam\\calibrate\\cam3-screen-1008'
    cammera_img_path = 'D:\\data\\four_cam\\calibrate\\calibrate-1016'
    #screen_img_path = 'D:\\data\\four_cam\\calibrate\\screen0920'
    print(f"Start time: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print("\n1. Camera calibration")
    preprocess_start = time.time()
    cam_para_path = os.path.join(current_dir, 'config', cfg['cam_params'])
    print('cam_para_path = ', cam_para_path)
    print('current_dir = ', current_dir)
    #cam_para_path = 'D:\\code\\pmd-python\\config\\cam_params.pkl'
    if os.path.exists(cam_para_path):
    #if False:
        with open(cam_para_path, 'rb') as pkl_file:
            cam_params = pickle.load(pkl_file)
    else:
        cam_params = []
        camera_subdir = [item for item in os.listdir(cammera_img_path) if os.path.isdir(os.path.join(cammera_img_path, item))]
        camera_subdir.sort()
        assert len(camera_subdir) == 4, f"found {len(camera_subdir)} cameras, should be 4"
        for i in range(n_cam):
            cam_img_path = glob.glob(os.path.join(cammera_img_path, camera_subdir[i], "*.bmp"))
            cam_img_path.sort()
            #print('cam_img_path = ', cam_img_path)
            cam_param_raw = calibrate_world(cam_img_path, i, cfg['world_chessboard_size'], cfg['world_square_size'], debug=0)
            cam_params.append(cam_param_raw)
        # with open(cam_para_path, 'wb') as pkl_file:
        #     pickle.dump(cam_params, pkl_file)
    print("\n2. Screen calibration")
    screen_cal_start = time.time()
    screen_img_path = glob.glob(os.path.join(screen_img_path, "*.bmp"))
    screen_para_path = os.path.join('config', cfg['screen_params'])
    if os.path.exists(screen_para_path):
    #if False:
        with open(screen_para_path, 'rb') as pkl_file:
            screen_params = pickle.load(pkl_file)[0]
    else:
        screen_params = calibrate_screen(screen_img_path, cam_params[3]['camera_matrix'], cam_params[3]['distortion_coefficients'], cfg['screen_chessboard_size'], cfg['screen_square_size'], debug=0)
        # with open(screen_para_path, 'wb') as pkl_file:
        #     pickle.dump([screen_params], pkl_file)
    screen_cal_end = time.time()
    print(f" Finished at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print(f" Elapsed: {screen_cal_end - screen_cal_start:.2f} s")
    print("\n3. Phase extraction and unwrapping")
    phase_start = time.time()
    x_uns, y_uns = [], []
    binary_masks = []
    for cam_id in range(n_cam):
        print('cam_id = ', cam_id)
        #white_path = os.path.join(img_folder, f'{cam_id}_frame_24.bmp')
        white_path = 'D:\\code\\code of PMD\\code of PMD\\picture\\white.bmp'
        # NOTE: only n_cam == 3 or n_cam == 1 is handled here; other camera counts leave `binary` undefined.
        if n_cam == 3:
            binary = get_white_mask(white_path, bin_thresh=12, debug=0)
        elif n_cam == 1:
            #angle_rad, binary = find_notch(white_path, n_cam, debug=0)
            binary = get_white_mask(white_path, bin_thresh=12, debug=0)
        binary_masks.append(binary)
        #phases = extract_phase(img_folder, cam_id, binary, cam_params[cam_id]['camera_matrix'], cam_params[cam_id]['distortion_coefficients'], num_freq=num_freq)
        #x_un, y_un = unwrap_phase(phases, save_path, num_freq, debug=0)
        #x_uns.append(x_un)
        #y_uns.append(y_un)
    phase_end = time.time()
    print(f" Finished at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print(f" Elapsed: {phase_end - phase_start:.2f} s")
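    # NOTE: with extract_phase / unwrap_phase commented out above, x_uns and y_uns are
    # still empty here; they are repopulated below from the MATLAB-exported phase maps.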
    # MATLAB alignment: the parameters computed above are discarded and replaced with
    # values exported from the MATLAB calibration session (see the commented loadmat calls).
    #cam_params = loadmat('D:\\code\\code of PMD\\code of PMD\\calibration\\calibrationSessionworld.mat')
    #screen_params = loadmat('D:\\code\\code of PMD\\code of PMD\\calibration\\calibrationSessionscreen.mat')
    #print('cam_params = ', cam_params)
    #print('screen_params = ', screen_params)
    cam_params = []
    screen_params = []
    mtx = np.array([[6944.89018564351, 0, 2709.44699784446], [0, 6942.91962497637, 1882.05677580185], [0, 0, 1]])
    R = cv2.Rodrigues(np.array([0.0732148059333282, -0.265812028310130, -0.0532640604086260]).reshape(3, 1))[0]
    T = np.array([-15.179977, -42.247126, 246.92182]).reshape(3, 1)
    cam_calibration_data = {
        'camera_matrix': mtx,
        'distortion_coefficients': 0,
        'rotation_matrix': R,
        'translation_vector': T,
        'error': 0
    }
    mtx = np.array([[6944.89018564351, 0, 2709.44699784446], [0, 6942.91962497637, 1882.05677580185], [0, 0, 1]])
    R = np.array([[0.96514996, -0.042578806, 0.25821037], [0.029285983, 0.99805061, 0.055111798], [-0.26005361, -0.045629206, 0.96451547]])
    T = np.array([30.1970, -49.3108, 507.4424]).reshape(3, 1)
    screen_calibration_data = {
        'screen_matrix': mtx,
        'screen_distortion': 0,
        'screen_rotation_matrix': R,
        'screen_translation_vector': T,
        'screen_error': 0
    }
    cam_params.append(cam_calibration_data)
    #screen_params.append(screen_calibration_data)
    screen_params = screen_calibration_data
    x_un = loadmat('D:\\code\\code of PMD\\code of PMD\\phase_information\\x_phase_unwrapped.mat')
    y_un = loadmat('D:\\code\\code of PMD\\code of PMD\\phase_information\\y_phase_unwrapped.mat')
    x_un = x_un['x_phase_unwrapped']
    y_un = y_un['y_phase_unwrapped']
    x_uns, y_uns = [], []
    x_uns.append(x_un)
    y_uns.append(y_un)
    # Optional debug visualization of the unwrapped phase maps:
    #fig, axes = plt.subplots(1, 2, figsize=(12, 6))
    # First subplot
    # cax0 = axes[0].imshow(x_un)
    # axes[0].set_title('x_phase_unwrapped')
    # axes[0].set_xlabel('X Axis')
    # axes[0].set_ylabel('Y Axis')
    # fig.colorbar(cax0, ax=axes[0])
    # # Second subplot
    # cax1 = axes[1].imshow(y_un)
    # axes[1].set_title('y_phase_unwrapped')
    # axes[1].set_xlabel('X Axis')
    # axes[1].set_ylabel('Y Axis')
    # fig.colorbar(cax1, ax=axes[1])
    # # Adjust the spacing between subplots
    # plt.tight_layout()
    #plt.savefig(os.path.join(save_path, "phase_unwrapped.png"))
    #plt.show()
    #return 0
    print('screen_params = ', screen_params)
    if n_cam == 1:
        screen_to_world = map_screen_to_world(screen_params, cam_params[0])
    elif n_cam == 4:
        screen_to_world = map_screen_to_world(screen_params, cam_params[3])
    else:
        print('camera number should be 1 or 4')
        return 0

    print("\n4. Point positions in the different coordinate systems")
    get_point_start = time.time()
    total_cloud_point = np.empty((0, 3))
    total_gradient = np.empty((0, 4))
    total_boundary_point = np.empty((0, 3))
    for i in range(n_cam):
        print('cam_id = ', i)
        contours_point = get_meshgrid_contour(binary_masks[i], save_path, debug=False)
        world_points, world_points_boundary, world_points_boundary_3 = get_world_points(contours_point, cam_params[i], i, grid_spacing, cfg['d'], erosion_pixels=2, debug=0)
        camera_points, u_p, v_p = get_camera_points(world_points, cam_params[i], save_path, i, debug=0)
        point_data = {'x_w': world_points[:, 0], 'y_w': world_points[:, 1], 'z_w': world_points[:, 2],
                      'x_c': camera_points[:, 0], 'y_c': camera_points[:, 1], 'z_c': camera_points[:, 2],
                      'u_p': u_p, 'v_p': v_p}
        screen_points = get_screen_points(point_data, x_uns[i], y_uns[i], screen_params, screen_to_world, cfg, save_path, i, debug=debug)
        #plot_coords(world_points, camera_points, screen_points)
        z_raw, gradient_xy = reconstruction_cumsum(world_points, camera_points, screen_points, debug=0, smooth=smooth, align=align, denoise=denoise)
        z_raw_xy = np.round(z_raw[:, :2]).astype(int)
        # # Create a boolean mask, initialized to True
        # mask = np.ones(len(z_raw_xy), dtype=bool)
        # # For each boundary point, mark the rows that match it
        # for boundary_point in world_points_boundary:
        #     # Mark rows whose xy coordinates equal the current boundary point
        #     mask &= ~np.all(z_raw_xy == boundary_point[:2], axis=1)
        # # Keep only the non-boundary points
        # non_boundary_points = z_raw[mask]
        # non_boundary_aligned, rotation_matrix = align2ref(non_boundary_points)
        # Create a boolean mask, initialized to True
        mask = np.ones(len(z_raw_xy), dtype=bool)
        # For each boundary point, mark the rows that match it
        for boundary_point in world_points_boundary_3:
            # Mark rows whose xy coordinates equal the current boundary point
            mask &= ~np.all(z_raw_xy == boundary_point[:2], axis=1)
        # Keep only the non-boundary points
        non_boundary_points = z_raw[mask]
        # z_raw_aligned = non_boundary_points @ rotation_matrix.T
        # z_raw_aligned[:,2] = z_raw_aligned[:,2] - np.mean(z_raw_aligned[:, 2])
        #z_raw_aligned = non_boundary_points
        z_raw_aligned, _ = align2ref(non_boundary_points)
        #non_boundary_points = smoothed
        write_point_cloud(os.path.join(img_folder, str(i) + '_cloudpoint.txt'), np.round(z_raw_aligned[:, 0]), np.round(z_raw_aligned[:, 1]), z_raw_aligned[:, 2])
        np.savetxt(os.path.join(img_folder, str(i) + '_gradient.txt'), gradient_xy, fmt='%.10f', delimiter=',')
        total_cloud_point = np.vstack([total_cloud_point, np.column_stack((z_raw_aligned[:, 0], z_raw_aligned[:, 1], z_raw_aligned[:, 2]))])
        total_gradient = np.vstack([total_gradient, np.column_stack((gradient_xy[:, 0], gradient_xy[:, 1], gradient_xy[:, 2], gradient_xy[:, 3]))])
    if 0:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        # Extract the x, y, z coordinates
        x_vals = total_cloud_point[:, 0]
        y_vals = total_cloud_point[:, 1]
        z_vals = total_cloud_point[:, 2]
        # Plot the 3D point cloud
        ax.scatter(x_vals, y_vals, z_vals, c=z_vals, cmap='viridis', marker='o')
        # Set axis labels and title
        ax.set_xlabel('X (mm)')
        ax.set_ylabel('Y (mm)')
        ax.set_zlabel('Z (mm)')
        ax.set_title('z_raw 3D Point Cloud Visualization gradient')
        plt.show()
        # fig = plt.figure()
        # ax = fig.add_subplot(111, projection='3d')
        # smoothed_total = smooth_pcl(total_cloud_point, 3)
        # # Extract the x, y, z coordinates
        # x_vals = smoothed_total[:, 0]
        # y_vals = smoothed_total[:, 1]
        # z_vals = smoothed_total[:, 2]
        # # Plot the 3D point cloud
        # ax.scatter(x_vals, y_vals, z_vals, c=z_vals, cmap='viridis', marker='o')
        # # Set axis labels and title
        # ax.set_xlabel('X (mm)')
        # ax.set_ylabel('Y (mm)')
        # ax.set_zlabel('Z (mm)')
        # ax.set_title('smoothed 3D Point Cloud Visualization')
    get_point_end = time.time()
    print(f" Finished at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print(f" Elapsed: {get_point_end - get_point_start:.2f} s")
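    # At this point total_cloud_point holds the stacked (x, y, z) points from all cameras,
    # and total_gradient the matching 4-column gradient rows written to *_gradient.txt above.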
    print("\n5. Post-processing")
    post_process_start = time.time()
    total_cloud_point[:, 0] = np.round(total_cloud_point[:, 0])
    total_cloud_point[:, 1] = np.round(total_cloud_point[:, 1])
    #fitted_points = post_process(total_cloud_point, debug=0)
    fitted_points = post_process_with_grad(img_folder, n_cam, 1)
    #fitted_points = total_cloud_point
    #align_fitted, _ = align2ref(fitted_points)
    align_fitted = fitted_points
    write_point_cloud(os.path.join(img_folder, 'cloudpoint.txt'), np.round(align_fitted[:, 0] - np.mean(align_fitted[:, 0])), np.round(align_fitted[:, 1] - np.mean(align_fitted[:, 1])), align_fitted[:, 2] - np.min(align_fitted[:, 2]))
    if 1:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        # Extract the x, y, z coordinates
        x_vals = np.round(fitted_points[:, 0] - np.mean(fitted_points[:, 0]))
        y_vals = np.round(fitted_points[:, 1] - np.mean(fitted_points[:, 1]))
        z_vals = align_fitted[:, 2] - np.min(align_fitted[:, 2])
        # NOTE: the centered values above are overridden by the raw coordinates below.
        x_vals = fitted_points[:, 0]
        y_vals = fitted_points[:, 1]
        z_vals = fitted_points[:, 2]
        # Plot the 3D point cloud
        ax.scatter(x_vals, y_vals, z_vals, c=z_vals, cmap='viridis', marker='o')
        # Set axis labels and title
        ax.set_xlabel('X (mm)')
        ax.set_ylabel('Y (mm)')
        ax.set_zlabel('Z (mm)')
        ax.set_title('post 3D Point Cloud Visualization')
        plt.show()
    post_process_end = time.time()
    print(f" Finished at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print(f" Elapsed: {post_process_end - post_process_start:.2f} s")
    print("\n6. Evaluation")
    eval_start = time.time()
    point_cloud_path = os.path.join(img_folder, 'cloudpoint.txt')
    json_path = os.path.join(img_folder, 'result.json')
    theta_notch = 0
    #get_eval_result(point_cloud_path, json_path, theta_notch, 0)
    eval_end = time.time()
    print(f" Finished at: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print(f" Elapsed: {eval_end - eval_start:.2f} s")
    return True


if __name__ == '__main__':
    config_path = 'config\\cfg_3freq_wafer_matlab.json'
    #img_folder = 'D:\\data\\four_cam\\1008_storage\\pingjing_20241017092315228\\'  #'D:\\data\\four_cam\\betone_1011\\20241011142348762-1'
    # img_folder = 'D:\\huchao\\inspect_server_202409241013_py\\storage\\20241023193453708\\'
    img_folder = 'D:\\data\\one_cam\\pingjing_20241024154405250\\'
    #img_folder = 'D:\\data\\one_cam\\betone-20241025095352783\\'
    #img_folder = 'D:\\data\\one_cam\\20241025113857041-neg\\'
    json_path = os.path.join(img_folder, 'result.json')
    # Per-dataset notes (dataset: value, x max, y max):
    #   20241011142348762-1  0.60    x max = 653.5925    y max = 692.1735648
    #   20241011142901251-2  0.295   x max = 251.0775    y max = 293.05856482
    #   20241011143925746-3  0.221   x max = 184.7275    y max = 239.00919669
    #   20241011144821292-4  0.346   x max = 342.2525    y max = 386.92087185
    pmdstart(config_path, img_folder)
    #fitted_points = post_process_with_grad(img_folder, 1)
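
    # Minimal CLI sketch (an assumption, not part of the original flow, which hard-codes the
    # paths above; argparse is already imported at the top of this file):
    #
    #   parser = argparse.ArgumentParser(description='Run the PMD reconstruction pipeline')
    #   parser.add_argument('--config', default=config_path, help='path to the JSON config file')
    #   parser.add_argument('--img-folder', default=img_folder, help='folder containing the captured images')
    #   args = parser.parse_args()
    #   pmdstart(args.config, args.img_folder)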