// ArithPmd.cpp

#include "ArithPmd.h"
#include <fstream>
#define _USE_MATH_DEFINES
#include <math.h>
#include <Eigen/LU>
#include <Eigen/Eigenvalues>
#include <pcl/filters/voxel_grid.h>

ArithPmd::ArithPmd()
{
}

ArithPmd::~ArithPmd()
{
}

// Expand the image by a one-pixel border on every side
static void expand_border(cv::InputOutputArray _src)
{
    cv::Mat src = cv::Mat::zeros(_src.size() + cv::Size(2, 2), _src.type());
    _src.getMat().copyTo(src(cv::Rect(1, 1, _src.size().width, _src.size().height)));
    _src.assign(src);
}

// Remove the one-pixel border again
static void remove_border(cv::InputOutputArray _src)
{
    cv::Mat src;
    _src.getMat()(cv::Rect(1, 1, _src.size().width - 2, _src.size().height - 2)).copyTo(src);
    _src.assign(src);
}

// Hole filling (flood fill from the border)
static void fill_hole(cv::InputArray _src, cv::OutputArray _dst) {
    cv::Mat src = _src.getMat();
    cv::Mat dst = src.clone();
    expand_border(dst);
    cv::floodFill(dst, cv::Point(0, 0), cv::Scalar(255));
    remove_border(dst);
    dst = src | (~dst);
    _dst.assign(dst);
}

// Remove small connected regions
static void remove_small_region(cv::InputArray _src, cv::OutputArray _dst, int area_limit, int check_mode = 1, int neighbor_mode = 1) {
    cv::Mat src = _src.getMat();
    cv::Mat dst = _dst.getMat();
    int remove_count = 0;
    cv::Mat point_label = cv::Mat::zeros(src.size(), CV_8UC1);
    // check_mode == 1: remove white points in small connected regions
    if (check_mode == 1) {
        for (int i = 0; i < src.rows; ++i) {
            for (int j = 0; j < src.cols; ++j) {
                if (src.at<uchar>(i, j) < 10) {
                    point_label.at<uchar>(i, j) = 3;
                }
            }
        }
    }
    else
    {
        // otherwise: remove small holes (dark regions)
        for (int i = 0; i < src.rows; i++)
        {
            for (int j = 0; j < src.cols; j++)
            {
                if (src.at<uchar>(i, j) > 10)
                {
                    point_label.at<uchar>(i, j) = 3;
                }
            }
        }
    }
    std::vector<cv::Point2i> neighbor_pos;
    neighbor_pos.push_back(cv::Point2i(-1, 0));
    neighbor_pos.push_back(cv::Point2i(1, 0));
    neighbor_pos.push_back(cv::Point2i(0, -1));
    neighbor_pos.push_back(cv::Point2i(0, 1));
    if (neighbor_mode == 1) {
        neighbor_pos.push_back(cv::Point2i(-1, -1));
        neighbor_pos.push_back(cv::Point2i(-1, 1));
        neighbor_pos.push_back(cv::Point2i(1, -1));
        neighbor_pos.push_back(cv::Point2i(1, 1));
    }
    int neighbor_count = 4 + 4 * neighbor_mode;
    int currx = 0, curry = 0;
    for (int i = 0; i < src.rows; i++)
    {
        for (int j = 0; j < src.cols; j++)
        {
            if (point_label.at<uchar>(i, j) == 0)
            {
                std::vector<cv::Point2i> grow_buffer;
                grow_buffer.push_back(cv::Point2i(j, i));
                point_label.at<uchar>(i, j) = 1;
                int check_result = 0;
                for (int z = 0; z < grow_buffer.size(); z++)
                {
                    for (int q = 0; q < neighbor_count; q++)
                    {
                        currx = grow_buffer.at(z).x + neighbor_pos.at(q).x;
                        curry = grow_buffer.at(z).y + neighbor_pos.at(q).y;
                        // Stay inside the image bounds
                        if (currx >= 0 && currx < src.cols && curry >= 0 && curry < src.rows)
                        {
                            if (point_label.at<uchar>(curry, currx) == 0)
                            {
                                grow_buffer.push_back(cv::Point2i(currx, curry));
                                point_label.at<uchar>(curry, currx) = 1;
                            }
                        }
                    }
                }
                if (grow_buffer.size() > area_limit)
                    check_result = 2;
                else
                {
                    check_result = 1;
                    remove_count++;
                }
                for (int z = 0; z < grow_buffer.size(); z++)
                {
                    currx = grow_buffer.at(z).x;
                    curry = grow_buffer.at(z).y;
                    point_label.at<uchar>(curry, currx) += check_result;
                }
            }
        }
    }
    check_mode = 255 * (1 - check_mode);
    // Invert regions whose area is below the limit
    for (int i = 0; i < src.rows; ++i)
    {
        for (int j = 0; j < src.cols; ++j)
        {
            if (point_label.at<uchar>(i, j) == 2)
            {
                dst.at<uchar>(i, j) = check_mode;
            }
            else if (point_label.at<uchar>(i, j) == 3)
            {
                dst.at<uchar>(i, j) = src.at<uchar>(i, j);
            }
        }
    }
    _dst.assign(dst);
}
static void preprocess(cv::InputArray _src, cv::OutputArray _dst, int area_threshold)
{
    cv::Mat gray_image = _src.getMat();
    cv::Mat thresh_image, little_image, fill_image;
    // cvtColor(src, gray_image, cv::COLOR_BGR2GRAY);
    threshold(gray_image, thresh_image, 85, 255, cv::THRESH_BINARY);
    little_image = cv::Mat(thresh_image.size(), CV_8UC1, cv::Scalar(0));
    remove_small_region(thresh_image, little_image, area_threshold);
    fill_hole(little_image, fill_image);
    // cv::Mat mask;
    // threshold(fill_image, mask, 0, 1, cv::THRESH_BINARY);
    // cv::imwrite("mask.bmp", mask * 255);
    _dst.assign(fill_image);
}
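
// Hedged usage sketch (illustration only, not part of the pipeline): running the mask
// preprocessing on a single grayscale fringe image; "fringe_00.bmp" and the 200 px area
// threshold are hypothetical.
/*
    cv::Mat gray = cv::imread("fringe_00.bmp", cv::IMREAD_GRAYSCALE);
    cv::Mat object_mask;
    preprocess(gray, object_mask, 200);   // binarize at 85, drop blobs < 200 px, fill holes
*/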
bool save_matrix_as_img(const MatrixXf& m, std::string path) {
    MatrixXf t = (m.array() - m.minCoeff()).matrix();
    t = t * 255 / t.maxCoeff();
    Mat img;
    cv::eigen2cv(t, img);
    cv::imwrite(path, img);
    return true;
}

bool save_matrix_as_txt(const MatrixXf& m, std::string path, bool add_index, int orientation) {
    std::ofstream fout(path);
    MatrixXf m2 = m;
    if (add_index) {
        if (orientation == 0) {
            VectorXf t(m2.cols());
            for (uint32_t i = 0; i < t.rows(); ++i)
                t(i) = i;
            m2.row(0) = t.transpose();
        }
        else {
            VectorXf t(m.rows());
            for (uint32_t i = 0; i < t.rows(); ++i)
                t(i) = i;
            m2.col(0) = t;
        }
    }
    fout << m2;
    fout.close();
    return true;
}

bool save_matrix_as_ply(const MatrixXf& m, std::string path) {
    std::ofstream fout(path);
    fout << "ply\n"
            "format ascii 1.0\n"
            "element vertex " << m.cols() << "\n"
            "property float32 x\n"
            "property float32 y\n"
            "property float32 z\n"
            "end_header\n";
    for (uint32_t j = 0; j < m.cols(); j++)
        fout << m(0, j) << " " << m(1, j) << " " << m(2, j) << "\n";
    fout.close();
    return true;
}

// Append a row of ones, turning a 3xN point matrix into 4xN homogeneous coordinates
MatrixXf matrix_to_home(const Eigen::MatrixXf& origin) {
    MatrixXf m(origin.rows() + 1, origin.cols());
    m.block(0, 0, origin.rows(), origin.cols()) = origin;
    m.row(origin.rows()) = VectorXf::Constant(origin.cols(), 1).transpose();
    return m;
}

bool configCameraRay(const Eigen::Matrix3f& cameraMatrix, const cv::Size& imageSize, float Z, MatrixXf& camera_rays) {
    const uint16_t width = imageSize.width;
    const uint16_t height = imageSize.height;
    // Initialize camera_rays with the pixel coordinates in homogeneous form (third row is 1)
    camera_rays.resize(3, height * width);
    {
        // row 1
        VectorXf t1(width);
        for (uint16_t i = 0; i < width; ++i)
            t1(i) = i;
        for (uint16_t i = 0; i < height; ++i)
            camera_rays.block(0, i * width, 1, width) = t1.transpose();
        // row 2
        for (uint16_t i = 0; i < height; i++)
            camera_rays.block(1, i * width, 1, width) = VectorXf::Constant(width, i).transpose();
        // row 3
        camera_rays.row(2) = VectorXf::Constant(height * width, 1).transpose();
    }
    // Undo the normalization (scale by the depth Z)
    if (Z != 1.0)
        camera_rays *= Z;
    // Ignore lens distortion and apply the inverse camera intrinsics
    camera_rays = cameraMatrix.inverse() * camera_rays;
    return true;
}
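
// Hedged sketch (illustration only): each column of camera_rays is the back-projected ray
// r = K^{-1} * [u, v, 1]^T scaled by Z, stored in pixel order v * width + u. The intrinsics
// and image size below are hypothetical values.
/*
    Eigen::Matrix3f K;
    K << 2000,    0,  640,
            0, 2000,  512,
            0,    0,    1;
    MatrixXf rays;
    configCameraRay(K, cv::Size(1280, 1024), 1.0f, rays);     // rays is 3 x (1280 * 1024)
    Eigen::Vector3f r = rays.col(512 * 1280 + 640);            // ray through pixel (u = 640, v = 512)
*/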
bool configRefPlane(const Eigen::Vector3f& plane_point, const Eigen::Vector3f& plane_normal, const Eigen::MatrixXf& camera_rays,
    Eigen::MatrixXf& refPlane) {
    Vector3f M(0, 0, 0);
    float t1 = (plane_point - M).transpose() * plane_normal;
    ArrayXXf t = t1 / (plane_normal.transpose() * camera_rays).array();
    refPlane.resize(camera_rays.rows(), camera_rays.cols());
    for (int i = 0; i < camera_rays.rows(); ++i)
        refPlane.row(i) = camera_rays.row(i).array() * t.array();
    if (M(0) != 0 || M(1) != 0 || M(2) != 0)
        refPlane = refPlane.colwise() + M;
    return true;
}
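
// Geometry behind configRefPlane (comment only): a ray X = M + t * d from the camera
// center M meets the plane through point P with normal n where (X - P) . n = 0, i.e.
//   t = ((P - M) . n) / (d . n),
// and each column of refPlane stores the intersection M + t * d for one camera ray.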
bool configScreenPixelPos(const Screen& S, const Matrix4f& WST, MatrixXf& screen_pix_pos) {
    screen_pix_pos.resize(3, S.rows * S.cols);
    // Use the center of each pixel, e.g. the first pixel is (0.5, 0.5)
    // row 1
    VectorXf t1(S.cols);
    for (uint16_t i = 0; i < S.cols; ++i)
        t1(i) = i + 0.5;
    for (uint16_t i = 0; i < S.rows; ++i)
        screen_pix_pos.block(0, i * S.cols, 1, S.cols) = t1.transpose();
    screen_pix_pos.row(0) = S.width / S.cols * screen_pix_pos.row(0);
    // row 2
    for (uint16_t i = 0; i < S.rows; ++i)
        screen_pix_pos.block(1, i * S.cols, 1, S.cols) = VectorXf::Constant(S.cols, (i + 0.5) * S.height / S.rows).transpose();
    // row 3
    screen_pix_pos.row(2) = VectorXf::Constant(screen_pix_pos.cols(), 0).transpose();
    // Transform from the screen coordinate system to the world coordinate system
    MatrixXf t2 = matrix_to_home(screen_pix_pos);
    screen_pix_pos = (WST * t2).block(0, 0, 3, t2.cols());
    return true;
}

bool phase_shifting(const std::vector<MatrixXf>& imgs, std::vector<MatrixXf>& wraped_ps_maps) {
    // atan2(I1 - I3, I0 - I2)
    // Loop over the four groups, not over the four images within a group
    for (uint16_t i = 0; i < 4; i++) {
        wraped_ps_maps[i] = MatrixXf(imgs[0].rows(), imgs[0].cols());
        MatrixXf tem0, tem1;
        tem0 = imgs[i * 4 + 1] - imgs[i * 4 + 3];
        tem1 = imgs[i * 4] - imgs[i * 4 + 2];
        wraped_ps_maps[i] = tem0.binaryExpr(tem1, [](float a, float b) {
            return atan2(a, b);
        });
    }
    return true;
}
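
// Four-step phase shifting (comment only, assuming fringes of the form
// I_k = A + B * cos(phi - k * pi/2), k = 0..3): the wrapped phase is
//   phi = atan2(I1 - I3, I0 - I2),
// which is exactly what binaryExpr evaluates per pixel above. The 16 input images are
// 4 groups x 4 phase steps (high/low frequency in x and y).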
bool phase_unwrapping(const vector<MatrixXf>& wraped_ps_maps, uint16_t period_width, uint16_t period_height, vector<MatrixXf>& unwrap_ps_maps) {
    // The fringes were generated with the phase shifted down by pi, so add pi back
    // (referenced to the low-frequency fringe; without the shift the phase would be piecewise)
    // to obtain the true phase
    for (int i = 0; i < 2; ++i) {
        uint16_t period = i == 0 ? period_width : period_height;
        unwrap_ps_maps[i] = ((period * (wraped_ps_maps[i + 2].array() + M_PI).matrix() - (wraped_ps_maps[i].array() + M_PI).matrix()) /
            (2 * M_PI)).array().round().matrix(); // compute the fringe order K
        unwrap_ps_maps[i] = (wraped_ps_maps[i].array() + M_PI).matrix() + 2 * M_PI * unwrap_ps_maps[i]; // phi + 2*pi*k
    }
    return true;
}
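
// Temporal (dual-frequency) unwrapping (comment only): maps 2 and 3 hold the single-period
// reference phase phi_ref, maps 0 and 1 the T-period wrapped phase phi (both shifted by +pi
// so they lie in [0, 2*pi)). The fringe order and unwrapped phase are
//   k = round((T * phi_ref - phi) / (2 * pi)),   Phi = phi + 2 * pi * k.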
bool screen_camera_phase_match(const vector<MatrixXf>& upm, const Screen& s, uint16_t pw, uint16_t ph, const MatrixXf& spp, MatrixXf& res) {
    uint32_t img_size = upm[0].cols() * upm[0].rows();
    MatrixXfR coor_x_f = upm[0] * s.cols / (2 * pw * M_PI);
    MatrixXfR coor_y_f = upm[1] * s.rows / (2 * ph * M_PI);
    coor_x_f.resize(img_size, 1);
    coor_y_f.resize(img_size, 1);
    VectorXUI coor_x = coor_x_f.array().round().matrix().cast<uint32_t>();
    VectorXUI coor_y = coor_y_f.array().round().matrix().cast<uint32_t>();
    res.resize(3, img_size);
    for (uint32_t i = 0; i < coor_x.rows(); ++i) {
        if (coor_x(i) >= s.cols)
            coor_x(i) = s.cols - 1;
        if (coor_y(i) >= s.rows)
            coor_y(i) = s.rows - 1;
        res.col(i) = spp.col(coor_y(i) * s.cols + coor_x(i));
    }
    return true;
}

bool slope_calculate(const Vector3f& camera_world, const MatrixXf& refPlane, const MatrixXf& screen_camera_phase_match_pos,
    std::vector<MatrixXfR>& slope) {
    ArrayXXf dm2c, dm2s, M_C(3, refPlane.cols()), M_S;
    if (camera_world.cols() == 1) {
        M_C.row(0) = refPlane.row(0).array() - camera_world(0, 0);
        M_C.row(1) = refPlane.row(1).array() - camera_world(1, 0);
        M_C.row(2) = refPlane.row(2).array() - camera_world(2, 0);
    }
    else
        M_C = (refPlane - camera_world).array();
    M_S = (refPlane - screen_camera_phase_match_pos).array();
    dm2c = M_C.matrix().colwise().norm().array();
    dm2s = M_S.matrix().colwise().norm().array();
    ArrayXXf denominator = -(M_C.row(2) / dm2c + M_S.row(2) / dm2s);
    for (uint32_t i = 0; i < 2; ++i)
        slope[i] = ((M_C.row(i) / dm2c + M_S.row(i) / dm2s) / denominator).matrix();
    return true;
}
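
// PMD slope formula (comment only): for a surface point M, camera center C and matched
// screen point S, the law of reflection gives the local gradients
//   dz/dx = -((M-C)_x/|M-C| + (M-S)_x/|M-S|) / ((M-C)_z/|M-C| + (M-S)_z/|M-S|),
// and analogously for dz/dy; slope[0] and slope[1] store these two components per pixel.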
bool modal_reconstruction(const MatrixXf& sx, const MatrixXf& sy, MatrixXfR& Z, const std::vector<float>& rg, uint32_t terms, const MatrixXf& mask) {
    // Stack the slopes into a 2*M*N x 1 vector
    MatrixXfR s(sx.rows() * 2, sx.cols());
    s.block(0, 0, sx.rows(), sx.cols()) = sx;
    s.block(sx.rows(), 0, sy.rows(), sy.cols()) = sy;
    s.resize(s.rows() * s.cols(), 1);
    // Polynomial matrices
    MatrixXfR slope_p(s.rows(), terms * terms); // slope polynomial matrix
    MatrixXfR Z_p(s.rows() / 2, terms * terms); // height polynomial matrix
    double xa = rg[0], xb = rg[1], ya = rg[2], yb = rg[3];
    for (uint32_t i = 0; i < sx.rows(); ++i) {
        for (uint32_t j = 0; j < sx.cols(); ++j) {
            if (mask(i, j) == 0) continue; // skip pixels where the mask is 0
            double y = (ya + yb - 2 * (yb - (yb - ya) / (sx.rows() - 1) * i)) / (ya - yb);
            double x = (xa + xb - 2 * ((xb - xa) / (sx.cols() - 1) * j + xa)) / (xa - xb);
            // The polynomial has terms * terms entries; two nested loops, x inside y.
            // Dy_2 denotes D_(n-2).
            float Dy_2 = 1, Dy_1 = y, dy_1 = 0;
            for (uint32_t n = 0; n < terms; ++n) {
                float Dy, dy;
                if (n == 0) {
                    Dy = 1;
                    dy = 0;
                }
                else if (n == 1) {
                    Dy = y;
                    dy = n * Dy_1 + y * dy_1;
                }
                else {
                    Dy = ((2 * n - 1) * y * Dy_1 - (n - 1) * Dy_2) / n;
                    dy = n * Dy_1 + y * dy_1;
                }
                float Dx_2 = 1, Dx_1 = x, dx_1 = 0;
                for (uint32_t m = 0; m < terms; ++m) {
                    float Dx, dx;
                    if (m == 0) {
                        Dx = 1;
                        dx = 0;
                    }
                    else if (m == 1) {
                        Dx = x;
                        dx = m * Dx_1 + x * dx_1;
                    }
                    else {
                        Dx = ((2 * m - 1) * x * Dx_1 - (m - 1) * Dx_2) / m;
                        dx = m * Dx_1 + x * dx_1;
                    }
                    slope_p(i * sx.cols() + j, n * terms + m) = dx * Dy;
                    slope_p(sx.cols() * sx.rows() + i * sx.cols() + j, n * terms + m) = Dx * dy;
                    Z_p(i * sx.cols() + j, n * terms + m) = Dx * Dy;
                    Dx_2 = Dx_1;
                    Dx_1 = Dx;
                    dx_1 = dx;
                }
                Dy_2 = Dy_1;
                Dy_1 = Dy;
                dy_1 = dy;
            }
        }
    }
    // Solve for the coefficients
    VectorXf C = slope_p.bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(s);
    // Recover the height
    Z = Z_p * C;
    Z.resize(sx.rows(), sx.cols());
    /*
    float Z_max = Z.maxCoeff();
    float Z_min = Z.minCoeff();
    Z = (Z.array() - Z_min) / (Z_max - Z_min); // normalize to [0, 1]
    Z = 2 * Z.array() - 1; // then to [-1, 1]
    Z /= 1000;
    */
    return true;
}
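
// Modal (Legendre) reconstruction in a nutshell (comment only): the height is modeled as
//   z(x, y) = sum_{n,m} C_{nm} * P_m(x) * P_n(y),
// where the Legendre polynomials follow the recurrence
//   P_k(t) = ((2k - 1) t P_{k-1}(t) - (k - 1) P_{k-2}(t)) / k
// and their derivatives  P'_k(t) = k P_{k-1}(t) + t P'_{k-1}(t).
// Stacking dz/dx and dz/dy for every unmasked pixel gives the linear system slope_p * C = s,
// solved in the least-squares sense via SVD; Z = Z_p * C then evaluates the height per pixel.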
double computeReprojectionErrors(
    const vector<vector<cv::Point3f>>& objectPoints,
    const vector<vector<cv::Point2f>>& imagePoints,
    const vector<Mat>& rvecs, const vector<Mat>& tvecs,
    const Mat& cameraMatrix, const Mat& distCoeffs,
    vector<float>& perViewErrors) {
    vector<cv::Point2f> imagePoints2;
    int i, totalPoints = 0;
    double totalErr = 0, err;
    perViewErrors.resize(objectPoints.size());
    for (i = 0; i < (int)objectPoints.size(); i++) {
        projectPoints(Mat(objectPoints[i]), rvecs[i], tvecs[i],
            cameraMatrix, distCoeffs, imagePoints2);
        err = cv::norm(Mat(imagePoints[i]), Mat(imagePoints2), cv::NORM_L2);
        int n = (int)objectPoints[i].size();
        perViewErrors[i] = (float)std::sqrt(err * err / n);
        totalErr += err * err;
        totalPoints += n;
    }
    return std::sqrt(totalErr / totalPoints);
}

/*
bool aruco_analyze(const vector<Mat>& imgs_board, const ArucoBoard& board, const Mat& cameraMatrix, const Mat& distCoeffs, vector<Mat>& Rmats,
    vector<cv::Vec3f>& Tvecs, vector<vector<int>>& Ids, vector<vector<vector<cv::Point2f>>>& corners,
    vector<vector<vector<cv::Point2f>>>& rejectedCandidates, std::string output_dir) {
    for (uint16_t i = 0; i < imgs_board.size(); i++) {
        cv::Mat img = imgs_board[i].clone();
        auto parameters = cv::aruco::DetectorParameters::create();
        cv::aruco::detectMarkers(img, board.dictionary, corners[i], Ids[i], parameters, rejectedCandidates[i], cameraMatrix, distCoeffs);
        Mat output_img;
        cvtColor(imgs_board[i], output_img, cv::COLOR_GRAY2RGB);
        cv::aruco::drawDetectedMarkers(output_img, corners[i], Ids[i], cv::Scalar(0, 255, 0));
        // transforms points from the board coordinate system to the camera coordinate system
        cv::Ptr<cv::aruco::GridBoard> aruco_board = cv::aruco::GridBoard::create(board.markers_size.width, board.markers_size.height, board.markerLength,
            board.markerSeparation, board.dictionary, 0);
        cv::Vec3f rvec;
        int boardNum = cv::aruco::estimatePoseBoard(corners[i], Ids[i], aruco_board, cameraMatrix, distCoeffs, rvec, Tvecs[i]);
        // The marker board was detected
        if (boardNum > 0) {
            Rodrigues(rvec, Rmats[i]);
            cv::drawFrameAxes(output_img, cameraMatrix, distCoeffs, rvec, Tvecs[i], 0.04, 1);
            imwrite(output_dir + "/sys_calib_output/img_poseBoard_" + std::to_string(i) + ".bmp", output_img);
        }
        else
            return false;
    }
    return true;
}
*/

bool system_calib(vector<Matrix3f>& CVR, vector<Vector3f>& CVTL, Matrix3f& CSR, VectorXf& CSTL_D, vector<Vector3f>& n) {
    vector<Vector3f> m(3); // m12, m23, m31
    for (uint16_t i = 0; i < CVR.size(); i++) {
        Eigen::EigenSolver<Matrix3f> es(CVR[i] * CVR[(i + 1) % 3].transpose(), true);
        auto eval = es.eigenvalues();
        // get the eigenvector whose corresponding eigenvalue is 1
        for (int j = 0; j < eval.rows(); j++) {
            std::complex<double> evalj = eval(j);
            if (fabs(1 - evalj.real()) <= 0.0001) {
                auto evec = es.eigenvectors();
                for (int t = 0; t < evec.rows(); t++) {
                    m[i](t) = evec(t, j).real();
                }
                break;
            }
        }
    }
    // solve the three normal vectors n
    n[0] = m[0].cross(m[2]);
    n[1] = m[0].cross(m[1]);
    n[2] = m[1].cross(m[2]);
    for (int i = 0; i < 3; i++) {
        if (n[i](2) > 0)
            n[i] = -n[i];
        n[i].normalize();
    }
    // solve CSR
    Matrix3f I3 = Matrix3f::Identity(3, 3);
    Matrix3f CSR0 = (I3 - 2 * n[0] * n[0].transpose()).inverse() * CVR[0];
    Matrix3f CSR1 = (I3 - 2 * n[1] * n[1].transpose()).inverse() * CVR[1];
    Matrix3f CSR2 = (I3 - 2 * n[2] * n[2].transpose()).inverse() * CVR[2];
    CSR = (CSR0 + CSR1 + CSR2) / 3;
    MatrixXf T_A(9, 6);
    MatrixXf T_B(9, 1);
    Vector3f Zero3 = Vector3f::Zero(3);
    T_A << (I3 - 2 * n[0] * n[0].transpose()), -2 * n[0], Zero3, Zero3,
           (I3 - 2 * n[1] * n[1].transpose()), Zero3, -2 * n[1], Zero3,
           (I3 - 2 * n[2] * n[2].transpose()), Zero3, Zero3, -2 * n[2];
    T_B << CVTL[0],
           CVTL[1],
           CVTL[2];
    CSTL_D = T_A.bdcSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(T_B);
    return true;
}
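
// Note (comment only): this appears to follow the standard mirror-based screen calibration.
// A plane with unit normal n reflects vectors by the Householder matrix H = I - 2 n n^T, so
// each virtual-screen pose satisfies
//   CVR_i = H_i * CSR   and   CVTL_i = H_i * CSTL - 2 d_i n_i,
// which is exactly the linear system stacked into T_A and T_B above and solved for CSTL and
// the three plane distances d_i (packed together in CSTL_D).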
int ArithPmd::Initlization(const nlohmann::json& param)
{
    Matrix3f cameraMatrix0, cameraMatrix1, cameraMatrix2, cameraMatrix3; // camera intrinsics
    MatrixXf camera_rays0, camera_rays1, camera_rays2, camera_rays3; // camera rays, in the camera coordinate system
    cv::Mat distcoeffs0, distcoeffs1, distcoeffs2, distcoeffs3; // lens distortion coefficients
    Matrix4f CWT0, CWT1, CWT2, CWT3; // transforms from the world (reference-plane) coordinate system to the camera coordinate system
    Matrix4f CST0, CST1, CST2, CST3; // transforms from the screen coordinate system to the camera coordinate system
    std::vector<uint32_t> t0 = param.at("ROI");
    roi0 = { t0[0], t0[1], t0[2], t0[3] };
    roi1 = { t0[0], t0[1], t0[2], t0[3] };
    roi2 = { t0[0], t0[1], t0[2], t0[3] };
    roi3 = { t0[0], t0[1], t0[2], t0[3] };
    std::vector<float> t1 = param.at("cameraMatrix0");
    MatrixXfR t2(1, t1.size());
    t2.row(0) = VectorXf::Map(&t1[0], t1.size());
    t2.resize(3, 3);
    cameraMatrix0 = t2;
    std::vector<float> t1_1 = param.at("cameraMatrix1");
    MatrixXfR t2_1(1, t1_1.size());
    t2_1.row(0) = VectorXf::Map(&t1_1[0], t1_1.size());
    t2_1.resize(3, 3);
    cameraMatrix1 = t2_1;
    std::vector<float> t1_2 = param.at("cameraMatrix2");
    MatrixXfR t2_2(1, t1_2.size());
    t2_2.row(0) = VectorXf::Map(&t1_2[0], t1_2.size());
    t2_2.resize(3, 3);
    cameraMatrix2 = t2_2;
    std::vector<float> t1_3 = param.at("cameraMatrix3");
    MatrixXfR t2_3(1, t1_3.size());
    t2_3.row(0) = VectorXf::Map(&t1_3[0], t1_3.size());
    t2_3.resize(3, 3);
    cameraMatrix3 = t2_3;
    std::vector<float> t3 = param.at("distCoeffs0");
    Mat t5 = Mat(t3);
    distcoeffs0 = t5.clone();
    std::vector<float> t3_1 = param.at("distCoeffs1");
    Mat t5_1 = Mat(t3_1);
    distcoeffs1 = t5_1.clone();
    std::vector<float> t3_2 = param.at("distCoeffs2");
    Mat t5_2 = Mat(t3_2);
    distcoeffs2 = t5_2.clone();
    std::vector<float> t3_3 = param.at("distCoeffs3");
    Mat t5_3 = Mat(t3_3);
    distcoeffs3 = t5_3.clone();
    std::vector<uint16_t> t4 = param.at("image_size");
    img_size.width = t4[0];
    img_size.height = t4[1];
    Screen screen0 = { param.at("screenWidth"), param.at("screenHeight"), param.at("screenCols"), param.at("screenRows") };
    screen = screen0;
    period_width = param.at("period_width");
    period_height = param.at("period_height");
    configCameraRay(cameraMatrix0, img_size, 1.0, camera_rays0);
    configCameraRay(cameraMatrix1, img_size, 1.0, camera_rays1);
    configCameraRay(cameraMatrix2, img_size, 1.0, camera_rays2);
    configCameraRay(cameraMatrix3, img_size, 1.0, camera_rays3);
    // ---------------- computation --------------------
    // Read CWT and intersect the camera rays with the reference plane
    std::vector<float> t7 = param.at("CWT0");
    MatrixXfR t8(1, t7.size());
    t8.row(0) = VectorXf::Map(&t7[0], t7.size());
    t8.resize(4, 4);
    CWT0 = t8;
    Vector4f O = matrix_to_home(Vector3f::Constant(3, 0));
    camera_origin_world0 = (CWT0.inverse() * O).head(3);
    // camera_origin_world.push_back(&camera_origin_world0);
    configRefPlane(CWT0.block(0, 3, 3, 1), CWT0.block(0, 2, 3, 1), camera_rays0, refPlane0);
    camera_rays0.resize(0, 0);
    // The reference plane above is in the camera coordinate system; transform it to the world coordinate system
    MatrixXf ref_homo = matrix_to_home(refPlane0);
    ref_homo = CWT0.inverse() * ref_homo;
    refPlane0 = ref_homo.block(0, 0, 3, ref_homo.cols());
    // refPlane.push_back(&refPlane0);
    // Read the screen and fringe information (projected fringe geometry)
    std::vector<float> t9 = param.at("CST0");
    MatrixXfR t10(1, t9.size());
    t10.row(0) = VectorXf::Map(&t9[0], t9.size());
    t10.resize(4, 4);
    CST0 = t10;
    Matrix4f WST = CWT0.inverse() * CST0;
    configScreenPixelPos(screen, WST, screen_pix_pos0);
    // screen_pix_pos.push_back(&screen_pix_pos0);
    // Read CWT and intersect the camera rays with the reference plane
    std::vector<float> t7_1 = param.at("CWT1");
    MatrixXfR t8_1(1, t7_1.size());
    t8_1.row(0) = VectorXf::Map(&t7_1[0], t7_1.size());
    t8_1.resize(4, 4);
    CWT1 = t8_1;
    Vector4f O_1 = matrix_to_home(Vector3f::Constant(3, 0));
    camera_origin_world1 = (CWT1.inverse() * O_1).head(3);
    // camera_origin_world.push_back(&camera_origin_world1);
    configRefPlane(CWT1.block(0, 3, 3, 1), CWT1.block(0, 2, 3, 1), camera_rays1, refPlane1);
    camera_rays1.resize(0, 0);
    // The reference plane above is in the camera coordinate system; transform it to the world coordinate system
    MatrixXf ref_homo_1 = matrix_to_home(refPlane1);
    ref_homo_1 = CWT1.inverse() * ref_homo_1;
    refPlane1 = ref_homo_1.block(0, 0, 3, ref_homo_1.cols());
    // refPlane.push_back(&refPlane1);
    // Read the screen and fringe information (projected fringe geometry)
    std::vector<float> t9_1 = param.at("CST1");
    MatrixXfR t10_1(1, t9_1.size());
    t10_1.row(0) = VectorXf::Map(&t9_1[0], t9_1.size());
    t10_1.resize(4, 4);
    CST1 = t10_1;
    Matrix4f WST_1 = CWT1.inverse() * CST1;
    configScreenPixelPos(screen, WST_1, screen_pix_pos1);
    // screen_pix_pos.push_back(&screen_pix_pos1);
    // Read CWT and intersect the camera rays with the reference plane
    std::vector<float> t7_2 = param.at("CWT2");
    MatrixXfR t8_2(1, t7_2.size());
    t8_2.row(0) = VectorXf::Map(&t7_2[0], t7_2.size());
    t8_2.resize(4, 4);
    CWT2 = t8_2;
    Vector4f O_2 = matrix_to_home(Vector3f::Constant(3, 0));
    camera_origin_world2 = (CWT2.inverse() * O_2).head(3);
    // camera_origin_world.push_back(&camera_origin_world2);
    configRefPlane(CWT2.block(0, 3, 3, 1), CWT2.block(0, 2, 3, 1), camera_rays2, refPlane2);
    camera_rays2.resize(0, 0);
    // The reference plane above is in the camera coordinate system; transform it to the world coordinate system
    MatrixXf ref_homo_2 = matrix_to_home(refPlane2);
    ref_homo_2 = CWT2.inverse() * ref_homo_2;
    refPlane2 = ref_homo_2.block(0, 0, 3, ref_homo_2.cols());
    // refPlane.push_back(&refPlane2);
    // Read the screen and fringe information (projected fringe geometry)
    std::vector<float> t9_2 = param.at("CST2");
    MatrixXfR t10_2(1, t9_2.size());
    t10_2.row(0) = VectorXf::Map(&t9_2[0], t9_2.size());
    t10_2.resize(4, 4);
    CST2 = t10_2;
    Matrix4f WST_2 = CWT2.inverse() * CST2;
    configScreenPixelPos(screen, WST_2, screen_pix_pos2);
    // screen_pix_pos.push_back(&screen_pix_pos2);
    // Read CWT and intersect the camera rays with the reference plane
    std::vector<float> t7_3 = param.at("CWT3");
    MatrixXfR t8_3(1, t7_3.size());
    t8_3.row(0) = VectorXf::Map(&t7_3[0], t7_3.size());
    t8_3.resize(4, 4);
    CWT3 = t8_3;
    Vector4f O_3 = matrix_to_home(Vector3f::Constant(3, 0));
    camera_origin_world3 = (CWT3.inverse() * O_3).head(3);
    // camera_origin_world.push_back(&camera_origin_world3);
    configRefPlane(CWT3.block(0, 3, 3, 1), CWT3.block(0, 2, 3, 1), camera_rays3, refPlane3);
    camera_rays3.resize(0, 0);
    // The reference plane above is in the camera coordinate system; transform it to the world coordinate system
    MatrixXf ref_homo_3 = matrix_to_home(refPlane3);
    ref_homo_3 = CWT3.inverse() * ref_homo_3;
    refPlane3 = ref_homo_3.block(0, 0, 3, ref_homo_3.cols());
    // refPlane.push_back(&refPlane3);
    // Read the screen and fringe information (projected fringe geometry)
    std::vector<float> t9_3 = param.at("CST3");
    MatrixXfR t10_3(1, t9_3.size());
    t10_3.row(0) = VectorXf::Map(&t9_3[0], t9_3.size());
    t10_3.resize(4, 4);
    CST3 = t10_3;
    Matrix4f WST_3 = CWT3.inverse() * CST3;
    configScreenPixelPos(screen, WST_3, screen_pix_pos3);
    // screen_pix_pos.push_back(&screen_pix_pos3);
    std::vector<float> t11 = param.at("CTC1");
    MatrixXfR t12(1, t11.size());
    t12.row(0) = VectorXf::Map(&t11[0], t11.size());
    t12.resize(4, 4);
    CTC0 = t12;
    // CTC.push_back(&CTC0);
    std::vector<float> t11_1 = param.at("CTC2");
    MatrixXfR t12_1(1, t11_1.size());
    t12_1.row(0) = VectorXf::Map(&t11_1[0], t11_1.size());
    t12_1.resize(4, 4);
    CTC1 = t12_1;
    // CTC.push_back(&CTC1);
    std::vector<float> t11_2 = param.at("CTC3");
    MatrixXfR t12_2(1, t11_2.size());
    t12_2.row(0) = VectorXf::Map(&t11_2[0], t11_2.size());
    t12_2.resize(4, 4);
    CTC2 = t12_2;
    // CTC.push_back(&CTC2);
    return 0;
}
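
// Hedged sketch of the JSON layout Initlization expects (key names taken from the code
// above; all values below are placeholders, not calibration data): row-major 3x3 intrinsics,
// row-major 4x4 transforms, and scalar screen/fringe parameters.
/*
{
    "ROI": [0, 0, 1024, 1280],
    "cameraMatrix0": [2000, 0, 640, 0, 2000, 512, 0, 0, 1],
    "distCoeffs0": [0, 0, 0, 0, 0],
    "image_size": [1280, 1024],
    "screenWidth": 0.344, "screenHeight": 0.194,
    "screenCols": 1920, "screenRows": 1080,
    "period_width": 16, "period_height": 16,
    "CWT0": [1, 0, 0, 0,  0, 1, 0, 0,  0, 0, 1, 0.5,  0, 0, 0, 1],
    "CST0": [1, 0, 0, 0,  0, 1, 0, 0,  0, 0, 1, 0.3,  0, 0, 0, 1],
    "CTC1": [1, 0, 0, 0,  0, 1, 0, 0,  0, 0, 1, 0,  0, 0, 0, 1]
    // ... plus cameraMatrix1-3, distCoeffs1-3, CWT1-3, CST1-3, CTC2-3 in the same shape
}
*/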
static int product_mask(const MatrixXf& x, MatrixXf& y, MatrixXf& roi_m)
{
    // Check that the input matrices have matching sizes
    if (x.rows() != roi_m.rows() || x.cols() != roi_m.cols()) {
        return -1; // error code: size mismatch
    }
    // Initialize the output matrix y with the same size as x
    y = MatrixXf::Zero(x.rows(), x.cols());
    // Element-wise product with the mask
    y = x.array() * roi_m.array();
    return 0; // 0 means success
}

int ArithPmd::Preprocess(const std::vector<cv::Mat>& images, int area_threshold, int32_t camera_index)
{
    cv::Mat input = cv::Mat::zeros(images[0].size(), images[0].type());
    for (const auto& img : images) {
        cv::add(input, img, input);
    }
    cv::Mat fill_image;
    preprocess(input, fill_image, area_threshold);
    std::vector<std::vector<cv::Point>> contours;
    cv::findContours(fill_image, contours, cv::RETR_EXTERNAL, cv::CHAIN_APPROX_SIMPLE);
    cv::Rect largest_bbox;
    double max_area = 0;
    for (const auto& contour : contours) {
        double area = cv::contourArea(contour);
        if (area > max_area) {
            max_area = area;
            largest_bbox = cv::boundingRect(contour);
        }
    }
    // cv::Mat mask;
    if (camera_index == 0)
    {
        threshold(fill_image, mask0, 0, 1, cv::THRESH_BINARY);
        // cv::imwrite("mask.bmp", mask * 255);
        cv::Mat roi_mask = mask0(largest_bbox);
        cv::cv2eigen(roi_mask, roi_m0);
        roi0.startRow = largest_bbox.y;
        roi0.startCol = largest_bbox.x;
        roi0.blockRows = largest_bbox.height;
        roi0.blockCols = largest_bbox.width;
    }
    else if (camera_index == 1)
    {
        threshold(fill_image, mask1, 0, 1, cv::THRESH_BINARY);
        // cv::imwrite("mask.bmp", mask * 255);
        cv::Mat roi_mask = mask1(largest_bbox);
        cv::cv2eigen(roi_mask, roi_m1);
        roi1.startRow = largest_bbox.y;
        roi1.startCol = largest_bbox.x;
        roi1.blockRows = largest_bbox.height;
        roi1.blockCols = largest_bbox.width;
    }
    else if (camera_index == 2)
    {
        threshold(fill_image, mask2, 0, 1, cv::THRESH_BINARY);
        // cv::imwrite("mask.bmp", mask * 255);
        cv::Mat roi_mask = mask2(largest_bbox);
        cv::cv2eigen(roi_mask, roi_m2);
        roi2.startRow = largest_bbox.y;
        roi2.startCol = largest_bbox.x;
        roi2.blockRows = largest_bbox.height;
        roi2.blockCols = largest_bbox.width;
    }
    else
    {
        threshold(fill_image, mask3, 0, 1, cv::THRESH_BINARY);
        // cv::imwrite("mask.bmp", mask * 255);
        cv::Mat roi_mask = mask3(largest_bbox);
        cv::cv2eigen(roi_mask, roi_m3);
        roi3.startRow = largest_bbox.y;
        roi3.startCol = largest_bbox.x;
        roi3.blockRows = largest_bbox.height;
        roi3.blockCols = largest_bbox.width;
    }
    return 0;
}
int ArithPmd::Predict(const std::vector<Eigen::MatrixXf>& img_pats, MatrixXfR& pcl, int32_t camera_index)
{
    ROI roi;
    MatrixXf roi_m;
    if (camera_index == 0)
    {
        roi = roi0;
        roi_m = roi_m0;
    }
    else if (camera_index == 1)
    {
        roi = roi1;
        roi_m = roi_m1;
    }
    else if (camera_index == 2)
    {
        roi = roi2;
        roi_m = roi_m2;
    }
    else
    {
        roi = roi3;
        roi_m = roi_m3;
    }
    std::vector<MatrixXf> ps_maps(2);
    {
        std::vector<MatrixXf> wraped_ps_maps(4);
        phase_shifting(img_pats, wraped_ps_maps);
        phase_unwrapping(wraped_ps_maps, period_width, period_height, ps_maps);
    }
    // save_matrix_as_img(ps_maps[0], "phase_unwrap_x.bmp");
    // save_matrix_as_img(ps_maps[1], "phase_unwrap_y.bmp");
    // Match camera-pixel phases to screen pixels and get their 3D coordinates in the world coordinate system
    MatrixXf screen_camera_phase_match_pos; // sized to the ROI
    {
        std::vector<MatrixXf> ps_maps_roi0(2);
        std::vector<MatrixXf> ps_maps_roi(2);
        for (uint32_t i = 0; i < 2; ++i)
        {
            ps_maps_roi0[i] = ps_maps[i].block(roi.startRow, roi.startCol, roi.blockRows, roi.blockCols);
            ps_maps_roi[i] = MatrixXf::Zero(ps_maps_roi0[i].rows(), ps_maps_roi0[i].cols());
            int r = product_mask(ps_maps_roi0[i], ps_maps_roi[i], roi_m);
        }
        if (camera_index == 0)
        {
            screen_camera_phase_match(ps_maps_roi, screen, period_width, period_height, screen_pix_pos0,
                screen_camera_phase_match_pos);
        }
        else if (camera_index == 1)
        {
            screen_camera_phase_match(ps_maps_roi, screen, period_width, period_height, screen_pix_pos1,
                screen_camera_phase_match_pos);
        }
        else if (camera_index == 2)
        {
            screen_camera_phase_match(ps_maps_roi, screen, period_width, period_height, screen_pix_pos2,
                screen_camera_phase_match_pos);
        }
        else
        {
            screen_camera_phase_match(ps_maps_roi, screen, period_width, period_height, screen_pix_pos3,
                screen_camera_phase_match_pos);
        }
        // screen_camera_phase_match(ps_maps_roi, screen, period_width, period_height, *screen_pix_pos[camera_index], screen_camera_phase_match_pos);
        save_matrix_as_img(ps_maps_roi[0], "ps_maps_roi_x.bmp");
        save_matrix_as_img(ps_maps_roi[1], "ps_maps_roi_y.bmp");
    }
    // Slope computation
    std::vector<MatrixXfR> slope(2);
    {
        // Take the ROI of refPlane
        std::vector<MatrixXf> refP(2);
        MatrixXfR temp;
        MatrixXf ref_roi(3, roi.blockRows * roi.blockCols);
        for (uint32_t i = 0; i < 3; ++i) {
            if (camera_index == 0)
            {
                refP[0] = refPlane0.row(i);
            }
            else if (camera_index == 1)
            {
                refP[0] = refPlane1.row(i);
            }
            else if (camera_index == 2)
            {
                refP[0] = refPlane2.row(i);
            }
            else
            {
                refP[0] = refPlane3.row(i);
            }
            refP[0].resize(img_size.height, img_size.width);
            temp = refP[0].block(roi.startRow, roi.startCol, roi.blockRows, roi.blockCols);
            refP[1] = MatrixXf::Zero(temp.rows(), temp.cols());
            int r1 = product_mask(temp, refP[1], roi_m);
            /*
            refP[1] = refP[0].block(roi.startRow, roi.startCol, roi.blockRows, roi.blockCols);
            */
            refP[1].resize(1, roi.blockRows * roi.blockCols);
            ref_roi.row(i) = refP[1];
        }
        if (camera_index == 0)
        {
            slope_calculate(camera_origin_world0, ref_roi, screen_camera_phase_match_pos, slope);
        }
        else if (camera_index == 1)
        {
            slope_calculate(camera_origin_world1, ref_roi, screen_camera_phase_match_pos, slope);
        }
        else if (camera_index == 2)
        {
            slope_calculate(camera_origin_world2, ref_roi, screen_camera_phase_match_pos, slope);
        }
        else
        {
            slope_calculate(camera_origin_world3, ref_roi, screen_camera_phase_match_pos, slope);
        }
        // slope_calculate(*camera_origin_world[camera_index], ref_roi, screen_camera_phase_match_pos, slope);
        for (uint32_t i = 0; i < 2; ++i)
            slope[i].resize(roi.blockRows, roi.blockCols);
    }
    MatrixXfR Z;
    MatrixXf refPlane_t;
    if (camera_index == 0)
    {
        refPlane_t = refPlane0;
    }
    else if (camera_index == 1)
    {
        refPlane_t = refPlane1;
    }
    else if (camera_index == 2)
    {
        refPlane_t = refPlane2;
    }
    else
    {
        refPlane_t = refPlane3;
    }
    MatrixXfR X_ref = refPlane_t.row(0), Y_ref = refPlane_t.row(1);
    X_ref.resize(img_size.height, img_size.width);
    Y_ref.resize(img_size.height, img_size.width);
    MatrixXfR temp_1;
    temp_1 = X_ref.block(roi.startRow, roi.startCol, roi.blockRows, roi.blockCols);
    MatrixXf X_roi = MatrixXf::Zero(temp_1.rows(), temp_1.cols());
    int r_x = product_mask(temp_1, X_roi, roi_m);
    MatrixXfR temp_2;
    temp_2 = Y_ref.block(roi.startRow, roi.startCol, roi.blockRows, roi.blockCols);
    MatrixXf Y_roi = MatrixXf::Zero(temp_2.rows(), temp_2.cols());
    int r_y = product_mask(temp_2, Y_roi, roi_m);
    // MatrixXfR Y_roi = Y_ref.block(roi.startRow, roi.startCol, roi.blockRows, roi.blockCols);
    std::vector<float> range = { temp_1(0, 0) * 1000, temp_1(0, roi.blockCols - 1) * 1000,
        temp_2(0, 0) * 1000, temp_2(roi.blockRows - 1, 0) * 1000,
    };
    modal_reconstruction(slope[0], slope[1], Z, range, 4, roi_m);
    roi_m.resize(0, 0);
    // First approach
    X_roi.resize(1, roi.blockCols * roi.blockRows);
    Y_roi.resize(1, roi.blockCols * roi.blockRows);
    Z.resize(1, roi.blockCols * roi.blockRows);
    // Temporary vectors to store non-zero elements
    vector<float> new_X, new_Y, new_Z;
    // Traverse the matrices and add non-zero elements to the new vectors
    for (int i = 0; i < roi.blockRows * roi.blockCols; ++i) {
        if (X_roi(0, i) != 0 || Y_roi(0, i) != 0) {
            new_X.push_back(X_roi(0, i));
            new_Y.push_back(Y_roi(0, i));
            new_Z.push_back(Z(0, i));
        }
    }
    // Resize the matrices
    MatrixXf X_roi_, Y_roi_, Z_;
    X_roi_.resize(1, new_X.size());
    Y_roi_.resize(1, new_Y.size());
    Z_.resize(1, new_Z.size());
    // Copy the non-zero elements back to the matrices
    for (int i = 0; i < new_X.size(); ++i) {
        X_roi_(0, i) = new_X[i];
        Y_roi_(0, i) = new_Y[i];
        Z_(0, i) = new_Z[i];
    }
    float X_max = X_roi_.maxCoeff();
    float X_min = X_roi_.minCoeff();
    X_roi_ = (X_roi_.array() - X_min) / (X_max - X_min); // normalize to [0, 1]
    X_roi_ = 2 * X_roi_.array() - 1; // then to [-1, 1]
    // X_roi_ = X_roi_ / 10;
    float Y_max = Y_roi_.maxCoeff();
    float Y_min = Y_roi_.minCoeff();
    Y_roi_ = (Y_roi_.array() - Y_min) / (Y_max - Y_min); // normalize to [0, 1]
    Y_roi_ = 2 * Y_roi_.array() - 1; // then to [-1, 1]
    // Y_roi_ = Y_roi_ / 10;
    float Z_max = Z_.maxCoeff();
    float Z_min = Z_.minCoeff();
    Z_ = (Z_.array() - Z_min) / (Z_max - Z_min); // normalize to [0, 1]
    Z_ = 2 * Z_.array() - 1; // then to [-1, 1]
    Z_ = Z_ / 1000;
    pcl.resize(3, new_X.size());
    pcl.row(0) = X_roi_;
    pcl.row(1) = Y_roi_;
    pcl.row(2) = Z_;
    roi.startRow = 0;
    roi.startCol = 0;
    roi.blockRows = 0;
    roi.blockCols = 0;
    /*
    X_roi.resize(1, roi.blockCols * roi.blockRows);
    Y_roi.resize(1, roi.blockCols * roi.blockRows);
    Z.resize(1, roi.blockCols * roi.blockRows);
    pcl.resize(3, roi.blockCols * roi.blockRows);
    pcl.row(0) = X_roi;
    pcl.row(1) = Y_roi;
    pcl.row(2) = Z;
    */
    /*
    // Second approach
    Eigen::Map<Eigen::MatrixXf> flatX(X_roi.data(), X_roi.size(), 1);
    Eigen::Map<Eigen::MatrixXf> flatY(Y_roi.data(), X_roi.size(), 1);
    Eigen::Map<Eigen::MatrixXf> flatZ(Z.data(), X_roi.size(), 1);
    Eigen::Map<Eigen::MatrixXf> flatMask(roi_m.data(), roi_m.size(), 1);
    Eigen::MatrixXf product_x = flatX.array() * flatMask.array();
    Eigen::MatrixXf product_y = flatY.array() * flatMask.array();
    Eigen::MatrixXf product_z = flatZ.array() * flatMask.array();
    std::vector<float> filtered_x, filtered_y, filtered_z;
    for (int i = 0; i < product_x.size(); ++i) {
        if (product_x(i) != 0) {
            filtered_x.push_back(product_x(i));
            filtered_y.push_back(product_y(i));
            filtered_z.push_back(product_z(i));
        }
    }
    // X_roi.resize(1, roi.blockCols * roi.blockRows);
    // Y_roi.resize(1, roi.blockCols * roi.blockRows);
    // Z.resize(1, roi.blockCols * roi.blockRows);
    pcl.resize(3, filtered_x.size());
    for (int i = 0; i < filtered_x.size(); ++i) {
        pcl(0, i) = filtered_x[i]; // X coordinate
        pcl(1, i) = filtered_y[i]; // Y coordinate
        pcl(2, i) = filtered_z[i]; // Z coordinate
    }
    */
    return 0;
}

int ArithPmd::Stitch(const pcl::PointCloud<pcl::PointXYZ>& cloud1, const pcl::PointCloud<pcl::PointXYZ>& cloud2, pcl::PointCloud<pcl::PointXYZ>& cloud_output, int32_t camera_index)
{
    pcl::VoxelGrid<pcl::PointXYZ> downSizeFilter;
    pcl::PointCloud<pcl::PointXYZ>::Ptr transformed_cloud(new pcl::PointCloud<pcl::PointXYZ>);
    if (camera_index == 0)
    {
        pcl::transformPointCloud(cloud1, *transformed_cloud, CTC0);
    }
    else if (camera_index == 1)
    {
        pcl::transformPointCloud(cloud1, *transformed_cloud, CTC1);
    }
    else if (camera_index == 2)
    {
        pcl::transformPointCloud(cloud1, *transformed_cloud, CTC2);
    }
    else
    {
        // Note: any other index also falls back to CTC2 (only three camera-to-camera transforms are stored)
        pcl::transformPointCloud(cloud1, *transformed_cloud, CTC2);
    }
    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud3(new pcl::PointCloud<pcl::PointXYZ>);
    pcl::PointCloud<pcl::PointXYZ> cloud_all = (*transformed_cloud) + cloud2;
    cloud3 = cloud_all.makeShared();
    double Res = 0.4;
    downSizeFilter.setLeafSize(Res, Res, Res);
    downSizeFilter.setInputCloud(cloud3);
    downSizeFilter.filter(cloud_output);
    return 0;
}
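
// Hedged end-to-end sketch (illustration only; the file name and the two helper functions
// load_fringe_images_for_camera0 / to_eigen are hypothetical):
/*
    std::ifstream f("pmd_params.json");
    nlohmann::json param = nlohmann::json::parse(f);
    ArithPmd pmd;
    pmd.Initlization(param);

    // 1. Build the object mask / ROI for camera 0 from its fringe images
    std::vector<cv::Mat> raw_imgs = load_fringe_images_for_camera0();
    pmd.Preprocess(raw_imgs, 200, 0);

    // 2. Reconstruct a 3 x N point matrix from the 16 phase-shift patterns (as Eigen matrices)
    std::vector<Eigen::MatrixXf> patterns = to_eigen(raw_imgs);
    MatrixXfR points;
    pmd.Predict(patterns, points, 0);

    // 3. Convert to a PCL cloud and stitch it onto another camera's cloud
    pcl::PointCloud<pcl::PointXYZ> cloud0, cloud1, merged;
    for (int i = 0; i < points.cols(); ++i)
        cloud0.push_back(pcl::PointXYZ(points(0, i), points(1, i), points(2, i)));
    // ... fill cloud1 from another camera in the same way ...
    pmd.Stitch(cloud0, cloud1, merged, 0);
*/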