// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html
// This code is also subject to the license terms in the LICENSE_WillowGarage.md file found in this module's directory

#ifndef __OPENCV_RGBD_DEPTH_HPP__
#define __OPENCV_RGBD_DEPTH_HPP__

#include <opencv2/core.hpp>
#include <limits>
#include <stdexcept> // for the std::runtime_error thrown in FastICPOdometry::setTransformType

namespace cv
{
namespace rgbd
{

//! @addtogroup rgbd
//! @{
/** Checks if the value is a valid depth. For CV_16U or CV_16S, the convention is that a value
 * equal to one of the type's limits is invalid. For a float/double, we just check if it is a NaN.
 * @param depth the depth to check for validity
 */
CV_EXPORTS
inline bool
isValidDepth(const float & depth)
{
  return !cvIsNaN(depth);
}

CV_EXPORTS
inline bool
isValidDepth(const double & depth)
{
  return !cvIsNaN(depth);
}

CV_EXPORTS
inline bool
isValidDepth(const short int & depth)
{
  return (depth != std::numeric_limits<short int>::min()) && (depth != std::numeric_limits<short int>::max());
}

CV_EXPORTS
inline bool
isValidDepth(const unsigned short int & depth)
{
  return (depth != std::numeric_limits<unsigned short int>::min())
      && (depth != std::numeric_limits<unsigned short int>::max());
}

CV_EXPORTS
inline bool
isValidDepth(const int & depth)
{
  return (depth != std::numeric_limits<int>::min()) && (depth != std::numeric_limits<int>::max());
}

CV_EXPORTS
inline bool
isValidDepth(const unsigned int & depth)
{
  return (depth != std::numeric_limits<unsigned int>::min()) && (depth != std::numeric_limits<unsigned int>::max());
}
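/* Illustrative sketch (not part of the API): counting the valid pixels of a CV_16UC1 depth map.
 * The matrix name `depth16u` is hypothetical.
 *
 *   int validCount = 0;
 *   for (int y = 0; y < depth16u.rows; ++y)
 *     for (int x = 0; x < depth16u.cols; ++x)
 *       if (cv::rgbd::isValidDepth(depth16u.at<unsigned short>(y, x)))
 *         ++validCount;
 */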
/** Object that can compute the normals in an image.
 * It is implemented as an object so that it can cache data for efficiency.
 * The implemented methods are either:
 * - FALS (the fastest) and SRI from
 *   ``Fast and Accurate Computation of Surface Normals from Range Images``
 *   by H. Badino, D. Huber, Y. Park and T. Kanade
 * - the normals with bilateral filtering on a depth image from
 *   ``Gradient Response Maps for Real-Time Detection of Texture-Less Objects``
 *   by S. Hinterstoisser, C. Cagniart, S. Ilic, P. Sturm, N. Navab, P. Fua, and V. Lepetit
 */
class CV_EXPORTS_W RgbdNormals: public Algorithm
{
public:
  enum RGBD_NORMALS_METHOD
  {
    RGBD_NORMALS_METHOD_FALS = 0,
    RGBD_NORMALS_METHOD_LINEMOD = 1,
    RGBD_NORMALS_METHOD_SRI = 2
  };

  RgbdNormals()
      :
        rows_(0),
        cols_(0),
        depth_(0),
        K_(Mat()),
        window_size_(0),
        method_(RGBD_NORMALS_METHOD_FALS),
        rgbd_normals_impl_(0)
  {
  }

  /** Constructor
   * @param rows the number of rows of the depth image normals will be computed on
   * @param cols the number of cols of the depth image normals will be computed on
   * @param depth the depth of the normals (only CV_32F or CV_64F)
   * @param K the calibration matrix to use
   * @param window_size the window size to compute the normals: can only be 1, 3, 5 or 7
   * @param method one of the methods to use: RGBD_NORMALS_METHOD_FALS, RGBD_NORMALS_METHOD_LINEMOD or RGBD_NORMALS_METHOD_SRI
   */
  RgbdNormals(int rows, int cols, int depth, InputArray K, int window_size = 5, int method =
      RgbdNormals::RGBD_NORMALS_METHOD_FALS);

  ~RgbdNormals();

  CV_WRAP static Ptr<RgbdNormals> create(int rows, int cols, int depth, InputArray K, int window_size = 5, int method =
      RgbdNormals::RGBD_NORMALS_METHOD_FALS);

  /** Given a set of 3d points in a depth image, compute the normals at each point.
   * @param points a rows x cols x 3 matrix of CV_32F/CV_64F or a rows x cols x 1 CV_16U
   * @param normals a rows x cols x 3 matrix
   */
  CV_WRAP_AS(apply) void
  operator()(InputArray points, OutputArray normals) const;

  /** Initializes some data that is cached for later computation.
   * If that function is not called, it will be called the first time normals are computed.
   */
  CV_WRAP void
  initialize() const;

  CV_WRAP int getRows() const
  {
    return rows_;
  }
  CV_WRAP void setRows(int val)
  {
    rows_ = val;
  }
  CV_WRAP int getCols() const
  {
    return cols_;
  }
  CV_WRAP void setCols(int val)
  {
    cols_ = val;
  }
  CV_WRAP int getWindowSize() const
  {
    return window_size_;
  }
  CV_WRAP void setWindowSize(int val)
  {
    window_size_ = val;
  }
  CV_WRAP int getDepth() const
  {
    return depth_;
  }
  CV_WRAP void setDepth(int val)
  {
    depth_ = val;
  }
  CV_WRAP cv::Mat getK() const
  {
    return K_;
  }
  CV_WRAP void setK(const cv::Mat &val)
  {
    K_ = val;
  }
  CV_WRAP int getMethod() const
  {
    return method_;
  }
  CV_WRAP void setMethod(int val)
  {
    method_ = val;
  }

protected:
  void
  initialize_normals_impl(int rows, int cols, int depth, const Mat & K, int window_size, int method) const;

  int rows_, cols_, depth_;
  Mat K_;
  int window_size_;
  int method_;
  mutable void* rgbd_normals_impl_;
};
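/* Illustrative sketch (not part of the API): computing per-pixel normals from an organized
 * point cloud. `points3d` (rows x cols x 3, CV_32F) and the 3x3 camera matrix `K` are
 * hypothetical inputs, e.g. obtained from depthTo3d() declared below.
 *
 *   cv::Ptr<cv::rgbd::RgbdNormals> normalsComputer =
 *       cv::rgbd::RgbdNormals::create(points3d.rows, points3d.cols, CV_32F, K, 5,
 *                                     cv::rgbd::RgbdNormals::RGBD_NORMALS_METHOD_FALS);
 *   cv::Mat normals;
 *   (*normalsComputer)(points3d, normals);  // normals: rows x cols x 3
 */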
/** Object that can clean a noisy depth image
 */
class CV_EXPORTS_W DepthCleaner: public Algorithm
{
public:
  /** NIL method is from
   * ``Modeling Kinect Sensor Noise for Improved 3d Reconstruction and Tracking``
   * by C. Nguyen, S. Izadi, D. Lovell
   */
  enum DEPTH_CLEANER_METHOD
  {
    DEPTH_CLEANER_NIL
  };

  DepthCleaner()
      :
        depth_(0),
        window_size_(0),
        method_(DEPTH_CLEANER_NIL),
        depth_cleaner_impl_(0)
  {
  }

  /** Constructor
   * @param depth the depth of the depth image to clean (only CV_32F or CV_64F)
   * @param window_size the window size used by the method: can only be 1, 3, 5 or 7
   * @param method one of the methods to use: DEPTH_CLEANER_NIL
   */
  DepthCleaner(int depth, int window_size = 5, int method = DepthCleaner::DEPTH_CLEANER_NIL);

  ~DepthCleaner();

  CV_WRAP static Ptr<DepthCleaner> create(int depth, int window_size = 5, int method = DepthCleaner::DEPTH_CLEANER_NIL);

  /** Given a depth image, compute a cleaned-up version of it.
   * @param points a rows x cols x 3 matrix of CV_32F/CV_64F or a rows x cols x 1 CV_16U
   * @param depth a rows x cols matrix of the cleaned up depth
   */
  CV_WRAP_AS(apply) void
  operator()(InputArray points, OutputArray depth) const;

  /** Initializes some data that is cached for later computation.
   * If that function is not called, it will be called the first time the cleaner is applied.
   */
  CV_WRAP void
  initialize() const;

  CV_WRAP int getWindowSize() const
  {
    return window_size_;
  }
  CV_WRAP void setWindowSize(int val)
  {
    window_size_ = val;
  }
  CV_WRAP int getDepth() const
  {
    return depth_;
  }
  CV_WRAP void setDepth(int val)
  {
    depth_ = val;
  }
  CV_WRAP int getMethod() const
  {
    return method_;
  }
  CV_WRAP void setMethod(int val)
  {
    method_ = val;
  }

protected:
  void
  initialize_cleaner_impl() const;

  int depth_;
  int window_size_;
  int method_;
  mutable void* depth_cleaner_impl_;
};
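/* Illustrative sketch (not part of the API): cleaning a raw CV_16UC1 Kinect-style depth map.
 * The input name `rawDepth16u` is hypothetical; the depth is first rescaled to CV_32F meters
 * with rescaleDepth() (declared further below) to match the types listed in the doc above.
 *
 *   cv::Mat depth32f;
 *   cv::rgbd::rescaleDepth(rawDepth16u, CV_32F, depth32f);   // millimeters -> meters, 0 -> NaN
 *   cv::Ptr<cv::rgbd::DepthCleaner> cleaner =
 *       cv::rgbd::DepthCleaner::create(CV_32F, 5, cv::rgbd::DepthCleaner::DEPTH_CLEANER_NIL);
 *   cv::Mat cleanedDepth;
 *   (*cleaner)(depth32f, cleanedDepth);
 */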
/** Registers depth data to an external camera.
 * Registration is performed by creating a depth cloud, transforming the cloud by
 * the rigid body transformation between the cameras, and then projecting the
 * transformed points into the RGB camera.
 *
 * uv_rgb = K_rgb * [R | t] * z * inv(K_ir) * uv_ir
 *
 * Currently does not check for negative depth values.
 *
 * @param unregisteredCameraMatrix the camera matrix of the depth camera
 * @param registeredCameraMatrix the camera matrix of the external camera
 * @param registeredDistCoeffs the distortion coefficients of the external camera
 * @param Rt the rigid body transform between the cameras. Transforms points from depth camera frame to external camera frame.
 * @param unregisteredDepth the input depth data
 * @param outputImagePlaneSize the image plane dimensions of the external camera (width, height)
 * @param registeredDepth the result of transforming the depth into the external camera
 * @param depthDilation whether or not the depth is dilated to avoid holes and occlusion errors (optional)
 */
CV_EXPORTS_W
void
registerDepth(InputArray unregisteredCameraMatrix, InputArray registeredCameraMatrix, InputArray registeredDistCoeffs,
              InputArray Rt, InputArray unregisteredDepth, const Size& outputImagePlaneSize,
              OutputArray registeredDepth, bool depthDilation=false);
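/* Illustrative sketch (not part of the API): registering a depth map to an RGB camera.
 * All inputs (`depthK`, `rgbK`, `rgbDistCoeffs`, `Rt_depth_to_rgb`, `depth`, the RGB image size)
 * are hypothetical values coming from an extrinsic calibration.
 *
 *   cv::Mat registeredDepth;
 *   cv::rgbd::registerDepth(depthK, rgbK, rgbDistCoeffs, Rt_depth_to_rgb, depth,
 *                           cv::Size(640, 480), registeredDepth, true);   // depthDilation = true
 */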
/** Converts depth values at a given set of pixel coordinates to 3d points.
 * @param depth the depth image
 * @param in_K the calibration matrix of the camera
 * @param in_points the list of xy coordinates
 * @param points3d the resulting 3d points
 */
CV_EXPORTS_W
void
depthTo3dSparse(InputArray depth, InputArray in_K, InputArray in_points, OutputArray points3d);
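/* Illustrative sketch (not part of the API): back-projecting a sparse set of pixel locations.
 * `depthMeters` (CV_32F depth), `K` and the pixel list are hypothetical inputs, and the exact
 * container types accepted for `in_points` depend on the implementation.
 *
 *   std::vector<cv::Point2i> pixels = { cv::Point2i(100, 120), cv::Point2i(320, 240) };
 *   cv::Mat sparse3d;
 *   cv::rgbd::depthTo3dSparse(depthMeters, K, pixels, sparse3d);
 */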
/** Converts a depth image to an organized set of 3d points.
 * The coordinate system is x pointing left, y down and z away from the camera
 * @param depth the depth image (if given as short int CV_16U, it is assumed to be the depth in millimeters
 * (as done with the Microsoft Kinect); otherwise, if given as CV_32F or CV_64F, it is assumed to be in meters)
 * @param K The calibration matrix
 * @param points3d the resulting 3d points. They are of the same depth as `depth` if it is CV_32F or CV_64F, and of the
 * depth of `K` if `depth` is of depth CV_16U
 * @param mask the mask of the points to consider (can be empty)
 */
CV_EXPORTS_W
void
depthTo3d(InputArray depth, InputArray K, OutputArray points3d, InputArray mask = noArray());
/** If the input image is of type CV_16UC1 (like the Kinect one), the image is converted to floats, divided
 * by depth_factor to get a depth in meters, and values of 0 are converted to std::numeric_limits<float>::quiet_NaN().
 * Otherwise, the image is simply converted to floats.
 * @param in the depth image (if given as short int CV_16U, it is assumed to be the depth in millimeters
 * (as done with the Microsoft Kinect); otherwise, if given as CV_32F or CV_64F, it is assumed to be in meters)
 * @param depth the desired output depth (float or double)
 * @param out The rescaled float depth image
 * @param depth_factor (optional) factor by which depth is converted to distance (by default = 1000.0 for Kinect sensor)
 */
CV_EXPORTS_W
void
rescaleDepth(InputArray in, int depth, OutputArray out, double depth_factor = 1000.0);
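/* Illustrative sketch (not part of the API): going from a raw CV_16UC1 depth map to an
 * organized 3d point cloud. `rawDepth16u` and the 3x3 calibration matrix `K` are hypothetical.
 *
 *   cv::Mat depthMeters, points3d;
 *   cv::rgbd::rescaleDepth(rawDepth16u, CV_32F, depthMeters);   // mm -> m, 0 -> NaN
 *   cv::rgbd::depthTo3d(depthMeters, K, points3d);              // rows x cols x 3, CV_32F
 */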
/** Object that can compute planes in an image
 */
class CV_EXPORTS_W RgbdPlane: public Algorithm
{
public:
  enum RGBD_PLANE_METHOD
  {
    RGBD_PLANE_METHOD_DEFAULT
  };

  RgbdPlane(int method = RgbdPlane::RGBD_PLANE_METHOD_DEFAULT)
      :
        method_(method),
        block_size_(40),
        min_size_(block_size_*block_size_),
        threshold_(0.01),
        sensor_error_a_(0),
        sensor_error_b_(0),
        sensor_error_c_(0)
  {
  }

  /** Constructor
   * @param block_size The size of the blocks to look at for a stable MSE
   * @param min_size The minimum size of a cluster to be considered a plane
   * @param threshold The maximum distance of a point from a plane to belong to it (in meters)
   * @param sensor_error_a coefficient of the sensor error. 0 by default, 0.0075 for a Kinect
   * @param sensor_error_b coefficient of the sensor error. 0 by default
   * @param sensor_error_c coefficient of the sensor error. 0 by default
   * @param method The method to use to compute the planes.
   */
  RgbdPlane(int method, int block_size,
            int min_size, double threshold, double sensor_error_a = 0,
            double sensor_error_b = 0, double sensor_error_c = 0);

  ~RgbdPlane();

  CV_WRAP static Ptr<RgbdPlane> create(int method, int block_size, int min_size, double threshold,
                                       double sensor_error_a = 0, double sensor_error_b = 0,
                                       double sensor_error_c = 0);

  /** Find the planes in a depth image.
   * @param points3d the 3d points organized like the depth image: rows x cols with 3 channels
   * @param normals the normals for every point in the depth image
   * @param mask An image where each pixel is labeled with the plane it belongs to
   *             and 255 if it does not belong to any plane
   * @param plane_coefficients the coefficients of the corresponding planes (a,b,c,d) such that ax+by+cz+d=0, norm(a,b,c)=1
   *                           and c < 0 (so that the normal points towards the camera)
   */
  CV_WRAP_AS(apply) void
  operator()(InputArray points3d, InputArray normals, OutputArray mask,
             OutputArray plane_coefficients);

  /** Find the planes in a depth image but without doing a normal check, which is faster but less accurate.
   * @param points3d the 3d points organized like the depth image: rows x cols with 3 channels
   * @param mask An image where each pixel is labeled with the plane it belongs to
   *             and 255 if it does not belong to any plane
   * @param plane_coefficients the coefficients of the corresponding planes (a,b,c,d) such that ax+by+cz+d=0
   */
  CV_WRAP_AS(apply) void
  operator()(InputArray points3d, OutputArray mask, OutputArray plane_coefficients);

  CV_WRAP int getBlockSize() const
  {
    return block_size_;
  }
  CV_WRAP void setBlockSize(int val)
  {
    block_size_ = val;
  }
  CV_WRAP int getMinSize() const
  {
    return min_size_;
  }
  CV_WRAP void setMinSize(int val)
  {
    min_size_ = val;
  }
  CV_WRAP int getMethod() const
  {
    return method_;
  }
  CV_WRAP void setMethod(int val)
  {
    method_ = val;
  }
  CV_WRAP double getThreshold() const
  {
    return threshold_;
  }
  CV_WRAP void setThreshold(double val)
  {
    threshold_ = val;
  }
  CV_WRAP double getSensorErrorA() const
  {
    return sensor_error_a_;
  }
  CV_WRAP void setSensorErrorA(double val)
  {
    sensor_error_a_ = val;
  }
  CV_WRAP double getSensorErrorB() const
  {
    return sensor_error_b_;
  }
  CV_WRAP void setSensorErrorB(double val)
  {
    sensor_error_b_ = val;
  }
  CV_WRAP double getSensorErrorC() const
  {
    return sensor_error_c_;
  }
  CV_WRAP void setSensorErrorC(double val)
  {
    sensor_error_c_ = val;
  }

private:
  /** The method to use to compute the planes */
  int method_;
  /** The size of the blocks to look at for a stable MSE */
  int block_size_;
  /** The minimum size of a cluster to be considered a plane */
  int min_size_;
  /** How far a point can be from a plane to belong to it (in meters) */
  double threshold_;
  /** Coefficients of the sensor error model. All 0 by default, but a=0.0075 is the value to use for a Kinect */
  double sensor_error_a_, sensor_error_b_, sensor_error_c_;
};
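/* Illustrative sketch (not part of the API): extracting planes from an organized point cloud
 * plus its normals (see RgbdNormals above). `points3d` and `normals` are hypothetical inputs.
 *
 *   cv::rgbd::RgbdPlane planeFinder(cv::rgbd::RgbdPlane::RGBD_PLANE_METHOD_DEFAULT);
 *   cv::Mat planeMask;     // label image, 255 = does not belong to any plane
 *   cv::Mat planeCoeffs;   // one (a, b, c, d) entry per detected plane
 *   planeFinder(points3d, normals, planeMask, planeCoeffs);
 */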
/** Object that contains frame data.
 */
struct CV_EXPORTS_W RgbdFrame
{
  RgbdFrame();
  RgbdFrame(const Mat& image, const Mat& depth, const Mat& mask=Mat(), const Mat& normals=Mat(), int ID=-1);
  virtual ~RgbdFrame();

  CV_WRAP static Ptr<RgbdFrame> create(const Mat& image=Mat(), const Mat& depth=Mat(), const Mat& mask=Mat(), const Mat& normals=Mat(), int ID=-1);

  CV_WRAP virtual void
  release();

  CV_PROP int ID;
  CV_PROP Mat image;
  CV_PROP Mat depth;
  CV_PROP Mat mask;
  CV_PROP Mat normals;
};
/** Object that contains the frame data that may be needed for the Odometry.
 * It is used for efficiency: it allows passing precomputed/cached data of a frame that participates
 * in the Odometry processing several times.
 */
struct CV_EXPORTS_W OdometryFrame : public RgbdFrame
{
  /** These constants are used to set a type of cache which has to be prepared depending on the frame role:
   * srcFrame or dstFrame (see the compute method of the Odometry class). For the srcFrame and dstFrame different cache data may be required,
   * and some part of the cache may be common to both frame roles.
   * @param CACHE_SRC The cache data for the srcFrame will be prepared.
   * @param CACHE_DST The cache data for the dstFrame will be prepared.
   * @param CACHE_ALL The cache data for both srcFrame and dstFrame roles will be computed.
   */
  enum
  {
    CACHE_SRC = 1, CACHE_DST = 2, CACHE_ALL = CACHE_SRC + CACHE_DST
  };

  OdometryFrame();
  OdometryFrame(const Mat& image, const Mat& depth, const Mat& mask=Mat(), const Mat& normals=Mat(), int ID=-1);

  CV_WRAP static Ptr<OdometryFrame> create(const Mat& image=Mat(), const Mat& depth=Mat(), const Mat& mask=Mat(), const Mat& normals=Mat(), int ID=-1);

  CV_WRAP virtual void
  release() CV_OVERRIDE;

  CV_WRAP void
  releasePyramids();

  CV_PROP std::vector<Mat> pyramidImage;
  CV_PROP std::vector<Mat> pyramidDepth;
  CV_PROP std::vector<Mat> pyramidMask;
  CV_PROP std::vector<Mat> pyramidCloud;

  CV_PROP std::vector<Mat> pyramid_dI_dx;
  CV_PROP std::vector<Mat> pyramid_dI_dy;
  CV_PROP std::vector<Mat> pyramidTexturedMask;

  CV_PROP std::vector<Mat> pyramidNormals;
  CV_PROP std::vector<Mat> pyramidNormalsMask;
};
/** Base class for computation of odometry.
 */
class CV_EXPORTS_W Odometry: public Algorithm
{
public:
  /** A class of transformation */
  enum
  {
    ROTATION = 1, TRANSLATION = 2, RIGID_BODY_MOTION = 4
  };

  CV_WRAP static inline float
  DEFAULT_MIN_DEPTH()
  {
    return 0.f; // in meters
  }
  CV_WRAP static inline float
  DEFAULT_MAX_DEPTH()
  {
    return 4.f; // in meters
  }
  CV_WRAP static inline float
  DEFAULT_MAX_DEPTH_DIFF()
  {
    return 0.07f; // in meters
  }
  CV_WRAP static inline float
  DEFAULT_MAX_POINTS_PART()
  {
    return 0.07f; // in [0, 1]
  }
  CV_WRAP static inline float
  DEFAULT_MAX_TRANSLATION()
  {
    return 0.15f; // in meters
  }
  CV_WRAP static inline float
  DEFAULT_MAX_ROTATION()
  {
    return 15; // in degrees
  }

  /** Method to compute a transformation from the source frame to the destination one.
   * Some odometry algorithms do not use some of the frame data (e.g. ICP does not use images).
   * In such cases the corresponding arguments can be set to an empty Mat.
   * The method returns true if all internal computations were possible (e.g. there were enough correspondences,
   * the system of equations has a solution, etc.) and the resulting transformation satisfies any test provided
   * by the Odometry subclass implementation (e.g. thresholds for maximum translation and rotation).
   * @param srcImage Image data of the source frame (CV_8UC1)
   * @param srcDepth Depth data of the source frame (CV_32FC1, in meters)
   * @param srcMask Mask that sets which pixels have to be used from the source frame (CV_8UC1)
   * @param dstImage Image data of the destination frame (CV_8UC1)
   * @param dstDepth Depth data of the destination frame (CV_32FC1, in meters)
   * @param dstMask Mask that sets which pixels have to be used from the destination frame (CV_8UC1)
   * @param Rt Resulting transformation from the source frame to the destination one (rigid body motion):
     dst_p = Rt * src_p, where dst_p is a homogeneous point in the destination frame and src_p is a
     homogeneous point in the source frame,
     Rt is a 4x4 matrix of CV_64FC1 type.
   * @param initRt Initial transformation from the source frame to the destination one (optional)
   */
  CV_WRAP bool
  compute(const Mat& srcImage, const Mat& srcDepth, const Mat& srcMask, const Mat& dstImage, const Mat& dstDepth,
          const Mat& dstMask, OutputArray Rt, const Mat& initRt = Mat()) const;

  /** One more method to compute a transformation from the source frame to the destination one.
   * It is designed to save on computing the frame data (image pyramids, normals, etc.).
   */
  CV_WRAP_AS(compute2) bool
  compute(Ptr<OdometryFrame>& srcFrame, Ptr<OdometryFrame>& dstFrame, OutputArray Rt, const Mat& initRt = Mat()) const;

  /** Prepare a cache for the frame. The function checks the precomputed/passed data (and throws an error if the data
   * does not satisfy the requirements) and computes all remaining cache data needed for the frame. The returned size
   * is the resolution of the prepared frame.
   * @param frame The frame to prepare the cache for.
   * @param cacheType The cache type: CACHE_SRC, CACHE_DST or CACHE_ALL.
   */
  CV_WRAP virtual Size prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const;

  CV_WRAP static Ptr<Odometry> create(const String & odometryType);

  /** @see setCameraMatrix */
  CV_WRAP virtual cv::Mat getCameraMatrix() const = 0;
  /** @copybrief getCameraMatrix @see getCameraMatrix */
  CV_WRAP virtual void setCameraMatrix(const cv::Mat &val) = 0;
  /** @see setTransformType */
  CV_WRAP virtual int getTransformType() const = 0;
  /** @copybrief getTransformType @see getTransformType */
  CV_WRAP virtual void setTransformType(int val) = 0;

protected:
  virtual void
  checkParams() const = 0;

  virtual bool
  computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, OutputArray Rt,
              const Mat& initRt) const = 0;
};
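/* Illustrative sketch (not part of the API): estimating the rigid motion between two frames.
 * `K` and the grayscale/depth pairs (CV_8UC1 / CV_32FC1 in meters) are hypothetical inputs,
 * and the factory string is assumed to name one of the concrete implementations declared below.
 *
 *   cv::Ptr<cv::rgbd::Odometry> odom = cv::rgbd::Odometry::create("RgbdOdometry");
 *   odom->setCameraMatrix(K);
 *   cv::Mat Rt;   // 4x4 CV_64FC1, dst_p = Rt * src_p
 *   bool ok = odom->compute(srcGray, srcDepth, cv::Mat(), dstGray, dstDepth, cv::Mat(), Rt);
 */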
/** Odometry based on the paper "Real-Time Visual Odometry from Dense RGB-D Images",
 * F. Steinbruecker, J. Sturm, D. Cremers, ICCV, 2011.
 */
class CV_EXPORTS_W RgbdOdometry: public Odometry
{
public:
  RgbdOdometry();
  /** Constructor.
   * @param cameraMatrix Camera matrix
   * @param minDepth Pixels with depth less than minDepth will not be used (in meters)
   * @param maxDepth Pixels with depth larger than maxDepth will not be used (in meters)
   * @param maxDepthDiff Correspondences between pixels of two given frames will be filtered out
   *                     if their depth difference is larger than maxDepthDiff (in meters)
   * @param iterCounts Count of iterations on each pyramid level.
   * @param minGradientMagnitudes For each pyramid level the pixels will be filtered out
   *                              if they have gradient magnitude less than minGradientMagnitudes[level].
   * @param maxPointsPart The method uses a random subset of pixels of size frameWidth x frameHeight x pointsPart
   * @param transformType Class of transformation
   */
  RgbdOdometry(const Mat& cameraMatrix, float minDepth = Odometry::DEFAULT_MIN_DEPTH(), float maxDepth = Odometry::DEFAULT_MAX_DEPTH(),
               float maxDepthDiff = Odometry::DEFAULT_MAX_DEPTH_DIFF(), const std::vector<int>& iterCounts = std::vector<int>(),
               const std::vector<float>& minGradientMagnitudes = std::vector<float>(), float maxPointsPart = Odometry::DEFAULT_MAX_POINTS_PART(),
               int transformType = Odometry::RIGID_BODY_MOTION);

  CV_WRAP static Ptr<RgbdOdometry> create(const Mat& cameraMatrix = Mat(), float minDepth = Odometry::DEFAULT_MIN_DEPTH(), float maxDepth = Odometry::DEFAULT_MAX_DEPTH(),
                                          float maxDepthDiff = Odometry::DEFAULT_MAX_DEPTH_DIFF(), const std::vector<int>& iterCounts = std::vector<int>(),
                                          const std::vector<float>& minGradientMagnitudes = std::vector<float>(), float maxPointsPart = Odometry::DEFAULT_MAX_POINTS_PART(),
                                          int transformType = Odometry::RIGID_BODY_MOTION);

  CV_WRAP virtual Size prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const CV_OVERRIDE;

  CV_WRAP cv::Mat getCameraMatrix() const CV_OVERRIDE
  {
    return cameraMatrix;
  }
  CV_WRAP void setCameraMatrix(const cv::Mat &val) CV_OVERRIDE
  {
    cameraMatrix = val;
  }
  CV_WRAP double getMinDepth() const
  {
    return minDepth;
  }
  CV_WRAP void setMinDepth(double val)
  {
    minDepth = val;
  }
  CV_WRAP double getMaxDepth() const
  {
    return maxDepth;
  }
  CV_WRAP void setMaxDepth(double val)
  {
    maxDepth = val;
  }
  CV_WRAP double getMaxDepthDiff() const
  {
    return maxDepthDiff;
  }
  CV_WRAP void setMaxDepthDiff(double val)
  {
    maxDepthDiff = val;
  }
  CV_WRAP cv::Mat getIterationCounts() const
  {
    return iterCounts;
  }
  CV_WRAP void setIterationCounts(const cv::Mat &val)
  {
    iterCounts = val;
  }
  CV_WRAP cv::Mat getMinGradientMagnitudes() const
  {
    return minGradientMagnitudes;
  }
  CV_WRAP void setMinGradientMagnitudes(const cv::Mat &val)
  {
    minGradientMagnitudes = val;
  }
  CV_WRAP double getMaxPointsPart() const
  {
    return maxPointsPart;
  }
  CV_WRAP void setMaxPointsPart(double val)
  {
    maxPointsPart = val;
  }
  CV_WRAP int getTransformType() const CV_OVERRIDE
  {
    return transformType;
  }
  CV_WRAP void setTransformType(int val) CV_OVERRIDE
  {
    transformType = val;
  }
  CV_WRAP double getMaxTranslation() const
  {
    return maxTranslation;
  }
  CV_WRAP void setMaxTranslation(double val)
  {
    maxTranslation = val;
  }
  CV_WRAP double getMaxRotation() const
  {
    return maxRotation;
  }
  CV_WRAP void setMaxRotation(double val)
  {
    maxRotation = val;
  }

protected:
  virtual void
  checkParams() const CV_OVERRIDE;

  virtual bool
  computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, OutputArray Rt,
              const Mat& initRt) const CV_OVERRIDE;

  // Some params have their desired type in a comment. This is because AlgorithmInfo::addParams does not support those types yet.
  /*float*/
  double minDepth, maxDepth, maxDepthDiff;
  /*vector<int>*/
  Mat iterCounts;
  /*vector<float>*/
  Mat minGradientMagnitudes;
  double maxPointsPart;

  Mat cameraMatrix;
  int transformType;

  double maxTranslation, maxRotation;
};
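/* Illustrative sketch (not part of the API): reusing cached frame data across calls via
 * OdometryFrame (see the compute2 overload above). Frame contents and `K` are hypothetical.
 *
 *   cv::Ptr<cv::rgbd::RgbdOdometry> odom = cv::rgbd::RgbdOdometry::create(K);
 *   cv::Ptr<cv::rgbd::OdometryFrame> src = cv::rgbd::OdometryFrame::create(srcGray, srcDepth);
 *   cv::Ptr<cv::rgbd::OdometryFrame> dst = cv::rgbd::OdometryFrame::create(dstGray, dstDepth);
 *   cv::Mat Rt;
 *   bool ok = odom->compute(src, dst, Rt);   // pyramids cached inside src/dst for later reuse
 */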
/** Odometry based on the paper "KinectFusion: Real-Time Dense Surface Mapping and Tracking",
 * Richard A. Newcombe, Andrew Fitzgibbon, et al., SIGGRAPH, 2011.
 */
class CV_EXPORTS_W ICPOdometry: public Odometry
{
public:
  ICPOdometry();
  /** Constructor.
   * @param cameraMatrix Camera matrix
   * @param minDepth Pixels with depth less than minDepth will not be used
   * @param maxDepth Pixels with depth larger than maxDepth will not be used
   * @param maxDepthDiff Correspondences between pixels of two given frames will be filtered out
   *                     if their depth difference is larger than maxDepthDiff
   * @param maxPointsPart The method uses a random subset of pixels of size frameWidth x frameHeight x pointsPart
   * @param iterCounts Count of iterations on each pyramid level.
   * @param transformType Class of transformation
   */
  ICPOdometry(const Mat& cameraMatrix, float minDepth = Odometry::DEFAULT_MIN_DEPTH(), float maxDepth = Odometry::DEFAULT_MAX_DEPTH(),
              float maxDepthDiff = Odometry::DEFAULT_MAX_DEPTH_DIFF(), float maxPointsPart = Odometry::DEFAULT_MAX_POINTS_PART(),
              const std::vector<int>& iterCounts = std::vector<int>(), int transformType = Odometry::RIGID_BODY_MOTION);

  CV_WRAP static Ptr<ICPOdometry> create(const Mat& cameraMatrix = Mat(), float minDepth = Odometry::DEFAULT_MIN_DEPTH(), float maxDepth = Odometry::DEFAULT_MAX_DEPTH(),
                                         float maxDepthDiff = Odometry::DEFAULT_MAX_DEPTH_DIFF(), float maxPointsPart = Odometry::DEFAULT_MAX_POINTS_PART(),
                                         const std::vector<int>& iterCounts = std::vector<int>(), int transformType = Odometry::RIGID_BODY_MOTION);

  CV_WRAP virtual Size prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const CV_OVERRIDE;

  CV_WRAP cv::Mat getCameraMatrix() const CV_OVERRIDE
  {
    return cameraMatrix;
  }
  CV_WRAP void setCameraMatrix(const cv::Mat &val) CV_OVERRIDE
  {
    cameraMatrix = val;
  }
  CV_WRAP double getMinDepth() const
  {
    return minDepth;
  }
  CV_WRAP void setMinDepth(double val)
  {
    minDepth = val;
  }
  CV_WRAP double getMaxDepth() const
  {
    return maxDepth;
  }
  CV_WRAP void setMaxDepth(double val)
  {
    maxDepth = val;
  }
  CV_WRAP double getMaxDepthDiff() const
  {
    return maxDepthDiff;
  }
  CV_WRAP void setMaxDepthDiff(double val)
  {
    maxDepthDiff = val;
  }
  CV_WRAP cv::Mat getIterationCounts() const
  {
    return iterCounts;
  }
  CV_WRAP void setIterationCounts(const cv::Mat &val)
  {
    iterCounts = val;
  }
  CV_WRAP double getMaxPointsPart() const
  {
    return maxPointsPart;
  }
  CV_WRAP void setMaxPointsPart(double val)
  {
    maxPointsPart = val;
  }
  CV_WRAP int getTransformType() const CV_OVERRIDE
  {
    return transformType;
  }
  CV_WRAP void setTransformType(int val) CV_OVERRIDE
  {
    transformType = val;
  }
  CV_WRAP double getMaxTranslation() const
  {
    return maxTranslation;
  }
  CV_WRAP void setMaxTranslation(double val)
  {
    maxTranslation = val;
  }
  CV_WRAP double getMaxRotation() const
  {
    return maxRotation;
  }
  CV_WRAP void setMaxRotation(double val)
  {
    maxRotation = val;
  }
  CV_WRAP Ptr<RgbdNormals> getNormalsComputer() const
  {
    return normalsComputer;
  }

protected:
  virtual void
  checkParams() const CV_OVERRIDE;

  virtual bool
  computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, OutputArray Rt,
              const Mat& initRt) const CV_OVERRIDE;

  // Some params have their desired type in a comment. This is because AlgorithmInfo::addParams does not support those types yet.
  /*float*/
  double minDepth, maxDepth, maxDepthDiff;
  /*float*/
  double maxPointsPart;
  /*vector<int>*/
  Mat iterCounts;

  Mat cameraMatrix;
  int transformType;

  double maxTranslation, maxRotation;

  mutable Ptr<RgbdNormals> normalsComputer;
};
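/* Illustrative sketch (not part of the API): ICP-based odometry needs the camera matrix but no
 * image data, so empty Mats can be passed for the images (see the Odometry::compute doc above).
 * `K`, `srcDepth` and `dstDepth` are hypothetical inputs.
 *
 *   cv::Ptr<cv::rgbd::ICPOdometry> icp = cv::rgbd::ICPOdometry::create(K);
 *   cv::Mat Rt;
 *   bool ok = icp->compute(cv::Mat(), srcDepth, cv::Mat(), cv::Mat(), dstDepth, cv::Mat(), Rt);
 */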
/** Odometry that merges RgbdOdometry and ICPOdometry by minimizing the sum of their energy functions.
 */
class CV_EXPORTS_W RgbdICPOdometry: public Odometry
{
public:
  RgbdICPOdometry();
  /** Constructor.
   * @param cameraMatrix Camera matrix
   * @param minDepth Pixels with depth less than minDepth will not be used
   * @param maxDepth Pixels with depth larger than maxDepth will not be used
   * @param maxDepthDiff Correspondences between pixels of two given frames will be filtered out
   *                     if their depth difference is larger than maxDepthDiff
   * @param maxPointsPart The method uses a random subset of pixels of size frameWidth x frameHeight x pointsPart
   * @param iterCounts Count of iterations on each pyramid level.
   * @param minGradientMagnitudes For each pyramid level the pixels will be filtered out
   *                              if they have gradient magnitude less than minGradientMagnitudes[level].
   * @param transformType Class of transformation
   */
  RgbdICPOdometry(const Mat& cameraMatrix, float minDepth = Odometry::DEFAULT_MIN_DEPTH(), float maxDepth = Odometry::DEFAULT_MAX_DEPTH(),
                  float maxDepthDiff = Odometry::DEFAULT_MAX_DEPTH_DIFF(), float maxPointsPart = Odometry::DEFAULT_MAX_POINTS_PART(),
                  const std::vector<int>& iterCounts = std::vector<int>(),
                  const std::vector<float>& minGradientMagnitudes = std::vector<float>(),
                  int transformType = Odometry::RIGID_BODY_MOTION);

  CV_WRAP static Ptr<RgbdICPOdometry> create(const Mat& cameraMatrix = Mat(), float minDepth = Odometry::DEFAULT_MIN_DEPTH(), float maxDepth = Odometry::DEFAULT_MAX_DEPTH(),
                                             float maxDepthDiff = Odometry::DEFAULT_MAX_DEPTH_DIFF(), float maxPointsPart = Odometry::DEFAULT_MAX_POINTS_PART(),
                                             const std::vector<int>& iterCounts = std::vector<int>(),
                                             const std::vector<float>& minGradientMagnitudes = std::vector<float>(),
                                             int transformType = Odometry::RIGID_BODY_MOTION);

  CV_WRAP virtual Size prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const CV_OVERRIDE;

  CV_WRAP cv::Mat getCameraMatrix() const CV_OVERRIDE
  {
    return cameraMatrix;
  }
  CV_WRAP void setCameraMatrix(const cv::Mat &val) CV_OVERRIDE
  {
    cameraMatrix = val;
  }
  CV_WRAP double getMinDepth() const
  {
    return minDepth;
  }
  CV_WRAP void setMinDepth(double val)
  {
    minDepth = val;
  }
  CV_WRAP double getMaxDepth() const
  {
    return maxDepth;
  }
  CV_WRAP void setMaxDepth(double val)
  {
    maxDepth = val;
  }
  CV_WRAP double getMaxDepthDiff() const
  {
    return maxDepthDiff;
  }
  CV_WRAP void setMaxDepthDiff(double val)
  {
    maxDepthDiff = val;
  }
  CV_WRAP double getMaxPointsPart() const
  {
    return maxPointsPart;
  }
  CV_WRAP void setMaxPointsPart(double val)
  {
    maxPointsPart = val;
  }
  CV_WRAP cv::Mat getIterationCounts() const
  {
    return iterCounts;
  }
  CV_WRAP void setIterationCounts(const cv::Mat &val)
  {
    iterCounts = val;
  }
  CV_WRAP cv::Mat getMinGradientMagnitudes() const
  {
    return minGradientMagnitudes;
  }
  CV_WRAP void setMinGradientMagnitudes(const cv::Mat &val)
  {
    minGradientMagnitudes = val;
  }
  CV_WRAP int getTransformType() const CV_OVERRIDE
  {
    return transformType;
  }
  CV_WRAP void setTransformType(int val) CV_OVERRIDE
  {
    transformType = val;
  }
  CV_WRAP double getMaxTranslation() const
  {
    return maxTranslation;
  }
  CV_WRAP void setMaxTranslation(double val)
  {
    maxTranslation = val;
  }
  CV_WRAP double getMaxRotation() const
  {
    return maxRotation;
  }
  CV_WRAP void setMaxRotation(double val)
  {
    maxRotation = val;
  }
  CV_WRAP Ptr<RgbdNormals> getNormalsComputer() const
  {
    return normalsComputer;
  }

protected:
  virtual void
  checkParams() const CV_OVERRIDE;

  virtual bool
  computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, OutputArray Rt,
              const Mat& initRt) const CV_OVERRIDE;

  // Some params have their desired type in a comment. This is because AlgorithmInfo::addParams does not support those types yet.
  /*float*/
  double minDepth, maxDepth, maxDepthDiff;
  /*float*/
  double maxPointsPart;
  /*vector<int>*/
  Mat iterCounts;
  /*vector<float>*/
  Mat minGradientMagnitudes;

  Mat cameraMatrix;
  int transformType;

  double maxTranslation, maxRotation;

  mutable Ptr<RgbdNormals> normalsComputer;
};
/** A faster version of ICPOdometry which is used in the KinectFusion implementation.
 * Partial list of differences:
 * - Works in parallel
 * - Written in universal intrinsics
 * - Filters points by angle
 * - Interpolates points and normals
 * - Doesn't use masks or min/max depth filtering
 * - Doesn't use random subsets of points
 * - Supports only Rt transform type
 * - Supports only 4-float vectors as input type
 */
class CV_EXPORTS_W FastICPOdometry: public Odometry
{
public:
  FastICPOdometry();
  /** Constructor.
   * @param cameraMatrix Camera matrix
   * @param maxDistDiff Correspondences between pixels of two given frames will be filtered out
   *                    if their depth difference is larger than maxDistDiff
   * @param angleThreshold Correspondences will be filtered out
   *                       if the angle between their normals is larger than the threshold
   * @param sigmaDepth Depth sigma in meters for bilateral smooth
   * @param sigmaSpatial Spatial sigma in pixels for bilateral smooth
   * @param kernelSize Kernel size in pixels for bilateral smooth
   * @param iterCounts Count of iterations on each pyramid level
   */
  FastICPOdometry(const Mat& cameraMatrix,
                  float maxDistDiff = Odometry::DEFAULT_MAX_DEPTH_DIFF(),
                  float angleThreshold = (float)(30. * CV_PI / 180.),
                  float sigmaDepth = 0.04f,
                  float sigmaSpatial = 4.5f,
                  int kernelSize = 7,
                  const std::vector<int>& iterCounts = std::vector<int>());

  CV_WRAP static Ptr<FastICPOdometry> create(const Mat& cameraMatrix,
                                             float maxDistDiff = Odometry::DEFAULT_MAX_DEPTH_DIFF(),
                                             float angleThreshold = (float)(30. * CV_PI / 180.),
                                             float sigmaDepth = 0.04f,
                                             float sigmaSpatial = 4.5f,
                                             int kernelSize = 7,
                                             const std::vector<int>& iterCounts = std::vector<int>());

  CV_WRAP virtual Size prepareFrameCache(Ptr<OdometryFrame>& frame, int cacheType) const CV_OVERRIDE;

  CV_WRAP cv::Mat getCameraMatrix() const CV_OVERRIDE
  {
    return cameraMatrix;
  }
  CV_WRAP void setCameraMatrix(const cv::Mat &val) CV_OVERRIDE
  {
    cameraMatrix = val;
  }
  CV_WRAP double getMaxDistDiff() const
  {
    return maxDistDiff;
  }
  CV_WRAP void setMaxDistDiff(float val)
  {
    maxDistDiff = val;
  }
  CV_WRAP float getAngleThreshold() const
  {
    return angleThreshold;
  }
  CV_WRAP void setAngleThreshold(float f)
  {
    angleThreshold = f;
  }
  CV_WRAP float getSigmaDepth() const
  {
    return sigmaDepth;
  }
  CV_WRAP void setSigmaDepth(float f)
  {
    sigmaDepth = f;
  }
  CV_WRAP float getSigmaSpatial() const
  {
    return sigmaSpatial;
  }
  CV_WRAP void setSigmaSpatial(float f)
  {
    sigmaSpatial = f;
  }
  CV_WRAP int getKernelSize() const
  {
    return kernelSize;
  }
  CV_WRAP void setKernelSize(int f)
  {
    kernelSize = f;
  }
  CV_WRAP cv::Mat getIterationCounts() const
  {
    return iterCounts;
  }
  CV_WRAP void setIterationCounts(const cv::Mat &val)
  {
    iterCounts = val;
  }
  CV_WRAP int getTransformType() const CV_OVERRIDE
  {
    return Odometry::RIGID_BODY_MOTION;
  }
  CV_WRAP void setTransformType(int val) CV_OVERRIDE
  {
    if (val != Odometry::RIGID_BODY_MOTION)
      throw std::runtime_error("Rigid Body Motion is the only accepted transformation type"
                               " for this odometry method");
  }

protected:
  virtual void
  checkParams() const CV_OVERRIDE;

  virtual bool
  computeImpl(const Ptr<OdometryFrame>& srcFrame, const Ptr<OdometryFrame>& dstFrame, OutputArray Rt,
              const Mat& initRt) const CV_OVERRIDE;

  // Some params have their desired type in a comment. This is because AlgorithmInfo::addParams does not support those types yet.
  float maxDistDiff;
  float angleThreshold;
  float sigmaDepth;
  float sigmaSpatial;
  int kernelSize;
  /*vector<int>*/
  Mat iterCounts;

  Mat cameraMatrix;
};
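/* Illustrative sketch (not part of the API): the KinectFusion-style fast ICP variant. It only
 * supports the RIGID_BODY_MOTION transform type; this sketch assumes a CV_32FC1 depth in meters
 * is accepted directly, as with the other odometries, and all inputs are hypothetical.
 *
 *   cv::Ptr<cv::rgbd::FastICPOdometry> fastIcp = cv::rgbd::FastICPOdometry::create(K);
 *   cv::Mat Rt;
 *   bool ok = fastIcp->compute(cv::Mat(), srcDepth, cv::Mat(), cv::Mat(), dstDepth, cv::Mat(), Rt);
 */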
/** Warp the image: compute 3d points from the depth, transform them using the given transformation,
 * then project the colored point cloud onto an image plane.
 * This function can be used to visualize the results of the Odometry algorithm.
 * @param image The image (of CV_8UC1 or CV_8UC3 type)
 * @param depth The depth (of the type used in the depthTo3d function)
 * @param mask The mask of used pixels (of CV_8UC1), it can be empty
 * @param Rt The transformation that will be applied to the 3d points computed from the depth
 * @param cameraMatrix Camera matrix
 * @param distCoeff Distortion coefficients
 * @param warpedImage The warped image.
 * @param warpedDepth The warped depth.
 * @param warpedMask The warped mask.
 */
CV_EXPORTS_W
void
warpFrame(const Mat& image, const Mat& depth, const Mat& mask, const Mat& Rt, const Mat& cameraMatrix,
          const Mat& distCoeff, OutputArray warpedImage, OutputArray warpedDepth = noArray(), OutputArray warpedMask = noArray());
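/* Illustrative sketch (not part of the API): visually checking an odometry result by warping
 * the source frame into the destination view. `Rt` is a transform estimated as in the sketches
 * above; the other inputs are hypothetical.
 *
 *   cv::Mat warpedImage;
 *   cv::rgbd::warpFrame(srcGray, srcDepth, cv::Mat(), Rt, K, cv::Mat(), warpedImage);
 */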
// TODO Depth interpolation
// Curvature
// Make rescaleDepth return doubles if asked to

//! @}

} /* namespace rgbd */
} /* namespace cv */

#endif

/* End of file. */