// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html

// This code is also subject to the license terms in the LICENSE_KinectFusion.md file found in this module's directory

#ifndef __OPENCV_RGBD_DYNAFU_HPP__
#define __OPENCV_RGBD_DYNAFU_HPP__

#include "opencv2/core.hpp"
#include "opencv2/core/affine.hpp"
#include "kinfu.hpp"

namespace cv {
namespace dynafu {
/** @brief DynamicFusion implementation

This class implements a 3d reconstruction algorithm as described in @cite dynamicfusion.

It takes a sequence of depth images taken from a depth sensor
(or any depth image source such as a stereo camera matching algorithm or even a raymarching renderer).
The output can be obtained as a vector of points and their normals
or can be Phong-rendered from a given camera pose.

It extends the KinectFusion algorithm to handle non-rigidly deforming scenes by maintaining a sparse
set of nodes covering the geometry, such that each node carries a warp that transforms it from
canonical space to the live frame.

The internal representation of the model is a voxel cuboid that keeps TSDF values,
which are a sort of distance to the surface (for details read the @cite kinectfusion article about TSDF).
There is no interface to that representation yet.

Note that DynamicFusion is based on the KinectFusion algorithm, which is patented and whose use may be
restricted by the list of patents mentioned in the README.md file in this module's directory.
That's why you need to set the OPENCV_ENABLE_NONFREE option in CMake to use DynamicFusion.
*/

/** Backwards compatibility for old versions */
using Params = kinfu::Params;

class CV_EXPORTS_W DynaFu
{
public:
    CV_WRAP static Ptr<DynaFu> create(const Ptr<kinfu::Params>& _params);
    virtual ~DynaFu();

    /** @brief Get current parameters */
    virtual const kinfu::Params& getParams() const = 0;

    /** @brief Renders a volume into an image

    Renders the 0-surface of the TSDF using Phong shading into a CV_8UC4 Mat.
    The light pose is fixed in DynaFu params.

    @param image resulting image
    @param cameraPose pose of the camera to render from. If empty, renders from the current pose,
    which is the last frame's camera pose.
    */
    CV_WRAP virtual void render(OutputArray image, const Matx44f& cameraPose = Matx44f::eye()) const = 0;
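
    /* Illustrative sketch (not part of the API), assuming `df` is a valid Ptr<DynaFu>:
       rendering the reconstruction from a custom viewpoint instead of the last tracked pose.

       cv::Affine3f viewPose = cv::Affine3f().translate(cv::Vec3f(0.f, 0.f, -0.5f)); // hypothetical viewpoint
       cv::Mat image;
       df->render(image, viewPose.matrix); // Affine3f::matrix is the underlying Matx44f
    */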

    /** @brief Gets points and normals of the current 3d mesh

    The order of normals corresponds to the order of points.
    The order of points is undefined.

    @param points vector of points which are 4-float vectors
    @param normals vector of normals which are 4-float vectors
    */
    CV_WRAP virtual void getCloud(OutputArray points, OutputArray normals) const = 0;
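
    /* Illustrative sketch (not part of the API), assuming `df` is a valid Ptr<DynaFu> and that
       points/normals come back as Nx1 CV_32FC4 matrices of 4-float vectors as documented above:

       cv::Mat points, normals;
       df->getCloud(points, normals);
       for(int i = 0; i < (int)points.total(); i++)
       {
           cv::Vec4f p = points.at<cv::Vec4f>(i);
           cv::Vec4f n = normals.at<cv::Vec4f>(i);
           // use p and n, e.g. export them to a point cloud file
       }
    */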

    /** @brief Gets points of the current 3d mesh

    The order of points is undefined.

    @param points vector of points which are 4-float vectors
    */
    CV_WRAP virtual void getPoints(OutputArray points) const = 0;

    /** @brief Calculates normals for given points

    @param points input vector of points which are 4-float vectors
    @param normals output vector of corresponding normals which are 4-float vectors
    */
    CV_WRAP virtual void getNormals(InputArray points, OutputArray normals) const = 0;
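
    /* Illustrative sketch (not part of the API), assuming `df` is a valid Ptr<DynaFu>:
       computing normals for a point set fetched with getPoints().

       cv::Mat points, normals;
       df->getPoints(points);
       df->getNormals(points, normals);
    */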

    /** @brief Resets the algorithm

    Clears the current model and resets the pose.
    */
    CV_WRAP virtual void reset() = 0;

    /** @brief Get current pose in voxel space */
    virtual const Affine3f getPose() const = 0;

    /** @brief Process next depth frame

    Integrates depth into the voxel space with respect to its ICP-calculated pose.
    The input image is converted to CV_32F internally if it has another type.

    @param depth one-channel image whose size and depth scale are described in the algorithm's parameters
    @return true if the new frame was successfully aligned with the current scene, false otherwise
    */
    CV_WRAP virtual bool update(InputArray depth) = 0;
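
    /* Illustrative usage sketch (not part of the API): a typical processing loop.
       grabDepth() is a hypothetical frame source producing one-channel depth images
       whose size and depth scale match the kinfu::Params passed to create().

       cv::Ptr<cv::kinfu::Params> params = cv::kinfu::Params::defaultParams();
       cv::Ptr<cv::dynafu::DynaFu> df = cv::dynafu::DynaFu::create(params);
       for(;;)
       {
           cv::Mat depth = grabDepth();
           if(!df->update(depth))
               df->reset(); // tracking lost: clear the model and start over
           cv::Mat rendered;
           df->render(rendered);
       }
    */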

    /** @brief Returns the positions of the sparse warp-field nodes covering the geometry */
    virtual std::vector<Point3f> getNodesPos() const = 0;

    /** @brief Extracts the surface mesh (vertices and edges) from the TSDF volume via marching cubes */
    virtual void marchCubes(OutputArray vertices, OutputArray edges) const = 0;

    /** @brief Renders the surface as depth, vertex and normal images, optionally applying the warp field */
    virtual void renderSurface(OutputArray depthImage, OutputArray vertImage, OutputArray normImage, bool warp=true) = 0;
};

//! @}
}
}

#endif