aruco.hpp 31 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617
  1. /*
  2. By downloading, copying, installing or using the software you agree to this
  3. license. If you do not agree to this license, do not download, install,
  4. copy or use the software.
  5. License Agreement
  6. For Open Source Computer Vision Library
  7. (3-clause BSD License)
  8. Copyright (C) 2013, OpenCV Foundation, all rights reserved.
  9. Third party copyrights are property of their respective owners.
  10. Redistribution and use in source and binary forms, with or without modification,
  11. are permitted provided that the following conditions are met:
  12. * Redistributions of source code must retain the above copyright notice,
  13. this list of conditions and the following disclaimer.
  14. * Redistributions in binary form must reproduce the above copyright notice,
  15. this list of conditions and the following disclaimer in the documentation
  16. and/or other materials provided with the distribution.
  17. * Neither the names of the copyright holders nor the names of the contributors
  18. may be used to endorse or promote products derived from this software
  19. without specific prior written permission.
  20. This software is provided by the copyright holders and contributors "as is" and
  21. any express or implied warranties, including, but not limited to, the implied
  22. warranties of merchantability and fitness for a particular purpose are
  23. disclaimed. In no event shall copyright holders or contributors be liable for
  24. any direct, indirect, incidental, special, exemplary, or consequential damages
  25. (including, but not limited to, procurement of substitute goods or services;
  26. loss of use, data, or profits; or business interruption) however caused
  27. and on any theory of liability, whether in contract, strict liability,
  28. or tort (including negligence or otherwise) arising in any way out of
  29. the use of this software, even if advised of the possibility of such damage.
  30. */
  31. #ifndef __OPENCV_ARUCO_HPP__
  32. #define __OPENCV_ARUCO_HPP__
  33. #include <opencv2/core.hpp>
  34. #include <vector>
  35. #include "opencv2/aruco/dictionary.hpp"
  36. /**
  37. * @defgroup aruco ArUco Marker Detection
  38. * This module is dedicated to square fiducial markers (also known as Augmented Reality Markers)
  39. * These markers are useful for easy, fast and robust camera pose estimation.
  40. *
  41. * The main functionalities are:
  42. * - Detection of markers in an image
  43. * - Pose estimation from a single marker or from a board/set of markers
  44. * - Detection of ChArUco board for high subpixel accuracy
  45. * - Camera calibration from both, ArUco boards and ChArUco boards.
  46. * - Detection of ChArUco diamond markers
  47. * The samples directory includes easy examples of how to use the module.
  48. *
  49. * The implementation is based on the ArUco Library by R. Muñoz-Salinas and S. Garrido-Jurado @cite Aruco2014.
  50. *
  51. * Markers can also be detected based on the AprilTag 2 @cite wang2016iros fiducial detection method.
  52. *
  53. * @sa S. Garrido-Jurado, R. Muñoz-Salinas, F. J. Madrid-Cuevas, and M. J. Marín-Jiménez. 2014.
  54. * "Automatic generation and detection of highly reliable fiducial markers under occlusion".
  55. * Pattern Recogn. 47, 6 (June 2014), 2280-2292. DOI=10.1016/j.patcog.2014.01.005
  56. *
  57. * @sa http://www.uco.es/investiga/grupos/ava/node/26
  58. *
  59. * This module has been originally developed by Sergio Garrido-Jurado as a project
  60. * for Google Summer of Code 2015 (GSoC 15).
  61. *
  62. *
  63. */
  64. namespace cv {
  65. namespace aruco {
  66. //! @addtogroup aruco
  67. //! @{
  68. enum CornerRefineMethod{
  69. CORNER_REFINE_NONE, ///< Tag and corners detection based on the ArUco approach
  70. CORNER_REFINE_SUBPIX, ///< ArUco approach and refine the corners locations using corner subpixel accuracy
  71. CORNER_REFINE_CONTOUR, ///< ArUco approach and refine the corners locations using the contour-points line fitting
  72. CORNER_REFINE_APRILTAG, ///< Tag and corners detection based on the AprilTag 2 approach @cite wang2016iros
  73. };
  74. /**
  75. * @brief Parameters for the detectMarker process:
  76. * - adaptiveThreshWinSizeMin: minimum window size for adaptive thresholding before finding
  77. * contours (default 3).
  78. * - adaptiveThreshWinSizeMax: maximum window size for adaptive thresholding before finding
  79. * contours (default 23).
  80. * - adaptiveThreshWinSizeStep: increments from adaptiveThreshWinSizeMin to adaptiveThreshWinSizeMax
  81. * during the thresholding (default 10).
  82. * - adaptiveThreshConstant: constant for adaptive thresholding before finding contours (default 7)
  83. * - minMarkerPerimeterRate: determine minimum perimeter for marker contour to be detected. This
  84. * is defined as a rate respect to the maximum dimension of the input image (default 0.03).
  85. * - maxMarkerPerimeterRate: determine maximum perimeter for marker contour to be detected. This
  86. * is defined as a rate respect to the maximum dimension of the input image (default 4.0).
  87. * - polygonalApproxAccuracyRate: minimum accuracy during the polygonal approximation process to
  88. * determine which contours are squares. (default 0.03)
  89. * - minCornerDistanceRate: minimum distance between corners for detected markers relative to its
  90. * perimeter (default 0.05)
  91. * - minDistanceToBorder: minimum distance of any corner to the image border for detected markers
  92. * (in pixels) (default 3)
  93. * - minMarkerDistanceRate: minimum mean distance between two marker corners to be considered
  94. * similar, so that the smaller one is removed. The rate is relative to the smaller perimeter
  95. * of the two markers (default 0.05).
  96. * - cornerRefinementMethod: corner refinement method. (CORNER_REFINE_NONE, no refinement.
  97. * CORNER_REFINE_SUBPIX, do subpixel refinement. CORNER_REFINE_CONTOUR use contour-Points,
  98. * CORNER_REFINE_APRILTAG use the AprilTag2 approach). (default CORNER_REFINE_NONE)
  99. * - cornerRefinementWinSize: window size for the corner refinement process (in pixels) (default 5).
  100. * - cornerRefinementMaxIterations: maximum number of iterations for stop criteria of the corner
  101. * refinement process (default 30).
  102. * - cornerRefinementMinAccuracy: minimum error for the stop criteria of the corner refinement
  103. * process (default: 0.1)
  104. * - markerBorderBits: number of bits of the marker border, i.e. marker border width (default 1).
  105. * - perspectiveRemovePixelPerCell: number of bits (per dimension) for each cell of the marker
  106. * when removing the perspective (default 4).
  107. * - perspectiveRemoveIgnoredMarginPerCell: width of the margin of pixels on each cell not
  108. * considered for the determination of the cell bit. Represents the rate respect to the total
  109. * size of the cell, i.e. perspectiveRemovePixelPerCell (default 0.13)
  110. * - maxErroneousBitsInBorderRate: maximum number of accepted erroneous bits in the border (i.e.
  111. * number of allowed white bits in the border). Represented as a rate respect to the total
  112. * number of bits per marker (default 0.35).
  113. * - minOtsuStdDev: minimum standard deviation of pixel values during the decoding step to
  114. * apply Otsu thresholding (otherwise, all the bits are set to 0 or 1 depending on mean higher
  115. * than 128 or not) (default 5.0)
  116. * - errorCorrectionRate: error correction rate respect to the maximum error correction capability
  117. * for each dictionary. (default 0.6).
  118. * - aprilTagMinClusterPixels: reject quads containing too few pixels. (default 5)
  119. * - aprilTagMaxNmaxima: how many corner candidates to consider when segmenting a group of pixels into a quad. (default 10)
  120. * - aprilTagCriticalRad: Reject quads where pairs of edges have angles that are close to straight or close to
  121. * 180 degrees. Zero means that no quads are rejected. (In radians) (default 10*PI/180)
  122. * - aprilTagMaxLineFitMse: When fitting lines to the contours, what is the maximum mean squared error
  123. * allowed? This is useful in rejecting contours that are far from being quad shaped; rejecting
  124. * these quads "early" saves expensive decoding processing. (default 10.0)
  125. * - aprilTagMinWhiteBlackDiff: When we build our model of black & white pixels, we add an extra check that
  126. * the white model must be (overall) brighter than the black model. How much brighter? (in pixel values, [0,255]). (default 5)
  127. * - aprilTagDeglitch: should the thresholded image be deglitched? Only useful for very noisy images. (default 0)
  128. * - aprilTagQuadDecimate: Detection of quads can be done on a lower-resolution image, improving speed at a
  129. * cost of pose accuracy and a slight decrease in detection rate. Decoding the binary payload is still
  130. * done at full resolution. (default 0.0)
  131. * - aprilTagQuadSigma: What Gaussian blur should be applied to the segmented image (used for quad detection?)
  132. * Parameter is the standard deviation in pixels. Very noisy images benefit from non-zero values (e.g. 0.8). (default 0.0)
  133. * - detectInvertedMarker: to check if there is a white marker. In order to generate a "white" marker just
  134. * invert a normal marker by using a tilde, ~markerImage. (default false)
  135. */
  136. struct CV_EXPORTS_W DetectorParameters {
  137. DetectorParameters();  ///< construct with the default values documented above
  138. CV_WRAP static Ptr<DetectorParameters> create();  ///< create a default-initialized DetectorParameters wrapped in a Ptr
  139. CV_WRAP static bool readDetectorParameters(const FileNode& fn, Ptr<DetectorParameters>& params);  ///< read parameters from a FileNode; returns true on success
  140. CV_PROP_RW int adaptiveThreshWinSizeMin;  ///< minimum window size for adaptive thresholding before finding contours (default 3)
  141. CV_PROP_RW int adaptiveThreshWinSizeMax;  ///< maximum window size for adaptive thresholding before finding contours (default 23)
  142. CV_PROP_RW int adaptiveThreshWinSizeStep;  ///< window size increment from min to max during thresholding (default 10)
  143. CV_PROP_RW double adaptiveThreshConstant;  ///< constant for adaptive thresholding before finding contours (default 7)
  144. CV_PROP_RW double minMarkerPerimeterRate;  ///< minimum marker perimeter, as a rate of the maximum image dimension (default 0.03)
  145. CV_PROP_RW double maxMarkerPerimeterRate;  ///< maximum marker perimeter, as a rate of the maximum image dimension (default 4.0)
  146. CV_PROP_RW double polygonalApproxAccuracyRate;  ///< minimum accuracy of the polygonal approximation used to find squares (default 0.03)
  147. CV_PROP_RW double minCornerDistanceRate;  ///< minimum distance between corners, relative to the marker perimeter (default 0.05)
  148. CV_PROP_RW int minDistanceToBorder;  ///< minimum distance of any corner to the image border, in pixels (default 3)
  149. CV_PROP_RW double minMarkerDistanceRate;  ///< minimum mean corner distance between two markers to be considered similar (default 0.05)
  150. CV_PROP_RW int cornerRefinementMethod;  ///< one of CornerRefineMethod (default CORNER_REFINE_NONE)
  151. CV_PROP_RW int cornerRefinementWinSize;  ///< window size for the corner refinement process, in pixels (default 5)
  152. CV_PROP_RW int cornerRefinementMaxIterations;  ///< maximum iterations for the corner refinement stop criteria (default 30)
  153. CV_PROP_RW double cornerRefinementMinAccuracy;  ///< minimum error for the corner refinement stop criteria (default 0.1)
  154. CV_PROP_RW int markerBorderBits;  ///< width of the marker border, in bits (default 1)
  155. CV_PROP_RW int perspectiveRemovePixelPerCell;  ///< pixels (per dimension) per marker cell when removing perspective (default 4)
  156. CV_PROP_RW double perspectiveRemoveIgnoredMarginPerCell;  ///< cell margin ignored when reading a bit, as a rate of the cell size (default 0.13)
  157. CV_PROP_RW double maxErroneousBitsInBorderRate;  ///< maximum accepted erroneous border bits, as a rate of the marker bits (default 0.35)
  158. CV_PROP_RW double minOtsuStdDev;  ///< minimum pixel std-dev to apply Otsu thresholding when decoding (default 5.0)
  159. CV_PROP_RW double errorCorrectionRate;  ///< error correction rate respect to the dictionary's maximum capability (default 0.6)
  160. // April :: User-configurable parameters.
  161. CV_PROP_RW float aprilTagQuadDecimate;  ///< decimation factor for quad detection on a lower-resolution image (default 0.0)
  162. CV_PROP_RW float aprilTagQuadSigma;  ///< std-dev (pixels) of Gaussian blur applied to the segmented image (default 0.0)
  163. // April :: Internal variables
  164. CV_PROP_RW int aprilTagMinClusterPixels;  ///< reject quads containing too few pixels (default 5)
  165. CV_PROP_RW int aprilTagMaxNmaxima;  ///< corner candidates considered when segmenting a pixel group into a quad (default 10)
  166. CV_PROP_RW float aprilTagCriticalRad;  ///< reject quads with edge-pair angles close to 0 or 180 degrees, in radians; 0 rejects nothing (default 10*PI/180)
  167. CV_PROP_RW float aprilTagMaxLineFitMse;  ///< maximum mean squared error when fitting lines to the contours (default 10.0)
  168. CV_PROP_RW int aprilTagMinWhiteBlackDiff;  ///< minimum brightness difference between the white and black models, in pixel values (default 5)
  169. CV_PROP_RW int aprilTagDeglitch;  ///< deglitch the thresholded image; only useful for very noisy images (default 0)
  170. // to detect white (inverted) markers
  171. CV_PROP_RW bool detectInvertedMarker;  ///< also check for white (inverted) markers (default false)
  172. };
  173. /**
  174. * @brief Basic marker detection
  175. *
  176. * @param image input image
  177. * @param dictionary indicates the type of markers that will be searched
  178. * @param corners vector of detected marker corners. For each marker, its four corners
  179. * are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers,
  180. * the dimensions of this array is Nx4. The order of the corners is clockwise.
  181. * @param ids vector of identifiers of the detected markers. The identifier is of type int
  182. * (e.g. std::vector<int>). For N detected markers, the size of ids is also N.
  183. * The identifiers have the same order as the markers in the imgPoints array.
  184. * @param parameters marker detection parameters
  185. * @param rejectedImgPoints contains the imgPoints of those squares whose inner code has not a
  186. * correct codification. Useful for debugging purposes.
  187. * @param cameraMatrix optional input 3x3 floating-point camera matrix
  188. * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
  189. * @param distCoeff optional vector of distortion coefficients
  190. * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
  191. *
  192. * Performs marker detection in the input image. Only markers included in the specific dictionary
  193. * are searched. For each detected marker, it returns the 2D position of its corner in the image
  194. * and its corresponding identifier.
  195. * Note that this function does not perform pose estimation.
  196. * @sa estimatePoseSingleMarkers, estimatePoseBoard
  197. *
  198. */
  199. CV_EXPORTS_W void detectMarkers(InputArray image, const Ptr<Dictionary> &dictionary, OutputArrayOfArrays corners,
  200. OutputArray ids, const Ptr<DetectorParameters> &parameters = DetectorParameters::create(),
  201. OutputArrayOfArrays rejectedImgPoints = noArray(), InputArray cameraMatrix= noArray(), InputArray distCoeff= noArray());
  202. /**
  203. * @brief Pose estimation for single markers
  204. *
  205. * @param corners vector of already detected markers corners. For each marker, its four corners
  206. * are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers,
  207. * the dimensions of this array should be Nx4. The order of the corners should be clockwise.
  208. * @sa detectMarkers
  209. * @param markerLength the length of the markers' side. The returning translation vectors will
  210. * be in the same unit. Normally, unit is meters.
  211. * @param cameraMatrix input 3x3 floating-point camera matrix
  212. * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
  213. * @param distCoeffs vector of distortion coefficients
  214. * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
  215. * @param rvecs array of output rotation vectors (@sa Rodrigues) (e.g. std::vector<cv::Vec3d>).
  216. * Each element in rvecs corresponds to the specific marker in imgPoints.
  217. * @param tvecs array of output translation vectors (e.g. std::vector<cv::Vec3d>).
  218. * Each element in tvecs corresponds to the specific marker in imgPoints.
  219. * @param _objPoints array of object points of all the marker corners
  220. *
  221. * This function receives the detected markers and returns their pose estimation respect to
  222. * the camera individually. So for each marker, one rotation and translation vector is returned.
  223. * The returned transformation is the one that transforms points from each marker coordinate system
  224. * to the camera coordinate system.
  225. * The marker coordinate system is centered on the middle of the marker, with the Z axis
  226. * perpendicular to the marker plane.
  227. * The coordinates of the four corners of the marker in its own coordinate system are:
  228. * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0),
  229. * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0)
  230. */
  231. CV_EXPORTS_W void estimatePoseSingleMarkers(InputArrayOfArrays corners, float markerLength,
  232. InputArray cameraMatrix, InputArray distCoeffs,
  233. OutputArray rvecs, OutputArray tvecs, OutputArray _objPoints = noArray());
  234. /**
  235. * @brief Board of markers
  236. *
  237. * A board is a set of markers in the 3D space with a common coordinate system.
  238. * The common form of a board of marker is a planar (2D) board, however any 3D layout can be used.
  239. * A Board object is composed by:
  240. * - The object points of the marker corners, i.e. their coordinates respect to the board system.
  241. * - The dictionary which indicates the type of markers of the board
  242. * - The identifier of all the markers in the board.
  243. */
  244. class CV_EXPORTS_W Board {
  245. public:
  246. /**
  247. * @brief Provide way to create Board by passing necessary data. Specially needed in Python.
  248. *
  249. * @param objPoints array of object points of all the marker corners in the board
  250. * @param dictionary the dictionary of markers employed for this board
  251. * @param ids vector of the identifiers of the markers in the board
  252. *
  253. */
  254. CV_WRAP static Ptr<Board> create(InputArrayOfArrays objPoints, const Ptr<Dictionary> &dictionary, InputArray ids);
  255. /**
  256. * @brief Set ids vector
  257. *
  258. * @param ids vector of the identifiers of the markers in the board (should be the same size
  259. * as objPoints)
  260. *
  261. * Recommended way to set ids vector, which will fail if the size of ids does not match size
  262. * of objPoints.
  263. */
  264. CV_WRAP void setIds(InputArray ids);
  265. /// array of object points of all the marker corners in the board
  266. /// each marker includes its 4 corners in CCW order. For M markers, the size is Mx4.
  267. CV_PROP std::vector< std::vector< Point3f > > objPoints;
  268. /// the dictionary of markers employed for this board
  269. CV_PROP Ptr<Dictionary> dictionary;
  270. /// vector of the identifiers of the markers in the board (same size as objPoints)
  271. /// The identifiers refer to the board dictionary
  272. CV_PROP_RW std::vector< int > ids;
  273. };
  274. /**
  275. * @brief Planar board with grid arrangement of markers
  276. * More common type of board. All markers are placed in the same plane in a grid arrangement.
  277. * The board can be drawn using drawPlanarBoard() function (@sa drawPlanarBoard)
  278. */
  279. class CV_EXPORTS_W GridBoard : public Board {
  280. public:
  281. /**
  282. * @brief Draw a GridBoard
  283. *
  284. * @param outSize size of the output image in pixels.
  285. * @param img output image with the board. The size of this image will be outSize
  286. * and the board will be on the center, keeping the board proportions.
  287. * @param marginSize minimum margins (in pixels) of the board in the output image
  288. * @param borderBits width of the marker borders.
  289. *
  290. * This function returns the image of the GridBoard, ready to be printed.
  291. */
  292. CV_WRAP void draw(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1);
  293. /**
  294. * @brief Create a GridBoard object
  295. *
  296. * @param markersX number of markers in X direction
  297. * @param markersY number of markers in Y direction
  298. * @param markerLength marker side length (normally in meters)
  299. * @param markerSeparation separation between two markers (same unit as markerLength)
  300. * @param dictionary dictionary of markers indicating the type of markers
  301. * @param firstMarker id of first marker in dictionary to use on board.
  302. * @return the output GridBoard object
  303. *
  304. * This functions creates a GridBoard object given the number of markers in each direction and
  305. * the marker size and marker separation.
  306. */
  307. CV_WRAP static Ptr<GridBoard> create(int markersX, int markersY, float markerLength,
  308. float markerSeparation, const Ptr<Dictionary> &dictionary, int firstMarker = 0);
  309. /**
  310. * @brief Return the grid dimensions as Size(markersX, markersY)
  311. */
  312. CV_WRAP Size getGridSize() const { return Size(_markersX, _markersY); }
  313. /**
  314. * @brief Return the marker side length (same unit as passed to create(), normally meters)
  315. */
  316. CV_WRAP float getMarkerLength() const { return _markerLength; }
  317. /**
  318. * @brief Return the separation between two consecutive markers in the grid (same unit as the marker length)
  319. */
  320. CV_WRAP float getMarkerSeparation() const { return _markerSeparation; }
  321. private:
  322. // number of markers in X and Y directions
  323. int _markersX, _markersY;
  324. // marker side length (normally in meters)
  325. float _markerLength;
  326. // separation between markers in the grid
  327. float _markerSeparation;
  328. };
  329. /**
  330. * @brief Pose estimation for a board of markers
  331. *
  332. * @param corners vector of already detected markers corners. For each marker, its four corners
  333. * are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers, the
  334. * dimensions of this array should be Nx4. The order of the corners should be clockwise.
  335. * @param ids list of identifiers for each marker in corners
  336. * @param board layout of markers in the board. The layout is composed by the marker identifiers
  337. * and the positions of each marker corner in the board reference system.
  338. * @param cameraMatrix input 3x3 floating-point camera matrix
  339. * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
  340. * @param distCoeffs vector of distortion coefficients
  341. * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
  342. * @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board
  343. * (see cv::Rodrigues). Used as initial guess if not empty.
  344. * @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board.
  345. * @param useExtrinsicGuess defines whether initial guess for \b rvec and \b tvec will be used or not.
  346. * Used as initial guess if not empty.
  347. *
  348. * This function receives the detected markers and returns the pose of a marker board composed
  349. * by those markers.
  350. * A Board of marker has a single world coordinate system which is defined by the board layout.
  351. * The returned transformation is the one that transforms points from the board coordinate system
  352. * to the camera coordinate system.
  353. * Input markers that are not included in the board layout are ignored.
  354. * The function returns the number of markers from the input employed for the board pose estimation.
  355. * Note that returning a 0 means the pose has not been estimated.
  356. */
  357. CV_EXPORTS_W int estimatePoseBoard(InputArrayOfArrays corners, InputArray ids, const Ptr<Board> &board,
  358. InputArray cameraMatrix, InputArray distCoeffs, InputOutputArray rvec,
  359. InputOutputArray tvec, bool useExtrinsicGuess = false);
  360. /**
  361. * @brief Refind not detected markers based on the already detected and the board layout
  362. *
  363. * @param image input image
  364. * @param board layout of markers in the board.
  365. * @param detectedCorners vector of already detected marker corners.
  366. * @param detectedIds vector of already detected marker identifiers.
  367. * @param rejectedCorners vector of rejected candidates during the marker detection process.
  368. * @param cameraMatrix optional input 3x3 floating-point camera matrix
  369. * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
  370. * @param distCoeffs optional vector of distortion coefficients
  371. * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
  372. * @param minRepDistance minimum distance between the corners of the rejected candidate and the
  373. * reprojected marker in order to consider it as a correspondence.
  374. * @param errorCorrectionRate rate of allowed erroneous bits respect to the error correction
  375. * capability of the used dictionary. -1 ignores the error correction step.
  376. * @param checkAllOrders Consider the four possible corner orders in the rejectedCorners array.
  377. * If it set to false, only the provided corner order is considered (default true).
  378. * @param recoveredIdxs Optional array to returns the indexes of the recovered candidates in the
  379. * original rejectedCorners array.
  380. * @param parameters marker detection parameters
  381. *
  382. * This function tries to find markers that were not detected in the basic detectMarkers function.
  383. * First, based on the current detected marker and the board layout, the function interpolates
  384. * the position of the missing markers. Then it tries to find correspondence between the reprojected
  385. * markers and the rejected candidates based on the minRepDistance and errorCorrectionRate
  386. * parameters.
  387. * If camera parameters and distortion coefficients are provided, missing markers are reprojected
  388. * using projectPoint function. If not, missing marker projections are interpolated using global
  389. * homography, and all the marker corners in the board must have the same Z coordinate.
  390. */
  391. CV_EXPORTS_W void refineDetectedMarkers(
  392. InputArray image,const Ptr<Board> &board, InputOutputArrayOfArrays detectedCorners,
  393. InputOutputArray detectedIds, InputOutputArrayOfArrays rejectedCorners,
  394. InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(),
  395. float minRepDistance = 10.f, float errorCorrectionRate = 3.f, bool checkAllOrders = true,
  396. OutputArray recoveredIdxs = noArray(), const Ptr<DetectorParameters> &parameters = DetectorParameters::create());
  397. /**
  398. * @brief Draw detected markers in image
  399. *
  400. * @param image input/output image. It must have 1 or 3 channels. The number of channels is not
  401. * altered.
  402. * @param corners positions of marker corners on input image.
  403. * (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers, the dimensions of
  404. * this array should be Nx4. The order of the corners should be clockwise.
  405. * @param ids vector of identifiers for markers in markersCorners .
  406. * Optional, if not provided, ids are not painted.
  407. * @param borderColor color of marker borders. Rest of colors (text color and first corner color)
  408. * are calculated based on this one to improve visualization.
  409. *
  410. * Given an array of detected marker corners and its corresponding ids, this functions draws
  411. * the markers in the image. The marker borders are painted and the markers identifiers if provided.
  412. * Useful for debugging purposes.
  413. */
  414. CV_EXPORTS_W void drawDetectedMarkers(InputOutputArray image, InputArrayOfArrays corners,
  415. InputArray ids = noArray(),
  416. Scalar borderColor = Scalar(0, 255, 0));
  417. /**
  418. * @brief Draw coordinate system axis from pose estimation
  419. *
  420. * @param image input/output image. It must have 1 or 3 channels. The number of channels is not
  421. * altered.
  422. * @param cameraMatrix input 3x3 floating-point camera matrix
  423. * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
  424. * @param distCoeffs vector of distortion coefficients
  425. * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
  426. * @param rvec rotation vector of the coordinate system that will be drawn. (@sa Rodrigues).
  427. * @param tvec translation vector of the coordinate system that will be drawn.
  428. * @param length length of the painted axis in the same unit than tvec (usually in meters)
  429. *
  430. * Given the pose estimation of a marker or board, this function draws the axis of the world
  431. * coordinate system, i.e. the system centered on the marker/board. Useful for debugging purposes.
  432. *
  433. * @deprecated use cv::drawFrameAxes
  434. */
  435. CV_EXPORTS_W void drawAxis(InputOutputArray image, InputArray cameraMatrix, InputArray distCoeffs,
  436. InputArray rvec, InputArray tvec, float length);
  437. /**
  438. * @brief Draw a canonical marker image
  439. *
  440. * @param dictionary dictionary of markers indicating the type of markers
  441. * @param id identifier of the marker that will be returned. It has to be a valid id
  442. * in the specified dictionary.
  443. * @param sidePixels size of the image in pixels
  444. * @param img output image with the marker
  445. * @param borderBits width of the marker border.
  446. *
  447. * This function returns a marker image in its canonical form (i.e. ready to be printed)
  448. */
  449. CV_EXPORTS_W void drawMarker(const Ptr<Dictionary> &dictionary, int id, int sidePixels, OutputArray img,
  450. int borderBits = 1);
  451. /**
  452. * @brief Draw a planar board
  453. * @sa _drawPlanarBoardImpl
  454. *
  455. * @param board layout of the board that will be drawn. The board should be planar,
  456. * z coordinate is ignored
  457. * @param outSize size of the output image in pixels.
  458. * @param img output image with the board. The size of this image will be outSize
  459. * and the board will be on the center, keeping the board proportions.
  460. * @param marginSize minimum margins (in pixels) of the board in the output image
  461. * @param borderBits width of the marker borders.
  462. *
  463. * This function return the image of a planar board, ready to be printed. It assumes
  464. * the Board layout specified is planar by ignoring the z coordinates of the object points.
  465. */
  466. CV_EXPORTS_W void drawPlanarBoard(const Ptr<Board> &board, Size outSize, OutputArray img,
  467. int marginSize = 0, int borderBits = 1);
  468. /**
  469. * @brief Implementation of drawPlanarBoard that accepts a raw Board pointer.
  470. */
  471. void _drawPlanarBoardImpl(Board *board, Size outSize, OutputArray img,
  472. int marginSize = 0, int borderBits = 1);
  473. /**
  474. * @brief Calibrate a camera using aruco markers
  475. *
  476. * @param corners vector of detected marker corners in all frames.
  477. * The corners should have the same format returned by detectMarkers (see #detectMarkers).
  478. * @param ids list of identifiers for each marker in corners
  479. * @param counter number of markers in each frame so that corners and ids can be split
  480. * @param board Marker Board layout
  481. * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
  482. * @param cameraMatrix Output 3x3 floating-point camera matrix
  483. * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS
  484. * and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
  485. * initialized before calling the function.
  486. * @param distCoeffs Output vector of distortion coefficients
  487. * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
  488. * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view
  489. * (e.g. std::vector<cv::Mat>>). That is, each k-th rotation vector together with the corresponding
  490. * k-th translation vector (see the next output parameter description) brings the board pattern
  491. * from the model coordinate space (in which object points are specified) to the world coordinate
  492. * space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1).
  493. * @param tvecs Output vector of translation vectors estimated for each pattern view.
  494. * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
  495. * Order of deviations values:
  496. * \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
  497. * s_4, \tau_x, \tau_y)\f$ If one of parameters is not estimated, it's deviation is equals to zero.
  498. * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
  499. * Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views,
  500. * \f$R_i, T_i\f$ are concatenated 1x3 vectors.
  501. * @param perViewErrors Output vector of average re-projection errors estimated for each pattern view.
  502. * @param flags flags Different flags for the calibration process (see #calibrateCamera for details).
  503. * @param criteria Termination criteria for the iterative optimization algorithm.
  504. *
  505. * This function calibrates a camera using an Aruco Board. The function receives a list of
  506. * detected markers from several views of the Board. The process is similar to the chessboard
  507. * calibration in calibrateCamera(). The function returns the final re-projection error.
  508. */
  509. CV_EXPORTS_AS(calibrateCameraArucoExtended) double calibrateCameraAruco(
  510. InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr<Board> &board,
  511. Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
  512. OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
  513. OutputArray stdDeviationsIntrinsics, OutputArray stdDeviationsExtrinsics,
  514. OutputArray perViewErrors, int flags = 0,
  515. TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));
  516. /** @brief It's the same function as #calibrateCameraAruco but without calibration error estimation.
  517. */
  518. CV_EXPORTS_W double calibrateCameraAruco(
  519. InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr<Board> &board,
  520. Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
  521. OutputArrayOfArrays rvecs = noArray(), OutputArrayOfArrays tvecs = noArray(), int flags = 0,
  522. TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));
  523. /**
  524. * @brief Given a board configuration and a set of detected markers, returns the corresponding
  525. * image points and object points to call solvePnP
  526. *
  527. * @param board Marker board layout.
  528. * @param detectedCorners List of detected marker corners of the board.
  529. * @param detectedIds List of identifiers for each marker.
  530. * @param objPoints Vector of vectors of board marker points in the board coordinate space.
  531. * @param imgPoints Vector of vectors of the projections of board marker corner points.
  532. */
  533. CV_EXPORTS_W void getBoardObjectAndImagePoints(const Ptr<Board> &board, InputArrayOfArrays detectedCorners,
  534. InputArray detectedIds, OutputArray objPoints, OutputArray imgPoints);
  535. //! @}
  536. }
  537. }
  538. #endif