// aruco.hpp
/*
By downloading, copying, installing or using the software you agree to this
license. If you do not agree to this license, do not download, install,
copy or use the software.

License Agreement
For Open Source Computer Vision Library
(3-clause BSD License)

Copyright (C) 2013, OpenCV Foundation, all rights reserved.
Third party copyrights are property of their respective owners.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.

* Neither the names of the copyright holders nor the names of the contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.

This software is provided by the copyright holders and contributors "as is" and
any express or implied warranties, including, but not limited to, the implied
warranties of merchantability and fitness for a particular purpose are
disclaimed. In no event shall copyright holders or contributors be liable for
any direct, indirect, incidental, special, exemplary, or consequential damages
(including, but not limited to, procurement of substitute goods or services;
loss of use, data, or profits; or business interruption) however caused
and on any theory of liability, whether in contract, strict liability,
or tort (including negligence or otherwise) arising in any way out of
the use of this software, even if advised of the possibility of such damage.
*/
#ifndef __OPENCV_ARUCO_HPP__
#define __OPENCV_ARUCO_HPP__

#include <opencv2/core.hpp>
#include <vector>
#include "opencv2/aruco/dictionary.hpp"
/**
 * @defgroup aruco ArUco Marker Detection
 * This module is dedicated to square fiducial markers (also known as Augmented Reality Markers).
 * These markers are useful for easy, fast and robust camera pose estimation.
 *
 * The main functionalities are:
 * - Detection of markers in an image
 * - Pose estimation from a single marker or from a board/set of markers
 * - Detection of ChArUco board for high subpixel accuracy
 * - Camera calibration from both, ArUco boards and ChArUco boards.
 * - Detection of ChArUco diamond markers
 * The samples directory includes easy examples of how to use the module.
 *
 * The implementation is based on the ArUco Library by R. Muñoz-Salinas and S. Garrido-Jurado @cite Aruco2014.
 *
 * Markers can also be detected based on the AprilTag 2 @cite wang2016iros fiducial detection method.
 *
 * @sa S. Garrido-Jurado, R. Muñoz-Salinas, F. J. Madrid-Cuevas, and M. J. Marín-Jiménez. 2014.
 * "Automatic generation and detection of highly reliable fiducial markers under occlusion".
 * Pattern Recogn. 47, 6 (June 2014), 2280-2292. DOI=10.1016/j.patcog.2014.01.005
 *
 * @sa http://www.uco.es/investiga/grupos/ava/node/26
 *
 * This module has been originally developed by Sergio Garrido-Jurado as a project
 * for Google Summer of Code 2015 (GSoC 15).
 */
namespace cv {
namespace aruco {

//! @addtogroup aruco
//! @{
  68. enum CornerRefineMethod{
  69. CORNER_REFINE_NONE, ///< Tag and corners detection based on the ArUco approach
  70. CORNER_REFINE_SUBPIX, ///< ArUco approach and refine the corners locations using corner subpixel accuracy
  71. CORNER_REFINE_CONTOUR, ///< ArUco approach and refine the corners locations using the contour-points line fitting
  72. CORNER_REFINE_APRILTAG, ///< Tag and corners detection based on the AprilTag 2 approach @cite wang2016iros
  73. };
  74. /**
  75. * @brief Parameters for the detectMarker process:
  76. * - adaptiveThreshWinSizeMin: minimum window size for adaptive thresholding before finding
  77. * contours (default 3).
  78. * - adaptiveThreshWinSizeMax: maximum window size for adaptive thresholding before finding
  79. * contours (default 23).
  80. * - adaptiveThreshWinSizeStep: increments from adaptiveThreshWinSizeMin to adaptiveThreshWinSizeMax
  81. * during the thresholding (default 10).
  82. * - adaptiveThreshConstant: constant for adaptive thresholding before finding contours (default 7)
  83. * - minMarkerPerimeterRate: determine minimum perimeter for marker contour to be detected. This
  84. * is defined as a rate respect to the maximum dimension of the input image (default 0.03).
  85. * - maxMarkerPerimeterRate: determine maximum perimeter for marker contour to be detected. This
  86. * is defined as a rate respect to the maximum dimension of the input image (default 4.0).
  87. * - polygonalApproxAccuracyRate: minimum accuracy during the polygonal approximation process to
  88. * determine which contours are squares.
  89. * - minCornerDistanceRate: minimum distance between corners for detected markers relative to its
  90. * perimeter (default 0.05)
  91. * - minDistanceToBorder: minimum distance of any corner to the image border for detected markers
  92. * (in pixels) (default 3)
  93. * - minMarkerDistanceRate: minimum mean distance beetween two marker corners to be considered
  94. * similar, so that the smaller one is removed. The rate is relative to the smaller perimeter
  95. * of the two markers (default 0.05).
  96. * - cornerRefinementMethod: corner refinement method. (CORNER_REFINE_NONE, no refinement.
  97. * CORNER_REFINE_SUBPIX, do subpixel refinement. CORNER_REFINE_CONTOUR use contour-Points,
  98. * CORNER_REFINE_APRILTAG use the AprilTag2 approach)
  99. * - cornerRefinementWinSize: window size for the corner refinement process (in pixels) (default 5).
  100. * - cornerRefinementMaxIterations: maximum number of iterations for stop criteria of the corner
  101. * refinement process (default 30).
  102. * - cornerRefinementMinAccuracy: minimum error for the stop cristeria of the corner refinement
  103. * process (default: 0.1)
  104. * - markerBorderBits: number of bits of the marker border, i.e. marker border width (default 1).
  105. * - perpectiveRemovePixelPerCell: number of bits (per dimension) for each cell of the marker
  106. * when removing the perspective (default 8).
  107. * - perspectiveRemoveIgnoredMarginPerCell: width of the margin of pixels on each cell not
  108. * considered for the determination of the cell bit. Represents the rate respect to the total
  109. * size of the cell, i.e. perpectiveRemovePixelPerCell (default 0.13)
  110. * - maxErroneousBitsInBorderRate: maximum number of accepted erroneous bits in the border (i.e.
  111. * number of allowed white bits in the border). Represented as a rate respect to the total
  112. * number of bits per marker (default 0.35).
  113. * - minOtsuStdDev: minimun standard deviation in pixels values during the decodification step to
  114. * apply Otsu thresholding (otherwise, all the bits are set to 0 or 1 depending on mean higher
  115. * than 128 or not) (default 5.0)
  116. * - errorCorrectionRate error correction rate respect to the maximun error correction capability
  117. * for each dictionary. (default 0.6).
  118. * - aprilTagMinClusterPixels: reject quads containing too few pixels.
  119. * - aprilTagMaxNmaxima: how many corner candidates to consider when segmenting a group of pixels into a quad.
  120. * - aprilTagCriticalRad: Reject quads where pairs of edges have angles that are close to straight or close to
  121. * 180 degrees. Zero means that no quads are rejected. (In radians).
  122. * - aprilTagMaxLineFitMse: When fitting lines to the contours, what is the maximum mean squared error
  123. * allowed? This is useful in rejecting contours that are far from being quad shaped; rejecting
  124. * these quads "early" saves expensive decoding processing.
  125. * - aprilTagMinWhiteBlackDiff: When we build our model of black & white pixels, we add an extra check that
  126. * the white model must be (overall) brighter than the black model. How much brighter? (in pixel values, [0,255]).
  127. * - aprilTagDeglitch: should the thresholded image be deglitched? Only useful for very noisy images
  128. * - aprilTagQuadDecimate: Detection of quads can be done on a lower-resolution image, improving speed at a
  129. * cost of pose accuracy and a slight decrease in detection rate. Decoding the binary payload is still
  130. * done at full resolution.
  131. * - aprilTagQuadSigma: What Gaussian blur should be applied to the segmented image (used for quad detection?)
  132. * Parameter is the standard deviation in pixels. Very noisy images benefit from non-zero values (e.g. 0.8).
  133. */
  134. struct CV_EXPORTS_W DetectorParameters {
  135. DetectorParameters();
  136. CV_WRAP static Ptr<DetectorParameters> create();
  137. CV_PROP_RW int adaptiveThreshWinSizeMin;
  138. CV_PROP_RW int adaptiveThreshWinSizeMax;
  139. CV_PROP_RW int adaptiveThreshWinSizeStep;
  140. CV_PROP_RW double adaptiveThreshConstant;
  141. CV_PROP_RW double minMarkerPerimeterRate;
  142. CV_PROP_RW double maxMarkerPerimeterRate;
  143. CV_PROP_RW double polygonalApproxAccuracyRate;
  144. CV_PROP_RW double minCornerDistanceRate;
  145. CV_PROP_RW int minDistanceToBorder;
  146. CV_PROP_RW double minMarkerDistanceRate;
  147. CV_PROP_RW int cornerRefinementMethod;
  148. CV_PROP_RW int cornerRefinementWinSize;
  149. CV_PROP_RW int cornerRefinementMaxIterations;
  150. CV_PROP_RW double cornerRefinementMinAccuracy;
  151. CV_PROP_RW int markerBorderBits;
  152. CV_PROP_RW int perspectiveRemovePixelPerCell;
  153. CV_PROP_RW double perspectiveRemoveIgnoredMarginPerCell;
  154. CV_PROP_RW double maxErroneousBitsInBorderRate;
  155. CV_PROP_RW double minOtsuStdDev;
  156. CV_PROP_RW double errorCorrectionRate;
  157. // April :: User-configurable parameters.
  158. CV_PROP_RW float aprilTagQuadDecimate;
  159. CV_PROP_RW float aprilTagQuadSigma;
  160. // April :: Internal variables
  161. CV_PROP_RW int aprilTagMinClusterPixels;
  162. CV_PROP_RW int aprilTagMaxNmaxima;
  163. CV_PROP_RW float aprilTagCriticalRad;
  164. CV_PROP_RW float aprilTagMaxLineFitMse;
  165. CV_PROP_RW int aprilTagMinWhiteBlackDiff;
  166. CV_PROP_RW int aprilTagDeglitch;
  167. };
  168. /**
  169. * @brief Basic marker detection
  170. *
  171. * @param image input image
  172. * @param dictionary indicates the type of markers that will be searched
  173. * @param corners vector of detected marker corners. For each marker, its four corners
  174. * are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers,
  175. * the dimensions of this array is Nx4. The order of the corners is clockwise.
  176. * @param ids vector of identifiers of the detected markers. The identifier is of type int
  177. * (e.g. std::vector<int>). For N detected markers, the size of ids is also N.
  178. * The identifiers have the same order than the markers in the imgPoints array.
  179. * @param parameters marker detection parameters
  180. * @param rejectedImgPoints contains the imgPoints of those squares whose inner code has not a
  181. * correct codification. Useful for debugging purposes.
  182. * @param cameraMatrix optional input 3x3 floating-point camera matrix
  183. * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
  184. * @param distCoeff optional vector of distortion coefficients
  185. * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
  186. *
  187. * Performs marker detection in the input image. Only markers included in the specific dictionary
  188. * are searched. For each detected marker, it returns the 2D position of its corner in the image
  189. * and its corresponding identifier.
  190. * Note that this function does not perform pose estimation.
  191. * @sa estimatePoseSingleMarkers, estimatePoseBoard
  192. *
  193. */
  194. CV_EXPORTS_W void detectMarkers(InputArray image, const Ptr<Dictionary> &dictionary, OutputArrayOfArrays corners,
  195. OutputArray ids, const Ptr<DetectorParameters> &parameters = DetectorParameters::create(),
  196. OutputArrayOfArrays rejectedImgPoints = noArray(), InputArray cameraMatrix= noArray(), InputArray distCoeff= noArray());
  197. /**
  198. * @brief Pose estimation for single markers
  199. *
  200. * @param corners vector of already detected markers corners. For each marker, its four corners
  201. * are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers,
  202. * the dimensions of this array should be Nx4. The order of the corners should be clockwise.
  203. * @sa detectMarkers
  204. * @param markerLength the length of the markers' side. The returning translation vectors will
  205. * be in the same unit. Normally, unit is meters.
  206. * @param cameraMatrix input 3x3 floating-point camera matrix
  207. * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
  208. * @param distCoeffs vector of distortion coefficients
  209. * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
  210. * @param rvecs array of output rotation vectors (@sa Rodrigues) (e.g. std::vector<cv::Vec3d>).
  211. * Each element in rvecs corresponds to the specific marker in imgPoints.
  212. * @param tvecs array of output translation vectors (e.g. std::vector<cv::Vec3d>).
  213. * Each element in tvecs corresponds to the specific marker in imgPoints.
  214. * @param _objPoints array of object points of all the marker corners
  215. *
  216. * This function receives the detected markers and returns their pose estimation respect to
  217. * the camera individually. So for each marker, one rotation and translation vector is returned.
  218. * The returned transformation is the one that transforms points from each marker coordinate system
  219. * to the camera coordinate system.
  220. * The marker corrdinate system is centered on the middle of the marker, with the Z axis
  221. * perpendicular to the marker plane.
  222. * The coordinates of the four corners of the marker in its own coordinate system are:
  223. * (-markerLength/2, markerLength/2, 0), (markerLength/2, markerLength/2, 0),
  224. * (markerLength/2, -markerLength/2, 0), (-markerLength/2, -markerLength/2, 0)
  225. */
  226. CV_EXPORTS_W void estimatePoseSingleMarkers(InputArrayOfArrays corners, float markerLength,
  227. InputArray cameraMatrix, InputArray distCoeffs,
  228. OutputArray rvecs, OutputArray tvecs, OutputArray _objPoints = noArray());
  229. /**
  230. * @brief Board of markers
  231. *
  232. * A board is a set of markers in the 3D space with a common cordinate system.
  233. * The common form of a board of marker is a planar (2D) board, however any 3D layout can be used.
  234. * A Board object is composed by:
  235. * - The object points of the marker corners, i.e. their coordinates respect to the board system.
  236. * - The dictionary which indicates the type of markers of the board
  237. * - The identifier of all the markers in the board.
  238. */
  239. class CV_EXPORTS_W Board {
  240. public:
  241. /**
  242. * @brief Provide way to create Board by passing nessesary data. Specially needed in Python.
  243. *
  244. * @param objPoints array of object points of all the marker corners in the board
  245. * @param dictionary the dictionary of markers employed for this board
  246. * @param ids vector of the identifiers of the markers in the board
  247. *
  248. */
  249. CV_WRAP static Ptr<Board> create(InputArrayOfArrays objPoints, const Ptr<Dictionary> &dictionary, InputArray ids);
  250. /// array of object points of all the marker corners in the board
  251. /// each marker include its 4 corners in CCW order. For M markers, the size is Mx4.
  252. CV_PROP std::vector< std::vector< Point3f > > objPoints;
  253. /// the dictionary of markers employed for this board
  254. CV_PROP Ptr<Dictionary> dictionary;
  255. /// vector of the identifiers of the markers in the board (same size than objPoints)
  256. /// The identifiers refers to the board dictionary
  257. CV_PROP std::vector< int > ids;
  258. };
  259. /**
  260. * @brief Planar board with grid arrangement of markers
  261. * More common type of board. All markers are placed in the same plane in a grid arrangment.
  262. * The board can be drawn using drawPlanarBoard() function (@sa drawPlanarBoard)
  263. */
  264. class CV_EXPORTS_W GridBoard : public Board {
  265. public:
  266. /**
  267. * @brief Draw a GridBoard
  268. *
  269. * @param outSize size of the output image in pixels.
  270. * @param img output image with the board. The size of this image will be outSize
  271. * and the board will be on the center, keeping the board proportions.
  272. * @param marginSize minimum margins (in pixels) of the board in the output image
  273. * @param borderBits width of the marker borders.
  274. *
  275. * This function return the image of the GridBoard, ready to be printed.
  276. */
  277. CV_WRAP void draw(Size outSize, OutputArray img, int marginSize = 0, int borderBits = 1);
  278. /**
  279. * @brief Create a GridBoard object
  280. *
  281. * @param markersX number of markers in X direction
  282. * @param markersY number of markers in Y direction
  283. * @param markerLength marker side length (normally in meters)
  284. * @param markerSeparation separation between two markers (same unit as markerLength)
  285. * @param dictionary dictionary of markers indicating the type of markers
  286. * @param firstMarker id of first marker in dictionary to use on board.
  287. * @return the output GridBoard object
  288. *
  289. * This functions creates a GridBoard object given the number of markers in each direction and
  290. * the marker size and marker separation.
  291. */
  292. CV_WRAP static Ptr<GridBoard> create(int markersX, int markersY, float markerLength,
  293. float markerSeparation, const Ptr<Dictionary> &dictionary, int firstMarker = 0);
  294. /**
  295. *
  296. */
  297. CV_WRAP Size getGridSize() const { return Size(_markersX, _markersY); }
  298. /**
  299. *
  300. */
  301. CV_WRAP float getMarkerLength() const { return _markerLength; }
  302. /**
  303. *
  304. */
  305. CV_WRAP float getMarkerSeparation() const { return _markerSeparation; }
  306. private:
  307. // number of markers in X and Y directions
  308. int _markersX, _markersY;
  309. // marker side lenght (normally in meters)
  310. float _markerLength;
  311. // separation between markers in the grid
  312. float _markerSeparation;
  313. };
  314. /**
  315. * @brief Pose estimation for a board of markers
  316. *
  317. * @param corners vector of already detected markers corners. For each marker, its four corners
  318. * are provided, (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers, the
  319. * dimensions of this array should be Nx4. The order of the corners should be clockwise.
  320. * @param ids list of identifiers for each marker in corners
  321. * @param board layout of markers in the board. The layout is composed by the marker identifiers
  322. * and the positions of each marker corner in the board reference system.
  323. * @param cameraMatrix input 3x3 floating-point camera matrix
  324. * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
  325. * @param distCoeffs vector of distortion coefficients
  326. * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
  327. * @param rvec Output vector (e.g. cv::Mat) corresponding to the rotation vector of the board
  328. * (see cv::Rodrigues). Used as initial guess if not empty.
  329. * @param tvec Output vector (e.g. cv::Mat) corresponding to the translation vector of the board.
  330. * @param useExtrinsicGuess defines whether initial guess for \b rvec and \b tvec will be used or not.
  331. * Used as initial guess if not empty.
  332. *
  333. * This function receives the detected markers and returns the pose of a marker board composed
  334. * by those markers.
  335. * A Board of marker has a single world coordinate system which is defined by the board layout.
  336. * The returned transformation is the one that transforms points from the board coordinate system
  337. * to the camera coordinate system.
  338. * Input markers that are not included in the board layout are ignored.
  339. * The function returns the number of markers from the input employed for the board pose estimation.
  340. * Note that returning a 0 means the pose has not been estimated.
  341. */
  342. CV_EXPORTS_W int estimatePoseBoard(InputArrayOfArrays corners, InputArray ids, const Ptr<Board> &board,
  343. InputArray cameraMatrix, InputArray distCoeffs, OutputArray rvec,
  344. OutputArray tvec, bool useExtrinsicGuess = false);
  345. /**
  346. * @brief Refind not detected markers based on the already detected and the board layout
  347. *
  348. * @param image input image
  349. * @param board layout of markers in the board.
  350. * @param detectedCorners vector of already detected marker corners.
  351. * @param detectedIds vector of already detected marker identifiers.
  352. * @param rejectedCorners vector of rejected candidates during the marker detection process.
  353. * @param cameraMatrix optional input 3x3 floating-point camera matrix
  354. * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
  355. * @param distCoeffs optional vector of distortion coefficients
  356. * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
  357. * @param minRepDistance minimum distance between the corners of the rejected candidate and the
  358. * reprojected marker in order to consider it as a correspondence.
  359. * @param errorCorrectionRate rate of allowed erroneous bits respect to the error correction
  360. * capability of the used dictionary. -1 ignores the error correction step.
  361. * @param checkAllOrders Consider the four posible corner orders in the rejectedCorners array.
  362. * If it set to false, only the provided corner order is considered (default true).
  363. * @param recoveredIdxs Optional array to returns the indexes of the recovered candidates in the
  364. * original rejectedCorners array.
  365. * @param parameters marker detection parameters
  366. *
  367. * This function tries to find markers that were not detected in the basic detecMarkers function.
  368. * First, based on the current detected marker and the board layout, the function interpolates
  369. * the position of the missing markers. Then it tries to find correspondence between the reprojected
  370. * markers and the rejected candidates based on the minRepDistance and errorCorrectionRate
  371. * parameters.
  372. * If camera parameters and distortion coefficients are provided, missing markers are reprojected
  373. * using projectPoint function. If not, missing marker projections are interpolated using global
  374. * homography, and all the marker corners in the board must have the same Z coordinate.
  375. */
  376. CV_EXPORTS_W void refineDetectedMarkers(
  377. InputArray image,const Ptr<Board> &board, InputOutputArrayOfArrays detectedCorners,
  378. InputOutputArray detectedIds, InputOutputArrayOfArrays rejectedCorners,
  379. InputArray cameraMatrix = noArray(), InputArray distCoeffs = noArray(),
  380. float minRepDistance = 10.f, float errorCorrectionRate = 3.f, bool checkAllOrders = true,
  381. OutputArray recoveredIdxs = noArray(), const Ptr<DetectorParameters> &parameters = DetectorParameters::create());
  382. /**
  383. * @brief Draw detected markers in image
  384. *
  385. * @param image input/output image. It must have 1 or 3 channels. The number of channels is not
  386. * altered.
  387. * @param corners positions of marker corners on input image.
  388. * (e.g std::vector<std::vector<cv::Point2f> > ). For N detected markers, the dimensions of
  389. * this array should be Nx4. The order of the corners should be clockwise.
  390. * @param ids vector of identifiers for markers in markersCorners .
  391. * Optional, if not provided, ids are not painted.
  392. * @param borderColor color of marker borders. Rest of colors (text color and first corner color)
  393. * are calculated based on this one to improve visualization.
  394. *
  395. * Given an array of detected marker corners and its corresponding ids, this functions draws
  396. * the markers in the image. The marker borders are painted and the markers identifiers if provided.
  397. * Useful for debugging purposes.
  398. */
  399. CV_EXPORTS_W void drawDetectedMarkers(InputOutputArray image, InputArrayOfArrays corners,
  400. InputArray ids = noArray(),
  401. Scalar borderColor = Scalar(0, 255, 0));
  402. /**
  403. * @brief Draw coordinate system axis from pose estimation
  404. *
  405. * @param image input/output image. It must have 1 or 3 channels. The number of channels is not
  406. * altered.
  407. * @param cameraMatrix input 3x3 floating-point camera matrix
  408. * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$
  409. * @param distCoeffs vector of distortion coefficients
  410. * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
  411. * @param rvec rotation vector of the coordinate system that will be drawn. (@sa Rodrigues).
  412. * @param tvec translation vector of the coordinate system that will be drawn.
  413. * @param length length of the painted axis in the same unit than tvec (usually in meters)
  414. *
  415. * Given the pose estimation of a marker or board, this function draws the axis of the world
  416. * coordinate system, i.e. the system centered on the marker/board. Useful for debugging purposes.
  417. */
  418. CV_EXPORTS_W void drawAxis(InputOutputArray image, InputArray cameraMatrix, InputArray distCoeffs,
  419. InputArray rvec, InputArray tvec, float length);
  420. /**
  421. * @brief Draw a canonical marker image
  422. *
  423. * @param dictionary dictionary of markers indicating the type of markers
  424. * @param id identifier of the marker that will be returned. It has to be a valid id
  425. * in the specified dictionary.
  426. * @param sidePixels size of the image in pixels
  427. * @param img output image with the marker
  428. * @param borderBits width of the marker border.
  429. *
  430. * This function returns a marker image in its canonical form (i.e. ready to be printed)
  431. */
  432. CV_EXPORTS_W void drawMarker(const Ptr<Dictionary> &dictionary, int id, int sidePixels, OutputArray img,
  433. int borderBits = 1);
  434. /**
  435. * @brief Draw a planar board
  436. * @sa _drawPlanarBoardImpl
  437. *
  438. * @param board layout of the board that will be drawn. The board should be planar,
  439. * z coordinate is ignored
  440. * @param outSize size of the output image in pixels.
  441. * @param img output image with the board. The size of this image will be outSize
  442. * and the board will be on the center, keeping the board proportions.
  443. * @param marginSize minimum margins (in pixels) of the board in the output image
  444. * @param borderBits width of the marker borders.
  445. *
  446. * This function return the image of a planar board, ready to be printed. It assumes
  447. * the Board layout specified is planar by ignoring the z coordinates of the object points.
  448. */
  449. CV_EXPORTS_W void drawPlanarBoard(const Ptr<Board> &board, Size outSize, OutputArray img,
  450. int marginSize = 0, int borderBits = 1);
  451. /**
  452. * @brief Implementation of drawPlanarBoard that accepts a raw Board pointer.
  453. */
  454. void _drawPlanarBoardImpl(Board *board, Size outSize, OutputArray img,
  455. int marginSize = 0, int borderBits = 1);
  456. /**
  457. * @brief Calibrate a camera using aruco markers
  458. *
  459. * @param corners vector of detected marker corners in all frames.
  460. * The corners should have the same format returned by detectMarkers (see #detectMarkers).
  461. * @param ids list of identifiers for each marker in corners
  462. * @param counter number of markers in each frame so that corners and ids can be split
  463. * @param board Marker Board layout
  464. * @param imageSize Size of the image used only to initialize the intrinsic camera matrix.
  465. * @param cameraMatrix Output 3x3 floating-point camera matrix
  466. * \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV\_CALIB\_USE\_INTRINSIC\_GUESS
  467. * and/or CV_CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
  468. * initialized before calling the function.
  469. * @param distCoeffs Output vector of distortion coefficients
  470. * \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6],[s_1, s_2, s_3, s_4]])\f$ of 4, 5, 8 or 12 elements
  471. * @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each board view
  472. * (e.g. std::vector<cv::Mat>>). That is, each k-th rotation vector together with the corresponding
  473. * k-th translation vector (see the next output parameter description) brings the board pattern
  474. * from the model coordinate space (in which object points are specified) to the world coordinate
  475. * space, that is, a real position of the board pattern in the k-th pattern view (k=0.. *M* -1).
  476. * @param tvecs Output vector of translation vectors estimated for each pattern view.
  477. * @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters.
  478. * Order of deviations values:
  479. * \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
  480. * s_4, \tau_x, \tau_y)\f$ If one of parameters is not estimated, it's deviation is equals to zero.
  481. * @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters.
  482. * Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views,
  483. * \f$R_i, T_i\f$ are concatenated 1x3 vectors.
  484. * @param perViewErrors Output vector of average re-projection errors estimated for each pattern view.
  485. * @param flags flags Different flags for the calibration process (see #calibrateCamera for details).
  486. * @param criteria Termination criteria for the iterative optimization algorithm.
  487. *
  488. * This function calibrates a camera using an Aruco Board. The function receives a list of
  489. * detected markers from several views of the Board. The process is similar to the chessboard
  490. * calibration in calibrateCamera(). The function returns the final re-projection error.
  491. */
  492. CV_EXPORTS_AS(calibrateCameraArucoExtended) double calibrateCameraAruco(
  493. InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr<Board> &board,
  494. Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
  495. OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
  496. OutputArray stdDeviationsIntrinsics, OutputArray stdDeviationsExtrinsics,
  497. OutputArray perViewErrors, int flags = 0,
  498. TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));
  499. /** @brief It's the same function as #calibrateCameraAruco but without calibration error estimation.
  500. */
  501. CV_EXPORTS_W double calibrateCameraAruco(
  502. InputArrayOfArrays corners, InputArray ids, InputArray counter, const Ptr<Board> &board,
  503. Size imageSize, InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
  504. OutputArrayOfArrays rvecs = noArray(), OutputArrayOfArrays tvecs = noArray(), int flags = 0,
  505. TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON));
  506. /**
  507. * @brief Given a board configuration and a set of detected markers, returns the corresponding
  508. * image points and object points to call solvePnP
  509. *
  510. * @param board Marker board layout.
  511. * @param detectedCorners List of detected marker corners of the board.
  512. * @param detectedIds List of identifiers for each marker.
  513. * @param objPoints Vector of vectors of board marker points in the board coordinate space.
  514. * @param imgPoints Vector of vectors of the projections of board marker corner points.
  515. */
  516. CV_EXPORTS_W void getBoardObjectAndImagePoints(const Ptr<Board> &board, InputArrayOfArrays detectedCorners,
  517. InputArray detectedIds, OutputArray objPoints, OutputArray imgPoints);
//! @}
}
}

#endif