Visual Servoing Platform  version 3.6.1 under development (2024-07-27)
vpDetectorAprilTag.cpp
1 /*
2  * ViSP, open source Visual Servoing Platform software.
3  * Copyright (C) 2005 - 2024 by Inria. All rights reserved.
4  *
5  * This software is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  * See the file LICENSE.txt at the root directory of this source
10  * distribution for additional information about the GNU GPL.
11  *
12  * For using ViSP with software that can not be combined with the GNU
13  * GPL, please contact Inria about acquiring a ViSP Professional
14  * Edition License.
15  *
16  * See https://visp.inria.fr for more information.
17  *
18  * This software was developed at:
19  * Inria Rennes - Bretagne Atlantique
20  * Campus Universitaire de Beaulieu
21  * 35042 Rennes Cedex
22  * France
23  *
24  * If you have questions regarding the use of this file, please contact
25  * Inria at visp@inria.fr
26  *
27  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29  *
30  * Description:
31  * Base class for AprilTag detection.
32  */
33 #include <visp3/core/vpConfig.h>
34 
35 #ifdef VISP_HAVE_APRILTAG
36 #include <map>
37 
38 #ifdef __cplusplus
39 extern "C" {
40 #endif
41 #include <apriltag.h>
42 #include <apriltag_pose.h>
43 #include <common/homography.h>
44 #include <tag16h5.h>
45 #include <tag25h7.h>
46 #include <tag25h9.h>
47 #include <tag36h10.h>
48 #include <tag36h11.h>
49 #include <tagCircle21h7.h>
50 #include <tagStandard41h12.h>
51 #if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
52 #include <tagCircle49h12.h>
53 #include <tagCustom48h12.h>
54 #include <tagStandard41h12.h>
55 #include <tagStandard52h13.h>
56 #endif
57 #ifdef __cplusplus
58 }
59 #endif
60 
61 #include <visp3/core/vpDisplay.h>
62 #include <visp3/core/vpPixelMeterConversion.h>
63 #include <visp3/core/vpPoint.h>
64 #include <visp3/detection/vpDetectorAprilTag.h>
65 #include <visp3/vision/vpPose.h>
66 
68 
69 #ifndef DOXYGEN_SHOULD_SKIP_THIS
70 class vpDetectorAprilTag::Impl
71 {
72 public:
  //! Build the PIMPL: create the apriltag family descriptor matching \e tagFamily,
  //! attach it to a freshly created apriltag detector, and remember the pose
  //! estimation method used later by getPose().
  //! \throws vpException::fatalError if the tag family is unknown.
  Impl(const vpAprilTagFamily &tagFamily, const vpPoseEstimationMethod &method)
    : m_poseEstimationMethod(method), m_tagsId(), m_tagFamily(tagFamily), m_td(nullptr), m_tf(nullptr), m_detections(nullptr),
    m_zAlignedWithCameraFrame(false)
  {
    // Each family has its own factory function from the apriltag C library.
    switch (m_tagFamily) {
    case TAG_36h11:
      m_tf = tag36h11_create();
      break;

    case TAG_36h10:
      m_tf = tag36h10_create();
      break;

    case TAG_36ARTOOLKIT:
      // Deprecated family: no descriptor is created, so no detector is built below
      // and detect() refuses to run for this family.
      break;

    case TAG_25h9:
      m_tf = tag25h9_create();
      break;

    case TAG_25h7:
      m_tf = tag25h7_create();
      break;

    case TAG_16h5:
      m_tf = tag16h5_create();
      break;

    case TAG_CIRCLE21h7:
      m_tf = tagCircle21h7_create();
      break;

    // The "big" families below are only compiled in when
    // VISP_HAVE_APRILTAG_BIG_FAMILY is defined; otherwise m_tf stays nullptr.
    case TAG_CIRCLE49h12:
#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
      m_tf = tagCircle49h12_create();
#endif
      break;

    case TAG_CUSTOM48h12:
#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
      m_tf = tagCustom48h12_create();
#endif
      break;

    case TAG_STANDARD52h13:
#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
      m_tf = tagStandard52h13_create();
#endif
      break;

    case TAG_STANDARD41h12:
#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
      m_tf = tagStandard41h12_create();
#endif
      break;

    default:
      throw vpException(vpException::fatalError, "Unknown Tag family!");
    }

    // A detector is only created when a valid family descriptor exists.
    if ((m_tagFamily != TAG_36ARTOOLKIT) && m_tf) {
      m_td = apriltag_detector_create();
      apriltag_detector_add_family(m_td, m_tf);
    }

    // Map the ViSP pose-estimation enum values onto the vpPose initializers
    // used by getPose() for the non-homography methods.
    m_mapOfCorrespondingPoseMethods[DEMENTHON_VIRTUAL_VS] = vpPose::DEMENTHON;
    m_mapOfCorrespondingPoseMethods[LAGRANGE_VIRTUAL_VS] = vpPose::LAGRANGE;
  }
141 
  //! Copy constructor: rebuild a family descriptor for the same family,
  //! duplicate the detector configuration and the cached detections of \e o.
  Impl(const Impl &o)
    : m_poseEstimationMethod(o.m_poseEstimationMethod), m_tagsId(o.m_tagsId), m_tagFamily(o.m_tagFamily), m_td(nullptr),
    m_tf(nullptr), m_detections(nullptr), m_zAlignedWithCameraFrame(o.m_zAlignedWithCameraFrame)
  {
    // Same family-creation switch as the main constructor.
    switch (m_tagFamily) {
    case TAG_36h11:
      m_tf = tag36h11_create();
      break;

    case TAG_36h10:
      m_tf = tag36h10_create();
      break;

    case TAG_36ARTOOLKIT:
      // Deprecated family: no descriptor, no detector.
      break;

    case TAG_25h9:
      m_tf = tag25h9_create();
      break;

    case TAG_25h7:
      m_tf = tag25h7_create();
      break;

    case TAG_16h5:
      m_tf = tag16h5_create();
      break;

    case TAG_CIRCLE21h7:
      m_tf = tagCircle21h7_create();
      break;

    case TAG_CIRCLE49h12:
#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
      m_tf = tagCircle49h12_create();
#endif
      break;

    case TAG_CUSTOM48h12:
#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
      m_tf = tagCustom48h12_create();
#endif
      break;

    case TAG_STANDARD52h13:
#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
      m_tf = tagStandard52h13_create();
#endif
      break;

    case TAG_STANDARD41h12:
#if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
      m_tf = tagStandard41h12_create();
#endif
      break;

    default:
      throw vpException(vpException::fatalError, "Unknown Tag family!");
    }

    if ((m_tagFamily != TAG_36ARTOOLKIT) && m_tf) {
      // NOTE(review): apriltag_detector_copy() and apriltag_detections_copy()
      // (below) are not part of the stock AprilTag C API — presumably provided
      // by the bundled 3rdparty copy; confirm they deep-copy tuning parameters.
      m_td = apriltag_detector_copy(o.m_td);
      apriltag_detector_add_family(m_td, m_tf);
    }

    m_mapOfCorrespondingPoseMethods[DEMENTHON_VIRTUAL_VS] = vpPose::DEMENTHON;
    m_mapOfCorrespondingPoseMethods[LAGRANGE_VIRTUAL_VS] = vpPose::LAGRANGE;

    // Duplicate the last detection results so the copy is immediately usable
    // with getPose().
    if (o.m_detections != nullptr) {
      m_detections = apriltag_detections_copy(o.m_detections);
    }
  }
214 
215  ~Impl()
216  {
217  if (m_td) {
218  apriltag_detector_destroy(m_td);
219  }
220 
221  if (m_tf) {
222  switch (m_tagFamily) {
223  case TAG_36h11:
224  tag36h11_destroy(m_tf);
225  break;
226 
227  case TAG_36h10:
228  tag36h10_destroy(m_tf);
229  break;
230 
231  case TAG_36ARTOOLKIT:
232  break;
233 
234  case TAG_25h9:
235  tag25h9_destroy(m_tf);
236  break;
237 
238  case TAG_25h7:
239  tag25h7_destroy(m_tf);
240  break;
241 
242  case TAG_16h5:
243  tag16h5_destroy(m_tf);
244  break;
245 
246  case TAG_CIRCLE21h7:
247  tagCircle21h7_destroy(m_tf);
248  break;
249 
250  case TAG_CIRCLE49h12:
251 #if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
252  tagCustom48h12_destroy(m_tf);
253 #endif
254  break;
255 
256  case TAG_CUSTOM48h12:
257 #if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
258  tagCustom48h12_destroy(m_tf);
259 #endif
260  break;
261 
262  case TAG_STANDARD52h13:
263 #if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
264  tagStandard52h13_destroy(m_tf);
265 #endif
266  break;
267 
268  case TAG_STANDARD41h12:
269 #if defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
270  tagStandard41h12_destroy(m_tf);
271 #endif
272  break;
273 
274  default:
275  break;
276  }
277  }
278 
279  if (m_detections) {
280  apriltag_detections_destroy(m_detections);
281  m_detections = nullptr;
282  }
283  }
284 
285  void convertHomogeneousMatrix(const apriltag_pose_t &pose, vpHomogeneousMatrix &cMo)
286  {
287  const unsigned int val_3 = 3;
288  for (unsigned int i = 0; i < val_3; ++i) {
289  for (unsigned int j = 0; j < val_3; ++j) {
290  cMo[i][j] = MATD_EL(pose.R, i, j);
291  }
292  cMo[i][val_3] = MATD_EL(pose.t, i, 0);
293  }
294  }
295 
296  bool detect(const vpImage<unsigned char> &I, double tagSize, const vpCameraParameters &cam,
297  std::vector<std::vector<vpImagePoint> > &polygons, std::vector<std::string> &messages, bool displayTag,
298  const vpColor color, unsigned int thickness, std::vector<vpHomogeneousMatrix> *cMo_vec,
299  std::vector<vpHomogeneousMatrix> *cMo_vec2, std::vector<double> *projErrors,
300  std::vector<double> *projErrors2)
301  {
302  if (m_tagFamily == TAG_36ARTOOLKIT) {
303  // TAG_36ARTOOLKIT is not available anymore
304  std::cerr << "TAG_36ARTOOLKIT detector is not available anymore." << std::endl;
305  return false;
306  }
307 #if !defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
310  std::cerr << "TAG_CIRCLE49h12, TAG_CUSTOM48h12, TAG_STANDARD41h12 and TAG_STANDARD52h13 are disabled."
311  << std::endl;
312  return false;
313  }
314 #endif
315 
316  const bool computePose = (cMo_vec != nullptr);
317 
318  image_u8_t im = {/*.width =*/static_cast<int32_t>(I.getWidth()),
319  /*.height =*/static_cast<int32_t>(I.getHeight()),
320  /*.stride =*/static_cast<int32_t>(I.getWidth()),
321  /*.buf =*/I.bitmap };
322 
323  if (m_detections) {
324  apriltag_detections_destroy(m_detections);
325  m_detections = nullptr;
326  }
327 
328  m_detections = apriltag_detector_detect(m_td, &im);
329  int nb_detections = zarray_size(m_detections);
330  bool detected = nb_detections > 0;
331 
332  polygons.resize(static_cast<size_t>(nb_detections));
333  messages.resize(static_cast<size_t>(nb_detections));
334  m_tagsId.resize(static_cast<size_t>(nb_detections));
335 
336  int zarray_size_m_detections = zarray_size(m_detections);
337  for (int i = 0; i < zarray_size_m_detections; ++i) {
338  apriltag_detection_t *det;
339  zarray_get(m_detections, i, &det);
340 
341  std::vector<vpImagePoint> polygon;
342  const int polygonSize = 4;
343  for (int j = 0; j < polygonSize; ++j) {
344  polygon.push_back(vpImagePoint(det->p[j][1], det->p[j][0]));
345  }
346  polygons[static_cast<size_t>(i)] = polygon;
347  std::stringstream ss;
348  ss << m_tagFamily << " id: " << det->id;
349  messages[static_cast<size_t>(i)] = ss.str();
350  m_tagsId[static_cast<size_t>(i)] = det->id;
351 
352  if (displayTag) {
353  vpColor Ox = (color == vpColor::none) ? vpColor::red : color;
354  vpColor Oy = (color == vpColor::none) ? vpColor::green : color;
355  vpColor Ox2 = (color == vpColor::none) ? vpColor::yellow : color;
356  vpColor Oy2 = (color == vpColor::none) ? vpColor::blue : color;
357 
358  const unsigned int polyId0 = 0, polyId1 = 1, polyId2 = 2, polyId3 = 3;
359  vpDisplay::displayLine(I, static_cast<int>(det->p[polyId0][1]), static_cast<int>(det->p[polyId0][0]), static_cast<int>(det->p[polyId1][1]), static_cast<int>(det->p[polyId1][0]), Ox,
360  thickness);
361  vpDisplay::displayLine(I, static_cast<int>(det->p[polyId0][1]), static_cast<int>(det->p[polyId0][0]), static_cast<int>(det->p[polyId3][1]), static_cast<int>(det->p[polyId3][0]), Oy,
362  thickness);
363  vpDisplay::displayLine(I, static_cast<int>(det->p[polyId1][1]), static_cast<int>(det->p[polyId1][0]), static_cast<int>(det->p[polyId2][1]), static_cast<int>(det->p[polyId2][0]), Ox2,
364  thickness);
365  vpDisplay::displayLine(I, static_cast<int>(det->p[polyId2][1]), static_cast<int>(det->p[polyId2][0]), static_cast<int>(det->p[polyId3][1]), static_cast<int>(det->p[polyId3][0]), Oy2,
366  thickness);
367  }
368 
369  if (computePose) {
370  vpHomogeneousMatrix cMo, cMo2;
371  double err1, err2;
372  if (getPose(static_cast<size_t>(i), tagSize, cam, cMo, cMo_vec2 ? &cMo2 : nullptr, projErrors ? &err1 : nullptr,
373  projErrors2 ? &err2 : nullptr)) {
374  cMo_vec->push_back(cMo);
375  if (cMo_vec2) {
376  cMo_vec2->push_back(cMo2);
377  }
378  if (projErrors) {
379  projErrors->push_back(err1);
380  }
381  if (projErrors2) {
382  projErrors2->push_back(err2);
383  }
384  }
385  // else case should never happen
386  }
387  }
388 
389  return detected;
390  }
391 
392  void displayFrames(const vpImage<unsigned char> &I, const std::vector<vpHomogeneousMatrix> &cMo_vec,
393  const vpCameraParameters &cam, double size, const vpColor &color, unsigned int thickness) const
394  {
395  size_t cmo_vec_size = cMo_vec.size();
396  for (size_t i = 0; i < cmo_vec_size; ++i) {
397  const vpHomogeneousMatrix &cMo = cMo_vec[i];
398  vpDisplay::displayFrame(I, cMo, cam, size, color, thickness);
399  }
400  }
401 
402  void displayFrames(const vpImage<vpRGBa> &I, const std::vector<vpHomogeneousMatrix> &cMo_vec,
403  const vpCameraParameters &cam, double size, const vpColor &color, unsigned int thickness) const
404  {
405  size_t cmo_vec_size = cMo_vec.size();
406  for (size_t i = 0; i < cmo_vec_size; ++i) {
407  const vpHomogeneousMatrix &cMo = cMo_vec[i];
408  vpDisplay::displayFrame(I, cMo, cam, size, color, thickness);
409  }
410  }
411 
412  void displayTags(const vpImage<unsigned char> &I, const std::vector<std::vector<vpImagePoint> > &tagsCorners,
413  const vpColor &color, unsigned int thickness) const
414  {
415  size_t tagscorners_size = tagsCorners.size();
416  for (size_t i = 0; i < tagscorners_size; ++i) {
417  const vpColor Ox = (color == vpColor::none) ? vpColor::red : color;
418  const vpColor Oy = (color == vpColor::none) ? vpColor::green : color;
419  const vpColor Ox2 = (color == vpColor::none) ? vpColor::yellow : color;
420  const vpColor Oy2 = (color == vpColor::none) ? vpColor::blue : color;
421 
422  const std::vector<vpImagePoint> &corners = tagsCorners[i];
423  assert(corners.size() == 4);
424  const unsigned int cornerId0 = 0, cornerId1 = 1, cornerId2 = 2, cornerId3 = 3;
425 
426  vpDisplay::displayLine(I, static_cast<int>(corners[cornerId0].get_i()), static_cast<int>(corners[cornerId0].get_j()), static_cast<int>(corners[cornerId1].get_i()), static_cast<int>(corners[cornerId1].get_j()),
427  Ox, thickness);
428  vpDisplay::displayLine(I, static_cast<int>(corners[cornerId0].get_i()), static_cast<int>(corners[cornerId0].get_j()), static_cast<int>(corners[cornerId3].get_i()), static_cast<int>(corners[cornerId3].get_j()),
429  Oy, thickness);
430  vpDisplay::displayLine(I, static_cast<int>(corners[cornerId1].get_i()), static_cast<int>(corners[cornerId1].get_j()), static_cast<int>(corners[cornerId2].get_i()), static_cast<int>(corners[cornerId2].get_j()),
431  Ox2, thickness);
432  vpDisplay::displayLine(I, static_cast<int>(corners[cornerId2].get_i()), static_cast<int>(corners[cornerId2].get_j()), static_cast<int>(corners[cornerId3].get_i()), static_cast<int>(corners[cornerId3].get_j()),
433  Oy2, thickness);
434  }
435  }
436 
437  void displayTags(const vpImage<vpRGBa> &I, const std::vector<std::vector<vpImagePoint> > &tagsCorners,
438  const vpColor &color, unsigned int thickness) const
439  {
440  size_t tagscorners_size = tagsCorners.size();
441  for (size_t i = 0; i < tagscorners_size; ++i) {
442  const vpColor Ox = (color == vpColor::none) ? vpColor::red : color;
443  const vpColor Oy = (color == vpColor::none) ? vpColor::green : color;
444  const vpColor Ox2 = (color == vpColor::none) ? vpColor::yellow : color;
445  const vpColor Oy2 = (color == vpColor::none) ? vpColor::blue : color;
446 
447  const std::vector<vpImagePoint> &corners = tagsCorners[i];
448  assert(corners.size() == 4);
449  const unsigned int cornerId0 = 0, cornerId1 = 1, cornerId2 = 2, cornerId3 = 3;
450 
451  vpDisplay::displayLine(I, static_cast<int>(corners[cornerId0].get_i()), static_cast<int>(corners[cornerId0].get_j()), static_cast<int>(corners[cornerId1].get_i()), static_cast<int>(corners[cornerId1].get_j()),
452  Ox, thickness);
453  vpDisplay::displayLine(I, static_cast<int>(corners[cornerId0].get_i()), static_cast<int>(corners[cornerId0].get_j()), static_cast<int>(corners[cornerId3].get_i()), static_cast<int>(corners[cornerId3].get_j()),
454  Oy, thickness);
455  vpDisplay::displayLine(I, static_cast<int>(corners[cornerId1].get_i()), static_cast<int>(corners[cornerId1].get_j()), static_cast<int>(corners[cornerId2].get_i()), static_cast<int>(corners[cornerId2].get_j()),
456  Ox2, thickness);
457  vpDisplay::displayLine(I, static_cast<int>(corners[cornerId2].get_i()), static_cast<int>(corners[cornerId2].get_j()), static_cast<int>(corners[cornerId3].get_i()), static_cast<int>(corners[cornerId3].get_j()),
458  Oy2, thickness);
459  }
460  }
461 
  //! Estimate the pose of the tag at index \e tagIndex of the last detect()
  //! call. Depending on m_poseEstimationMethod the pose comes from the AprilTag
  //! homography / orthogonal-iteration solvers and/or a vpPose refinement.
  //! \e cMo2 / \e projErrors / \e projErrors2 are optional outputs.
  //! NOTE(review): several lines of this function were lost when the file was
  //! extracted (marked below); brace balance is broken as shown.
  bool getPose(size_t tagIndex, double tagSize, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
               vpHomogeneousMatrix *cMo2, double *projErrors, double *projErrors2)
  {
    if (m_detections == nullptr) {
      throw(vpException(vpException::fatalError, "Cannot get tag index=%d pose: detection empty", tagIndex));
    }
    if (m_tagFamily == TAG_36ARTOOLKIT) {
      // TAG_36ARTOOLKIT is not available anymore
      std::cerr << "TAG_36ARTOOLKIT detector is not available anymore." << std::endl;
      return false;
    }
#if !defined(VISP_HAVE_APRILTAG_BIG_FAMILY)
    // NOTE(review): the big-family guard "if ((m_tagFamily == TAG_CIRCLE49h12) || ...) {"
    // was lost during extraction here.
    std::cerr << "TAG_CIRCLE49h12, TAG_CUSTOM48h12, TAG_STANDARD41h12 and TAG_STANDARD52h13 are disabled."
              << std::endl;
    return false;
  }
#endif

    apriltag_detection_t *det;
    zarray_get(m_detections, static_cast<int>(tagIndex), &det);

    // NOTE(review): the bounds check below runs AFTER zarray_get() already used
    // tagIndex — consider validating the index first.
    int nb_detections = zarray_size(m_detections);
    if (tagIndex >= static_cast<size_t>(nb_detections)) {
      return false;
    }

    // In AprilTag3, estimate_pose_for_tag_homography() and estimate_tag_pose() have been added.
    // They use a tag frame aligned with the camera frame
    // Before the release of AprilTag3, convention used was to define the z-axis of the tag going upward.
    // To keep compatibility, we maintain the same convention than before and there is setZAlignedWithCameraAxis().
    // Under the hood, we use aligned frames everywhere and transform the pose according to the option.

    vpHomogeneousMatrix cMo_homography_ortho_iter;
    // NOTE(review): a guard such as "if ((m_poseEstimationMethod == HOMOGRAPHY_ORTHOGONAL_ITERATION) || ...) {"
    // was lost during extraction here (the closing brace below is its end).
    double fx = cam.get_px(), fy = cam.get_py();
    double cx = cam.get_u0(), cy = cam.get_v0();

    apriltag_detection_info_t info;
    info.det = det;
    info.tagsize = tagSize;
    info.fx = fx;
    info.fy = fy;
    info.cx = cx;
    info.cy = cy;

    // projErrors and projErrors2 will be override later
    getPoseWithOrthogonalMethod(info, cMo, cMo2, projErrors, projErrors2);
    cMo_homography_ortho_iter = cMo;
  }

    vpHomogeneousMatrix cMo_homography;
    // NOTE(review): a guard selecting the homography-based methods was lost
    // during extraction here (the closing brace below is its end).
    double fx = cam.get_px(), fy = cam.get_py();
    double cx = cam.get_u0(), cy = cam.get_v0();

    apriltag_detection_info_t info;
    info.det = det;
    info.tagsize = tagSize;
    info.fx = fx;
    info.fy = fy;
    info.cx = cx;
    info.cy = cy;

    apriltag_pose_t pose;
    estimate_pose_for_tag_homography(&info, &pose);
    convertHomogeneousMatrix(pose, cMo);

    matd_destroy(pose.R);
    matd_destroy(pose.t);

    cMo_homography = cMo;
  }

    // Add marker object points
    vpPose pose;
    vpPoint pt;

    // Build the 3D (tag frame) / 2D (normalized image plane) correspondences
    // of the four tag corners for vpPose.
    vpImagePoint imPt;
    double x = 0.0, y = 0.0;
    std::vector<vpPoint> pts(4);
    pt.setWorldCoordinates(-tagSize / 2.0, tagSize / 2.0, 0.0);
    imPt.set_uv(det->p[0][0], det->p[0][1]);
    vpPixelMeterConversion::convertPoint(cam, imPt, x, y);
    pt.set_x(x);
    pt.set_y(y);
    pts[0] = pt;

    pt.setWorldCoordinates(tagSize / 2.0, tagSize / 2.0, 0.0);
    imPt.set_uv(det->p[1][0], det->p[1][1]);
    vpPixelMeterConversion::convertPoint(cam, imPt, x, y);
    pt.set_x(x);
    pt.set_y(y);
    pts[1] = pt;

    const int idCorner2 = 2;
    pt.setWorldCoordinates(tagSize / 2.0, -tagSize / 2.0, 0.0);
    imPt.set_uv(det->p[idCorner2][0], det->p[idCorner2][1]);
    vpPixelMeterConversion::convertPoint(cam, imPt, x, y);
    pt.set_x(x);
    pt.set_y(y);
    pts[idCorner2] = pt;

    const int idCorner3 = 3;
    pt.setWorldCoordinates(-tagSize / 2.0, -tagSize / 2.0, 0.0);
    imPt.set_uv(det->p[idCorner3][0], det->p[idCorner3][1]);
    vpPixelMeterConversion::convertPoint(cam, imPt, x, y);
    pt.set_x(x);
    pt.set_y(y);
    pts[idCorner3] = pt;

    pose.addPoints(pts);

    // NOTE(review): the guard(s) opening the non-homography pose-estimation
    // branch (and its BEST_RESIDUAL_VIRTUAL_VS sub-branch) were lost during
    // extraction here.
    vpHomogeneousMatrix cMo_dementhon, cMo_lagrange;

    // BEST_RESIDUAL_VIRTUAL_VS: try every initializer and keep the pose with
    // the smallest reprojection residual.
    double residual_dementhon = std::numeric_limits<double>::max(),
           residual_lagrange = std::numeric_limits<double>::max();
    double residual_homography = pose.computeResidual(cMo_homography);
    double residual_homography_ortho_iter = pose.computeResidual(cMo_homography_ortho_iter);

    if (pose.computePose(vpPose::DEMENTHON, cMo_dementhon)) {
      residual_dementhon = pose.computeResidual(cMo_dementhon);
    }

    if (pose.computePose(vpPose::LAGRANGE, cMo_lagrange)) {
      residual_lagrange = pose.computeResidual(cMo_lagrange);
    }

    std::vector<double> residuals;
    residuals.push_back(residual_dementhon);
    residuals.push_back(residual_lagrange);
    residuals.push_back(residual_homography);
    residuals.push_back(residual_homography_ortho_iter);
    std::vector<vpHomogeneousMatrix> poses;
    poses.push_back(cMo_dementhon);
    poses.push_back(cMo_lagrange);
    poses.push_back(cMo_homography);
    poses.push_back(cMo_homography_ortho_iter);

    std::ptrdiff_t minIndex = std::min_element(residuals.begin(), residuals.end()) - residuals.begin();
    cMo = *(poses.begin() + minIndex);
  }
    else {
    // Other virtual-visual-servoing methods: initialize with the mapped vpPose method.
    pose.computePose(m_mapOfCorrespondingPoseMethods[m_poseEstimationMethod], cMo);
  }
  }

    // NOTE(review): the guard opening the VVS-refinement branch was lost during
    // extraction here.
    // Compute final pose using VVS
    pose.computePose(vpPose::VIRTUAL_VS, cMo);
  }

    // Only with HOMOGRAPHY_ORTHOGONAL_ITERATION we can directly get two solutions
    // NOTE(review): the guard "if (m_poseEstimationMethod == HOMOGRAPHY_ORTHOGONAL_ITERATION) {"
    // was presumably lost during extraction here (see the extra closing brace below).
    if (cMo2) {
      // Recompute the second orthogonal-iteration solution from the current cMo.
      double scale = tagSize / 2.0;
      double data_p0[] = { -scale, scale, 0 };
      double data_p1[] = { scale, scale, 0 };
      double data_p2[] = { scale, -scale, 0 };
      double data_p3[] = { -scale, -scale, 0 };
      const unsigned int nbPoints = 4;
      const int nbRows = 3;
      matd_t *p[nbPoints] = { matd_create_data(nbRows, 1, data_p0), matd_create_data(nbRows, 1, data_p1),
                              matd_create_data(nbRows, 1, data_p2), matd_create_data(nbRows, 1, data_p3) };
      matd_t *v[nbPoints];
      for (unsigned int i = 0; i < nbPoints; ++i) {
        // Normalized viewing rays of the four detected corners.
        double data_v[] = { (det->p[i][0] - cam.get_u0()) / cam.get_px(), (det->p[i][1] - cam.get_v0()) / cam.get_py(),
                            1 };
        v[i] = matd_create_data(nbRows, 1, data_v);
      }

      apriltag_pose_t solution1, solution2;
      const int nIters = 50;
      const int nbCols = 3;
      solution1.R = matd_create_data(nbRows, nbCols, cMo.getRotationMatrix().data);
      solution1.t = matd_create_data(nbRows, 1, cMo.getTranslationVector().data);

      double err2;
      get_second_solution(v, p, &solution1, &solution2, nIters, &err2);

      for (unsigned int i = 0; i < nbPoints; ++i) {
        matd_destroy(p[i]);
        matd_destroy(v[i]);
      }

      if (solution2.R) {
        convertHomogeneousMatrix(solution2, *cMo2);

        matd_destroy(solution2.R);
        matd_destroy(solution2.t);
      }

      matd_destroy(solution1.R);
      matd_destroy(solution1.t);
    }
  }

    // Compute projection error with vpPose::computeResidual() for consistency
    if (projErrors) {
      *projErrors = pose.computeResidual(cMo);
    }
    if (projErrors2 && cMo2) {
      *projErrors2 = pose.computeResidual(*cMo2);
    }

    // Convert back to the historical ViSP convention (tag z-axis pointing
    // upward) unless the user asked for the camera-aligned frame.
    if (!m_zAlignedWithCameraFrame) {
      const unsigned int idX = 0, idY = 1, idZ = 2;
      // NOTE(review): the declaration "vpHomogeneousMatrix oMo;" was presumably
      // lost during extraction here.
      // Apply a rotation of 180deg around x axis
      oMo[idX][idX] = 1;
      oMo[idX][idY] = 0;
      oMo[idX][idZ] = 0;
      oMo[idY][idX] = 0;
      oMo[idY][idY] = -1;
      oMo[idY][idZ] = 0;
      oMo[idZ][idX] = 0;
      oMo[idZ][idY] = 0;
      oMo[idZ][idZ] = -1;
      cMo = cMo * oMo;
      if (cMo2) {
        *cMo2 = *cMo2 * oMo;
      }
    }

    return true;
  }
694 
  //! Run the AprilTag orthogonal-iteration solver and return the solution with
  //! the smallest object-space error in \e cMo1; the other solution (when it
  //! exists) goes to \e cMo2. \e err1 / \e err2 optionally receive the errors.
  void getPoseWithOrthogonalMethod(apriltag_detection_info_t &info, vpHomogeneousMatrix &cMo1,
                                   vpHomogeneousMatrix *cMo2, double *err1, double *err2)
  {
    apriltag_pose_t pose1, pose2;
    double err_1, err_2;
    const unsigned int nbIters = 50;
    estimate_tag_pose_orthogonal_iteration(&info, &err_1, &pose1, &err_2, &pose2, nbIters);
    if (err_1 <= err_2) {
      convertHomogeneousMatrix(pose1, cMo1);
      if (cMo2) {
        if (pose2.R) {
          convertHomogeneousMatrix(pose2, *cMo2);
        }
        else {
          // No second solution found: fall back to duplicating the first one.
          *cMo2 = cMo1;
        }
      }
    }
    else {
      convertHomogeneousMatrix(pose2, cMo1);
      if (cMo2) {
        convertHomogeneousMatrix(pose1, *cMo2);
      }
    }

    // pose2.t is only valid when a second solution was produced (pose2.R set);
    // pose1 is always populated by the solver.
    matd_destroy(pose1.R);
    matd_destroy(pose1.t);
    if (pose2.R) {
      matd_destroy(pose2.t);
    }
    matd_destroy(pose2.R);

    if (err1) {
      *err1 = err_1;
    }
    if (err2) {
      *err2 = err_2;
    }
  }
734 
735  bool getZAlignedWithCameraAxis() { return m_zAlignedWithCameraFrame; }
736 
737  bool getAprilTagDecodeSharpening(double &decodeSharpening) const
738  {
739  if (m_td) {
740  decodeSharpening = m_td->decode_sharpening;
741  return true;
742  }
743  return false;
744  }
745 
746  bool getNbThreads(int &nThreads) const
747  {
748  if (m_td) {
749  nThreads = m_td->nthreads;
750  return true;
751  }
752  return false;
753  }
754 
755  bool getQuadDecimate(float &quadDecimate) const
756  {
757  if (m_td) {
758  quadDecimate = m_td->quad_decimate;
759  return true;
760  }
761  return false;
762  }
763 
764  bool getQuadSigma(float &quadSigma) const
765  {
766  if (m_td) {
767  quadSigma = m_td->quad_sigma;
768  return true;
769  }
770  return false;
771  }
772 
773  bool getRefineEdges(bool &refineEdges) const
774  {
775  if (m_td) {
776  refineEdges = (m_td->refine_edges ? true : false);
777  return true;
778  }
779  return false;
780  }
781 
782  bool getZAlignedWithCameraAxis() const { return m_zAlignedWithCameraFrame; }
783 
784  std::vector<int> getTagsId() const { return m_tagsId; }
785 
786  void setAprilTagDecodeSharpening(double decodeSharpening)
787  {
788  if (m_td) {
789  m_td->decode_sharpening = decodeSharpening;
790  }
791  }
792 
793  void setNbThreads(int nThreads)
794  {
795  if (m_td) {
796  m_td->nthreads = nThreads;
797  }
798  }
799 
800  void setQuadDecimate(float quadDecimate)
801  {
802  if (m_td) {
803  m_td->quad_decimate = quadDecimate;
804  }
805  }
806 
807  void setQuadSigma(float quadSigma)
808  {
809  if (m_td) {
810  m_td->quad_sigma = quadSigma;
811  }
812  }
813 
814  void setRefineDecode(bool) { }
815 
816  void setRefineEdges(bool refineEdges)
817  {
818  if (m_td) {
819  m_td->refine_edges = (refineEdges ? true : false);
820  }
821  }
822 
823  void setRefinePose(bool) { }
824 
825  void setPoseEstimationMethod(const vpPoseEstimationMethod &method) { m_poseEstimationMethod = method; }
826 
827  void setZAlignedWithCameraAxis(bool zAlignedWithCameraFrame) { m_zAlignedWithCameraFrame = zAlignedWithCameraFrame; }
828 
829 protected:
830  std::map<vpPoseEstimationMethod, vpPose::vpPoseMethodType> m_mapOfCorrespondingPoseMethods;
832  std::vector<int> m_tagsId;
834  apriltag_detector_t *m_td;
835  apriltag_family_t *m_tf;
836  zarray_t *m_detections;
837  bool m_zAlignedWithCameraFrame;
838 };
839 
namespace
{
// Default thickness (in pixels) of the lines used to outline detected tags.
constexpr unsigned int def_tagThickness = 2;
}
844 #endif // DOXYGEN_SHOULD_SKIP_THIS
845 
// NOTE(review): the constructor signature line was lost during extraction;
// presumably "vpDetectorAprilTag::vpDetectorAprilTag(const vpAprilTagFamily &tagFamily,".
 const vpPoseEstimationMethod &poseEstimationMethod)
  : m_displayTag(false), m_displayTagColor(vpColor::none), m_displayTagThickness(def_tagThickness),
  m_poseEstimationMethod(poseEstimationMethod), m_tagFamily(tagFamily), m_defaultCam(),
  m_impl(new Impl(tagFamily, poseEstimationMethod))
{ }

// NOTE(review): the copy-constructor signature line was lost during extraction;
// presumably "vpDetectorAprilTag::vpDetectorAprilTag(const vpDetectorAprilTag &o)".
  : vpDetectorBase(o), m_displayTag(false), m_displayTagColor(vpColor::none), m_displayTagThickness(def_tagThickness),
  m_poseEstimationMethod(o.m_poseEstimationMethod), m_tagFamily(o.m_tagFamily), m_defaultCam(),
  m_impl(new Impl(*o.m_impl))
{ }

// NOTE(review): the assignment-operator signature line was lost during extraction;
// presumably "vpDetectorAprilTag &vpDetectorAprilTag::operator=(vpDetectorAprilTag o)".
// Copy-and-swap: o is a by-value copy swapped into *this.
{
  swap(*this, o);
  return *this;
}

// NOTE(review): the destructor definition was lost during extraction (likely
// "vpDetectorAprilTag::~vpDetectorAprilTag() { delete m_impl; }" — confirm).

// NOTE(review): the signature of detect(const vpImage<unsigned char> &I) was
// lost during extraction.
{
  m_message.clear();
  m_polygon.clear();
  m_nb_objects = 0;

  // NOTE(review): cMo_vec is declared but nullptr is passed below, so this
  // overload performs detection only (no pose estimation); cMo_vec is unused.
  std::vector<vpHomogeneousMatrix> cMo_vec;
  const double tagSize = 1.0;
  bool detected = m_impl->detect(I, tagSize, m_defaultCam, m_polygon, m_message, m_displayTag, m_displayTagColor,
                                 m_displayTagThickness, nullptr, nullptr, nullptr, nullptr);
  m_nb_objects = m_message.size();

  return detected;
}
888 
906 bool vpDetectorAprilTag::detect(const vpImage<unsigned char> &I, double tagSize, const vpCameraParameters &cam,
907  std::vector<vpHomogeneousMatrix> &cMo_vec, std::vector<vpHomogeneousMatrix> *cMo_vec2,
908  std::vector<double> *projErrors, std::vector<double> *projErrors2)
909 {
910  m_message.clear();
911  m_polygon.clear();
912  m_nb_objects = 0;
913 
914  cMo_vec.clear();
915  if (cMo_vec2) {
916  cMo_vec2->clear();
917  }
918  bool detected = m_impl->detect(I, tagSize, cam, m_polygon, m_message, m_displayTag, m_displayTagColor,
919  m_displayTagThickness, &cMo_vec, cMo_vec2, projErrors, projErrors2);
920  m_nb_objects = m_message.size();
921 
922  return detected;
923 }
924 
//! Display a camera frame for each pose of \e cMo_vec in the grayscale image.
void vpDetectorAprilTag::displayFrames(const vpImage<unsigned char> &I, const std::vector<vpHomogeneousMatrix> &cMo_vec,
                                       const vpCameraParameters &cam, double size, const vpColor &color, unsigned int thickness) const
{
  m_impl->displayFrames(I, cMo_vec, cam, size, color, thickness);
}

//! Display a camera frame for each pose of \e cMo_vec in the color image.
void vpDetectorAprilTag::displayFrames(const vpImage<vpRGBa> &I, const std::vector<vpHomogeneousMatrix> &cMo_vec,
                                       const vpCameraParameters &cam, double size, const vpColor &color, unsigned int thickness) const
{
  m_impl->displayFrames(I, cMo_vec, cam, size, color, thickness);
}

//! Outline the given tag corners in the grayscale image.
void vpDetectorAprilTag::displayTags(const vpImage<unsigned char> &I, const std::vector<std::vector<vpImagePoint> > &tagsCorners,
                                     const vpColor &color, unsigned int thickness) const
{
  m_impl->displayTags(I, tagsCorners, color, thickness);
}

//! Outline the given tag corners in the color image.
void vpDetectorAprilTag::displayTags(const vpImage<vpRGBa> &I, const std::vector<std::vector<vpImagePoint> > &tagsCorners,
                                     const vpColor &color, unsigned int thickness) const
{
  m_impl->displayTags(I, tagsCorners, color, thickness);
}

//! Get the pose of the tag at index \e tagIndex from the last detect() call;
//! forwards to the PIMPL implementation.
bool vpDetectorAprilTag::getPose(size_t tagIndex, double tagSize, const vpCameraParameters &cam,
                                 vpHomogeneousMatrix &cMo, vpHomogeneousMatrix *cMo2, double *projError,
                                 double *projError2)
{
  return m_impl->getPose(tagIndex, tagSize, cam, cMo, cMo2, projError, projError2);
}
1022 
//! Build the four 3D corner points (tag frame) for each tag id in \e tagsId,
//! looking the tag size up in \e tagsSize (key -1 = default size). Corner order
//! depends on the frame convention (see setZAlignedWithCameraAxis()).
std::vector<std::vector<vpPoint> > vpDetectorAprilTag::getTagsPoints3D(const std::vector<int> &tagsId,
                                                                       const std::map<int, double> &tagsSize) const
{
  std::vector<std::vector<vpPoint> > tagsPoints3D;

  double default_size = -1;

  std::map<int, double>::const_iterator it = tagsSize.find(-1);
  if (it != tagsSize.end()) {
    default_size = it->second; // Default size
  }

  size_t tagsid_size = tagsId.size();
  for (size_t i = 0; i < tagsid_size; ++i) {
    it = tagsSize.find(tagsId[i]);
    double tagSize = default_size; // Default size
    if (it == tagsSize.end()) {
      if (default_size < 0) { // no default size found
        // NOTE(review): the line "throw(vpException(vpException::fatalError,"
        // was lost during extraction; the string below is its message argument.
        "Tag with id %d has no 3D size or there is no default 3D size defined", tagsId[i]));
      }
    }
    else {
      tagSize = it->second;
    }
    std::vector<vpPoint> points3D(4);
    const unsigned int idX = 0, idY = 1, idZ = 2, idHomogen = 3;
    const double middleFactor = 2.0;
    if (m_impl->getZAlignedWithCameraAxis()) {
      points3D[idX] = vpPoint(-tagSize / middleFactor, tagSize / middleFactor, 0);
      points3D[idY] = vpPoint(tagSize / middleFactor, tagSize / middleFactor, 0);
      points3D[idZ] = vpPoint(tagSize / middleFactor, -tagSize / middleFactor, 0);
      points3D[idHomogen] = vpPoint(-tagSize / middleFactor, -tagSize / middleFactor, 0);
    }
    else {
      points3D[idX] = vpPoint(-tagSize / middleFactor, -tagSize / middleFactor, 0);
      points3D[idY] = vpPoint(tagSize / middleFactor, -tagSize / middleFactor, 0);
      points3D[idZ] = vpPoint(tagSize / middleFactor, tagSize / middleFactor, 0);
      points3D[idHomogen] = vpPoint(-tagSize / middleFactor, tagSize / middleFactor, 0);
    }
    tagsPoints3D.push_back(points3D);
  }

  return tagsPoints3D;
}

//! Corners (image plane) of the tags found by the last detect() call.
std::vector<std::vector<vpImagePoint> > vpDetectorAprilTag::getTagsCorners() const { return m_polygon; }

//! Ids of the tags found by the last detect() call.
std::vector<int> vpDetectorAprilTag::getTagsId() const { return m_impl->getTagsId(); }

// NOTE(review): signature lost during extraction; presumably
// "void vpDetectorAprilTag::setAprilTagDecodeSharpening(double decodeSharpening)".
{
  return m_impl->setAprilTagDecodeSharpening(decodeSharpening);
}

// NOTE(review): signature lost during extraction; presumably
// "void vpDetectorAprilTag::setAprilTagFamily(const vpAprilTagFamily &tagFamily)".
// Rebuilds the PIMPL for the new family while preserving the current tuning.
{
  // back-up settings
  double decodeSharpening = 0.25;
  m_impl->getAprilTagDecodeSharpening(decodeSharpening);
  int nThreads = 1;
  m_impl->getNbThreads(nThreads);
  float quadDecimate = 1;
  m_impl->getQuadDecimate(quadDecimate);
  float quadSigma = 0;
  m_impl->getQuadSigma(quadSigma);
  bool refineEdges = true;
  m_impl->getRefineEdges(refineEdges);
  bool zAxis = m_impl->getZAlignedWithCameraAxis();

  // Recreate the detector for the new family, then restore the settings.
  delete m_impl;
  m_impl = new Impl(tagFamily, m_poseEstimationMethod);
  m_impl->setAprilTagDecodeSharpening(decodeSharpening);
  m_impl->setNbThreads(nThreads);
  m_impl->setQuadDecimate(quadDecimate);
  m_impl->setQuadSigma(quadSigma);
  m_impl->setRefineEdges(refineEdges);
  m_impl->setZAlignedWithCameraAxis(zAxis);
}

// NOTE(review): signature lost during extraction; presumably
// "void vpDetectorAprilTag::setAprilTagNbThreads(int nThreads)".
{
  // Non-positive thread counts are ignored.
  if (nThreads > 0) {
    m_impl->setNbThreads(nThreads);
  }
}

// NOTE(review): signature lost during extraction; presumably
// "void vpDetectorAprilTag::setAprilTagPoseEstimationMethod(const vpPoseEstimationMethod &poseEstimationMethod)".
{
  m_poseEstimationMethod = poseEstimationMethod;
  m_impl->setPoseEstimationMethod(poseEstimationMethod);
}

//! Set the quad-decimation factor of the underlying detector.
void vpDetectorAprilTag::setAprilTagQuadDecimate(float quadDecimate) { m_impl->setQuadDecimate(quadDecimate); }

//! Set the quad Gaussian-blur sigma of the underlying detector.
void vpDetectorAprilTag::setAprilTagQuadSigma(float quadSigma) { m_impl->setQuadSigma(quadSigma); }

#if defined(VISP_BUILD_DEPRECATED_FUNCTIONS)
// NOTE(review): signature lost during extraction; presumably
// "void vpDetectorAprilTag::setAprilTagRefineDecode(bool refineDecode)".
// Deprecated AprilTag 2 option; the Impl call is a no-op.
{
  m_impl->setRefineDecode(refineDecode);
}
#endif

//! Enable/disable edge refinement in the underlying detector.
void vpDetectorAprilTag::setAprilTagRefineEdges(bool refineEdges) { m_impl->setRefineEdges(refineEdges); }

#if defined(VISP_BUILD_DEPRECATED_FUNCTIONS)
//! Deprecated AprilTag 2 option; the Impl call is a no-op.
void vpDetectorAprilTag::setAprilTagRefinePose(bool refinePose) { m_impl->setRefinePose(refinePose); }
#endif

//! Choose between camera-aligned and historical (z-up) tag frame conventions.
void vpDetectorAprilTag::setZAlignedWithCameraAxis(bool zAlignedWithCameraFrame)
{
  m_impl->setZAlignedWithCameraAxis(zAlignedWithCameraFrame);
}
// NOTE(review): the matching BEGIN_VISP_NAMESPACE is not visible in this
// extract (presumably lost near the includes).
END_VISP_NAMESPACE
#elif !defined(VISP_BUILD_SHARED_LIBS)
// Work around to avoid warning: libvisp_core.a(vpDetectorAprilTag.cpp.o) has
// no symbols
void dummy_vpDetectorAprilTag() { }
#endif
Type * data
Address of the first element of the data array.
Definition: vpArray2D.h:148
Generic class defining intrinsic camera parameters.
Class to define RGB colors available for display functionalities.
Definition: vpColor.h:157
static const vpColor red
Definition: vpColor.h:217
static const vpColor none
Definition: vpColor.h:229
static const vpColor blue
Definition: vpColor.h:223
static const vpColor yellow
Definition: vpColor.h:225
static const vpColor green
Definition: vpColor.h:220
void setZAlignedWithCameraAxis(bool zAlignedWithCameraFrame)
std::vector< std::vector< vpImagePoint > > getTagsCorners() const
void setAprilTagQuadDecimate(float quadDecimate)
friend void swap(vpDetectorAprilTag &o1, vpDetectorAprilTag &o2)
void displayFrames(const vpImage< unsigned char > &I, const std::vector< vpHomogeneousMatrix > &cMo_vec, const vpCameraParameters &cam, double size, const vpColor &color, unsigned int thickness=1) const
vpDetectorAprilTag & operator=(vpDetectorAprilTag o)
std::vector< std::vector< vpPoint > > getTagsPoints3D(const std::vector< int > &tagsId, const std::map< int, double > &tagsSize) const
vpDetectorAprilTag(const vpAprilTagFamily &tagFamily=TAG_36h11, const vpPoseEstimationMethod &poseEstimationMethod=HOMOGRAPHY_VIRTUAL_VS)
bool detect(const vpImage< unsigned char > &I) VP_OVERRIDE
unsigned int m_displayTagThickness
vpAprilTagFamily m_tagFamily
void setAprilTagRefineEdges(bool refineEdges)
vpPoseEstimationMethod m_poseEstimationMethod
VP_DEPRECATED void setAprilTagRefinePose(bool refinePose)
@ TAG_CIRCLE21h7
AprilTag Circle21h7 pattern.
@ TAG_25h7
DEPRECATED AND POOR DETECTION PERFORMANCE.
@ TAG_36ARTOOLKIT
DEPRECATED AND WILL NOT DETECT ARTOOLKIT TAGS.
@ TAG_25h9
AprilTag 25h9 pattern.
@ TAG_CUSTOM48h12
AprilTag Custom48h12 pattern.
@ TAG_36h11
AprilTag 36h11 pattern (recommended)
@ TAG_STANDARD52h13
AprilTag Standard52h13 pattern.
@ TAG_16h5
AprilTag 16h5 pattern.
@ TAG_STANDARD41h12
AprilTag Standard41h12 pattern.
@ TAG_CIRCLE49h12
AprilTag Circle49h12 pattern.
void setAprilTagQuadSigma(float quadSigma)
void setAprilTagNbThreads(int nThreads)
VP_DEPRECATED void setAprilTagRefineDecode(bool refineDecode)
void setAprilTagPoseEstimationMethod(const vpPoseEstimationMethod &poseEstimationMethod)
std::vector< int > getTagsId() const
bool getPose(size_t tagIndex, double tagSize, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, vpHomogeneousMatrix *cMo2=nullptr, double *projError=nullptr, double *projError2=nullptr)
void displayTags(const vpImage< unsigned char > &I, const std::vector< std::vector< vpImagePoint > > &tagsCorners, const vpColor &color=vpColor::none, unsigned int thickness=1) const
void setAprilTagFamily(const vpAprilTagFamily &tagFamily)
virtual ~vpDetectorAprilTag() VP_OVERRIDE
void setAprilTagDecodeSharpening(double decodeSharpening)
std::vector< std::string > m_message
Message attached to each object.
std::vector< std::vector< vpImagePoint > > m_polygon
For each object, defines the polygon that contains the object.
size_t m_nb_objects
Number of detected objects.
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
error that can be emitted by ViSP classes.
Definition: vpException.h:60
@ fatalError
Fatal error.
Definition: vpException.h:72
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpRotationMatrix getRotationMatrix() const
vpTranslationVector getTranslationVector() const
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Definition: vpImagePoint.h:82
void set_uv(double u, double v)
Definition: vpImagePoint.h:357
unsigned int getWidth() const
Definition: vpImage.h:242
Type * bitmap
points toward the bitmap
Definition: vpImage.h:135
unsigned int getHeight() const
Definition: vpImage.h:181
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
Definition: vpPoint.h:79
void set_x(double x)
Set the point x coordinate in the image plane.
Definition: vpPoint.cpp:464
void setWorldCoordinates(double oX, double oY, double oZ)
Definition: vpPoint.cpp:111
void set_y(double y)
Set the point y coordinate in the image plane.
Definition: vpPoint.cpp:466
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
Definition: vpPose.h:77
@ DEMENTHON
Definition: vpPose.h:83
@ VIRTUAL_VS
Definition: vpPose.h:92
@ LAGRANGE
Definition: vpPose.h:82
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, FuncCheckValidityPose func=nullptr)
Definition: vpPose.cpp:385
void addPoints(const std::vector< vpPoint > &lP)
Definition: vpPose.cpp:103
double computeResidual(const vpHomogeneousMatrix &cMo) const
Compute and return the sum of squared residuals expressed in meter^2 for the pose matrix cMo.
Definition: vpPose.cpp:298