Visual Servoing Platform  version 3.6.1 under development (2024-07-27)
tutorial-apriltag-detector.cpp
#include <visp3/core/vpConfig.h>
#include <visp3/detection/vpDetectorAprilTag.h>
#include <visp3/core/vpImageConvert.h> // for vpImageConvert::convert()
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>

int main(int argc, const char **argv)
{
#ifdef ENABLE_VISP_NAMESPACE
  using namespace VISP_NAMESPACE_NAME;
#endif

#if defined(VISP_HAVE_APRILTAG) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
#ifdef ENABLE_VISP_NAMESPACE
  using namespace VISP_NAMESPACE_NAME;
#endif
  std::string input_filename = "AprilTag.pgm";
  vpDetectorAprilTag::vpAprilTagFamily tagFamily = vpDetectorAprilTag::TAG_36h11;
  vpDetectorAprilTag::vpPoseEstimationMethod poseEstimationMethod = vpDetectorAprilTag::HOMOGRAPHY;
  double tagSize = 0.053;
  float quad_decimate = 1.0;
  int nThreads = 1;
  std::string intrinsic_file = "";
  std::string camera_name = "";
  bool display_tag = false;
  int color_id = -1;
  unsigned int thickness = 2;
  bool z_aligned = false;

  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--pose_method" && i + 1 < argc) {
      poseEstimationMethod = static_cast<vpDetectorAprilTag::vpPoseEstimationMethod>(atoi(argv[i + 1]));
    }
    else if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) {
      tagSize = atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--input" && i + 1 < argc) {
      input_filename = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) {
      quad_decimate = static_cast<float>(atof(argv[i + 1]));
    }
    else if (std::string(argv[i]) == "--nthreads" && i + 1 < argc) {
      nThreads = atoi(argv[i + 1]);
    }
#if defined(VISP_HAVE_PUGIXML)
    else if (std::string(argv[i]) == "--intrinsic" && i + 1 < argc) {
      intrinsic_file = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--camera_name" && i + 1 < argc) {
      camera_name = std::string(argv[i + 1]);
    }
#endif
    else if (std::string(argv[i]) == "--display_tag") {
      display_tag = true;
    }
    else if (std::string(argv[i]) == "--color" && i + 1 < argc) {
      color_id = atoi(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--thickness" && i + 1 < argc) {
      thickness = static_cast<unsigned int>(atoi(argv[i + 1]));
    }
    else if (std::string(argv[i]) == "--tag_family" && i + 1 < argc) {
      tagFamily = static_cast<vpDetectorAprilTag::vpAprilTagFamily>(atoi(argv[i + 1]));
    }
    else if (std::string(argv[i]) == "--z_aligned") {
      z_aligned = true;
    }
    else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: " << argv[0]
        << " [--input <input file>] [--tag_size <tag_size in m>]"
           " [--quad_decimate <quad_decimate>] [--nthreads <nb>]"
           " [--intrinsic <intrinsic file>] [--camera_name <camera name>]"
           " [--pose_method <method> (0: HOMOGRAPHY, 1: HOMOGRAPHY_VIRTUAL_VS,"
           " 2: DEMENTHON_VIRTUAL_VS, 3: LAGRANGE_VIRTUAL_VS,"
           " 4: BEST_RESIDUAL_VIRTUAL_VS, 5: HOMOGRAPHY_ORTHOGONAL_ITERATION) (default: 0)]"
           " [--tag_family <family> (0: TAG_36h11, 1: TAG_36h10 (DEPRECATED), 2: TAG_36ARTOOLKIT (DEPRECATED),"
           " 3: TAG_25h9, 4: TAG_25h7 (DEPRECATED), 5: TAG_16h5, 6: TAG_CIRCLE21h7, 7: TAG_CIRCLE49h12,"
           " 8: TAG_CUSTOM48h12, 9: TAG_STANDARD41h12, 10: TAG_STANDARD52h13) (default: 0)]"
           " [--display_tag] [--color <color_id (0, 1, ...)>]"
           " [--thickness <thickness>] [--z_aligned]"
           " [--help]"
        << std::endl;
      return EXIT_SUCCESS;
    }
  }

  vpCameraParameters cam;
  cam.initPersProjWithoutDistortion(615.1674805, 615.1675415, 312.1889954, 243.4373779);
#if defined(VISP_HAVE_PUGIXML)
  vpXmlParserCamera parser;
  if (!intrinsic_file.empty() && !camera_name.empty()) {
    parser.parse(cam, intrinsic_file, camera_name, vpCameraParameters::perspectiveProjWithoutDistortion);
  }
#endif

  std::cout << cam << std::endl;
  std::cout << "poseEstimationMethod: " << poseEstimationMethod << std::endl;
  std::cout << "tagFamily: " << tagFamily << std::endl;
  std::cout << "nThreads : " << nThreads << std::endl;
  std::cout << "Z aligned: " << z_aligned << std::endl;

  try {
    vpImage<vpRGBa> I_color;
    vpImageIo::read(I_color, input_filename);
    // Work on a grayscale copy of the input image for detection and display
    vpImage<unsigned char> I;
    vpImageConvert::convert(I_color, I);

#ifdef VISP_HAVE_X11
    vpDisplayX d(I);
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI d(I);
#elif defined(HAVE_OPENCV_HIGHGUI)
    vpDisplayOpenCV d(I);
#endif

    // Build the AprilTag detector for the selected tag family
    vpDetectorAprilTag detector(tagFamily);

    // Detector settings
    detector.setAprilTagQuadDecimate(quad_decimate);
    detector.setAprilTagPoseEstimationMethod(poseEstimationMethod);
    detector.setAprilTagNbThreads(nThreads);
    detector.setDisplayTag(display_tag, color_id < 0 ? vpColor::none : vpColor::getColor(color_id), thickness);
    detector.setZAlignedWithCameraAxis(z_aligned);

    vpDisplay::display(I);

    double t = vpTime::measureTimeMs();
    std::vector<vpHomogeneousMatrix> cMo_vec;
    // Detect the tags and compute the pose cMo of each of them in the camera frame
    detector.detect(I, tagSize, cam, cMo_vec);
    t = vpTime::measureTimeMs() - t;

    std::stringstream ss;
    ss << "Detection time: " << t << " ms for " << detector.getNbObjects() << " tags";
    vpDisplay::displayText(I, 40, 20, ss.str(), vpColor::red);

    // For each detected tag: corners, bounding box and decoded message "<family> id: <id>"
    for (size_t i = 0; i < detector.getNbObjects(); i++) {
      std::vector<vpImagePoint> p = detector.getPolygon(i);
      vpRect bbox = detector.getBBox(i);
      std::string message = detector.getMessage(i);
      std::size_t tag_id_pos = message.find("id: ");
      if (tag_id_pos != std::string::npos) {
        int tag_id = atoi(message.substr(tag_id_pos + 4).c_str());
        ss.str("");
        ss << "Tag id: " << tag_id;
        vpDisplay::displayText(I, static_cast<int>(bbox.getTop() - 10), static_cast<int>(bbox.getLeft()), ss.str(), vpColor::red);
      }
      // Draw and number the four tag corners
      for (size_t j = 0; j < p.size(); j++) {
        vpDisplay::displayCross(I, p[j], 14, vpColor::red, 3);
        std::ostringstream number;
        number << j;
        vpDisplay::displayText(I, p[j] + vpImagePoint(15, 5), number.str(), vpColor::blue);
      }
    }

    vpDisplay::displayText(I, 20, 20, "Click to display tag poses", vpColor::red);
    vpDisplay::flush(I);
    vpDisplay::getClick(I);

    vpDisplay::display(I);

    // Display the frame corresponding to each estimated tag pose
    for (size_t i = 0; i < cMo_vec.size(); i++) {
      vpDisplay::displayFrame(I, cMo_vec[i], cam, tagSize / 2, vpColor::none, 3);
    }

    vpDisplay::displayText(I, 20, 20, "Click to quit.", vpColor::red);
    vpDisplay::flush(I);
    vpDisplay::getClick(I);

#ifdef VISP_HAVE_X11
    vpDisplayX d2(I_color, 50, 50);
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI d2(I_color, 50, 50);
#elif defined(HAVE_OPENCV_HIGHGUI)
    vpDisplayOpenCV d2(I_color, 50, 50);
#endif
    // To test the displays on a vpRGBa image
    vpDisplay::display(I_color);

    // Display frames and tags, but remove the display of the last element
    // (assumes at least one tag was detected)
    std::vector<std::vector<vpImagePoint> > tagsCorners = detector.getTagsCorners();
    tagsCorners.pop_back();
    detector.displayTags(I_color, tagsCorners, vpColor::none, 3);

    cMo_vec.pop_back();
    detector.displayFrames(I_color, cMo_vec, cam, tagSize / 2, vpColor::none, 3);

    vpDisplay::displayText(I_color, 20, 20, "Click to quit.", vpColor::red);
    vpDisplay::flush(I_color);
    vpDisplay::getClick(I_color);
  }
  catch (const vpException &e) {
    std::cerr << "Catch an exception: " << e.getMessage() << std::endl;
  }

#else
  (void)argc;
  (void)argv;
#endif
  return EXIT_SUCCESS;
}
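
The tutorial computes all the poses in a single call to detect(I, tagSize, cam, cMo_vec), which assumes every tag in the image has the same size. When tags have different sizes, the pose can also be computed tag by tag after a plain detection, using vpDetectorAprilTag::getPose(). The sketch below illustrates that variant; the function name poseForEachTag and the 0.053 m size are placeholders, and the grayscale image and camera parameters are assumed to be set up as in the tutorial above.

#include <iostream>

#include <visp3/core/vpCameraParameters.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpImage.h>
#include <visp3/detection/vpDetectorAprilTag.h>

#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif

// Sketch: detection first, then one pose computation per tag.
void poseForEachTag(const vpImage<unsigned char> &I, const vpCameraParameters &cam)
{
  vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11);
  detector.detect(I); // detection only; no pose is computed here

  for (size_t i = 0; i < detector.getNbObjects(); i++) {
    double tag_size = 0.053; // placeholder: use the actual size of tag i
    vpHomogeneousMatrix cMo;
    if (detector.getPose(i, tag_size, cam, cMo)) {
      std::cout << "Tag " << i << " pose:\n" << cMo << std::endl;
    }
  }
}

Compared to the single detect() call of the tutorial, this form trades one extra getPose() call per tag for the freedom to give each tag its own size.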