Visual Servoing Platform  version 3.1.0
tutorial-apriltag-detector-live.cpp
15 
/*!
 * Live AprilTag detection tutorial: grabs grayscale frames from a V4L2 or
 * OpenCV camera, detects AprilTags, estimates their pose from known camera
 * intrinsics and tag size, and overlays the result until the user clicks.
 */
int main(int argc, const char **argv)
{
// Build the tutorial only when an AprilTag detector, at least one display
// backend and at least one camera grabber are all available; otherwise fall
// through to the empty stub at the bottom.
#if defined(VISP_HAVE_APRILTAG) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV)) && \
    (defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_OPENCV))

  // Command-line configurable parameters (see --help output below).
  int opt_device = 0; // /dev/video<n> (V4L2) or OpenCV camera index
  // NOTE(review): the two declarations below were missing from the listing
  // (the variables are used but never declared). Defaults match the ViSP
  // AprilTag tutorial: homography-based virtual visual servoing, TAG_36h11.
  vpDetectorAprilTag::vpPoseEstimationMethod poseEstimationMethod = vpDetectorAprilTag::HOMOGRAPHY_VIRTUAL_VS;
  vpDetectorAprilTag::vpAprilTagFamily tagFamily = vpDetectorAprilTag::TAG_36h11;
  double tagSize = 0.053;        // tag side length in meters, needed for pose estimation
  float quad_decimate = 1.0;     // input decimation factor; > 1 trades accuracy for speed
  int nThreads = 1;              // AprilTag detector thread count
  std::string intrinsic_file = ""; // optional XML file with camera intrinsics
  std::string camera_name = "";    // camera entry to look up inside intrinsic_file
  bool display_tag = false;        // draw the detected tag outline when true

  // Minimal hand-rolled argument parsing. Every option that expects a value
  // checks i + 1 < argc so a trailing flag cannot read past the end of argv.
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--pose_method" && i + 1 < argc) {
      poseEstimationMethod = (vpDetectorAprilTag::vpPoseEstimationMethod)atoi(argv[i + 1]);
    } else if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) {
      tagSize = atof(argv[i + 1]);
    } else if (std::string(argv[i]) == "--input" && i + 1 < argc) {
      opt_device = atoi(argv[i + 1]);
    } else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) {
      quad_decimate = (float)atof(argv[i + 1]);
    } else if (std::string(argv[i]) == "--nthreads" && i + 1 < argc) {
      nThreads = atoi(argv[i + 1]);
    } else if (std::string(argv[i]) == "--intrinsic" && i + 1 < argc) {
      intrinsic_file = std::string(argv[i + 1]);
    } else if (std::string(argv[i]) == "--camera_name" && i + 1 < argc) {
      camera_name = std::string(argv[i + 1]);
    } else if (std::string(argv[i]) == "--display_tag") {
      display_tag = true;
    } else if (std::string(argv[i]) == "--tag_family" && i + 1 < argc) {
      tagFamily = (vpDetectorAprilTag::vpAprilTagFamily)atoi(argv[i + 1]);
    } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: " << argv[0]
                << " [--input <camera input>] [--tag_size <tag_size in m>]"
                   " [--quad_decimate <quad_decimate>] [--nthreads <nb>]"
                   " [--intrinsic <intrinsic file>] [--camera_name <camera name>]"
                   " [--pose_method <method> (0: HOMOGRAPHY_VIRTUAL_VS, 1: "
                   "DEMENTHON_VIRTUAL_VS,"
                   " 2: LAGRANGE_VIRTUAL_VS, 3: BEST_RESIDUAL_VIRTUAL_VS)]"
                   " [--tag_family <family> (0: TAG_36h11, 1: TAG_36h10, 2: "
                   "TAG_36ARTOOLKIT,"
                   " 3: TAG_25h9, 4: TAG_25h7, 5: TAG_16h5)]"
                   " [--display_tag] [--help]"
                << std::endl;
      return EXIT_SUCCESS;
    }
  }

  // Fallback intrinsics (px, py, u0, v0). Overridden below when both an
  // intrinsic XML file and a camera name are supplied on the command line.
  // NOTE(review): `cam` was used but not declared in the listing; restored here.
  vpCameraParameters cam;
  cam.initPersProjWithoutDistortion(615.1674805, 615.1675415, 312.1889954, 243.4373779);
#ifdef VISP_HAVE_XML2
  vpXmlParserCamera parser;
  if (!intrinsic_file.empty() && !camera_name.empty())
    parser.parse(cam, intrinsic_file, camera_name, vpCameraParameters::perspectiveProjWithoutDistortion);
#endif
  std::cout << "cam:\n" << cam << std::endl;
  std::cout << "poseEstimationMethod: " << poseEstimationMethod << std::endl;
  std::cout << "tagFamily: " << tagFamily << std::endl;

  try {
    // Grayscale frame shared by the grabber, the detector and the display.
    // NOTE(review): `I` was used but not declared in the listing; restored here.
    vpImage<unsigned char> I;

#if defined(VISP_HAVE_V4L2)
    vpV4l2Grabber g;
    std::ostringstream device;
    device << "/dev/video" << opt_device;
    g.setDevice(device.str());
    g.setScale(1); // full resolution
    g.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
    cv::VideoCapture cap(opt_device); // open the default camera
    if (!cap.isOpened()) {            // check if we succeeded
      std::cout << "Failed to open the camera" << std::endl;
      return EXIT_FAILURE;
    }
    cv::Mat frame;
    cap >> frame; // get a new frame from camera
    vpImageConvert::convert(frame, I);
#endif

#ifdef VISP_HAVE_X11
    vpDisplayX d(I);
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI d(I);
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV d(I);
#endif

    // Stack allocation instead of `new vpDetectorAprilTag` + manual delete:
    // no leak is possible if acquire()/detect() throws inside the loop, and
    // the redundant dynamic_casts on every setter go away.
    vpDetectorAprilTag detector(tagFamily);
    detector.setAprilTagQuadDecimate(quad_decimate);
    detector.setAprilTagPoseEstimationMethod(poseEstimationMethod);
    detector.setAprilTagNbThreads(nThreads);
    detector.setDisplayTag(display_tag);

    std::vector<double> time_vec; // per-frame detection time, for the final stats
    for (;;) {
#if defined(VISP_HAVE_V4L2)
      g.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
      cap >> frame; // get a new frame from camera
      vpImageConvert::convert(frame, I);
#endif

      // Redraw the fresh frame before overlaying; without this call the
      // flush below would present stale content.
      vpDisplay::display(I);

      double t = vpTime::measureTimeMs();
      // detect() fills one camera-to-tag pose per detected tag.
      std::vector<vpHomogeneousMatrix> cMo_vec;
      detector.detect(I, tagSize, cam, cMo_vec);
      t = vpTime::measureTimeMs() - t;
      time_vec.push_back(t);

      std::stringstream ss;
      ss << "Detection time: " << t << " ms for " << detector.getNbObjects() << " tags";
      vpDisplay::displayText(I, 40, 20, ss.str(), vpColor::red);

      // Overlay a coordinate frame (half the tag size) on every detected tag.
      for (size_t i = 0; i < cMo_vec.size(); i++) {
        vpDisplay::displayFrame(I, cMo_vec[i], cam, tagSize / 2, vpColor::none, 3);
      }

      vpDisplay::displayText(I, 20, 20, "Click to quit.", vpColor::red);
      vpDisplay::flush(I);
      if (vpDisplay::getClick(I, false))
        break;
    }

    std::cout << "Benchmark computation time" << std::endl;
    std::cout << "Mean / Median / Std: " << vpMath::getMean(time_vec) << " ms"
              << " ; " << vpMath::getMedian(time_vec) << " ms"
              << " ; " << vpMath::getStdev(time_vec) << " ms" << std::endl;
  } catch (const vpException &e) {
    std::cerr << "Catch an exception: " << e.getMessage() << std::endl;
  }

  return EXIT_SUCCESS;
#else
  (void)argc;
  (void)argv;
  return 0;
#endif
}
void acquire(vpImage< unsigned char > &I)
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static double getStdev(const std::vector< double > &v, const bool useBesselCorrection=false)
Definition: vpMath.cpp:252
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static double getMedian(const std::vector< double > &v)
Definition: vpMath.cpp:222
Display for windows using GDI (available on any windows 32 platform).
Definition: vpDisplayGDI.h:129
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
Use the X11 console to display images on unix-like OS. Thus to enable this class X11 should be installed.
Definition: vpDisplayX.h:151
void setDevice(const std::string &devname)
static const vpColor none
Definition: vpColor.h:192
Error that can be emitted by ViSP classes.
Definition: vpException.h:71
size_t getNbObjects() const
XML parser to load and save intrinsic camera parameters.
static void flush(const vpImage< unsigned char > &I)
VISP_EXPORT double measureTimeMs()
Definition: vpTime.cpp:88
static const vpColor red
Definition: vpColor.h:180
void initPersProjWithoutDistortion(const double px, const double py, const double u0, const double v0)
static double getMean(const std::vector< double > &v)
Definition: vpMath.cpp:202
static void display(const vpImage< unsigned char > &I)
The vpDisplayOpenCV class allows displaying images using the OpenCV library. Thus to enable this class OpenCV should be installed.
Generic class defining intrinsic camera parameters.
void setScale(unsigned scale=vpV4l2Grabber::DEFAULT_SCALE)
const char * getMessage(void) const
Definition: vpException.cpp:90
Class that is a wrapper over the Video4Linux2 (V4L2) driver.
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0))
int parse(vpCameraParameters &cam, const std::string &filename, const std::string &camera_name, const vpCameraParameters::vpCameraParametersProjType &projModel, const unsigned int image_width=0, const unsigned int image_height=0)