Visual Servoing Platform  version 3.6.1 under development (2025-02-18)
tutorial-mb-generic-tracker-live.cpp
1 #include <iostream>
3 
4 #include <visp3/core/vpConfig.h>
5 
7 // #undef VISP_HAVE_V4L2
8 // #undef VISP_HAVE_DC1394
9 // #undef VISP_HAVE_CMU1394
10 // #undef VISP_HAVE_FLYCAPTURE
11 // #undef VISP_HAVE_REALSENSE2
12 // #undef HAVE_OPENCV_HIGHGUI
13 // #undef HAVE_OPENCV_VIDEOIO
15 
16 #if (defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
17  defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2) || \
18  ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI)) || \
19  ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO))) && defined(VISP_HAVE_DISPLAY) && \
20  ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_CALIB3D) && defined(HAVE_OPENCV_FEATURES2D)) || \
21  ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_3D) && defined(HAVE_OPENCV_FEATURES))
22 
23 #ifdef VISP_HAVE_MODULE_SENSOR
24 #include <visp3/sensor/vp1394CMUGrabber.h>
25 #include <visp3/sensor/vp1394TwoGrabber.h>
26 #include <visp3/sensor/vpFlyCaptureGrabber.h>
27 #include <visp3/sensor/vpRealSense2.h>
28 #include <visp3/sensor/vpV4l2Grabber.h>
29 #endif
30 #include <visp3/core/vpIoTools.h>
31 #include <visp3/core/vpXmlParserCamera.h>
32 #include <visp3/gui/vpDisplayFactory.h>
33 #include <visp3/io/vpImageIo.h>
34 #include <visp3/vision/vpKeyPoint.h>
36 #include <visp3/mbt/vpMbGenericTracker.h>
38 
39 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI)
40 #include <opencv2/highgui/highgui.hpp> // for cv::VideoCapture
41 #elif (VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO)
42 #include <opencv2/videoio/videoio.hpp> // for cv::VideoCapture
43 #endif
44 
45 int main(int argc, char **argv)
46 {
47 #ifdef ENABLE_VISP_NAMESPACE
48  using namespace VISP_NAMESPACE_NAME;
49 #endif
50 
51 #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
52  std::shared_ptr<vpDisplay> display;
53 #else
54  vpDisplay *display = nullptr;
55 #endif
56 
57  try {
58  std::string opt_modelname = "model/teabox/teabox.cao";
59  int opt_tracker = 2;
60  int opt_device = 0; // For OpenCV and V4l2 grabber to set the camera device
61  double opt_proj_error_threshold = 30.;
62  bool opt_use_ogre = false;
63  bool opt_use_scanline = false;
64  bool opt_display_projection_error = false;
65  bool opt_learn = false;
66  bool opt_auto_init = false;
67  std::string opt_learning_data = "learning/data-learned.bin";
68  std::string opt_intrinsic_file = "";
69  std::string opt_camera_name = "";
70 
71  for (int i = 1; i < argc; i++) {
72  if (std::string(argv[i]) == "--model" && i + 1 < argc) {
73  opt_modelname = std::string(argv[++i]);
74  }
75  else if (std::string(argv[i]) == "--tracker" && i + 1 < argc) {
76  opt_tracker = atoi(argv[++i]);
77  }
78  else if (std::string(argv[i]) == "--camera-device" && i + 1 < argc) {
79  opt_device = atoi(argv[++i]);
80  }
81  else if (std::string(argv[i]) == "--max_proj_error" && i + 1 < argc) {
82  opt_proj_error_threshold = atof(argv[++i]);
83  }
84  else if (std::string(argv[i]) == "--use_ogre") {
85  opt_use_ogre = true;
86  }
87  else if (std::string(argv[i]) == "--use_scanline") {
88  opt_use_scanline = true;
89  }
90  else if (std::string(argv[i]) == "--learn") {
91  opt_learn = true;
92  }
93  else if (std::string(argv[i]) == "--learning_data" && i + 1 < argc) {
94  opt_learning_data = argv[++i];
95  }
96  else if (std::string(argv[i]) == "--auto_init") {
97  opt_auto_init = true;
98  }
99  else if (std::string(argv[i]) == "--display_proj_error") {
100  opt_display_projection_error = true;
101  }
102  else if (std::string(argv[i]) == "--intrinsic" && i + 1 < argc) {
103  opt_intrinsic_file = std::string(argv[++i]);
104  }
105  else if (std::string(argv[i]) == "--camera-name" && i + 1 < argc) {
106  opt_camera_name = std::string(argv[++i]);
107  }
108  else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
109  std::cout
110  << "\nUsage: " << argv[0] << " [--camera-device <camera device> (default: 0)]"
111  << " [--intrinsic <intrinsic file> (default: empty)]"
112  << " [--camera-name <camera name> (default: empty)]"
113  << " [--model <model name> (default: teabox)]"
114  << " [--tracker <0=egde|1=keypoint|2=hybrid> (default: 2)]"
115  << " [--use_ogre] [--use_scanline]"
116  << " [--max_proj_error <allowed projection error> (default: 30)]"
117  << " [--learn]"
118  << " [--auto_init]"
119  << " [--learning_data <data-learned.bin> (default: learning/data-learned.bin)]"
120  << " [--display_proj_error]"
121  << " [--help] [-h]\n"
122  << std::endl;
123  return EXIT_SUCCESS;
124  }
125  }
126  std::string parentname = vpIoTools::getParent(opt_modelname);
127  std::string objectname = vpIoTools::getNameWE(opt_modelname);
128 
129  if (!parentname.empty())
130  objectname = parentname + "/" + objectname;
131 
132  std::cout << "Tracker requested config files: " << objectname << ".[init, cao]" << std::endl;
133  std::cout << "Tracker optional config files: " << objectname << ".[ppm]" << std::endl;
134 
135  std::cout << "Tracked features: " << std::endl;
136  std::cout << " Use edges : " << (opt_tracker == 0 || opt_tracker == 2) << std::endl;
137  std::cout << " Use klt : " << (opt_tracker == 1 || opt_tracker == 2) << std::endl;
138  std::cout << "Tracker options: " << std::endl;
139  std::cout << " Use ogre : " << opt_use_ogre << std::endl;
140  std::cout << " Use scanline: " << opt_use_scanline << std::endl;
141  std::cout << " Proj. error : " << opt_proj_error_threshold << std::endl;
142  std::cout << " Display proj. error: " << opt_display_projection_error << std::endl;
143  std::cout << "Config files: " << std::endl;
144  std::cout << " Config file : "
145  << "\"" << objectname + ".xml"
146  << "\"" << std::endl;
147  std::cout << " Model file : "
148  << "\"" << objectname + ".cao"
149  << "\"" << std::endl;
150  std::cout << " Init file : "
151  << "\"" << objectname + ".init"
152  << "\"" << std::endl;
153  std::cout << "Learning options : " << std::endl;
154  std::cout << " Learn : " << opt_learn << std::endl;
155  std::cout << " Auto init : " << opt_auto_init << std::endl;
156  std::cout << " Learning data: " << opt_learning_data << std::endl;
157 
159 #if VISP_VERSION_INT > VP_VERSION_INT(3, 2, 0)
160  vpImage<vpRGBa> I; // Since ViSP 3.2.0 we support model-based tracking on color images
161 #else
162  vpImage<unsigned char> I; // Tracking on gray level images
163 #endif
165 
167  vpCameraParameters cam;
168  cam.initPersProjWithoutDistortion(839, 839, 325, 243);
170 
171 #if defined(VISP_HAVE_PUGIXML)
172  vpXmlParserCamera parser;
173  if (!opt_intrinsic_file.empty() && !opt_camera_name.empty()) {
174  parser.parse(cam, opt_intrinsic_file, opt_camera_name, vpCameraParameters::perspectiveProjWithoutDistortion);
175  }
176 #endif
177 
181 
183 #if defined(VISP_HAVE_V4L2)
184  vpV4l2Grabber g;
185  std::ostringstream device;
186  device << "/dev/video" << opt_device;
187  std::cout << "Use Video 4 Linux grabber on device " << device.str() << std::endl;
188  g.setDevice(device.str());
189  g.setScale(1);
190  g.open(I);
191 #elif defined(VISP_HAVE_DC1394)
192  (void)opt_device; // To avoid non used warning
193  std::cout << "Use DC1394 grabber" << std::endl;
195  g.open(I);
196 #elif defined(VISP_HAVE_CMU1394)
197  (void)opt_device; // To avoid non used warning
198  std::cout << "Use CMU1394 grabber" << std::endl;
200  g.open(I);
201 #elif defined(VISP_HAVE_FLYCAPTURE)
202  (void)opt_device; // To avoid non used warning
203  std::cout << "Use FlyCapture grabber" << std::endl;
205  g.open(I);
206 #elif defined(VISP_HAVE_REALSENSE2)
207  (void)opt_device; // To avoid non used warning
208  std::cout << "Use Realsense 2 grabber" << std::endl;
209  vpRealSense2 g;
210  rs2::config config;
211  config.disable_stream(RS2_STREAM_DEPTH);
212  config.disable_stream(RS2_STREAM_INFRARED);
213  config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
214  g.open(config);
215  g.acquire(I);
216 
217  std::cout << "Read camera parameters from Realsense device" << std::endl;
219 #elif ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI))|| ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO))
220  std::cout << "Use OpenCV grabber on device " << opt_device << std::endl;
221  cv::VideoCapture g(opt_device); // Open the default camera
222  if (!g.isOpened()) { // Check if we succeeded
223  std::cout << "Failed to open the camera" << std::endl;
224  return EXIT_FAILURE;
225  }
226  cv::Mat frame;
227  g >> frame; // get a new frame from camera
228  vpImageConvert::convert(frame, I);
229 #endif
231 
232 #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
234 #else
236 #endif
237  display->init(I, 100, 100, "Model-based tracker");
238 
239  while (true) {
240 #if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
241  g.acquire(I);
242 #elif ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI))|| ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO))
243  g >> frame;
244  vpImageConvert::convert(frame, I);
245 #endif
246 
248  vpDisplay::displayText(I, 20, 20, "Click when ready.", vpColor::red);
249  vpDisplay::flush(I);
250 
251  if (vpDisplay::getClick(I, false)) {
252  break;
253  }
254  }
255 
257  vpMbGenericTracker tracker;
258  if (opt_tracker == 0)
260 #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
261  else if (opt_tracker == 1)
262  tracker.setTrackerType(vpMbGenericTracker::KLT_TRACKER);
263  else
264  tracker.setTrackerType(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
265 #else
266  else {
267 #if !defined(VISP_HAVE_MODULE_KLT)
268  std::cout << "klt and hybrid model-based tracker are not available since visp_klt module is not available. "
269  "In CMakeGUI turn visp_klt module ON, configure and build ViSP again."
270  << std::endl;
271 #else
272  std::cout << "Hybrid tracking is impossible since OpenCV is not enabled. "
273  << "Install OpenCV, configure and build ViSP again to run this tutorial." << std::endl;
274 #endif
275  return EXIT_SUCCESS;
276  }
277 #endif
279 
280  bool usexml = false;
282 #if defined(VISP_HAVE_PUGIXML)
283  if (vpIoTools::checkFilename(objectname + ".xml")) {
284  tracker.loadConfigFile(objectname + ".xml");
285  usexml = true;
286  }
287 #endif
289 
290  if (!usexml) {
292  if (opt_tracker == 0 || opt_tracker == 2) {
294  vpMe me;
295  me.setMaskSize(5);
296  me.setMaskNumber(180);
297  me.setRange(8);
299  me.setThreshold(20);
300  me.setMu1(0.5);
301  me.setMu2(0.5);
302  me.setSampleStep(4);
303  tracker.setMovingEdge(me);
305  }
306 
307 #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
308  if (opt_tracker == 1 || opt_tracker == 2) {
310  vpKltOpencv klt_settings;
311  klt_settings.setMaxFeatures(300);
312  klt_settings.setWindowSize(5);
313  klt_settings.setQuality(0.015);
314  klt_settings.setMinDistance(8);
315  klt_settings.setHarrisFreeParameter(0.01);
316  klt_settings.setBlockSize(3);
317  klt_settings.setPyramidLevels(3);
318  tracker.setKltOpencv(klt_settings);
319  tracker.setKltMaskBorder(5);
321  }
322 #endif
323  }
324 
325  tracker.setCameraParameters(cam);
327 
329  tracker.loadModel(objectname + ".cao");
332  tracker.setDisplayFeatures(true);
335  tracker.setOgreVisibilityTest(opt_use_ogre);
336  tracker.setScanLineVisibilityTest(opt_use_scanline);
339  tracker.setProjectionErrorComputation(true);
340  tracker.setProjectionErrorDisplay(opt_display_projection_error);
342 
343 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_XFEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
344  std::string detectorName = "SIFT";
345  std::string extractorName = "SIFT";
346  std::string matcherName = "BruteForce";
347 #elif ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
348  std::string detectorName = "FAST";
349  std::string extractorName = "ORB";
350  std::string matcherName = "BruteForce-Hamming";
351 #endif
352  vpKeyPoint keypoint;
353  if (opt_learn || opt_auto_init) {
354  keypoint.setDetector(detectorName);
355  keypoint.setExtractor(extractorName);
356  keypoint.setMatcher(matcherName);
357 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
358 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
359  keypoint.setDetectorParameter("ORB", "nLevels", 1);
360 #else
361  cv::Ptr<cv::ORB> orb_detector = keypoint.getDetector("ORB").dynamicCast<cv::ORB>();
362  if (orb_detector) {
363  orb_detector->setNLevels(1);
364  }
365 #endif
366 #endif
367  }
368 
369  if (opt_auto_init) {
370  if (!vpIoTools::checkFilename(opt_learning_data)) {
371  std::cout << "Cannot enable auto detection. Learning file \"" << opt_learning_data << "\" doesn't exist"
372  << std::endl;
373  return EXIT_FAILURE;
374  }
375  keypoint.loadLearningData(opt_learning_data, true);
376  }
377  else {
378  tracker.initClick(I, objectname + ".init", true);
379  }
380 
381  bool learn_position = false;
382  bool run_auto_init = false;
383  if (opt_auto_init) {
384  run_auto_init = true;
385  }
386 
387  // To be able to display keypoints matching with test-detection-rs2
388  int learn_id = 1;
389  unsigned int learn_cpt = 0;
390  bool quit = false;
391  bool tracking_failed = false;
392 
393  while (!quit) {
394  double t_begin = vpTime::measureTimeMs();
395 #if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
396  g.acquire(I);
397 #elif ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI))|| ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO))
398  g >> frame;
399  vpImageConvert::convert(frame, I);
400 #endif
402 
403  // Run auto initialization from learned data
404  if (run_auto_init) {
405  tracking_failed = false;
406  if (keypoint.matchPoint(I, cam, cMo)) {
407  std::cout << "Auto init succeed" << std::endl;
408  tracker.initFromPose(I, cMo);
409  }
410  else {
411  vpDisplay::flush(I);
412  continue;
413  }
414  }
415  else if (tracking_failed) {
416  // Manual init
417  tracking_failed = false;
418  tracker.initClick(I, objectname + ".init", true);
419  }
420 
421  // Run the tracker
422  try {
423  if (run_auto_init) {
424  // Turn display features off just after auto init to not display wrong moving-edge if the tracker fails
425  tracker.setDisplayFeatures(false);
426 
427  run_auto_init = false;
428  }
429  tracker.track(I);
430  }
431  catch (const vpException &e) {
432  std::cout << "Tracker exception: " << e.getStringMessage() << std::endl;
433  tracking_failed = true;
434  if (opt_auto_init) {
435  std::cout << "Tracker needs to restart (tracking exception)" << std::endl;
436  run_auto_init = true;
437  }
438  }
439 
440  if (!tracking_failed) {
441  double proj_error = 0;
443  // Check tracking errors
444  proj_error = tracker.getProjectionError();
445  }
446  else {
447  tracker.getPose(cMo);
448  tracker.getCameraParameters(cam);
449  proj_error = tracker.computeCurrentProjectionError(I, cMo, cam);
450  }
451  if (proj_error > opt_proj_error_threshold) {
452  std::cout << "Tracker needs to restart (projection error detected: " << proj_error << ")" << std::endl;
453  if (opt_auto_init) {
454  run_auto_init = true;
455  }
456  tracking_failed = true;
457  }
458  }
459 
460  if (!tracking_failed) {
461  tracker.setDisplayFeatures(true);
463  tracker.getPose(cMo);
466  tracker.getCameraParameters(cam);
467  tracker.display(I, cMo, cam, vpColor::green, 2, false);
469  vpDisplay::displayFrame(I, cMo, cam, 0.025, vpColor::none, 3);
470 
471  { // Display estimated pose in [m] and [deg]
472  vpPoseVector pose(cMo);
473  std::stringstream ss;
474  ss << "Translation: " << std::setprecision(5) << pose[0] << " " << pose[1] << " " << pose[2] << " [m]";
475  vpDisplay::displayText(I, 80, 20, ss.str(), vpColor::green);
476  ss.str(""); // erase ss
477  ss << "Rotation tu: " << std::setprecision(4) << vpMath::deg(pose[3]) << " " << vpMath::deg(pose[4]) << " "
478  << vpMath::deg(pose[5]) << " [deg]";
479  vpDisplay::displayText(I, 100, 20, ss.str(), vpColor::green);
480  }
481  {
482  std::stringstream ss;
483  ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt();
484  vpDisplay::displayText(I, 120, 20, ss.str(), vpColor::red);
485  }
486  }
487 
488  if (learn_position) {
489  learn_cpt++;
490  // Detect keypoints on the current image
491  std::vector<cv::KeyPoint> trainKeyPoints;
492  keypoint.detect(I, trainKeyPoints);
493 
494  // Keep only keypoints on the cube
495  std::vector<vpPolygon> polygons;
496  std::vector<std::vector<vpPoint> > roisPt;
497  std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
498  polygons = pair.first;
499  roisPt = pair.second;
500 
501  // Compute the 3D coordinates
502  std::vector<cv::Point3f> points3f;
503  vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);
504 
505  // Build the reference keypoints
506  keypoint.buildReference(I, trainKeyPoints, points3f, true, learn_id++);
507 
508  // Display learned data
509  for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
510  vpDisplay::displayCross(I, (int)it->pt.y, (int)it->pt.x, 10, vpColor::yellow, 3);
511  }
512  learn_position = false;
513  std::cout << "Data learned" << std::endl;
514  }
515 
516  std::stringstream ss;
517  ss << "Loop time: " << vpTime::measureTimeMs() - t_begin << " ms";
518  vpDisplay::displayText(I, 20, 20, ss.str(), vpColor::red);
519  if (opt_learn)
520  vpDisplay::displayText(I, 35, 20, "Left click: learn Right click: quit", vpColor::red);
521  else if (opt_auto_init)
522  vpDisplay::displayText(I, 35, 20, "Left click: auto_init Right click: quit", vpColor::red);
523  else
524  vpDisplay::displayText(I, 35, 20, "Right click: quit", vpColor::red);
525 
527  if (vpDisplay::getClick(I, button, false)) {
528  if (button == vpMouseButton::button3) {
529  quit = true;
530  }
531  else if (button == vpMouseButton::button1 && opt_learn) {
532  learn_position = true;
533  }
534  else if (button == vpMouseButton::button1 && opt_auto_init && !opt_learn) {
535  run_auto_init = true;
536  }
537  }
538 
539  vpDisplay::flush(I);
540  }
541  if (opt_learn && learn_cpt) {
542  std::cout << "Save learning from " << learn_cpt << " images in file: " << opt_learning_data << std::endl;
543  keypoint.saveLearningData(opt_learning_data, true, true);
544  }
545  }
546  catch (const vpException &e) {
547  std::cout << "Catch a ViSP exception: " << e << std::endl;
548  }
549 
551 #if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
552  if (display != nullptr) {
553  delete display;
554  }
555 #endif
557 }
558 
559 #else
560 
int main()
{
  // Fallback entry point used when no frame-grabbing backend is available:
  // tell the user which dependency to install before rebuilding ViSP.
#if defined(VISP_HAVE_OPENCV)
  const char *msg = "Install a 3rd party dedicated to frame grabbing (dc1394, cmu1394, v4l2, OpenCV, FlyCapture, "
                    "Realsense2), configure and build ViSP again to use this tutorial.";
#else
  const char *msg = "Install OpenCV 3rd party, configure and build ViSP again to use this tutorial.";
#endif
  std::cout << msg << std::endl;
  return EXIT_SUCCESS;
}
572 
573 #endif
Firewire cameras video capture based on CMU 1394 Digital Camera SDK.
void open(vpImage< unsigned char > &I)
Class for firewire ieee1394 video devices using libdc1394-2.x api.
void open(vpImage< unsigned char > &I)
Generic class defining intrinsic camera parameters.
void initPersProjWithoutDistortion(double px, double py, double u0, double v0)
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
static const vpColor red
Definition: vpColor.h:198
static const vpColor none
Definition: vpColor.h:210
static const vpColor yellow
Definition: vpColor.h:206
static const vpColor green
Definition: vpColor.h:201
Class that defines generic functionalities for display.
Definition: vpDisplay.h:178
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
Definition: vpException.h:60
const std::string & getStringMessage() const
Definition: vpException.cpp:67
void open(vpImage< unsigned char > &I)
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static bool checkFilename(const std::string &filename)
Definition: vpIoTools.cpp:786
static std::string getNameWE(const std::string &pathname)
Definition: vpIoTools.cpp:1227
static std::string getParent(const std::string &pathname)
Definition: vpIoTools.cpp:1314
Class that allows keypoints 2D features detection (and descriptors extraction) and matching thanks to...
Definition: vpKeyPoint.h:267
unsigned int matchPoint(const vpImage< unsigned char > &I)
void setExtractor(const vpFeatureDescriptorType &extractorType)
Definition: vpKeyPoint.h:1711
void loadLearningData(const std::string &filename, bool binaryMode=false, bool append=false)
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
Definition: vpKeyPoint.cpp:989
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=nullptr)
Definition: vpKeyPoint.cpp:481
void setMatcher(const std::string &matcherName)
Definition: vpKeyPoint.h:1787
void saveLearningData(const std::string &filename, bool binaryMode=false, bool saveTrainingImages=true)
void setDetector(const vpFeatureDetectorType &detectorType)
Definition: vpKeyPoint.h:1653
unsigned int buildReference(const vpImage< unsigned char > &I)
Definition: vpKeyPoint.cpp:211
cv::Ptr< cv::FeatureDetector > getDetector(const vpFeatureDetectorType &type) const
Definition: vpKeyPoint.h:1091
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this ...
Definition: vpKltOpencv.h:79
void setBlockSize(int blockSize)
Definition: vpKltOpencv.h:272
void setQuality(double qualityLevel)
Definition: vpKltOpencv.h:361
void setHarrisFreeParameter(double harris_k)
Definition: vpKltOpencv.h:280
void setMaxFeatures(int maxCount)
Definition: vpKltOpencv.h:320
void setMinDistance(double minDistance)
Definition: vpKltOpencv.h:329
void setWindowSize(int winSize)
Definition: vpKltOpencv.h:382
void setPyramidLevels(int pyrMaxLevel)
Definition: vpKltOpencv.h:348
static double deg(double rad)
Definition: vpMath.h:119
Real-time 6D object pose tracking using its CAD model.
virtual void setCameraParameters(const vpCameraParameters &camera) VP_OVERRIDE
virtual int getTrackerType() const
virtual void setOgreVisibilityTest(const bool &v) VP_OVERRIDE
virtual std::pair< std::vector< vpPolygon >, std::vector< std::vector< vpPoint > > > getPolygonFaces(bool orderPolygons=true, bool useVisibility=true, bool clipPolygon=false) VP_OVERRIDE
virtual void setProjectionErrorComputation(const bool &flag) VP_OVERRIDE
virtual void setDisplayFeatures(bool displayF) VP_OVERRIDE
virtual unsigned int getNbFeaturesEdge() const
virtual double computeCurrentProjectionError(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &_cMo, const vpCameraParameters &_cam) VP_OVERRIDE
virtual void getCameraParameters(vpCameraParameters &camera) const VP_OVERRIDE
virtual void initFromPose(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo) VP_OVERRIDE
virtual void getPose(vpHomogeneousMatrix &cMo) const VP_OVERRIDE
virtual unsigned int getNbFeaturesKlt() const
virtual void setMovingEdge(const vpMe &me)
virtual void track(const vpImage< unsigned char > &I) VP_OVERRIDE
virtual void loadModel(const std::string &modelFile, bool verbose=false, const vpHomogeneousMatrix &T=vpHomogeneousMatrix()) VP_OVERRIDE
virtual void setTrackerType(int type)
virtual void setScanLineVisibilityTest(const bool &v) VP_OVERRIDE
virtual void display(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &col, unsigned int thickness=1, bool displayFullModel=false) VP_OVERRIDE
virtual void loadConfigFile(const std::string &configFile, bool verbose=true) VP_OVERRIDE
virtual void setProjectionErrorDisplay(bool display) VP_OVERRIDE
virtual double getProjectionError() const
Definition: vpMbTracker.h:312
Definition: vpMe.h:134
void setMu1(const double &mu_1)
Definition: vpMe.h:385
void setRange(const unsigned int &range)
Definition: vpMe.h:415
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
Definition: vpMe.h:505
void setMaskNumber(const unsigned int &mask_number)
Definition: vpMe.cpp:552
void setThreshold(const double &threshold)
Definition: vpMe.h:466
void setSampleStep(const double &sample_step)
Definition: vpMe.h:422
void setMaskSize(const unsigned int &mask_size)
Definition: vpMe.cpp:560
void setMu2(const double &mu_2)
Definition: vpMe.h:392
@ NORMALIZED_THRESHOLD
Definition: vpMe.h:145
Implementation of a pose vector and operations on poses.
Definition: vpPoseVector.h:203
vpCameraParameters getCameraParameters(const rs2_stream &stream, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithDistortion, int index=-1) const
void acquire(vpImage< unsigned char > &grey, double *ts=nullptr)
bool open(const rs2::config &cfg=rs2::config())
Class that is a wrapper over the Video4Linux2 (V4L2) driver.
void open(vpImage< unsigned char > &I)
void setScale(unsigned scale=vpV4l2Grabber::DEFAULT_SCALE)
void setDevice(const std::string &devname)
XML parser to load and save intrinsic camera parameters.
int parse(vpCameraParameters &cam, const std::string &filename, const std::string &camera_name, const vpCameraParameters::vpCameraParametersProjType &projModel, unsigned int image_width=0, unsigned int image_height=0, bool verbose=true)
std::shared_ptr< vpDisplay > createDisplay()
Return a smart pointer vpDisplay specialization if a GUI library is available or nullptr otherwise.
vpDisplay * allocateDisplay()
Return a newly allocated vpDisplay specialization if a GUI library is available or nullptr otherwise.
VISP_EXPORT double measureTimeMs()