tutorial-mb-generic-tracker-live.cpp
#include <iomanip> // std::setprecision

#include <visp3/core/vpConfig.h>

#ifdef VISP_HAVE_MODULE_SENSOR
#include <visp3/sensor/vp1394CMUGrabber.h>
#include <visp3/sensor/vp1394TwoGrabber.h>
#include <visp3/sensor/vpFlyCaptureGrabber.h>
#include <visp3/sensor/vpRealSense2.h>
#include <visp3/sensor/vpV4l2Grabber.h>
#endif
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/vision/vpKeyPoint.h>
#include <visp3/mbt/vpMbGenericTracker.h>

#if defined(HAVE_OPENCV_VIDEOIO)
#include <opencv2/videoio.hpp>
#endif

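// Uncomment one of the following lines to disable the corresponding third-party
// and force the grabber selection below to fall back to another one.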
// #undef VISP_HAVE_V4L2
// #undef VISP_HAVE_DC1394
// #undef VISP_HAVE_CMU1394
// #undef VISP_HAVE_FLYCAPTURE
// #undef VISP_HAVE_REALSENSE2
// #undef VISP_HAVE_OPENCV

int main(int argc, char **argv)
{
#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_VIDEOIO) && \
  (defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
   defined(HAVE_OPENCV_HIGHGUI) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2))

  try {
    std::string opt_modelname = "model/teabox/teabox.cao";
    int opt_tracker = 2;
    int opt_device = 0; // For OpenCV and V4l2 grabber to set the camera device
    double opt_proj_error_threshold = 30.;
    bool opt_use_ogre = false;
    bool opt_use_scanline = false;
    bool opt_display_projection_error = false;
    bool opt_learn = false;
    bool opt_auto_init = false;
    std::string opt_learning_data = "learning/data-learned.bin";
    std::string opt_intrinsic_file = "";
    std::string opt_camera_name = "";

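    // Override the default settings above with the command-line options.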
    for (int i = 0; i < argc; i++) {
      if (std::string(argv[i]) == "--model" && i + 1 < argc) {
        opt_modelname = std::string(argv[i + 1]);
      }
      else if (std::string(argv[i]) == "--tracker" && i + 1 < argc) {
        opt_tracker = atoi(argv[i + 1]);
      }
      else if (std::string(argv[i]) == "--camera_device" && i + 1 < argc) {
        opt_device = atoi(argv[i + 1]);
      }
      else if (std::string(argv[i]) == "--max_proj_error" && i + 1 < argc) {
        opt_proj_error_threshold = atof(argv[i + 1]);
      }
      else if (std::string(argv[i]) == "--use_ogre") {
        opt_use_ogre = true;
      }
      else if (std::string(argv[i]) == "--use_scanline") {
        opt_use_scanline = true;
      }
      else if (std::string(argv[i]) == "--learn") {
        opt_learn = true;
      }
      else if (std::string(argv[i]) == "--learning_data" && i + 1 < argc) {
        opt_learning_data = argv[i + 1];
      }
      else if (std::string(argv[i]) == "--auto_init") {
        opt_auto_init = true;
      }
      else if (std::string(argv[i]) == "--display_proj_error") {
        opt_display_projection_error = true;
      }
      else if (std::string(argv[i]) == "--intrinsic" && i + 1 < argc) {
        opt_intrinsic_file = std::string(argv[i + 1]);
      }
      else if (std::string(argv[i]) == "--camera_name" && i + 1 < argc) {
        opt_camera_name = std::string(argv[i + 1]);
      }
      else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
        std::cout
          << "\nUsage: " << argv[0] << " [--camera_device <camera device> (default: 0)]"
          << " [--intrinsic <intrinsic file> (default: empty)]"
          << " [--camera_name <camera name> (default: empty)]"
          << " [--model <model name> (default: teabox)]"
          << " [--tracker <0=edge|1=keypoint|2=hybrid> (default: 2)]"
          << " [--use_ogre] [--use_scanline]"
          << " [--max_proj_error <allowed projection error> (default: 30)]"
          << " [--learn] [--auto_init] [--learning_data <data-learned.bin> (default: learning/data-learned.bin)]"
          << " [--display_proj_error]"
          << " [--help] [-h]\n"
          << std::endl;
        return EXIT_SUCCESS;
      }
    }
    std::string parentname = vpIoTools::getParent(opt_modelname);
    std::string objectname = vpIoTools::getNameWE(opt_modelname);

    if (!parentname.empty())
      objectname = parentname + "/" + objectname;

    std::cout << "Tracker requested config files: " << objectname << ".[init, cao]" << std::endl;
    std::cout << "Tracker optional config files: " << objectname << ".[ppm]" << std::endl;

    std::cout << "Tracked features: " << std::endl;
    std::cout << "  Use edges   : " << (opt_tracker == 0 || opt_tracker == 2) << std::endl;
    std::cout << "  Use klt     : " << (opt_tracker == 1 || opt_tracker == 2) << std::endl;
    std::cout << "Tracker options: " << std::endl;
    std::cout << "  Use ogre    : " << opt_use_ogre << std::endl;
    std::cout << "  Use scanline: " << opt_use_scanline << std::endl;
    std::cout << "  Proj. error : " << opt_proj_error_threshold << std::endl;
    std::cout << "  Display proj. error: " << opt_display_projection_error << std::endl;
    std::cout << "Config files: " << std::endl;
    std::cout << "  Config file : \"" << objectname + ".xml" << "\"" << std::endl;
    std::cout << "  Model file  : \"" << objectname + ".cao" << "\"" << std::endl;
    std::cout << "  Init file   : \"" << objectname + ".init" << "\"" << std::endl;
    std::cout << "Learning options   : " << std::endl;
    std::cout << "  Learn       : " << opt_learn << std::endl;
    std::cout << "  Auto init   : " << opt_auto_init << std::endl;
    std::cout << "  Learning data: " << opt_learning_data << std::endl;

#if VISP_VERSION_INT > VP_VERSION_INT(3, 2, 0)
    vpImage<vpRGBa> I; // Since ViSP 3.2.0 we support model-based tracking on color images
#else
    vpImage<unsigned char> I; // Tracking on gray level images
#endif

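    // Generic intrinsics; they are overridden below when an XML camera file is
    // given with --intrinsic/--camera_name, or read from the RealSense device.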
    vpCameraParameters cam;
    cam.initPersProjWithoutDistortion(839, 839, 325, 243);

#if defined(VISP_HAVE_PUGIXML)
    vpXmlParserCamera parser;
    if (!opt_intrinsic_file.empty() && !opt_camera_name.empty()) {
      parser.parse(cam, opt_intrinsic_file, opt_camera_name, vpCameraParameters::perspectiveProjWithoutDistortion);
    }
#endif

    vpHomogeneousMatrix cMo;

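    // Create the grabber: the first third-party available among V4L2, DC1394,
    // CMU1394, FlyCapture, RealSense2 and OpenCV is selected at compile time.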
#if defined(VISP_HAVE_V4L2)
    vpV4l2Grabber g;
    std::ostringstream device;
    device << "/dev/video" << opt_device;
    std::cout << "Use Video 4 Linux grabber on device " << device.str() << std::endl;
    g.setDevice(device.str());
    g.setScale(1);
    g.open(I);
#elif defined(VISP_HAVE_DC1394)
    (void)opt_device; // To avoid non used warning
    std::cout << "Use DC1394 grabber" << std::endl;
    vp1394TwoGrabber g;
    g.open(I);
#elif defined(VISP_HAVE_CMU1394)
    (void)opt_device; // To avoid non used warning
    std::cout << "Use CMU1394 grabber" << std::endl;
    vp1394CMUGrabber g;
    g.open(I);
#elif defined(VISP_HAVE_FLYCAPTURE)
    (void)opt_device; // To avoid non used warning
    std::cout << "Use FlyCapture grabber" << std::endl;
    vpFlyCaptureGrabber g;
    g.open(I);
#elif defined(VISP_HAVE_REALSENSE2)
    (void)opt_device; // To avoid non used warning
    std::cout << "Use Realsense 2 grabber" << std::endl;
    vpRealSense2 g;
    rs2::config config;
    config.disable_stream(RS2_STREAM_DEPTH);
    config.disable_stream(RS2_STREAM_INFRARED);
    config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
    g.open(config);
    g.acquire(I);

    std::cout << "Read camera parameters from Realsense device" << std::endl;
    cam = g.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
#elif defined(HAVE_OPENCV_VIDEOIO)
    std::cout << "Use OpenCV grabber on device " << opt_device << std::endl;
    cv::VideoCapture g(opt_device); // Open the default camera
    if (!g.isOpened()) {            // Check if we succeeded
      std::cout << "Failed to open the camera" << std::endl;
      return EXIT_FAILURE;
    }
    cv::Mat frame;
    g >> frame; // get a new frame from camera
    vpImageConvert::convert(frame, I);
#endif

    vpDisplay *display = nullptr;
#if defined(VISP_HAVE_X11)
    display = new vpDisplayX;
#elif defined(VISP_HAVE_GDI)
    display = new vpDisplayGDI;
#elif defined(HAVE_OPENCV_HIGHGUI)
    display = new vpDisplayOpenCV;
#endif
    if (display == nullptr) { // No display third-party (X11, GDI, OpenCV highgui) is available
      std::cout << "No image viewer is available. Install X11 or OpenCV highgui, configure and build ViSP again."
                << std::endl;
      return EXIT_FAILURE;
    }
    display->init(I, 100, 100, "Model-based tracker");

    while (true) {
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
    defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
      g.acquire(I);
#elif defined(HAVE_OPENCV_VIDEOIO)
      g >> frame;
      vpImageConvert::convert(frame, I);
#endif

      vpDisplay::display(I);
      vpDisplay::displayText(I, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I);

      if (vpDisplay::getClick(I, false)) {
        break;
      }
    }
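
    // Instantiate the generic tracker and select the features used for tracking:
    // 0 = moving edges only, 1 = KLT keypoints only, 2 = hybrid (edges + KLT).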
    vpMbGenericTracker tracker;
    if (opt_tracker == 0)
      tracker.setTrackerType(vpMbGenericTracker::EDGE_TRACKER);
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
    else if (opt_tracker == 1)
      tracker.setTrackerType(vpMbGenericTracker::KLT_TRACKER);
    else
      tracker.setTrackerType(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
#else
    else {
#if !defined(VISP_HAVE_MODULE_KLT)
      std::cout << "klt and hybrid model-based tracker are not available since visp_klt module is not available. "
                   "In CMakeGUI turn visp_klt module ON, configure and build ViSP again."
                << std::endl;
#else
      std::cout << "Hybrid tracking is impossible since OpenCV is not enabled. "
                << "Install OpenCV, configure and build ViSP again to run this tutorial." << std::endl;
#endif
      return EXIT_SUCCESS;
    }
#endif

    bool usexml = false;
#if defined(VISP_HAVE_PUGIXML)
    if (vpIoTools::checkFilename(objectname + ".xml")) {
      tracker.loadConfigFile(objectname + ".xml");
      usexml = true;
    }
#endif

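    // No XML configuration file was found: set the moving-edge and KLT
    // parameters by hand with default values suited to this tutorial.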
    if (!usexml) {
      if (opt_tracker == 0 || opt_tracker == 2) {
        vpMe me;
        me.setMaskSize(5);
        me.setMaskNumber(180);
        me.setRange(8);
        me.setLikelihoodThresholdType(vpMe::NORMALIZED_THRESHOLD);
        me.setThreshold(20);
        me.setMu1(0.5);
        me.setMu2(0.5);
        me.setSampleStep(4);
        tracker.setMovingEdge(me);
      }

#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
      if (opt_tracker == 1 || opt_tracker == 2) {
        vpKltOpencv klt_settings;
        klt_settings.setMaxFeatures(300);
        klt_settings.setWindowSize(5);
        klt_settings.setQuality(0.015);
        klt_settings.setMinDistance(8);
        klt_settings.setHarrisFreeParameter(0.01);
        klt_settings.setBlockSize(3);
        klt_settings.setPyramidLevels(3);
        tracker.setKltOpencv(klt_settings);
        tracker.setKltMaskBorder(5);
      }
#endif
    }

    tracker.setCameraParameters(cam);

    tracker.loadModel(objectname + ".cao");
    tracker.setDisplayFeatures(true);
    tracker.setOgreVisibilityTest(opt_use_ogre);
    tracker.setScanLineVisibilityTest(opt_use_scanline);
    tracker.setProjectionErrorComputation(true);
    tracker.setProjectionErrorDisplay(opt_display_projection_error);

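    // Select the keypoint detector/extractor/matcher used for learning and
    // auto-initialization: SIFT when available in the OpenCV build, otherwise
    // FAST + ORB with Hamming-distance brute-force matching.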
#if (defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)) || \
    (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
    std::string detectorName = "SIFT";
    std::string extractorName = "SIFT";
    std::string matcherName = "BruteForce";
#else
    std::string detectorName = "FAST";
    std::string extractorName = "ORB";
    std::string matcherName = "BruteForce-Hamming";
#endif
    vpKeyPoint keypoint;
    if (opt_learn || opt_auto_init) {
      keypoint.setDetector(detectorName);
      keypoint.setExtractor(extractorName);
      keypoint.setMatcher(matcherName);
#if !(defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
      keypoint.setDetectorParameter("ORB", "nLevels", 1);
#else
      cv::Ptr<cv::ORB> orb_detector = keypoint.getDetector("ORB").dynamicCast<cv::ORB>();
      if (orb_detector) {
        orb_detector->setNLevels(1);
      }
#endif
#endif
    }
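
    // With --auto_init, the pose is (re-)initialized by matching the current
    // image against the learned keypoints; otherwise the user clicks the 3D
    // points listed in the .init file.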
    if (opt_auto_init) {
      if (!vpIoTools::checkFilename(opt_learning_data)) {
        std::cout << "Cannot enable auto detection. Learning file \"" << opt_learning_data << "\" doesn't exist"
                  << std::endl;
        return EXIT_FAILURE;
      }
      keypoint.loadLearningData(opt_learning_data, true);
    }
    else {
      tracker.initClick(I, objectname + ".init", true);
    }

    bool learn_position = false;
    bool run_auto_init = false;
    if (opt_auto_init) {
      run_auto_init = true;
    }

    // To be able to display keypoints matching with test-detection-rs2
    int learn_id = 1;
    unsigned int learn_cpt = 0;
    bool quit = false;
    bool tracking_failed = false;

    while (!quit) {
      double t_begin = vpTime::measureTimeMs();
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
    defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
      g.acquire(I);
#elif defined(HAVE_OPENCV_VIDEOIO)
      g >> frame;
      vpImageConvert::convert(frame, I);
#endif
      vpDisplay::display(I);

      // Run auto initialization from learned data
      if (run_auto_init) {
        tracking_failed = false;
        if (keypoint.matchPoint(I, cam, cMo)) {
          std::cout << "Auto init succeeded" << std::endl;
          tracker.initFromPose(I, cMo);
        }
        else {
          vpDisplay::flush(I);
          continue;
        }
      }
      else if (tracking_failed) {
        // Manual init
        tracking_failed = false;
        tracker.initClick(I, objectname + ".init", true);
      }

      // Run the tracker
      try {
        if (run_auto_init) {
          // Turn display features off just after auto init to not display wrong moving-edge if the tracker fails
          tracker.setDisplayFeatures(false);

          run_auto_init = false;
        }
        tracker.track(I);
      }
      catch (const vpException &e) {
        std::cout << "Tracker exception: " << e.getStringMessage() << std::endl;
        tracking_failed = true;
        if (opt_auto_init) {
          std::cout << "Tracker needs to restart (tracking exception)" << std::endl;
          run_auto_init = true;
        }
      }

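      // Detect tracking degradation: the pure edge tracker reports its internal
      // projection error, otherwise it is recomputed from the current pose; a
      // value above --max_proj_error triggers a re-initialization.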
      if (!tracking_failed) {
        double proj_error = 0;
        if (tracker.getTrackerType() == vpMbGenericTracker::EDGE_TRACKER) {
          // Check tracking errors
          proj_error = tracker.getProjectionError();
        }
        else {
          tracker.getPose(cMo);
          tracker.getCameraParameters(cam);
          proj_error = tracker.computeCurrentProjectionError(I, cMo, cam);
        }
        if (proj_error > opt_proj_error_threshold) {
          std::cout << "Tracker needs to restart (projection error detected: " << proj_error << ")" << std::endl;
          if (opt_auto_init) {
            run_auto_init = true;
          }
          tracking_failed = true;
        }
      }

      if (!tracking_failed) {
        tracker.setDisplayFeatures(true);
        tracker.getPose(cMo);
        tracker.getCameraParameters(cam);
        tracker.display(I, cMo, cam, vpColor::green, 2, false);
        vpDisplay::displayFrame(I, cMo, cam, 0.025, vpColor::none, 3);

        { // Display estimated pose in [m] and [deg]
          vpPoseVector pose(cMo);
          std::stringstream ss;
          ss << "Translation: " << std::setprecision(5) << pose[0] << " " << pose[1] << " " << pose[2] << " [m]";
          vpDisplay::displayText(I, 80, 20, ss.str(), vpColor::green);
          ss.str(""); // erase ss
          ss << "Rotation tu: " << std::setprecision(4) << vpMath::deg(pose[3]) << " " << vpMath::deg(pose[4]) << " "
             << vpMath::deg(pose[5]) << " [deg]";
          vpDisplay::displayText(I, 100, 20, ss.str(), vpColor::green);
        }
        {
          std::stringstream ss;
          ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt();
          vpDisplay::displayText(I, 120, 20, ss.str(), vpColor::red);
        }
      }

      if (learn_position) {
        learn_cpt++;
        // Detect keypoints on the current image
        std::vector<cv::KeyPoint> trainKeyPoints;
        keypoint.detect(I, trainKeyPoints);

        // Keep only keypoints that lie on the object faces
        std::vector<vpPolygon> polygons;
        std::vector<std::vector<vpPoint> > roisPt;
        std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
        polygons = pair.first;
        roisPt = pair.second;

        // Compute the 3D coordinates
        std::vector<cv::Point3f> points3f;
        vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

        // Build the reference keypoints
        keypoint.buildReference(I, trainKeyPoints, points3f, true, learn_id++);

        // Display learned data
        for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
          vpDisplay::displayCross(I, (int)it->pt.y, (int)it->pt.x, 10, vpColor::yellow, 3);
        }
        learn_position = false;
        std::cout << "Data learned" << std::endl;
      }

      std::stringstream ss;
      ss << "Loop time: " << vpTime::measureTimeMs() - t_begin << " ms";
      vpDisplay::displayText(I, 20, 20, ss.str(), vpColor::red);
      if (opt_learn)
        vpDisplay::displayText(I, 35, 20, "Left click: learn  Right click: quit", vpColor::red);
      else if (opt_auto_init)
        vpDisplay::displayText(I, 35, 20, "Left click: auto_init  Right click: quit", vpColor::red);
      else
        vpDisplay::displayText(I, 35, 20, "Right click: quit", vpColor::red);

      vpMouseButton::vpMouseButtonType button;
      if (vpDisplay::getClick(I, button, false)) {
        if (button == vpMouseButton::button3) {
          quit = true;
        }
        else if (button == vpMouseButton::button1 && opt_learn) {
          learn_position = true;
        }
        else if (button == vpMouseButton::button1 && opt_auto_init && !opt_learn) {
          run_auto_init = true;
        }
      }

      vpDisplay::flush(I);
    }
    if (opt_learn && learn_cpt) {
      std::cout << "Save learning from " << learn_cpt << " images in file: " << opt_learning_data << std::endl;
      keypoint.saveLearningData(opt_learning_data, true, true);
    }

    delete display;
  }
  catch (const vpException &e) {
    std::cout << "Catch a ViSP exception: " << e << std::endl;
  }
#elif defined(VISP_HAVE_OPENCV)
  (void)argc;
  (void)argv;
  std::cout << "Install a 3rd party dedicated to frame grabbing (dc1394, cmu1394, v4l2, OpenCV, FlyCapture, "
               "Realsense2), configure and build ViSP again to use this example"
            << std::endl;
#else
  (void)argc;
  (void)argv;
  std::cout << "Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
#endif
}
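
Note: once built with ViSP (the tutorial binary is expected to be named tutorial-mb-generic-tracker-live), a typical session is to run it a first time with --learn, left clicking whenever the tracking looks accurate to record keypoints into learning/data-learned.bin, then to run it again with --auto_init so the learned data is used to initialize, and automatically re-initialize, the pose without manual clicks.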