Visual Servoing Platform version 3.6.1 under development (2024-03-28)
tutorial-mb-generic-tracker-rgbd-realsense.cpp
#include <iostream>

#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_OPENCV) && defined(VISP_HAVE_PUGIXML)
#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/sensor/vpRealSense2.h>
#include <visp3/vision/vpKeyPoint.h>

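// This example tracks an object from its CAD model with the ViSP generic
// model-based tracker, combining moving edges, KLT keypoints and dense depth
// features acquired from an Intel RealSense RGB-D camera. It can also learn
// the object appearance (--learn) and re-detect it automatically (--auto_init).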
int main(int argc, char *argv[])
{
  std::string config_color = "", config_depth = "";
  std::string model_color = "", model_depth = "";
  std::string init_file = "";
  bool use_ogre = false;
  bool use_scanline = false;
  bool use_edges = true;
  bool use_klt = true;
  bool use_depth = true;
  bool learn = false;
  bool auto_init = false;
  double proj_error_threshold = 25;
  std::string learning_data = "learning/data-learned.bin";
  bool display_projection_error = false;

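  // Parse the command line; run with --help for the full list of options.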
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--config_color" && i + 1 < argc) {
      config_color = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--config_depth" && i + 1 < argc) {
      config_depth = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--model_color" && i + 1 < argc) {
      model_color = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--model_depth" && i + 1 < argc) {
      model_depth = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--init_file" && i + 1 < argc) {
      init_file = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--proj_error_threshold" && i + 1 < argc) {
      proj_error_threshold = std::atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--use_ogre") {
      use_ogre = true;
    }
    else if (std::string(argv[i]) == "--use_scanline") {
      use_scanline = true;
    }
    else if (std::string(argv[i]) == "--use_edges" && i + 1 < argc) {
      use_edges = (std::atoi(argv[i + 1]) == 0 ? false : true);
    }
    else if (std::string(argv[i]) == "--use_klt" && i + 1 < argc) {
      use_klt = (std::atoi(argv[i + 1]) == 0 ? false : true);
    }
    else if (std::string(argv[i]) == "--use_depth" && i + 1 < argc) {
      use_depth = (std::atoi(argv[i + 1]) == 0 ? false : true);
    }
    else if (std::string(argv[i]) == "--learn") {
      learn = true;
    }
    else if (std::string(argv[i]) == "--learning_data" && i + 1 < argc) {
      learning_data = argv[i + 1];
    }
    else if (std::string(argv[i]) == "--auto_init") {
      auto_init = true;
    }
    else if (std::string(argv[i]) == "--display_proj_error") {
      display_projection_error = true;
    }
    else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: \n"
                << argv[0]
                << " [--model_color <object.cao>] [--model_depth <object.cao>]"
                   " [--config_color <object.xml>] [--config_depth <object.xml>]"
                   " [--init_file <object.init>] [--use_ogre] [--use_scanline]"
                   " [--proj_error_threshold <threshold between 0 and 90> (default: "
                << proj_error_threshold
                << ")]"
                   " [--use_edges <0|1> (default: 1)] [--use_klt <0|1> (default: 1)] [--use_depth <0|1> (default: 1)]"
                   " [--learn] [--auto_init] [--learning_data <path to .bin> (default: learning/data-learned.bin)]"
                   " [--display_proj_error]"
                << std::endl;

      std::cout << "\n** How to track a 4.2 cm wide cube with manual initialization:\n"
                << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1" << std::endl;
      std::cout << "\n** How to learn the cube and create a learning database:\n"
                << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --learn"
                << std::endl;
      std::cout << "\n** How to track the cube with initialization from the learning database:\n"
                << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --auto_init"
                << std::endl;

      return EXIT_SUCCESS;
    }
  }

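  // Derive missing file names from the color model path: <model>.xml,
  // <model>_depth.xml and <model>.init.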
  if (model_depth.empty()) {
    model_depth = model_color;
  }
  std::string parentname = vpIoTools::getParent(model_color);
  if (config_color.empty()) {
    config_color = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".xml";
  }
  if (config_depth.empty()) {
    config_depth = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + "_depth.xml";
  }
  if (init_file.empty()) {
    init_file = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".init";
  }
  std::cout << "Tracked features: " << std::endl;
  std::cout << "  Use edges   : " << use_edges << std::endl;
  std::cout << "  Use klt     : " << use_klt << std::endl;
  std::cout << "  Use depth   : " << use_depth << std::endl;
  std::cout << "Tracker options: " << std::endl;
  std::cout << "  Use ogre    : " << use_ogre << std::endl;
  std::cout << "  Use scanline: " << use_scanline << std::endl;
  std::cout << "  Proj. error : " << proj_error_threshold << std::endl;
  std::cout << "  Display proj. error: " << display_projection_error << std::endl;
  std::cout << "Config files: " << std::endl;
  std::cout << "  Config color: \"" << config_color << "\"" << std::endl;
  std::cout << "  Config depth: \"" << config_depth << "\"" << std::endl;
  std::cout << "  Model color : \"" << model_color << "\"" << std::endl;
  std::cout << "  Model depth : \"" << model_depth << "\"" << std::endl;
  std::cout << "  Init file   : \"" << init_file << "\"" << std::endl;
  std::cout << "Learning options: " << std::endl;
  std::cout << "  Learn       : " << learn << std::endl;
  std::cout << "  Auto init   : " << auto_init << std::endl;
  std::cout << "  Learning data: " << learning_data << std::endl;

  if (!use_edges && !use_klt && !use_depth) {
    std::cout << "You must choose at least one visual feature among edges, KLT and depth." << std::endl;
    return EXIT_FAILURE;
  }

  if (config_color.empty() || config_depth.empty() || model_color.empty() || model_depth.empty() || init_file.empty()) {
    std::cout << "config_color.empty() || config_depth.empty() || model_color.empty() || model_depth.empty() || "
                 "init_file.empty()"
              << std::endl;
    return EXIT_FAILURE;
  }

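  // Configure the RealSense grabber with 640x480 color (RGBA) and depth (Z16)
  // streams at 30 fps.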
  vpRealSense2 realsense;
  int width = 640, height = 480;
  int fps = 30;
  rs2::config config;
  config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps);
  config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps);

  try {
    realsense.open(config);
  }
  catch (const vpException &e) {
    std::cout << "Caught an exception: " << e.what() << std::endl;
    std::cout << "Check if the Realsense camera is connected..." << std::endl;
    return EXIT_SUCCESS;
  }

  vpCameraParameters cam_color =
    realsense.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
  vpCameraParameters cam_depth =
    realsense.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithoutDistortion);

  std::cout << "Sensor internal camera parameters for color camera: " << cam_color << std::endl;
  std::cout << "Sensor internal camera parameters for depth camera: " << cam_depth << std::endl;

  vpImage<vpRGBa> I_color(height, width);
  vpImage<unsigned char> I_gray(height, width);
  vpImage<unsigned char> I_depth(height, width);
  vpImage<uint16_t> I_depth_raw(height, width);

  unsigned int _posx = 100, _posy = 50;

#ifdef VISP_HAVE_X11
  vpDisplayX d1, d2;
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI d1, d2;
#elif defined(HAVE_OPENCV_HIGHGUI)
  vpDisplayOpenCV d1, d2;
#endif
  if (use_edges || use_klt)
    d1.init(I_gray, _posx, _posy, "Color stream");
  if (use_depth)
    d2.init(I_depth, _posx + I_gray.getWidth() + 10, _posy, "Depth stream");

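  // Display the live streams until the user clicks, leaving time to place
  // the object in the camera field of view.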
  while (true) {
    realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, nullptr, nullptr);

    if (use_edges || use_klt) {
      vpImageConvert::convert(I_color, I_gray);
      vpDisplay::display(I_gray);
      vpDisplay::displayText(I_gray, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_gray);

      if (vpDisplay::getClick(I_gray, false)) {
        break;
      }
    }
    if (use_depth) {
      vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

      vpDisplay::display(I_depth);
      vpDisplay::displayText(I_depth, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_depth);

      if (vpDisplay::getClick(I_depth, false)) {
        break;
      }
    }
  }

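  // Build one tracker type per camera: edges and/or KLT features for the
  // color camera, dense depth features for the depth camera.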
  std::vector<int> trackerTypes;
  if (use_edges && use_klt)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
  else if (use_edges)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER);
  else if (use_klt)
    trackerTypes.push_back(vpMbGenericTracker::KLT_TRACKER);

  if (use_depth)
    trackerTypes.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER);

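  // depth_M_color is the extrinsic transformation from the color to the depth
  // frame, read from the sensor; it lets the tracker chain the two camera views.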
  vpHomogeneousMatrix depth_M_color = realsense.getTransformation(RS2_STREAM_COLOR, RS2_STREAM_DEPTH);
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
  std::map<std::string, std::string> mapOfInitFiles;
  std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
  std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;

  std::vector<vpColVector> pointcloud;

  vpMbGenericTracker tracker(trackerTypes);

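  // Load settings, CAD model and intrinsics for the enabled cameras. With
  // both color and depth features the color camera is named "Camera1" and
  // the depth camera "Camera2".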
  if ((use_edges || use_klt) && use_depth) {
    tracker.loadConfigFile(config_color, config_depth);
    tracker.loadModel(model_color, model_depth);
    std::cout << "Sensor internal depth_M_color: \n" << depth_M_color << std::endl;
    mapOfCameraTransformations["Camera2"] = depth_M_color;
    tracker.setCameraTransformationMatrix(mapOfCameraTransformations);
    mapOfImages["Camera1"] = &I_gray;
    mapOfImages["Camera2"] = &I_depth;
    mapOfInitFiles["Camera1"] = init_file;
    tracker.setCameraParameters(cam_color, cam_depth);
  }
  else if (use_edges || use_klt) {
    tracker.loadConfigFile(config_color);
    tracker.loadModel(model_color);
    tracker.setCameraParameters(cam_color);
  }
  else if (use_depth) {
    tracker.loadConfigFile(config_depth);
    tracker.loadModel(model_depth);
    tracker.setCameraParameters(cam_depth);
  }

  tracker.setDisplayFeatures(true);
  tracker.setOgreVisibilityTest(use_ogre);
  tracker.setScanLineVisibilityTest(use_scanline);
  tracker.setProjectionErrorComputation(true);
  tracker.setProjectionErrorDisplay(display_projection_error);

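  // Keypoint machinery used for learning and auto-initialization: SIFT when
  // the OpenCV build provides it, otherwise FAST detection with ORB
  // descriptors matched by Hamming distance.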
#if (defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)) || \
  (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
  std::string detectorName = "SIFT";
  std::string extractorName = "SIFT";
  std::string matcherName = "BruteForce";
#else
  std::string detectorName = "FAST";
  std::string extractorName = "ORB";
  std::string matcherName = "BruteForce-Hamming";
#endif
  vpKeyPoint keypoint;
  if (learn || auto_init) {
    keypoint.setDetector(detectorName);
    keypoint.setExtractor(extractorName);
    keypoint.setMatcher(matcherName);
#if !(defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
    keypoint.setDetectorParameter("ORB", "nLevels", 1);
#else
    cv::Ptr<cv::ORB> orb_detector = keypoint.getDetector("ORB").dynamicCast<cv::ORB>();
    if (orb_detector) {
      orb_detector->setNLevels(1);
    }
#endif
#endif
  }

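  // Initial pose: either matched automatically against the learned keypoint
  // database (--auto_init), or set manually by clicking, in the image, the
  // 3D points listed in the .init file.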
  if (auto_init) {
    if (!vpIoTools::checkFilename(learning_data)) {
      std::cout << "Cannot enable auto detection. Learning file \"" << learning_data << "\" doesn't exist" << std::endl;
      return EXIT_FAILURE;
    }
    keypoint.loadLearningData(learning_data, true);
  }
  else {
    if ((use_edges || use_klt) && use_depth)
      tracker.initClick(mapOfImages, mapOfInitFiles, true);
    else if (use_edges || use_klt)
      tracker.initClick(I_gray, init_file, true);
    else if (use_depth)
      tracker.initClick(I_depth, init_file, true);

    if (learn)
      vpIoTools::makeDirectory(vpIoTools::getParent(learning_data));
  }

  bool run_auto_init = false;
  if (auto_init) {
    run_auto_init = true;
  }
  std::vector<double> times_vec;

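  // Main tracking loop: grab color image, depth map and point cloud, track
  // the object, display the result and handle user interaction.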
  try {
    // To be able to display keypoints matching with test-detection-rs2
    int learn_id = 1;
    bool quit = false;
    bool learn_position = false;
    double loop_t = 0;
    vpHomogeneousMatrix cMo;

    while (!quit) {
      double t = vpTime::measureTimeMs();
      bool tracking_failed = false;

      // Acquire images and update tracker input data
      realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud, nullptr,
                        nullptr);

      if (use_edges || use_klt || run_auto_init) {
        vpImageConvert::convert(I_color, I_gray);
        vpDisplay::display(I_gray);
      }
      if (use_depth) {
        vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
        vpDisplay::display(I_depth);
      }

      if ((use_edges || use_klt) && use_depth) {
        mapOfImages["Camera1"] = &I_gray;
        mapOfPointclouds["Camera2"] = &pointcloud;
        mapOfWidths["Camera2"] = width;
        mapOfHeights["Camera2"] = height;
      }
      else if (use_edges || use_klt) {
        mapOfImages["Camera"] = &I_gray;
      }
      else if (use_depth) {
        mapOfPointclouds["Camera"] = &pointcloud;
        mapOfWidths["Camera"] = width;
        mapOfHeights["Camera"] = height;
      }

      // Run auto initialization from learned data
      if (run_auto_init) {
        if (keypoint.matchPoint(I_gray, cam_color, cMo)) {
          std::cout << "Auto init succeeded" << std::endl;
          if ((use_edges || use_klt) && use_depth) {
            mapOfCameraPoses["Camera1"] = cMo;
            mapOfCameraPoses["Camera2"] = depth_M_color * cMo;
            tracker.initFromPose(mapOfImages, mapOfCameraPoses);
          }
          else if (use_edges || use_klt) {
            tracker.initFromPose(I_gray, cMo);
          }
          else if (use_depth) {
            tracker.initFromPose(I_depth, depth_M_color * cMo);
          }
        }
        else {
          if (use_edges || use_klt) {
            vpDisplay::flush(I_gray);
          }
          if (use_depth) {
            vpDisplay::flush(I_depth);
          }
          continue;
        }
      }

      // Run the tracker
      try {
        if (run_auto_init) {
          // Turn off feature display just after auto init, to avoid drawing wrong moving edges if the tracker fails
          tracker.setDisplayFeatures(false);

          run_auto_init = false;
        }
        if ((use_edges || use_klt) && use_depth) {
          tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        }
        else if (use_edges || use_klt) {
          tracker.track(I_gray);
        }
        else if (use_depth) {
          tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        }
      }
      catch (const vpException &e) {
        std::cout << "Tracker exception: " << e.getStringMessage() << std::endl;
        tracking_failed = true;
        if (auto_init) {
          std::cout << "Tracker needs to restart (tracking exception)" << std::endl;
          run_auto_init = true;
        }
      }

      // Get object pose
      cMo = tracker.getPose();

      // Check tracking errors
      double proj_error = 0;
      if (tracker.getTrackerType() & vpMbGenericTracker::EDGE_TRACKER) {
        // Reuse the projection error already computed by the moving-edge tracker
        proj_error = tracker.getProjectionError();
      }
      else {
        proj_error = tracker.computeCurrentProjectionError(I_gray, cMo, cam_color);
      }

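      // A large reprojection error means the estimated pose no longer matches
      // the image; with --auto_init this triggers a new detection attempt.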
      if (auto_init && proj_error > proj_error_threshold) {
        std::cout << "Tracker needs to restart (projection error detected: " << proj_error << ")" << std::endl;
        run_auto_init = true;
        tracking_failed = true;
      }

      // Display tracking results
      if (!tracking_failed) {
        // Turn display features on
        tracker.setDisplayFeatures(true);

        if ((use_edges || use_klt) && use_depth) {
          tracker.display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
          vpDisplay::displayFrame(I_depth, depth_M_color * cMo, cam_depth, 0.05, vpColor::none, 3);
        }
        else if (use_edges || use_klt) {
          tracker.display(I_gray, cMo, cam_color, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
        }
        else if (use_depth) {
          tracker.display(I_depth, cMo, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_depth, cMo, cam_depth, 0.05, vpColor::none, 3);
        }

        {
          std::stringstream ss;
          ss << "Nb features: " << tracker.getError().size();
          vpDisplay::displayText(I_gray, I_gray.getHeight() - 50, 20, ss.str(), vpColor::red);
        }
        {
          std::stringstream ss;
          ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt()
             << ", depth " << tracker.getNbFeaturesDepthDense();
          vpDisplay::displayText(I_gray, I_gray.getHeight() - 30, 20, ss.str(), vpColor::red);
        }
      }

      std::stringstream ss;
      ss << "Loop time: " << loop_t << " ms";

      vpMouseButton::vpMouseButtonType button;
      if (use_edges || use_klt) {
        vpDisplay::displayText(I_gray, 20, 20, ss.str(), vpColor::red);
        if (learn)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: learn  Right click: quit", vpColor::red);
        else if (auto_init)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: auto_init  Right click: quit", vpColor::red);
        else
          vpDisplay::displayText(I_gray, 35, 20, "Right click: quit", vpColor::red);

        vpDisplay::flush(I_gray);

        if (vpDisplay::getClick(I_gray, button, false)) {
          if (button == vpMouseButton::button3) {
            quit = true;
          }
          else if (button == vpMouseButton::button1 && learn) {
            learn_position = true;
          }
          else if (button == vpMouseButton::button1 && auto_init && !learn) {
            run_auto_init = true;
          }
        }
      }
      if (use_depth) {
        vpDisplay::displayText(I_depth, 20, 20, ss.str(), vpColor::red);
        vpDisplay::displayText(I_depth, 40, 20, "Click to quit", vpColor::red);
        vpDisplay::flush(I_depth);

        if (vpDisplay::getClick(I_depth, false)) {
          quit = true;
        }
      }

      if (learn_position) {
        // Detect keypoints on the current image
        std::vector<cv::KeyPoint> trainKeyPoints;
        keypoint.detect(I_gray, trainKeyPoints);

        // Keep only keypoints on the cube
        std::vector<vpPolygon> polygons;
        std::vector<std::vector<vpPoint> > roisPt;
        std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
        polygons = pair.first;
        roisPt = pair.second;

        // Compute the 3D coordinates
        std::vector<cv::Point3f> points3f;
        vpKeyPoint::compute3DForPointsInPolygons(cMo, cam_color, trainKeyPoints, polygons, roisPt, points3f);

        // Build the reference keypoints
        keypoint.buildReference(I_gray, trainKeyPoints, points3f, true, learn_id++);

        // Display learned data
        for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
          vpDisplay::displayCross(I_gray, (int)it->pt.y, (int)it->pt.x, 10, vpColor::yellow, 3);
        }
        learn_position = false;
        std::cout << "Data learned" << std::endl;
      }
      loop_t = vpTime::measureTimeMs() - t;
      times_vec.push_back(loop_t);
    }
    if (learn) {
      std::cout << "Save learning file: " << learning_data << std::endl;
      keypoint.saveLearningData(learning_data, true, true);
    }
  }
  catch (const vpException &e) {
    std::cout << "Caught an exception: " << e.what() << std::endl;
  }

  if (!times_vec.empty()) {
    std::cout << "\nProcessing time, Mean: " << vpMath::getMean(times_vec)
              << " ms ; Median: " << vpMath::getMedian(times_vec) << " ms ; Std: " << vpMath::getStdev(times_vec)
              << " ms" << std::endl;
  }

  return EXIT_SUCCESS;
}
#elif defined(VISP_HAVE_REALSENSE2)
int main()
{
  std::cout << "Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
  return EXIT_SUCCESS;
}
#else
int main()
{
  std::cout << "Install librealsense2 3rd party, configure and build ViSP again to use this example" << std::endl;
  return EXIT_SUCCESS;
}
#endif