tutorial-mb-generic-tracker-rgbd-realsense.cpp
#include <iostream>

#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_PUGIXML) && \
  (((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_CALIB3D) && defined(HAVE_OPENCV_FEATURES2D)) || \
   ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_3D) && defined(HAVE_OPENCV_FEATURES)))
9 
10 #include <visp3/core/vpDisplay.h>
11 #include <visp3/core/vpIoTools.h>
12 #include <visp3/gui/vpDisplayFactory.h>
13 #include <visp3/mbt/vpMbGenericTracker.h>
14 #include <visp3/sensor/vpRealSense2.h>
15 #include <visp3/vision/vpKeyPoint.h>
16 
int main(int argc, char *argv[])
{
#ifdef ENABLE_VISP_NAMESPACE
  using namespace VISP_NAMESPACE_NAME;
#endif

  std::string config_color = "", config_depth = "";
  std::string model_color = "", model_depth = "";
  std::string init_file = "";
  bool use_ogre = false;
  bool use_scanline = false;
  bool use_edges = true;
  bool use_klt = true;
  bool use_depth = true;
  bool learn = false;
  bool auto_init = false;
  double proj_error_threshold = 25;
  std::string learning_data = "learning/data-learned.bin";
  bool display_projection_error = false;

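  // Parse command-line options; any option not given keeps the default value set above.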
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--config_color" && i + 1 < argc) {
      config_color = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--config_depth" && i + 1 < argc) {
      config_depth = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--model_color" && i + 1 < argc) {
      model_color = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--model_depth" && i + 1 < argc) {
      model_depth = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--init_file" && i + 1 < argc) {
      init_file = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--proj_error_threshold" && i + 1 < argc) {
      proj_error_threshold = std::atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--use_ogre") {
      use_ogre = true;
    }
    else if (std::string(argv[i]) == "--use_scanline") {
      use_scanline = true;
    }
    else if (std::string(argv[i]) == "--use_edges" && i + 1 < argc) {
      use_edges = (std::atoi(argv[i + 1]) == 0 ? false : true);
    }
    else if (std::string(argv[i]) == "--use_klt" && i + 1 < argc) {
      use_klt = (std::atoi(argv[i + 1]) == 0 ? false : true);
    }
    else if (std::string(argv[i]) == "--use_depth" && i + 1 < argc) {
      use_depth = (std::atoi(argv[i + 1]) == 0 ? false : true);
    }
    else if (std::string(argv[i]) == "--learn") {
      learn = true;
    }
    else if (std::string(argv[i]) == "--learning_data" && i + 1 < argc) {
      learning_data = argv[i + 1];
    }
    else if (std::string(argv[i]) == "--auto_init") {
      auto_init = true;
    }
    else if (std::string(argv[i]) == "--display_proj_error") {
      display_projection_error = true;
    }
    else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: \n"
        << argv[0]
        << " [--model_color <object.cao>] [--model_depth <object.cao>]"
           " [--config_color <object.xml>] [--config_depth <object.xml>]"
           " [--init_file <object.init>] [--use_ogre] [--use_scanline]"
           " [--proj_error_threshold <threshold between 0 and 90> (default: "
        << proj_error_threshold
        << ")]"
           " [--use_edges <0|1> (default: 1)] [--use_klt <0|1> (default: 1)] [--use_depth <0|1> (default: 1)]"
           " [--learn] [--auto_init] [--learning_data <path to .bin> (default: learning/data-learned.bin)]"
           " [--display_proj_error]"
        << std::endl;

      std::cout << "\n** How to track a 4.2 cm width cube with manual initialization:\n"
        << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1" << std::endl;
      std::cout << "\n** How to learn the cube and create a learning database:\n"
        << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --learn"
        << std::endl;
      std::cout << "\n** How to track the cube with initialization from learning database:\n"
        << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --auto_init"
        << std::endl;

      return EXIT_SUCCESS;
    }
  }

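  // When not specified, derive the config (.xml) and init (.init) file paths from the color model location.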
  if (model_depth.empty()) {
    model_depth = model_color;
  }
  std::string parentname = vpIoTools::getParent(model_color);
  if (config_color.empty()) {
    config_color = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".xml";
  }
  if (config_depth.empty()) {
    config_depth = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + "_depth.xml";
  }
  if (init_file.empty()) {
    init_file = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".init";
  }
  std::cout << "Tracked features: " << std::endl;
  std::cout << "  Use edges   : " << use_edges << std::endl;
  std::cout << "  Use klt     : " << use_klt << std::endl;
  std::cout << "  Use depth   : " << use_depth << std::endl;
  std::cout << "Tracker options: " << std::endl;
  std::cout << "  Use ogre    : " << use_ogre << std::endl;
  std::cout << "  Use scanline: " << use_scanline << std::endl;
  std::cout << "  Proj. error : " << proj_error_threshold << std::endl;
  std::cout << "  Display proj. error: " << display_projection_error << std::endl;
  std::cout << "Config files: " << std::endl;
  std::cout << "  Config color: \"" << config_color << "\"" << std::endl;
  std::cout << "  Config depth: \"" << config_depth << "\"" << std::endl;
  std::cout << "  Model color : \"" << model_color << "\"" << std::endl;
  std::cout << "  Model depth : \"" << model_depth << "\"" << std::endl;
  std::cout << "  Init file   : \"" << init_file << "\"" << std::endl;
  std::cout << "Learning options: " << std::endl;
  std::cout << "  Learn        : " << learn << std::endl;
  std::cout << "  Auto init    : " << auto_init << std::endl;
  std::cout << "  Learning data: " << learning_data << std::endl;

  if (!use_edges && !use_klt && !use_depth) {
    std::cout << "You must choose at least one visual feature among edges, KLT and depth." << std::endl;
    return EXIT_FAILURE;
  }

  if (config_color.empty() || config_depth.empty() || model_color.empty() || model_depth.empty() || init_file.empty()) {
    std::cout << "config_color.empty() || config_depth.empty() || model_color.empty() || model_depth.empty() || "
                 "init_file.empty()"
      << std::endl;
    return EXIT_FAILURE;
  }

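  // Configure the RealSense grabber: 640x480 color (RGBA) and depth (Z16) streams at 30 fps.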
  vpRealSense2 realsense;
  int width = 640, height = 480;
  int fps = 30;
  rs2::config config;
  config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps);
  config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps);

  try {
    realsense.open(config);
  }
  catch (const vpException &e) {
    std::cout << "Catch an exception: " << e.what() << std::endl;
    std::cout << "Check if the Realsense camera is connected..." << std::endl;
    return EXIT_SUCCESS;
  }

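  // Retrieve the intrinsic parameters of the color and depth sensors from the device.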
  vpCameraParameters cam_color =
    realsense.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
  vpCameraParameters cam_depth =
    realsense.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithoutDistortion);

  std::cout << "Sensor internal camera parameters for color camera: " << cam_color << std::endl;
  std::cout << "Sensor internal camera parameters for depth camera: " << cam_depth << std::endl;

  vpImage<vpRGBa> I_color(height, width);
  vpImage<unsigned char> I_gray(height, width);
  vpImage<unsigned char> I_depth(height, width);
  vpImage<uint16_t> I_depth_raw(height, width);

  unsigned int _posx = 100, _posy = 50;

#ifdef VISP_HAVE_DISPLAY
#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
  std::shared_ptr<vpDisplay> display1 = vpDisplayFactory::createDisplay();
  std::shared_ptr<vpDisplay> display2 = vpDisplayFactory::createDisplay();
#else
  vpDisplay *display1 = vpDisplayFactory::allocateDisplay();
  vpDisplay *display2 = vpDisplayFactory::allocateDisplay();
#endif
  if (use_edges || use_klt)
    display1->init(I_gray, _posx, _posy, "Color stream");
  if (use_depth)
    display2->init(I_depth, _posx + I_gray.getWidth() + 10, _posy, "Depth stream");
#endif

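  // Show the live streams and wait for a user click in either window before initializing the tracker.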
  while (true) {
    realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, nullptr, nullptr);

    if (use_edges || use_klt) {
      vpImageConvert::convert(I_color, I_gray);
      vpDisplay::display(I_gray);
      vpDisplay::displayText(I_gray, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_gray);

      if (vpDisplay::getClick(I_gray, false)) {
        break;
      }
    }
    if (use_depth) {
      vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

      vpDisplay::display(I_depth);
      vpDisplay::displayText(I_depth, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_depth);

      if (vpDisplay::getClick(I_depth, false)) {
        break;
      }
    }
  }

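  // Select the features used by the tracker: edges and/or KLT points on the color image, dense depth on the point cloud.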
  std::vector<int> trackerTypes;
  if (use_edges && use_klt)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
  else if (use_edges)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER);
  else if (use_klt)
    trackerTypes.push_back(vpMbGenericTracker::KLT_TRACKER);

  if (use_depth)
    trackerTypes.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER);

  vpHomogeneousMatrix depth_M_color = realsense.getTransformation(RS2_STREAM_COLOR, RS2_STREAM_DEPTH);
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
  std::map<std::string, std::string> mapOfInitFiles;
  std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
  std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;

  std::vector<vpColVector> pointcloud;

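  // Create the generic tracker and load the configuration, CAD model and camera parameters matching the enabled features.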
  vpMbGenericTracker tracker(trackerTypes);

  if ((use_edges || use_klt) && use_depth) {
    tracker.loadConfigFile(config_color, config_depth);
    tracker.loadModel(model_color, model_depth);
    std::cout << "Sensor internal depth_M_color: \n" << depth_M_color << std::endl;
    mapOfCameraTransformations["Camera2"] = depth_M_color;
    tracker.setCameraTransformationMatrix(mapOfCameraTransformations);
    mapOfImages["Camera1"] = &I_gray;
    mapOfImages["Camera2"] = &I_depth;
    mapOfInitFiles["Camera1"] = init_file;
    tracker.setCameraParameters(cam_color, cam_depth);
  }
  else if (use_edges || use_klt) {
    tracker.loadConfigFile(config_color);
    tracker.loadModel(model_color);
    tracker.setCameraParameters(cam_color);
  }
  else if (use_depth) {
    tracker.loadConfigFile(config_depth);
    tracker.loadModel(model_depth);
    tracker.setCameraParameters(cam_depth);
  }

  tracker.setDisplayFeatures(true);
  tracker.setOgreVisibilityTest(use_ogre);
  tracker.setScanLineVisibilityTest(use_scanline);
  tracker.setProjectionErrorComputation(true);
  tracker.setProjectionErrorDisplay(display_projection_error);

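  // Select the keypoint pipeline used for learning and auto-initialization:
  // SIFT descriptors with brute-force L2 matching when available, otherwise FAST corners
  // with ORB descriptors and brute-force Hamming matching.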
#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_XFEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
  std::string detectorName = "SIFT";
  std::string extractorName = "SIFT";
  std::string matcherName = "BruteForce";
#elif ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
  std::string detectorName = "FAST";
  std::string extractorName = "ORB";
  std::string matcherName = "BruteForce-Hamming";
#endif
  vpKeyPoint keypoint;
  if (learn || auto_init) {
    keypoint.setDetector(detectorName);
    keypoint.setExtractor(extractorName);
    keypoint.setMatcher(matcherName);
#if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
    cv::Ptr<cv::ORB> orb_detector = keypoint.getDetector("ORB").dynamicCast<cv::ORB>();
    if (orb_detector) {
      orb_detector->setNLevels(1);
    }
#endif
  }

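  // Either load the learned keypoint database for automatic initialization, or initialize the pose
  // manually by clicking the reference points listed in the .init file; when learning is requested,
  // make sure the output directory exists.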
  if (auto_init) {
    if (!vpIoTools::checkFilename(learning_data)) {
      std::cout << "Cannot enable auto detection. Learning file \"" << learning_data << "\" doesn't exist" << std::endl;
      return EXIT_FAILURE;
    }
    keypoint.loadLearningData(learning_data, true);
  }
  else {
    if ((use_edges || use_klt) && use_depth)
      tracker.initClick(mapOfImages, mapOfInitFiles, true);
    else if (use_edges || use_klt)
      tracker.initClick(I_gray, init_file, true);
    else if (use_depth)
      tracker.initClick(I_depth, init_file, true);

    if (learn)
      vpIoTools::makeDirectory(vpIoTools::getParent(learning_data));
  }

  bool run_auto_init = false;
  if (auto_init) {
    run_auto_init = true;
  }
  std::vector<double> times_vec;

  try {
    // To be able to display keypoints matching with test-detection-rs2
    int learn_id = 1;
    bool quit = false;
    bool learn_position = false;
    double loop_t = 0;
    vpHomogeneousMatrix cMo;

    while (!quit) {
      double t = vpTime::measureTimeMs();
      bool tracking_failed = false;

      // Acquire images and update tracker input data
      realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud, nullptr, nullptr);

      if (use_edges || use_klt || run_auto_init) {
        vpImageConvert::convert(I_color, I_gray);
        vpDisplay::display(I_gray);
      }
      if (use_depth) {
        vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
        vpDisplay::display(I_depth);
      }

      if ((use_edges || use_klt) && use_depth) {
        mapOfImages["Camera1"] = &I_gray;
        mapOfPointclouds["Camera2"] = &pointcloud;
        mapOfWidths["Camera2"] = width;
        mapOfHeights["Camera2"] = height;
      }
      else if (use_edges || use_klt) {
        mapOfImages["Camera"] = &I_gray;
      }
      else if (use_depth) {
        mapOfPointclouds["Camera"] = &pointcloud;
        mapOfWidths["Camera"] = width;
        mapOfHeights["Camera"] = height;
      }

      // Run auto initialization from learned data
      if (run_auto_init) {
        if (keypoint.matchPoint(I_gray, cam_color, cMo)) {
          std::cout << "Auto init succeeded" << std::endl;
          if ((use_edges || use_klt) && use_depth) {
            mapOfCameraPoses["Camera1"] = cMo;
            mapOfCameraPoses["Camera2"] = depth_M_color * cMo;
            tracker.initFromPose(mapOfImages, mapOfCameraPoses);
          }
          else if (use_edges || use_klt) {
            tracker.initFromPose(I_gray, cMo);
          }
          else if (use_depth) {
            tracker.initFromPose(I_depth, depth_M_color * cMo);
          }
        }
        else {
          if (use_edges || use_klt) {
            vpDisplay::flush(I_gray);
          }
          if (use_depth) {
            vpDisplay::flush(I_depth);
          }
          continue;
        }
      }

      // Run the tracker
      try {
        if (run_auto_init) {
          // Turn display features off just after auto init to not display wrong moving-edge if the tracker fails
          tracker.setDisplayFeatures(false);

          run_auto_init = false;
        }
        if ((use_edges || use_klt) && use_depth) {
          tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        }
        else if (use_edges || use_klt) {
          tracker.track(I_gray);
        }
        else if (use_depth) {
          tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        }
      }
      catch (const vpException &e) {
        std::cout << "Tracker exception: " << e.getStringMessage() << std::endl;
        tracking_failed = true;
        if (auto_init) {
          std::cout << "Tracker needs to restart (tracking exception)" << std::endl;
          run_auto_init = true;
        }
      }

      // Get object pose
      cMo = tracker.getPose();

      // Check tracking errors
      double proj_error = 0;
      if (tracker.getTrackerType() & vpMbGenericTracker::EDGE_TRACKER) {
        proj_error = tracker.getProjectionError();
      }
      else {
        proj_error = tracker.computeCurrentProjectionError(I_gray, cMo, cam_color);
      }

      if (auto_init && proj_error > proj_error_threshold) {
        std::cout << "Tracker needs to restart (projection error detected: " << proj_error << ")" << std::endl;
        run_auto_init = true;
        tracking_failed = true;
      }

      // Display tracking results
      if (!tracking_failed) {
        // Turn display features on
        tracker.setDisplayFeatures(true);

        if ((use_edges || use_klt) && use_depth) {
          tracker.display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
          vpDisplay::displayFrame(I_depth, depth_M_color * cMo, cam_depth, 0.05, vpColor::none, 3);
        }
        else if (use_edges || use_klt) {
          tracker.display(I_gray, cMo, cam_color, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
        }
        else if (use_depth) {
          tracker.display(I_depth, cMo, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_depth, cMo, cam_depth, 0.05, vpColor::none, 3);
        }

        {
          std::stringstream ss;
          ss << "Nb features: " << tracker.getError().size();
          vpDisplay::displayText(I_gray, I_gray.getHeight() - 50, 20, ss.str(), vpColor::red);
        }
        {
          std::stringstream ss;
          ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt()
             << ", depth " << tracker.getNbFeaturesDepthDense();
          vpDisplay::displayText(I_gray, I_gray.getHeight() - 30, 20, ss.str(), vpColor::red);
        }
      }

      std::stringstream ss;
      ss << "Loop time: " << loop_t << " ms";

      vpMouseButton::vpMouseButtonType button;
      if (use_edges || use_klt) {
        vpDisplay::displayText(I_gray, 20, 20, ss.str(), vpColor::red);
        if (learn)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: learn  Right click: quit", vpColor::red);
        else if (auto_init)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: auto_init  Right click: quit", vpColor::red);
        else
          vpDisplay::displayText(I_gray, 35, 20, "Right click: quit", vpColor::red);

        vpDisplay::flush(I_gray);

        if (vpDisplay::getClick(I_gray, button, false)) {
          if (button == vpMouseButton::button3) {
            quit = true;
          }
          else if (button == vpMouseButton::button1 && learn) {
            learn_position = true;
          }
          else if (button == vpMouseButton::button1 && auto_init && !learn) {
            run_auto_init = true;
          }
        }
      }
      if (use_depth) {
        vpDisplay::displayText(I_depth, 20, 20, ss.str(), vpColor::red);
        vpDisplay::displayText(I_depth, 40, 20, "Click to quit", vpColor::red);
        vpDisplay::flush(I_depth);

        if (vpDisplay::getClick(I_depth, false)) {
          quit = true;
        }
      }

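      // Learning step triggered by a left click: detect keypoints on the current image, keep those lying
      // on visible faces of the model, back-project them to 3D using the current pose and add them to the
      // reference database.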
      if (learn_position) {
        // Detect keypoints on the current image
        std::vector<cv::KeyPoint> trainKeyPoints;
        keypoint.detect(I_gray, trainKeyPoints);

        // Keep only keypoints on the cube
        std::vector<vpPolygon> polygons;
        std::vector<std::vector<vpPoint> > roisPt;
        std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
        polygons = pair.first;
        roisPt = pair.second;

        // Compute the 3D coordinates
        std::vector<cv::Point3f> points3f;
        vpKeyPoint::compute3DForPointsInPolygons(cMo, cam_color, trainKeyPoints, polygons, roisPt, points3f);

        // Build the reference keypoints
        keypoint.buildReference(I_gray, trainKeyPoints, points3f, true, learn_id++);

        // Display learned data
        for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
          vpDisplay::displayCross(I_gray, (int)it->pt.y, (int)it->pt.x, 10, vpColor::yellow, 3);
        }
        learn_position = false;
        std::cout << "Data learned" << std::endl;
      }
      loop_t = vpTime::measureTimeMs() - t;
      times_vec.push_back(loop_t);
    }
    if (learn) {
      std::cout << "Save learning file: " << learning_data << std::endl;
      keypoint.saveLearningData(learning_data, true, true);
    }
  }
  catch (const vpException &e) {
    std::cout << "Catch an exception: " << e.what() << std::endl;
  }

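  // Release the displays that were allocated with raw pointers when C++11 smart pointers are not available.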
#if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11) && defined(VISP_HAVE_DISPLAY)
  if (display1 != nullptr) {
    delete display1;
  }
  if (display2 != nullptr) {
    delete display2;
  }
#endif

  if (!times_vec.empty()) {
    std::cout << "\nProcessing time, Mean: " << vpMath::getMean(times_vec)
      << " ms ; Median: " << vpMath::getMedian(times_vec) << " ms ; Std: " << vpMath::getStdev(times_vec) << " ms"
      << std::endl;
  }

  return EXIT_SUCCESS;
}
#elif defined(VISP_HAVE_REALSENSE2)
int main()
{
  std::cout << "Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
  return EXIT_SUCCESS;
}
#else
int main()
{
  std::cout << "Install librealsense2 3rd party, configure and build ViSP again to use this example" << std::endl;
  return EXIT_SUCCESS;
}
#endif