Visual Servoing Platform  version 3.6.1 under development (2024-11-15)
tutorial-mb-generic-tracker-rgbd-realsense.cpp
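// Model-based tracking of an object observed by an Intel RealSense RGB-D camera.
// Edge and KLT features are tracked in the color image and dense depth features
// in the depth map. A keypoint database can be learned (--learn) and reused
// (--auto_init) to recover the pose automatically when tracking fails.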
#include <iostream>

#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_OPENCV) && defined(VISP_HAVE_PUGIXML)
#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/sensor/vpRealSense2.h>
#include <visp3/vision/vpKeyPoint.h>

int main(int argc, char *argv[])
{
#ifdef ENABLE_VISP_NAMESPACE
  using namespace VISP_NAMESPACE_NAME;
#endif

  std::string config_color = "", config_depth = "";
  std::string model_color = "", model_depth = "";
  std::string init_file = "";
  bool use_ogre = false;
  bool use_scanline = false;
  bool use_edges = true;
  bool use_klt = true;
  bool use_depth = true;
  bool learn = false;
  bool auto_init = false;
  double proj_error_threshold = 25;
  std::string learning_data = "learning/data-learned.bin";
  bool display_projection_error = false;

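  // Parse the command line; options not listed below are silently ignored.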
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--config_color" && i + 1 < argc) {
      config_color = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--config_depth" && i + 1 < argc) {
      config_depth = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--model_color" && i + 1 < argc) {
      model_color = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--model_depth" && i + 1 < argc) {
      model_depth = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--init_file" && i + 1 < argc) {
      init_file = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--proj_error_threshold" && i + 1 < argc) {
      proj_error_threshold = std::atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--use_ogre") {
      use_ogre = true;
    }
    else if (std::string(argv[i]) == "--use_scanline") {
      use_scanline = true;
    }
    else if (std::string(argv[i]) == "--use_edges" && i + 1 < argc) {
      use_edges = (std::atoi(argv[i + 1]) == 0 ? false : true);
    }
    else if (std::string(argv[i]) == "--use_klt" && i + 1 < argc) {
      use_klt = (std::atoi(argv[i + 1]) == 0 ? false : true);
    }
    else if (std::string(argv[i]) == "--use_depth" && i + 1 < argc) {
      use_depth = (std::atoi(argv[i + 1]) == 0 ? false : true);
    }
    else if (std::string(argv[i]) == "--learn") {
      learn = true;
    }
    else if (std::string(argv[i]) == "--learning_data" && i + 1 < argc) {
      learning_data = argv[i + 1];
    }
    else if (std::string(argv[i]) == "--auto_init") {
      auto_init = true;
    }
    else if (std::string(argv[i]) == "--display_proj_error") {
      display_projection_error = true;
    }
    else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: \n"
        << argv[0]
        << " [--model_color <object.cao>] [--model_depth <object.cao>]"
           " [--config_color <object.xml>] [--config_depth <object.xml>]"
           " [--init_file <object.init>] [--use_ogre] [--use_scanline]"
           " [--proj_error_threshold <threshold between 0 and 90> (default: "
        << proj_error_threshold
        << ")]"
           " [--use_edges <0|1> (default: 1)] [--use_klt <0|1> (default: 1)] [--use_depth <0|1> (default: 1)]"
           " [--learn] [--auto_init] [--learning_data <path to .bin> (default: learning/data-learned.bin)]"
           " [--display_proj_error]"
        << std::endl;

      std::cout << "\n** How to track a 4.2 cm wide cube with manual initialization:\n"
        << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1" << std::endl;
      std::cout << "\n** How to learn the cube and create a learning database:\n"
        << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --learn"
        << std::endl;
      std::cout << "\n** How to track the cube with initialization from learning database:\n"
        << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --auto_init"
        << std::endl;

      return EXIT_SUCCESS;
    }
  }

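  // When only the color model is given, reuse it for the depth camera and derive
  // the default config (.xml), depth config (_depth.xml) and init (.init) file
  // names from the model location.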
  if (model_depth.empty()) {
    model_depth = model_color;
  }
  std::string parentname = vpIoTools::getParent(model_color);
  if (config_color.empty()) {
    config_color = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".xml";
  }
  if (config_depth.empty()) {
    config_depth = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + "_depth.xml";
  }
  if (init_file.empty()) {
    init_file = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".init";
  }
  std::cout << "Tracked features: " << std::endl;
  std::cout << "  Use edges   : " << use_edges << std::endl;
  std::cout << "  Use klt     : " << use_klt << std::endl;
  std::cout << "  Use depth   : " << use_depth << std::endl;
  std::cout << "Tracker options: " << std::endl;
  std::cout << "  Use ogre    : " << use_ogre << std::endl;
  std::cout << "  Use scanline: " << use_scanline << std::endl;
  std::cout << "  Proj. error : " << proj_error_threshold << std::endl;
  std::cout << "  Display proj. error: " << display_projection_error << std::endl;
  std::cout << "Config files: " << std::endl;
  std::cout << "  Config color: \"" << config_color << "\"" << std::endl;
  std::cout << "  Config depth: \"" << config_depth << "\"" << std::endl;
  std::cout << "  Model color : \"" << model_color << "\"" << std::endl;
  std::cout << "  Model depth : \"" << model_depth << "\"" << std::endl;
  std::cout << "  Init file   : \"" << init_file << "\"" << std::endl;
  std::cout << "Learning options: " << std::endl;
  std::cout << "  Learn       : " << learn << std::endl;
  std::cout << "  Auto init   : " << auto_init << std::endl;
  std::cout << "  Learning data: " << learning_data << std::endl;

  if (!use_edges && !use_klt && !use_depth) {
    std::cout << "You must choose at least one visual feature among edges, KLT and depth." << std::endl;
    return EXIT_FAILURE;
  }

  if (config_color.empty() || config_depth.empty() || model_color.empty() || model_depth.empty() || init_file.empty()) {
    std::cout << "One of the config, model or init file names is empty. Use --help to see how to set them." << std::endl;
    return EXIT_FAILURE;
  }

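  // Configure the RealSense grabber: 640x480 color (RGBA8, matching ViSP's vpRGBa
  // pixel layout) and 640x480 Z16 depth streams, both at 30 fps.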
  vpRealSense2 realsense;
  int width = 640, height = 480;
  int fps = 30;
  rs2::config config;
  config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps);
  config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps);

  try {
    realsense.open(config);
  }
  catch (const vpException &e) {
    std::cout << "Caught an exception: " << e.what() << std::endl;
    std::cout << "Check if the RealSense camera is connected..." << std::endl;
    return EXIT_SUCCESS;
  }

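  // Read the factory intrinsics of both sensors, using the undistorted projection model.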
  vpCameraParameters cam_color =
    realsense.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
  vpCameraParameters cam_depth =
    realsense.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithoutDistortion);

  std::cout << "Sensor internal camera parameters for color camera: " << cam_color << std::endl;
  std::cout << "Sensor internal camera parameters for depth camera: " << cam_depth << std::endl;

  vpImage<vpRGBa> I_color(height, width);
  vpImage<unsigned char> I_gray(height, width);
  vpImage<unsigned char> I_depth(height, width);
  vpImage<uint16_t> I_depth_raw(height, width);

  unsigned int _posx = 100, _posy = 50;

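  // Pick the first display backend available at build time: X11, GDI (Windows) or OpenCV.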
#ifdef VISP_HAVE_X11
  vpDisplayX d1, d2;
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI d1, d2;
#elif defined(HAVE_OPENCV_HIGHGUI)
  vpDisplayOpenCV d1, d2;
#endif
  if (use_edges || use_klt)
    d1.init(I_gray, _posx, _posy, "Color stream");
  if (use_depth)
    d2.init(I_depth, _posx + I_gray.getWidth() + 10, _posy, "Depth stream");

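  // Stream images until the user clicks in one of the windows to start the initialization.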
  while (true) {
    realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, nullptr, nullptr);

    if (use_edges || use_klt) {
      vpImageConvert::convert(I_color, I_gray);
      vpDisplay::display(I_gray);
      vpDisplay::displayText(I_gray, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_gray);

      if (vpDisplay::getClick(I_gray, false)) {
        break;
      }
    }
    if (use_depth) {
      vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

      vpDisplay::display(I_depth);
      vpDisplay::displayText(I_depth, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_depth);

      if (vpDisplay::getClick(I_depth, false)) {
        break;
      }
    }
  }

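  // Select the feature types per camera: edges and/or KLT on the color image,
  // dense depth on the depth map.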
  std::vector<int> trackerTypes;
  if (use_edges && use_klt)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
  else if (use_edges)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER);
  else if (use_klt)
    trackerTypes.push_back(vpMbGenericTracker::KLT_TRACKER);

  if (use_depth)
    trackerTypes.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER);

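  // depth_M_color is the extrinsic transformation from the color to the depth
  // frame, read from the sensor. When the generic tracker is built with two
  // cameras it names them "Camera1" (color here) and "Camera2" (depth); with a
  // single camera the name is simply "Camera", hence the map keys used below.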
  vpHomogeneousMatrix depth_M_color = realsense.getTransformation(RS2_STREAM_COLOR, RS2_STREAM_DEPTH);
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
  std::map<std::string, std::string> mapOfInitFiles;
  std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
  std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;

  std::vector<vpColVector> pointcloud;

  vpMbGenericTracker tracker(trackerTypes);

  if ((use_edges || use_klt) && use_depth) {
    tracker.loadConfigFile(config_color, config_depth);
    tracker.loadModel(model_color, model_depth);
    std::cout << "Sensor internal depth_M_color: \n" << depth_M_color << std::endl;
    mapOfCameraTransformations["Camera2"] = depth_M_color;
    tracker.setCameraTransformationMatrix(mapOfCameraTransformations);
    mapOfImages["Camera1"] = &I_gray;
    mapOfImages["Camera2"] = &I_depth;
    mapOfInitFiles["Camera1"] = init_file;
    tracker.setCameraParameters(cam_color, cam_depth);
  }
  else if (use_edges || use_klt) {
    tracker.loadConfigFile(config_color);
    tracker.loadModel(model_color);
    tracker.setCameraParameters(cam_color);
  }
  else if (use_depth) {
    tracker.loadConfigFile(config_depth);
    tracker.loadModel(model_depth);
    tracker.setCameraParameters(cam_depth);
  }

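  // Common tracker settings: feature display, visibility tests (Ogre or scanline)
  // and projection error monitoring.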
  tracker.setDisplayFeatures(true);
  tracker.setOgreVisibilityTest(use_ogre);
  tracker.setScanLineVisibilityTest(use_scanline);
  tracker.setProjectionErrorComputation(true);
  tracker.setProjectionErrorDisplay(display_projection_error);

#if (defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)) || \
  (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
  std::string detectorName = "SIFT";
  std::string extractorName = "SIFT";
  std::string matcherName = "BruteForce";
#else
  std::string detectorName = "FAST";
  std::string extractorName = "ORB";
  std::string matcherName = "BruteForce-Hamming";
#endif
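  // Keypoints are only needed to learn the object appearance or to retrieve an
  // initial pose from a learned database: SIFT when available in OpenCV,
  // otherwise FAST detection with ORB descriptors and Hamming matching.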
  vpKeyPoint keypoint;
  if (learn || auto_init) {
    keypoint.setDetector(detectorName);
    keypoint.setExtractor(extractorName);
    keypoint.setMatcher(matcherName);
#if !(defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
    keypoint.setDetectorParameter("ORB", "nLevels", 1);
#else
    cv::Ptr<cv::ORB> orb_detector = keypoint.getDetector("ORB").dynamicCast<cv::ORB>();
    if (orb_detector) {
      orb_detector->setNLevels(1);
    }
#endif
#endif
  }

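  // Either load a previously learned keypoint database (--auto_init) or let the
  // user click on the points defined in the .init file; with --learn, make sure
  // the output directory for the learning data exists.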
  if (auto_init) {
    if (!vpIoTools::checkFilename(learning_data)) {
      std::cout << "Cannot enable auto detection. Learning file \"" << learning_data << "\" doesn't exist" << std::endl;
      return EXIT_FAILURE;
    }
    keypoint.loadLearningData(learning_data, true);
  }
  else {
    if ((use_edges || use_klt) && use_depth)
      tracker.initClick(mapOfImages, mapOfInitFiles, true);
    else if (use_edges || use_klt)
      tracker.initClick(I_gray, init_file, true);
    else if (use_depth)
      tracker.initClick(I_depth, init_file, true);

    if (learn)
      vpIoTools::makeDirectory(vpIoTools::getParent(learning_data));
  }

  bool run_auto_init = false;
  if (auto_init) {
    run_auto_init = true;
  }
  std::vector<double> times_vec;

  try {
    // To be able to display keypoints matching with test-detection-rs2
    int learn_id = 1;
    bool quit = false;
    bool learn_position = false;
    double loop_t = 0;
    vpHomogeneousMatrix cMo;

    while (!quit) {
      double t = vpTime::measureTimeMs();
      bool tracking_failed = false;

      // Acquire images and update tracker input data
      realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud, nullptr, nullptr);

      if (use_edges || use_klt || run_auto_init) {
        vpImageConvert::convert(I_color, I_gray);
        vpDisplay::display(I_gray);
      }
      if (use_depth) {
        vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
        vpDisplay::display(I_depth);
      }

      if ((use_edges || use_klt) && use_depth) {
        mapOfImages["Camera1"] = &I_gray;
        mapOfPointclouds["Camera2"] = &pointcloud;
        mapOfWidths["Camera2"] = width;
        mapOfHeights["Camera2"] = height;
      }
      else if (use_edges || use_klt) {
        mapOfImages["Camera"] = &I_gray;
      }
      else if (use_depth) {
        mapOfPointclouds["Camera"] = &pointcloud;
        mapOfWidths["Camera"] = width;
        mapOfHeights["Camera"] = height;
      }

      // Run auto initialization from learned data
      if (run_auto_init) {
        if (keypoint.matchPoint(I_gray, cam_color, cMo)) {
          std::cout << "Auto init succeeded" << std::endl;
          if ((use_edges || use_klt) && use_depth) {
            mapOfCameraPoses["Camera1"] = cMo;
            mapOfCameraPoses["Camera2"] = depth_M_color * cMo;
            tracker.initFromPose(mapOfImages, mapOfCameraPoses);
          }
          else if (use_edges || use_klt) {
            tracker.initFromPose(I_gray, cMo);
          }
          else if (use_depth) {
            tracker.initFromPose(I_depth, depth_M_color * cMo);
          }
        }
        else {
          if (use_edges || use_klt) {
            vpDisplay::flush(I_gray);
          }
          if (use_depth) {
            vpDisplay::flush(I_depth);
          }
          continue;
        }
      }

      // Run the tracker
      try {
        if (run_auto_init) {
          // Turn off feature display right after auto init, to avoid drawing
          // wrong moving edges if the tracker fails
          tracker.setDisplayFeatures(false);

          run_auto_init = false;
        }
        if ((use_edges || use_klt) && use_depth) {
          tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        }
        else if (use_edges || use_klt) {
          tracker.track(I_gray);
        }
        else if (use_depth) {
          tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        }
      }
      catch (const vpException &e) {
        std::cout << "Tracker exception: " << e.getStringMessage() << std::endl;
        tracking_failed = true;
        if (auto_init) {
          std::cout << "Tracker needs to restart (tracking exception)" << std::endl;
          run_auto_init = true;
        }
      }

      // Get object pose
      cMo = tracker.getPose();

      // Check tracking errors
      double proj_error = 0;
      if (tracker.getTrackerType() & vpMbGenericTracker::EDGE_TRACKER) {
        proj_error = tracker.getProjectionError();
      }
      else {
        proj_error = tracker.computeCurrentProjectionError(I_gray, cMo, cam_color);
      }

      if (auto_init && proj_error > proj_error_threshold) {
        std::cout << "Tracker needs to restart (projection error detected: " << proj_error << ")" << std::endl;
        run_auto_init = true;
        tracking_failed = true;
      }

      // Display tracking results
      if (!tracking_failed) {
        // Turn display features on
        tracker.setDisplayFeatures(true);

        if ((use_edges || use_klt) && use_depth) {
          tracker.display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
          vpDisplay::displayFrame(I_depth, depth_M_color * cMo, cam_depth, 0.05, vpColor::none, 3);
        }
        else if (use_edges || use_klt) {
          tracker.display(I_gray, cMo, cam_color, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
        }
        else if (use_depth) {
          tracker.display(I_depth, cMo, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_depth, cMo, cam_depth, 0.05, vpColor::none, 3);
        }

        {
          std::stringstream ss;
          ss << "Nb features: " << tracker.getError().size();
          vpDisplay::displayText(I_gray, I_gray.getHeight() - 50, 20, ss.str(), vpColor::red);
        }
        {
          std::stringstream ss;
          ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt()
             << ", depth " << tracker.getNbFeaturesDepthDense();
          vpDisplay::displayText(I_gray, I_gray.getHeight() - 30, 20, ss.str(), vpColor::red);
        }
      }

      std::stringstream ss;
      ss << "Loop time: " << loop_t << " ms";

      vpMouseButton::vpMouseButtonType button;
      if (use_edges || use_klt) {
        vpDisplay::displayText(I_gray, 20, 20, ss.str(), vpColor::red);
        if (learn)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: learn  Right click: quit", vpColor::red);
        else if (auto_init)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: auto_init  Right click: quit", vpColor::red);
        else
          vpDisplay::displayText(I_gray, 35, 20, "Right click: quit", vpColor::red);

        vpDisplay::flush(I_gray);

        if (vpDisplay::getClick(I_gray, button, false)) {
          if (button == vpMouseButton::button3) {
            quit = true;
          }
          else if (button == vpMouseButton::button1 && learn) {
            learn_position = true;
          }
          else if (button == vpMouseButton::button1 && auto_init && !learn) {
            run_auto_init = true;
          }
        }
      }
      if (use_depth) {
        vpDisplay::displayText(I_depth, 20, 20, ss.str(), vpColor::red);
        vpDisplay::displayText(I_depth, 40, 20, "Click to quit", vpColor::red);
        vpDisplay::flush(I_depth);

        if (vpDisplay::getClick(I_depth, false)) {
          quit = true;
        }
      }

      if (learn_position) {
        // Detect keypoints on the current image
        std::vector<cv::KeyPoint> trainKeyPoints;
        keypoint.detect(I_gray, trainKeyPoints);

        // Keep only keypoints on the cube
        std::vector<vpPolygon> polygons;
        std::vector<std::vector<vpPoint> > roisPt;
        std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
        polygons = pair.first;
        roisPt = pair.second;

        // Compute the 3D coordinates
        std::vector<cv::Point3f> points3f;
        vpKeyPoint::compute3DForPointsInPolygons(cMo, cam_color, trainKeyPoints, polygons, roisPt, points3f);

        // Build the reference keypoints
        keypoint.buildReference(I_gray, trainKeyPoints, points3f, true, learn_id++);

        // Display learned data
        for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
          vpDisplay::displayCross(I_gray, (int)it->pt.y, (int)it->pt.x, 10, vpColor::yellow, 3);
        }
        learn_position = false;
        std::cout << "Data learned" << std::endl;
      }
      loop_t = vpTime::measureTimeMs() - t;
      times_vec.push_back(loop_t);
    }
    if (learn) {
      std::cout << "Save learning file: " << learning_data << std::endl;
      keypoint.saveLearningData(learning_data, true, true);
    }
  }
  catch (const vpException &e) {
    std::cout << "Caught an exception: " << e.what() << std::endl;
  }

  if (!times_vec.empty()) {
    std::cout << "\nProcessing time, Mean: " << vpMath::getMean(times_vec)
              << " ms ; Median: " << vpMath::getMedian(times_vec) << " ms ; Std: " << vpMath::getStdev(times_vec)
              << " ms" << std::endl;
  }

  return EXIT_SUCCESS;
}
#elif defined(VISP_HAVE_REALSENSE2)
int main()
{
  std::cout << "Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
  return EXIT_SUCCESS;
}
#else
int main()
{
  std::cout << "Install librealsense2 3rd party, configure and build ViSP again to use this example" << std::endl;
  return EXIT_SUCCESS;
}
#endif