tutorial-mb-generic-tracker-rgbd-realsense.cpp
#include <iostream>

#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_OPENCV)
#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/sensor/vpRealSense2.h>
#include <visp3/vision/vpKeyPoint.h>

int main(int argc, char *argv[])
{
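  // Command-line options; by default edge, KLT and depth features are all enabled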
  std::string config_color = "", config_depth = "";
  std::string model_color = "", model_depth = "";
  std::string init_file = "";
  bool use_ogre = false;
  bool use_scanline = false;
  bool use_edges = true;
  bool use_klt = true;
  bool use_depth = true;
  bool learn = false;
  bool auto_init = false;
  double proj_error_threshold = 25;
  std::string learning_data = "learning/data-learned.bin";
  bool display_projection_error = false;

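  // Parse command-line options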
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--config_color" && i+1 < argc) {
      config_color = std::string(argv[i+1]);
    } else if (std::string(argv[i]) == "--config_depth" && i+1 < argc) {
      config_depth = std::string(argv[i+1]);
    } else if (std::string(argv[i]) == "--model_color" && i+1 < argc) {
      model_color = std::string(argv[i+1]);
    } else if (std::string(argv[i]) == "--model_depth" && i+1 < argc) {
      model_depth = std::string(argv[i+1]);
    } else if (std::string(argv[i]) == "--init_file" && i+1 < argc) {
      init_file = std::string(argv[i+1]);
    } else if (std::string(argv[i]) == "--proj_error_threshold" && i+1 < argc) {
      proj_error_threshold = std::atof(argv[i+1]);
    } else if (std::string(argv[i]) == "--use_ogre") {
      use_ogre = true;
    } else if (std::string(argv[i]) == "--use_scanline") {
      use_scanline = true;
    } else if (std::string(argv[i]) == "--use_edges" && i+1 < argc) {
      use_edges = (std::atoi(argv[i+1]) == 0 ? false : true);
    } else if (std::string(argv[i]) == "--use_klt" && i+1 < argc) {
      use_klt = (std::atoi(argv[i+1]) == 0 ? false : true);
    } else if (std::string(argv[i]) == "--use_depth" && i+1 < argc) {
      use_depth = (std::atoi(argv[i+1]) == 0 ? false : true);
    } else if (std::string(argv[i]) == "--learn") {
      learn = true;
    } else if (std::string(argv[i]) == "--learning_data" && i+1 < argc) {
      learning_data = argv[i+1];
    } else if (std::string(argv[i]) == "--auto_init") {
      auto_init = true;
    } else if (std::string(argv[i]) == "--display_proj_error") {
      display_projection_error = true;
    } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: \n" << argv[0]
                << " [--model_color <object.cao>] [--model_depth <object.cao>]"
                   " [--config_color <object.xml>] [--config_depth <object.xml>]"
                   " [--init_file <object.init>] [--use_ogre] [--use_scanline]"
                   " [--proj_error_threshold <threshold between 0 and 90> (default: " << proj_error_threshold << ")]"
                   " [--use_edges <0|1> (default: 1)] [--use_klt <0|1> (default: 1)] [--use_depth <0|1> (default: 1)]"
                   " [--learn] [--auto_init] [--learning_data <path to .bin> (default: learning/data-learned.bin)]"
                   " [--display_proj_error]" << std::endl;

      std::cout << "\n** How to track a 4.2 cm wide cube with manual initialization:\n"
                << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1" << std::endl;
      std::cout << "\n** How to learn the cube and create a learning database:\n"
                << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --learn" << std::endl;
      std::cout << "\n** How to track the cube with initialization from learning database:\n"
                << argv[0] << " --model_color model/cube/cube.cao --use_edges 1 --use_klt 1 --use_depth 1 --auto_init" << std::endl;

      return 0;
    }
  }
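  // Derive default config and init file names from the model file name when not given on the command line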
  if (model_depth.empty()) {
    model_depth = model_color;
  }
  std::string parentname = vpIoTools::getParent(model_color);
  if (config_color.empty()) {
    config_color = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".xml";
  }
  if (config_depth.empty()) {
    config_depth = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + "_depth.xml";
  }
  if (init_file.empty()) {
    init_file = (parentname.empty() ? "" : (parentname + "/")) + vpIoTools::getNameWE(model_color) + ".init";
  }
  std::cout << "Tracked features: " << std::endl;
  std::cout << "  Use edges   : " << use_edges << std::endl;
  std::cout << "  Use klt     : " << use_klt << std::endl;
  std::cout << "  Use depth   : " << use_depth << std::endl;
  std::cout << "Tracker options: " << std::endl;
  std::cout << "  Use ogre    : " << use_ogre << std::endl;
  std::cout << "  Use scanline: " << use_scanline << std::endl;
  std::cout << "  Proj. error : " << proj_error_threshold << std::endl;
  std::cout << "  Display proj. error: " << display_projection_error << std::endl;
  std::cout << "Config files: " << std::endl;
  std::cout << "  Config color: " << "\"" << config_color << "\"" << std::endl;
  std::cout << "  Config depth: " << "\"" << config_depth << "\"" << std::endl;
  std::cout << "  Model color : " << "\"" << model_color << "\"" << std::endl;
  std::cout << "  Model depth : " << "\"" << model_depth << "\"" << std::endl;
  std::cout << "  Init file   : " << "\"" << init_file << "\"" << std::endl;
  std::cout << "Learning options: " << std::endl;
  std::cout << "  Learn       : " << learn << std::endl;
  std::cout << "  Auto init   : " << auto_init << std::endl;
  std::cout << "  Learning data: " << learning_data << std::endl;

  if (!use_edges && !use_klt && !use_depth) {
    std::cout << "You must choose at least one visual feature among edges, KLT and depth." << std::endl;
    return EXIT_FAILURE;
  }

  if (config_color.empty() || config_depth.empty() || model_color.empty() || model_depth.empty() || init_file.empty()) {
    std::cout << "At least one of the config, model or init file names is empty." << std::endl;
    return EXIT_FAILURE;
  }

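  // Configure the RealSense grabber: 640x480 color and depth streams at 30 fps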
  vpRealSense2 realsense;
  int width = 640, height = 480;
  int fps = 30;
  rs2::config config;
  config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps);
  config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps);

  try {
    realsense.open(config);
  }
  catch (const vpException &e) {
    std::cout << "Caught an exception: " << e.what() << std::endl;
    std::cout << "Check if the RealSense camera is connected..." << std::endl;
    return EXIT_SUCCESS;
  }

  // Retrieve camera intrinsics from the sensor
  vpCameraParameters cam_color =
      realsense.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
  vpCameraParameters cam_depth =
      realsense.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithoutDistortion);
  std::cout << "Sensor internal camera parameters for color camera: " << cam_color << std::endl;
  std::cout << "Sensor internal camera parameters for depth camera: " << cam_depth << std::endl;

  vpImage<vpRGBa> I_color(height, width);
  vpImage<unsigned char> I_gray(height, width);
  vpImage<unsigned char> I_depth(height, width);
  vpImage<uint16_t> I_depth_raw(height, width);

  unsigned int _posx = 100, _posy = 50;

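  // Pick an available display backend: X11 on unix-like systems, GDI on Windows, OpenCV otherwise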
#ifdef VISP_HAVE_X11
  vpDisplayX d1, d2;
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI d1, d2;
#elif defined(VISP_HAVE_OPENCV)
  vpDisplayOpenCV d1, d2;
#endif
  if (use_edges || use_klt)
    d1.init(I_gray, _posx, _posy, "Color stream");
  if (use_depth)
    d2.init(I_depth, _posx + I_gray.getWidth()+10, _posy, "Depth stream");

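  // Stream images until the user clicks to start the tracker initialization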
  while (true) {
    realsense.acquire((unsigned char *) I_color.bitmap, (unsigned char *) I_depth_raw.bitmap, NULL, NULL);

    if (use_edges || use_klt) {
      vpImageConvert::convert(I_color, I_gray);
      vpDisplay::display(I_gray);
      vpDisplay::displayText(I_gray, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_gray);

      if (vpDisplay::getClick(I_gray, false)) {
        break;
      }
    }
    if (use_depth) {
      vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

      vpDisplay::display(I_depth);
      vpDisplay::displayText(I_depth, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I_depth);

      if (vpDisplay::getClick(I_depth, false)) {
        break;
      }
    }
  }
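  // Select the tracker type for each camera: edges and/or KLT for the color camera, dense depth for the depth camera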

  std::vector<int> trackerTypes;
  if (use_edges && use_klt)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
  else if (use_edges)
    trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER);
  else if (use_klt)
    trackerTypes.push_back(vpMbGenericTracker::KLT_TRACKER);

  if (use_depth)
    trackerTypes.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER);

  vpHomogeneousMatrix depth_M_color = realsense.getTransformation(RS2_STREAM_COLOR, RS2_STREAM_DEPTH);
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
  std::map<std::string, std::string> mapOfInitFiles;
  std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
  std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;

  std::vector<vpColVector> pointcloud;

  vpMbGenericTracker tracker(trackerTypes);

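  // Load the tracker configuration and CAD model; when both cameras are used, register the color-to-depth extrinsics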
  if ((use_edges || use_klt) && use_depth) {
    tracker.loadConfigFile(config_color, config_depth);
    tracker.loadModel(model_color, model_depth);
    std::cout << "Sensor internal depth_M_color: \n" << depth_M_color << std::endl;
    mapOfCameraTransformations["Camera2"] = depth_M_color;
    tracker.setCameraTransformationMatrix(mapOfCameraTransformations);
    mapOfImages["Camera1"] = &I_gray;
    mapOfImages["Camera2"] = &I_depth;
    mapOfInitFiles["Camera1"] = init_file;
    tracker.setCameraParameters(cam_color, cam_depth);
  }
  else if (use_edges || use_klt) {
    tracker.loadConfigFile(config_color);
    tracker.loadModel(model_color);
    tracker.setCameraParameters(cam_color);
  }
  else if (use_depth) {
    tracker.loadConfigFile(config_depth);
    tracker.loadModel(model_depth);
    tracker.setCameraParameters(cam_depth);
  }

  tracker.setDisplayFeatures(true);
  tracker.setOgreVisibilityTest(use_ogre);
  tracker.setScanLineVisibilityTest(use_scanline);
  tracker.setProjectionErrorComputation(true);
  tracker.setProjectionErrorDisplay(display_projection_error);

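  // Keypoint pipeline used for learning and auto-initialization: SIFT when available, FAST + ORB otherwise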
#if (defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
  std::string detectorName = "SIFT";
  std::string extractorName = "SIFT";
  std::string matcherName = "BruteForce";
#else
  std::string detectorName = "FAST";
  std::string extractorName = "ORB";
  std::string matcherName = "BruteForce-Hamming";
#endif
  vpKeyPoint keypoint;
  if (learn || auto_init) {
    keypoint.setDetector(detectorName);
    keypoint.setExtractor(extractorName);
    keypoint.setMatcher(matcherName);
#if !(defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
#  if (VISP_HAVE_OPENCV_VERSION < 0x030000)
    keypoint.setDetectorParameter("ORB", "nLevels", 1);
#  else
    cv::Ptr<cv::ORB> orb_detector = keypoint.getDetector("ORB").dynamicCast<cv::ORB>();
    if (orb_detector) {
      orb_detector->setNLevels(1);
    }
#  endif
#endif
  }

  if (auto_init) {
    if (!vpIoTools::checkFilename(learning_data)) {
      std::cout << "Cannot enable auto detection. Learning file \"" << learning_data << "\" doesn't exist" << std::endl;
      return EXIT_FAILURE;
    }
    keypoint.loadLearningData(learning_data, true);
  } else {
    if ((use_edges || use_klt) && use_depth)
      tracker.initClick(mapOfImages, mapOfInitFiles, true);
    else if (use_edges || use_klt)
      tracker.initClick(I_gray, init_file, true);
    else if (use_depth)
      tracker.initClick(I_depth, init_file, true);

    if (learn)
      vpIoTools::makeDirectory(vpIoTools::getParent(learning_data));
  }

  bool run_auto_init = false;
  if (auto_init) {
    run_auto_init = true;
  }
  std::vector<double> times_vec;

  try {
    // To be able to display keypoints matching with test-detection-rs2
    int learn_id = 1;
    bool quit = false;
    bool learn_position = false;
    double loop_t = 0;
    vpHomogeneousMatrix cMo;

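    // Tracking loop: acquire, (re-)initialize if needed, track, display, and handle user clicks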
    while (!quit) {
      double t = vpTime::measureTimeMs();
      bool tracking_failed = false;

      // Acquire images and update tracker input data
      realsense.acquire((unsigned char *) I_color.bitmap, (unsigned char *) I_depth_raw.bitmap, &pointcloud, NULL, NULL);

      if (use_edges || use_klt || run_auto_init) {
        vpImageConvert::convert(I_color, I_gray);
        vpDisplay::display(I_gray);
      }
      if (use_depth) {
        vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
        vpDisplay::display(I_depth);
      }

      if ((use_edges || use_klt) && use_depth) {
        mapOfImages["Camera1"] = &I_gray;
        mapOfPointclouds["Camera2"] = &pointcloud;
        mapOfWidths["Camera2"] = width;
        mapOfHeights["Camera2"] = height;
      } else if (use_edges || use_klt) {
        mapOfImages["Camera"] = &I_gray;
      } else if (use_depth) {
        mapOfPointclouds["Camera"] = &pointcloud;
        mapOfWidths["Camera"] = width;
        mapOfHeights["Camera"] = height;
      }

      // Run auto initialization from learned data
      if (run_auto_init) {
        if (keypoint.matchPoint(I_gray, cam_color, cMo)) {
          std::cout << "Auto init succeeded" << std::endl;
          if ((use_edges || use_klt) && use_depth) {
            mapOfCameraPoses["Camera1"] = cMo;
            mapOfCameraPoses["Camera2"] = depth_M_color * cMo;
            tracker.initFromPose(mapOfImages, mapOfCameraPoses);
          } else if (use_edges || use_klt) {
            tracker.initFromPose(I_gray, cMo);
          } else if (use_depth) {
            tracker.initFromPose(I_depth, depth_M_color * cMo);
          }
        } else {
          if (use_edges || use_klt) {
            vpDisplay::flush(I_gray);
          }
          if (use_depth) {
            vpDisplay::flush(I_depth);
          }
          continue;
        }
      }

      // Run the tracker
      try {
        if (run_auto_init) {
          // Turn off display features just after auto init, to avoid displaying wrong moving edges if the tracker fails
          tracker.setDisplayFeatures(false);

          run_auto_init = false;
        }
        if ((use_edges || use_klt) && use_depth) {
          tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        } else if (use_edges || use_klt) {
          tracker.track(I_gray);
        } else if (use_depth) {
          tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        }
      } catch (const vpException &e) {
        std::cout << "Tracker exception: " << e.getStringMessage() << std::endl;
        tracking_failed = true;
        if (auto_init) {
          std::cout << "Tracker needs to restart (tracking exception)" << std::endl;
          run_auto_init = true;
        }
      }

      // Get object pose
      cMo = tracker.getPose();

      // Check tracking errors
      double proj_error = 0;
      if (tracker.getTrackerType() & vpMbGenericTracker::EDGE_TRACKER) {
        proj_error = tracker.getProjectionError();
      }
      else {
        proj_error = tracker.computeCurrentProjectionError(I_gray, cMo, cam_color);
      }

      if (auto_init && proj_error > proj_error_threshold) {
        std::cout << "Tracker needs to restart (projection error detected: " << proj_error << ")" << std::endl;
        run_auto_init = true;
        tracking_failed = true;
      }

      // Display tracking results
      if (!tracking_failed) {
        // Turn display features on
        tracker.setDisplayFeatures(true);

        if ((use_edges || use_klt) && use_depth) {
          tracker.display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
          vpDisplay::displayFrame(I_depth, depth_M_color * cMo, cam_depth, 0.05, vpColor::none, 3);
        } else if (use_edges || use_klt) {
          tracker.display(I_gray, cMo, cam_color, vpColor::red, 3);
          vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
        } else if (use_depth) {
          tracker.display(I_depth, cMo, cam_depth, vpColor::red, 3);
          vpDisplay::displayFrame(I_depth, cMo, cam_depth, 0.05, vpColor::none, 3);
        }

        if (use_edges || use_klt) {
          std::stringstream ss;
          ss << "Nb features: " << tracker.getError().getRows();
          vpDisplay::displayText(I_gray, 20, I_gray.getWidth() - 150, ss.str(), vpColor::red);
        } else if (use_depth) {
          std::stringstream ss;
          ss << "Nb features: " << tracker.getError().getRows();
          vpDisplay::displayText(I_depth, 20, I_depth.getWidth() - 150, ss.str(), vpColor::red);
        }
      }

      std::stringstream ss;
      ss << "Loop time: " << loop_t << " ms";

      vpMouseButton::vpMouseButtonType button;
      if (use_edges || use_klt) {
        vpDisplay::displayText(I_gray, 20, 20, ss.str(), vpColor::red);
        if (learn)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: learn  Right click: quit", vpColor::red);
        else if (auto_init)
          vpDisplay::displayText(I_gray, 35, 20, "Left click: auto_init  Right click: quit", vpColor::red);
        else
          vpDisplay::displayText(I_gray, 35, 20, "Right click: quit", vpColor::red);

        vpDisplay::flush(I_gray);

        if (vpDisplay::getClick(I_gray, button, false)) {
          if (button == vpMouseButton::button3) {
            quit = true;
          } else if (button == vpMouseButton::button1 && learn) {
            learn_position = true;
          } else if (button == vpMouseButton::button1 && auto_init && !learn) {
            run_auto_init = true;
          }
        }
      }
      if (use_depth) {
        vpDisplay::displayText(I_depth, 20, 20, ss.str(), vpColor::red);
        vpDisplay::displayText(I_depth, 40, 20, "Click to quit", vpColor::red);
        vpDisplay::flush(I_depth);

        if (vpDisplay::getClick(I_depth, false)) {
          quit = true;
        }
      }

      if (learn_position) {
        // Detect keypoints on the current image
        std::vector<cv::KeyPoint> trainKeyPoints;
        keypoint.detect(I_gray, trainKeyPoints);

        // Keep only keypoints on the cube
        std::vector<vpPolygon> polygons;
        std::vector<std::vector<vpPoint> > roisPt;
        std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
        polygons = pair.first;
        roisPt = pair.second;

        // Compute the 3D coordinates
        std::vector<cv::Point3f> points3f;
        vpKeyPoint::compute3DForPointsInPolygons(cMo, cam_color, trainKeyPoints, polygons, roisPt, points3f);

        // Build the reference keypoints
        keypoint.buildReference(I_gray, trainKeyPoints, points3f, true, learn_id++);

        // Display learned data
        for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
          vpDisplay::displayCross(I_gray, (int)it->pt.y, (int)it->pt.x, 10, vpColor::yellow, 3);
        }
        learn_position = false;
        std::cout << "Data learned" << std::endl;
      }
      loop_t = vpTime::measureTimeMs() - t;
      times_vec.push_back(loop_t);
    }
    if (learn) {
      std::cout << "Save learning file: " << learning_data << std::endl;
      keypoint.saveLearningData(learning_data, true, true);
    }
  } catch (const vpException &e) {
    std::cout << "Caught an exception: " << e.what() << std::endl;
  }

  if (!times_vec.empty()) {
    std::cout << "\nProcessing time, Mean: " << vpMath::getMean(times_vec) << " ms ; Median: "
              << vpMath::getMedian(times_vec) << " ms ; Std: " << vpMath::getStdev(times_vec) << " ms" << std::endl;
  }

  return EXIT_SUCCESS;
}
#elif defined(VISP_HAVE_REALSENSE2)
int main() {
  std::cout << "Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
  return 0;
}
#else
int main() {
  std::cout << "Install librealsense2 3rd party, configure and build ViSP again to use this example" << std::endl;
  return 0;
}
#endif