Visual Servoing Platform version 3.3.0 under development (2020-02-17)
tutorial-mb-generic-tracker-live.cpp
#include <visp3/core/vpConfig.h>
#ifdef VISP_HAVE_MODULE_SENSOR
#include <visp3/sensor/vpV4l2Grabber.h>
#include <visp3/sensor/vp1394CMUGrabber.h>
#include <visp3/sensor/vp1394TwoGrabber.h>
#include <visp3/sensor/vpFlyCaptureGrabber.h>
#include <visp3/sensor/vpRealSense2.h>
#endif
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/vision/vpKeyPoint.h>
#include <visp3/mbt/vpMbGenericTracker.h>

// Uncomment one of the lines below to disable the corresponding grabber and fall back to the next available one
//#undef VISP_HAVE_V4L2
//#undef VISP_HAVE_DC1394
//#undef VISP_HAVE_CMU1394
//#undef VISP_HAVE_FLYCAPTURE
//#undef VISP_HAVE_REALSENSE2
//#undef VISP_HAVE_OPENCV

int main(int argc, char **argv)
{
#if defined(VISP_HAVE_OPENCV) && \
    (defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || (VISP_HAVE_OPENCV_VERSION >= 0x020100) || \
     defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2))

  try {
    std::string opt_modelname = "model/teabox/teabox.cao";
    int opt_tracker = 2;
    int opt_device = 0; // For the OpenCV and V4L2 grabbers: selects the camera device
    double opt_proj_error_threshold = 30.;
    bool opt_use_ogre = false;
    bool opt_use_scanline = false;
    bool opt_display_projection_error = false;
    bool opt_learn = false;
    bool opt_auto_init = false;
    std::string opt_learning_data = "learning/data-learned.bin";
    std::string opt_intrinsic_file = "";
    std::string opt_camera_name = "";

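    // Parse command-line options; flags that take a value read the next argument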
    for (int i = 0; i < argc; i++) {
      if (std::string(argv[i]) == "--model" && i + 1 < argc) {
        opt_modelname = std::string(argv[i + 1]);
      } else if (std::string(argv[i]) == "--tracker" && i + 1 < argc) {
        opt_tracker = atoi(argv[i + 1]);
      } else if (std::string(argv[i]) == "--camera_device" && i + 1 < argc) {
        opt_device = atoi(argv[i + 1]);
      } else if (std::string(argv[i]) == "--max_proj_error" && i + 1 < argc) {
        opt_proj_error_threshold = atof(argv[i + 1]);
      } else if (std::string(argv[i]) == "--use_ogre") {
        opt_use_ogre = true;
      } else if (std::string(argv[i]) == "--use_scanline") {
        opt_use_scanline = true;
      } else if (std::string(argv[i]) == "--learn") {
        opt_learn = true;
      } else if (std::string(argv[i]) == "--learning_data" && i + 1 < argc) {
        opt_learning_data = argv[i + 1];
      } else if (std::string(argv[i]) == "--auto_init") {
        opt_auto_init = true;
      } else if (std::string(argv[i]) == "--display_proj_error") {
        opt_display_projection_error = true;
      } else if (std::string(argv[i]) == "--intrinsic" && i + 1 < argc) {
        opt_intrinsic_file = std::string(argv[i + 1]);
      } else if (std::string(argv[i]) == "--camera_name" && i + 1 < argc) {
        opt_camera_name = std::string(argv[i + 1]);
      } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
        std::cout << "\nUsage: " << argv[0]
                  << " [--camera_device <camera device> (default: 0)]"
                  << " [--intrinsic <intrinsic file> (default: empty)]"
                  << " [--camera_name <camera name> (default: empty)]"
                  << " [--model <model name> (default: teabox)]"
                  << " [--tracker <0=edge|1=keypoint|2=hybrid> (default: 2)]"
                  << " [--use_ogre] [--use_scanline]"
                  << " [--max_proj_error <allowed projection error> (default: 30)]"
                  << " [--learn] [--auto_init] [--learning_data <data-learned.bin> (default: learning/data-learned.bin)]"
                  << " [--display_proj_error]"
                  << " [--help] [-h]\n"
                  << std::endl;
        return 0;
      }
    }
    std::string parentname = vpIoTools::getParent(opt_modelname);
    std::string objectname = vpIoTools::getNameWE(opt_modelname);

    if (!parentname.empty())
      objectname = parentname + "/" + objectname;

    std::cout << "Tracker requested config files: " << objectname << ".[init, cao]" << std::endl;
    std::cout << "Tracker optional config files: " << objectname << ".[ppm]" << std::endl;

    std::cout << "Tracked features: " << std::endl;
    std::cout << "  Use edges   : " << (opt_tracker == 0 || opt_tracker == 2) << std::endl;
    std::cout << "  Use klt     : " << (opt_tracker == 1 || opt_tracker == 2) << std::endl;
    std::cout << "Tracker options: " << std::endl;
    std::cout << "  Use ogre    : " << opt_use_ogre << std::endl;
    std::cout << "  Use scanline: " << opt_use_scanline << std::endl;
    std::cout << "  Proj. error : " << opt_proj_error_threshold << std::endl;
    std::cout << "  Display proj. error: " << opt_display_projection_error << std::endl;
    std::cout << "Config files: " << std::endl;
    std::cout << "  Config file : " << "\"" << objectname + ".xml" << "\"" << std::endl;
    std::cout << "  Model file  : " << "\"" << objectname + ".cao" << "\"" << std::endl;
    std::cout << "  Init file   : " << "\"" << objectname + ".init" << "\"" << std::endl;
    std::cout << "Learning options: " << std::endl;
    std::cout << "  Learn       : " << opt_learn << std::endl;
    std::cout << "  Auto init   : " << opt_auto_init << std::endl;
    std::cout << "  Learning data: " << opt_learning_data << std::endl;

#if VISP_VERSION_INT > VP_VERSION_INT(3, 2, 0)
    vpImage<vpRGBa> I; // Since ViSP 3.2.0 we support model-based tracking on color images
#else
    vpImage<unsigned char> I; // Tracking on gray level images
#endif

    // Default intrinsics, overridden below when an XML calibration file is given with --intrinsic
    vpCameraParameters cam;
    cam.initPersProjWithoutDistortion(839, 839, 325, 243);
#ifdef VISP_HAVE_PUGIXML
    vpXmlParserCamera parser;
    if (!opt_intrinsic_file.empty() && !opt_camera_name.empty())
      parser.parse(cam, opt_intrinsic_file, opt_camera_name, vpCameraParameters::perspectiveProjWithoutDistortion);
#endif

    vpHomogeneousMatrix cMo; // Pose of the object in the camera frame

#if defined(VISP_HAVE_V4L2)
    vpV4l2Grabber g;
    std::ostringstream device;
    device << "/dev/video" << opt_device;
    std::cout << "Use Video 4 Linux grabber on device " << device.str() << std::endl;
    g.setDevice(device.str());
    g.setScale(1);
    g.open(I);
#elif defined(VISP_HAVE_DC1394)
    (void)opt_device; // To avoid unused variable warning
    std::cout << "Use DC1394 grabber" << std::endl;
    vp1394TwoGrabber g;
    g.open(I);
#elif defined(VISP_HAVE_CMU1394)
    (void)opt_device; // To avoid unused variable warning
    std::cout << "Use CMU1394 grabber" << std::endl;
    vp1394CMUGrabber g;
    g.open(I);
#elif defined(VISP_HAVE_FLYCAPTURE)
    (void)opt_device; // To avoid unused variable warning
    std::cout << "Use FlyCapture grabber" << std::endl;
    vpFlyCaptureGrabber g;
    g.open(I);
#elif defined(VISP_HAVE_REALSENSE2)
    (void)opt_device; // To avoid unused variable warning
    std::cout << "Use Realsense 2 grabber" << std::endl;
    vpRealSense2 g;
    rs2::config config;
    config.disable_stream(RS2_STREAM_DEPTH);
    config.disable_stream(RS2_STREAM_INFRARED);
    config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
    g.open(config);
    g.acquire(I);

    std::cout << "Read camera parameters from Realsense device" << std::endl;
    cam = g.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
#elif defined(VISP_HAVE_OPENCV)
    std::cout << "Use OpenCV grabber on device " << opt_device << std::endl;
    cv::VideoCapture g(opt_device); // Open the default camera
    if (!g.isOpened()) {            // Check if we succeeded
      std::cout << "Failed to open the camera" << std::endl;
      return -1;
    }
    cv::Mat frame;
    g >> frame; // get a new frame from camera
    vpImageConvert::convert(frame, I);
#endif

    vpDisplay *display = NULL;
#if defined(VISP_HAVE_X11)
    display = new vpDisplayX;
#elif defined(VISP_HAVE_GDI)
    display = new vpDisplayGDI;
#else
    display = new vpDisplayOpenCV;
#endif
    display->init(I, 100, 100, "Model-based tracker");

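    // Grab and display frames until the user clicks to start the tracker initialization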
    while (true) {
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
      g.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
      g >> frame;
      vpImageConvert::convert(frame, I);
#endif

      vpDisplay::display(I);
      vpDisplay::displayText(I, 20, 20, "Click when ready.", vpColor::red);
      vpDisplay::flush(I);

      if (vpDisplay::getClick(I, false)) {
        break;
      }
    }

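    // Choose the features used by the generic tracker: 0 = moving edges, 1 = KLT keypoints, 2 = both (hybrid)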
    vpMbGenericTracker tracker;
    if (opt_tracker == 0)
      tracker.setTrackerType(vpMbGenericTracker::EDGE_TRACKER);
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV)
    else if (opt_tracker == 1)
      tracker.setTrackerType(vpMbGenericTracker::KLT_TRACKER);
    else
      tracker.setTrackerType(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
#else
    else {
# if !defined(VISP_HAVE_MODULE_KLT)
      std::cout << "klt and hybrid model-based tracker are not available since visp_klt module is not available. "
                   "In CMakeGUI turn visp_klt module ON, configure and build ViSP again."
                << std::endl;
# else
      std::cout << "Hybrid tracking is impossible since OpenCV is not enabled. "
                << "Install OpenCV, configure and build ViSP again to run this tutorial."
                << std::endl;
# endif
      return EXIT_SUCCESS;
    }
#endif

    bool usexml = false;
    // Load the tracker configuration from an XML file when one is found next to the model
#ifdef VISP_HAVE_PUGIXML
    if (vpIoTools::checkFilename(objectname + ".xml")) {
      tracker.loadConfigFile(objectname + ".xml");
      usexml = true;
    }
#endif

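    // No XML configuration found: set the moving-edge and KLT parameters in code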
    if (!usexml) {
      if (opt_tracker == 0 || opt_tracker == 2) {
        vpMe me;
        me.setMaskSize(5);
        me.setMaskNumber(180);
        me.setRange(8);
        me.setThreshold(10000);
        me.setMu1(0.5);
        me.setMu2(0.5);
        me.setSampleStep(4);
        tracker.setMovingEdge(me);
      }

#ifdef VISP_HAVE_MODULE_KLT
      if (opt_tracker == 1 || opt_tracker == 2) {
        vpKltOpencv klt_settings;
        klt_settings.setMaxFeatures(300);
        klt_settings.setWindowSize(5);
        klt_settings.setQuality(0.015);
        klt_settings.setMinDistance(8);
        klt_settings.setHarrisFreeParameter(0.01);
        klt_settings.setBlockSize(3);
        klt_settings.setPyramidLevels(3);
        tracker.setKltOpencv(klt_settings);
        tracker.setKltMaskBorder(5);
      }
#endif
    }

    tracker.setCameraParameters(cam);

    tracker.loadModel(objectname + ".cao");
    tracker.setDisplayFeatures(true);
    tracker.setOgreVisibilityTest(opt_use_ogre);
    tracker.setScanLineVisibilityTest(opt_use_scanline);
    tracker.setProjectionErrorComputation(true);
    tracker.setProjectionErrorDisplay(opt_display_projection_error);

#if (defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
    std::string detectorName = "SIFT";
    std::string extractorName = "SIFT";
    std::string matcherName = "BruteForce";
#else
    std::string detectorName = "FAST";
    std::string extractorName = "ORB";
    std::string matcherName = "BruteForce-Hamming";
#endif
    vpKeyPoint keypoint;
    if (opt_learn || opt_auto_init) {
      keypoint.setDetector(detectorName);
      keypoint.setExtractor(extractorName);
      keypoint.setMatcher(matcherName);
#if !(defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
# if (VISP_HAVE_OPENCV_VERSION < 0x030000)
      keypoint.setDetectorParameter("ORB", "nLevels", 1);
# else
      cv::Ptr<cv::ORB> orb_detector = keypoint.getDetector("ORB").dynamicCast<cv::ORB>();
      if (orb_detector) {
        orb_detector->setNLevels(1);
      }
# endif
#endif
    }

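    // With --auto_init, reuse previously learned keypoints; otherwise ask the user to click the initial pose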
    if (opt_auto_init) {
      if (!vpIoTools::checkFilename(opt_learning_data)) {
        std::cout << "Cannot enable auto detection. Learning file \"" << opt_learning_data << "\" doesn't exist" << std::endl;
        return EXIT_FAILURE;
      }
      keypoint.loadLearningData(opt_learning_data, true);
    } else {
      tracker.initClick(I, objectname + ".init", true);
    }

    bool learn_position = false;
    bool run_auto_init = false;
    if (opt_auto_init) {
      run_auto_init = true;
    }

    // To be able to display keypoints matching with test-detection-rs2
    int learn_id = 1;
    unsigned int learn_cpt = 0;
    bool quit = false;
    bool tracking_failed = false;

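    // Main loop: acquire a frame, (re)initialize when needed, track, check the projection error, display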
    while (!quit) {
      double t_begin = vpTime::measureTimeMs();
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
      g.acquire(I);
#elif defined(VISP_HAVE_OPENCV)
      g >> frame;
      vpImageConvert::convert(frame, I);
#endif
      vpDisplay::display(I);

      // Run auto initialization from learned data
      if (run_auto_init) {
        tracking_failed = false;
        if (keypoint.matchPoint(I, cam, cMo)) {
          std::cout << "Auto init succeeded" << std::endl;
          tracker.initFromPose(I, cMo);
        } else {
          vpDisplay::flush(I);
          continue;
        }
      } else if (tracking_failed) {
        // Manual init
        tracking_failed = false;
        tracker.initClick(I, objectname + ".init", true);
      }

      // Run the tracker
      try {
        if (run_auto_init) {
          // Turn feature display off right after auto init, to avoid showing wrong moving edges if the tracker fails
          tracker.setDisplayFeatures(false);

          run_auto_init = false;
        }
        tracker.track(I);
      } catch (const vpException &e) {
        std::cout << "Tracker exception: " << e.getStringMessage() << std::endl;
        tracking_failed = true;
        if (opt_auto_init) {
          std::cout << "Tracker needs to restart (tracking exception)" << std::endl;
          run_auto_init = true;
        }
      }

      if (!tracking_failed) {
        // Check tracking errors
        double proj_error = 0;
        if (tracker.getTrackerType() & vpMbGenericTracker::EDGE_TRACKER) {
          // Projection error computed by the moving-edge tracker during tracking
          proj_error = tracker.getProjectionError();
        } else {
          tracker.getPose(cMo);
          tracker.getCameraParameters(cam);
          proj_error = tracker.computeCurrentProjectionError(I, cMo, cam);
        }
        if (proj_error > opt_proj_error_threshold) {
          std::cout << "Tracker needs to restart (projection error detected: " << proj_error << ")" << std::endl;
          if (opt_auto_init) {
            run_auto_init = true;
          }
          tracking_failed = true;
        }
      }

      if (!tracking_failed) {
        tracker.setDisplayFeatures(true);
        tracker.getPose(cMo);
        tracker.getCameraParameters(cam);
        tracker.display(I, cMo, cam, vpColor::green, 2, false);
        vpDisplay::displayFrame(I, cMo, cam, 0.025, vpColor::none, 3);

        { // Display estimated pose in [m] and [deg]
          vpPoseVector pose(cMo);
          std::stringstream ss;
          ss << "Translation: " << std::setprecision(5) << pose[0] << " " << pose[1] << " " << pose[2] << " [m]";
          vpDisplay::displayText(I, 80, 20, ss.str(), vpColor::green);
          ss.str(""); // erase ss
          ss << "Rotation tu: " << std::setprecision(4) << vpMath::deg(pose[3]) << " " << vpMath::deg(pose[4]) << " " << vpMath::deg(pose[5]) << " [deg]";
          vpDisplay::displayText(I, 100, 20, ss.str(), vpColor::green);
        }
      }

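      // When a left click requested learning, detect keypoints, keep those on visible model faces, and store them with their 3D coordinates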
      if (learn_position) {
        learn_cpt++;
        // Detect keypoints on the current image
        std::vector<cv::KeyPoint> trainKeyPoints;
        keypoint.detect(I, trainKeyPoints);

        // Keep only keypoints on the object
        std::vector<vpPolygon> polygons;
        std::vector<std::vector<vpPoint> > roisPt;
        std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
        polygons = pair.first;
        roisPt = pair.second;

        // Compute the 3D coordinates
        std::vector<cv::Point3f> points3f;
        vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

        // Build the reference keypoints
        keypoint.buildReference(I, trainKeyPoints, points3f, true, learn_id++);

        // Display learned data
        for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
          vpDisplay::displayCross(I, (int)it->pt.y, (int)it->pt.x, 10, vpColor::yellow, 3);
        }
        learn_position = false;
        std::cout << "Data learned" << std::endl;
      }

      std::stringstream ss;
      ss << "Loop time: " << vpTime::measureTimeMs() - t_begin << " ms";
      vpDisplay::displayText(I, 20, 20, ss.str(), vpColor::red);
      if (opt_learn)
        vpDisplay::displayText(I, 35, 20, "Left click: learn  Right click: quit", vpColor::red);
      else if (opt_auto_init)
        vpDisplay::displayText(I, 35, 20, "Left click: auto_init  Right click: quit", vpColor::red);
      else
        vpDisplay::displayText(I, 35, 20, "Right click: quit", vpColor::red);

      vpMouseButton::vpMouseButtonType button;
      if (vpDisplay::getClick(I, button, false)) {
        if (button == vpMouseButton::button3) {
          quit = true;
        } else if (button == vpMouseButton::button1 && opt_learn) {
          learn_position = true;
        } else if (button == vpMouseButton::button1 && opt_auto_init && !opt_learn) {
          run_auto_init = true;
        }
      }

      vpDisplay::flush(I);
    }
    if (opt_learn && learn_cpt) {
      std::cout << "Save learning from " << learn_cpt << " images in file: " << opt_learning_data << std::endl;
      keypoint.saveLearningData(opt_learning_data, true, true);
    }

    delete display;
  } catch (const vpException &e) {
    std::cout << "Catch a ViSP exception: " << e << std::endl;
  }
#elif defined(VISP_HAVE_OPENCV)
  (void)argc;
  (void)argv;
  std::cout << "Install a 3rd party dedicated to frame grabbing (dc1394, cmu1394, v4l2, OpenCV, FlyCapture, Realsense2), configure and build ViSP again to use this example" << std::endl;
#else
  (void)argc;
  (void)argv;
  std::cout << "Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
#endif
}
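A minimal usage sketch, assuming the executable built from this file is named tutorial-mb-generic-tracker-live and is launched from a directory containing model/teabox/teabox.cao and model/teabox/teabox.init (names taken from the defaults above; adapt them to your build):

  # First run: track the object and left-click a few times to learn keypoints
  ./tutorial-mb-generic-tracker-live --tracker 2 --learn

  # Later runs: recover the initial pose automatically from the learned data
  ./tutorial-mb-generic-tracker-live --tracker 2 --auto_init --learning_data learning/data-learned.bin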