Visual Servoing Platform  version 3.6.1 under development (2025-02-18)
testKeyPoint-2.cpp
/*
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2024 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See https://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Test keypoint matching and pose estimation.
 */

#include <iostream>

#include <visp3/core/vpConfig.h>

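// This test is only built when OpenCV provides the required modules: imgproc and video in all
// cases, plus calib3d and features2d before OpenCV 5, or their successors 3d and features from
// OpenCV 5 onwards.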
#if defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO) && \
  (((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_CALIB3D) && defined(HAVE_OPENCV_FEATURES2D)) || \
   ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_3D) && defined(HAVE_OPENCV_FEATURES)))

#include <visp3/core/vpImage.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/gui/vpDisplayFactory.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/io/vpParseArgv.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/mbt/vpMbEdgeTracker.h>
#include <visp3/vision/vpKeyPoint.h>

// List of allowed command line options
#define GETOPTARGS "cdph"

#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif

void usage(const char *name, const char *badparam);
bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display, bool &use_parallel_ransac);

void usage(const char *name, const char *badparam)
{
  fprintf(stdout, "\n\
Test keypoints matching.\n\
\n\
SYNOPSIS\n\
  %s [-c] [-d] [-p] [-h]\n",
          name);

  fprintf(stdout, "\n\
OPTIONS: \n\
\n\
  -c\n\
     Disable the mouse click. Useful to automate the \n\
     execution of this program without human intervention.\n\
\n\
  -d \n\
     Turn off the display.\n\
\n\
  -p \n\
     Use parallel RANSAC.\n\
\n\
  -h\n\
     Print the help.\n");

  if (badparam)
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}

bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display, bool &use_parallel_ransac)
{
  const char *optarg_;
  int c;
  while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {

    switch (c) {
    case 'c':
      click_allowed = false;
      break;
    case 'd':
      display = false;
      break;
    case 'p':
      use_parallel_ransac = true;
      break;
    case 'h':
      usage(argv[0], nullptr);
      return false;
      break;

    default:
      usage(argv[0], optarg_);
      return false;
      break;
    }
  }

  if ((c == 1) || (c == -1)) {
    // standalone param or error
    usage(argv[0], nullptr);
    std::cerr << "ERROR: " << std::endl;
    std::cerr << "  Bad argument " << optarg_ << std::endl << std::endl;
    return false;
  }

  return true;
}

template <typename Type>
void run_test(const std::string &env_ipath, bool opt_click_allowed, bool opt_display, bool use_parallel_ransac,
              vpImage<Type> &I, vpImage<Type> &IMatching)
{
#if VISP_HAVE_DATASET_VERSION >= 0x030600
  std::string ext("png");
#else
  std::string ext("pgm");
#endif
  // Set the path location of the image sequence
  std::string dirname = vpIoTools::createFilePath(env_ipath, "mbt/cube");

  // Build the name of the image files
  std::string filenameRef = vpIoTools::createFilePath(dirname, "image0000." + ext);
  vpImageIo::read(I, filenameRef);
  std::string filenameCur = vpIoTools::createFilePath(dirname, "image%04d." + ext);

  vpDisplay *display = nullptr;

  if (opt_display) {
#ifdef VISP_HAVE_DISPLAY
    display = vpDisplayFactory::allocateDisplay(I, 0, 0, "ORB keypoints matching and pose estimation");
    display->setDownScalingFactor(vpDisplay::SCALE_AUTO);
#else
    std::cout << "No image viewer is available..." << std::endl;
#endif
  }

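  // The model-based edge tracker is not what is being tested here: it only serves to provide the
  // object pose (and the visible faces of the CAD model) that are needed later to assign 3D
  // coordinates to the detected keypoints when the reference database is built.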
  vpCameraParameters cam;
  vpMbEdgeTracker tracker;
  // Load config for tracker
  std::string tracker_config_file = vpIoTools::createFilePath(env_ipath, "mbt/cube.xml");

#if defined(VISP_HAVE_PUGIXML)
  tracker.loadConfigFile(tracker_config_file);
  tracker.getCameraParameters(cam);
#else
  // Corresponding parameters manually set to have an example code
  vpMe me;
  me.setMaskSize(5);
  me.setMaskNumber(180);
  me.setRange(8);
  me.setLikelihoodThresholdType(vpMe::NORMALIZED_THRESHOLD);
  me.setThreshold(20);
  me.setMu1(0.5);
  me.setMu2(0.5);
  me.setSampleStep(4);
  me.setNbTotalSample(250);
  tracker.setMovingEdge(me);
  cam.initPersProjWithoutDistortion(547.7367575, 542.0744058, 338.7036994, 234.5083345);
  tracker.setCameraParameters(cam);
  tracker.setNearClippingDistance(0.01);
  tracker.setFarClippingDistance(100.0);
#endif

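  // Appearance/disappearance angles close to 90 degrees keep almost every front-facing face of
  // the model flagged as visible, so keypoints lying on any visible face of the cube can be kept
  // for training.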
  tracker.setAngleAppear(vpMath::rad(89));
  tracker.setAngleDisappear(vpMath::rad(89));

  // Load CAO model
  std::string cao_model_file = vpIoTools::createFilePath(env_ipath, "mbt/cube.cao");
  tracker.loadModel(cao_model_file);

  // Initialize the pose
  std::string init_file = vpIoTools::createFilePath(env_ipath, "mbt/cube.init");
  if (opt_display && opt_click_allowed) {
    tracker.initClick(I, init_file);
  }
  else {
    vpHomogeneousMatrix cMoi(0.02044769891, 0.1101505452, 0.5078963719, 2.063603907, 1.110231561, -0.4392789872);
    tracker.initFromPose(I, cMoi);
  }

  // Get the init pose
  vpHomogeneousMatrix cMo;
  tracker.getPose(cMo);

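  // vpKeyPoint is configured with an OpenCV detector name, a descriptor extractor name and a
  // matcher name: ORB keypoints and descriptors, matched with brute-force Hamming distance here,
  // before the matcher is switched to FLANN just below.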
  // Init keypoints
  vpKeyPoint keypoints("ORB", "ORB", "BruteForce-Hamming");
  keypoints.setRansacParallel(use_parallel_ransac);
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400)
  // Bug when using LSH index with FLANN and OpenCV 2.3.1.
  // see http://code.opencv.org/issues/1741 (Bug #1741)
  keypoints.setMatcher("FlannBased");
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
  keypoints.setDetectorParameter("ORB", "nLevels", 1);
#else
  cv::Ptr<cv::ORB> orb_detector = keypoints.getDetector("ORB").dynamicCast<cv::ORB>();
  if (orb_detector) {
    orb_detector->setNLevels(1);
  }
#endif
#endif

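  // Training stage: detect keypoints on the reference image, keep only those lying on the visible
  // faces of the cube, back-project them to 3D object coordinates using the known pose cMo, and
  // store them as the reference database.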
  // Detect keypoints on the current image
  std::vector<cv::KeyPoint> trainKeyPoints;
  double elapsedTime;
  keypoints.detect(I, trainKeyPoints, elapsedTime);

  // Keep only keypoints on the cube
  std::vector<vpPolygon> polygons;
  std::vector<std::vector<vpPoint> > roisPt;
  std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair =
    tracker.getPolygonFaces(true); // To detect an issue with CI
  polygons = pair.first;
  roisPt = pair.second;

  // Compute the 3D coordinates
  std::vector<cv::Point3f> points3f;
  vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

  // Build the reference keypoints
  keypoints.buildReference(I, trainKeyPoints, points3f, false, 1);

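  // The reference database is then extended from two additional viewpoints (frames 150 and 200 of
  // the sequence); buildReference() is called with append set to true and a different class id
  // each time, so training keypoints from several views of the cube are available for matching.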
  // Read image 150
  filenameRef = vpIoTools::createFilePath(dirname, "image0150." + ext);
  vpImageIo::read(I, filenameRef);

  // Init pose at image 150
  cMo.buildFrom(0.02651282185, -0.03713587374, 0.6873765919, 2.314744454, 0.3492296488, -0.1226054828);
  tracker.initFromPose(I, cMo);

  // Detect keypoints on the image 150
  keypoints.detect(I, trainKeyPoints, elapsedTime);

  // Keep only keypoints on the cube
  pair = tracker.getPolygonFaces(true, true,
                                 true); // To detect an issue with CI
  polygons = pair.first;
  roisPt = pair.second;

  // Compute the 3D coordinates
  vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

  // Build the reference keypoints
  keypoints.buildReference(I, trainKeyPoints, points3f, true, 2);

  // Read image 200
  filenameRef = vpIoTools::createFilePath(dirname, "image0200." + ext);
  vpImageIo::read(I, filenameRef);

  // Init pose at image 200
  cMo.buildFrom(0.02965448956, -0.07283091786, 0.7253526051, 2.300529617, -0.4286674806, 0.1788761025);
  tracker.initFromPose(I, cMo);

  // Detect keypoints on the image 200
  keypoints.detect(I, trainKeyPoints, elapsedTime);

  // Keep only keypoints on the cube
  pair = tracker.getPolygonFaces(false); // To detect an issue with CI
  polygons = pair.first;
  roisPt = pair.second;

  // Compute the 3D coordinates
  vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

  // Build the reference keypoints
  keypoints.buildReference(I, trainKeyPoints, points3f, true, 3);

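  // Test stage: replay the same image sequence and, for each frame, match the detected keypoints
  // against the reference database in order to estimate the pose of the cube.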
  // Init reader for getting the input image sequence
  vpVideoReader g;
  g.setFileName(filenameCur);
  g.open(I);
  g.acquire(I);

  vpDisplay *display2 = nullptr;

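  // IMatching is a larger canvas holding the training image(s) next to the current frame;
  // createImageMatching() allocates it and insertImageMatching() copies the current frame into it
  // at every iteration, so that the correspondences drawn by displayMatching() can be visualized.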
  keypoints.createImageMatching(I, IMatching);

  if (opt_display) {
#ifdef VISP_HAVE_DISPLAY
    display2 = vpDisplayFactory::allocateDisplay(IMatching, 0, (int)I.getHeight() / vpDisplay::getDownScalingFactor(I) + 30, "IMatching");
#endif
  }


  bool opt_click = false;
  double error;
  vpMouseButton::vpMouseButtonType button;
  std::vector<double> times_vec;
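  // When the display is disabled only the first 30 frames are processed, presumably to keep
  // automated runs of the test short.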
  while ((opt_display && !g.end()) || (!opt_display && g.getFrameIndex() < 30)) {
    g.acquire(I);

    if (opt_display) {
      vpDisplay::display(I);

      // Display image matching
      keypoints.insertImageMatching(I, IMatching);

      vpDisplay::display(IMatching);
    }

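    // matchPoint() matches the keypoints of the current frame against the reference database and
    // estimates the pose cMo from the resulting 2D-3D correspondences with a RANSAC scheme; it
    // also returns the pose estimation error and the computation time of the current iteration.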
    // Match keypoints and estimate the pose
    if (keypoints.matchPoint(I, cam, cMo, error, elapsedTime)) {
      times_vec.push_back(elapsedTime);

      tracker.setCameraParameters(cam);
      tracker.setPose(I, cMo);

      if (opt_display) {
        tracker.display(I, cMo, cam, vpColor::red, 2);
        vpDisplay::displayFrame(I, cMo, cam, 0.025, vpColor::none, 3);

        std::vector<vpImagePoint> ransacInliers = keypoints.getRansacInliers();
        std::vector<vpImagePoint> ransacOutliers = keypoints.getRansacOutliers();

        for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
          vpImagePoint imPt(*it);
          imPt.set_u(imPt.get_u() + I.getWidth());
          imPt.set_v(imPt.get_v() + I.getHeight());
          vpDisplay::displayCircle(IMatching, imPt, 4, vpColor::green);
        }

        for (std::vector<vpImagePoint>::const_iterator it = ransacOutliers.begin(); it != ransacOutliers.end(); ++it) {
          vpImagePoint imPt(*it);
          imPt.set_u(imPt.get_u() + I.getWidth());
          imPt.set_v(imPt.get_v() + I.getHeight());
          vpDisplay::displayCircle(IMatching, imPt, 4, vpColor::red);
        }

        keypoints.displayMatching(I, IMatching);

        // Display model in the correct sub-image in IMatching
        vpCameraParameters cam2;
        cam2.initPersProjWithoutDistortion(cam.get_px(), cam.get_py(), cam.get_u0() + I.getWidth(),
                                           cam.get_v0() + I.getHeight());
        tracker.setCameraParameters(cam2);
        tracker.setPose(IMatching, cMo);
        tracker.display(IMatching, cMo, cam2, vpColor::red, 2);
        vpDisplay::displayFrame(IMatching, cMo, cam2, 0.025, vpColor::none, 3);
      }
    }

    if (opt_display) {
      vpDisplay::flush(I);
      vpDisplay::flush(IMatching);
    }

    if (opt_click_allowed && opt_display) {
      // Click requested to process next image
      if (opt_click) {
        vpDisplay::getClick(I, button, true);
        if (button == vpMouseButton::button3) {
          opt_click = false;
        }
      }
      else {
        // Use right click to enable/disable step by step tracking
        if (vpDisplay::getClick(I, button, false)) {
          if (button == vpMouseButton::button3) {
            opt_click = true;
          }
          else if (button == vpMouseButton::button1) {
            break;
          }
        }
      }
    }
  }

  if (display) {
    delete display;
  }
  if (display2) {
    delete display2;
  }

  if (!times_vec.empty()) {
    std::cout << "Computation time, Mean: " << vpMath::getMean(times_vec)
              << " ms ; Median: " << vpMath::getMedian(times_vec) << " ms ; Std: " << vpMath::getStdev(times_vec)
              << std::endl;
  }
}

int main(int argc, const char **argv)
{
  try {
    std::string env_ipath;
    bool opt_click_allowed = true;
    bool opt_display = true;
    bool use_parallel_ransac = false;

    // Read the command line options
    if (getOptions(argc, argv, opt_click_allowed, opt_display, use_parallel_ransac) == false) {
      return EXIT_FAILURE;
    }

    // Get the visp-images-data package path or VISP_INPUT_IMAGE_PATH
    // environment variable value
    env_ipath = vpIoTools::getViSPImagesDataPath();

    if (env_ipath.empty()) {
      std::cerr << "Please set the VISP_INPUT_IMAGE_PATH environment "
                   "variable value."
                << std::endl;
      return EXIT_FAILURE;
    }

    {
      vpImage<unsigned char> I, IMatching;

      std::cout << "-- Test on gray level images" << std::endl;

      run_test(env_ipath, opt_click_allowed, opt_display, use_parallel_ransac, I, IMatching);
    }
    {
      vpImage<vpRGBa> I, IMatching;

      std::cout << "-- Test on color images" << std::endl;

      run_test(env_ipath, opt_click_allowed, opt_display, use_parallel_ransac, I, IMatching);
    }

  }
  catch (const vpException &e) {
    std::cerr << e.what() << std::endl;
    return EXIT_FAILURE;
  }

  std::cout << "testKeyPoint-2 is ok !" << std::endl;
  return EXIT_SUCCESS;
}
#else
int main()
{
  std::cerr << "This test requires OpenCV built with the imgproc, video, calib3d and features2d modules (3d and features with OpenCV >= 5)." << std::endl;

  return EXIT_SUCCESS;
}

#endif