Visual Servoing Platform version 3.6.1 under development (2025-02-19)
vpKeyPoint.h
1 /*
2  * ViSP, open source Visual Servoing Platform software.
3  * Copyright (C) 2005 - 2024 by Inria. All rights reserved.
4  *
5  * This software is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  * See the file LICENSE.txt at the root directory of this source
10  * distribution for additional information about the GNU GPL.
11  *
12  * For using ViSP with software that can not be combined with the GNU
13  * GPL, please contact Inria about acquiring a ViSP Professional
14  * Edition License.
15  *
16  * See https://visp.inria.fr for more information.
17  *
18  * This software was developed at:
19  * Inria Rennes - Bretagne Atlantique
20  * Campus Universitaire de Beaulieu
21  * 35042 Rennes Cedex
22  * France
23  *
24  * If you have questions regarding the use of this file, please contact
25  * Inria at visp@inria.fr
26  *
27  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29  *
30  * Description:
31  * Key point functionalities.
32  */
33 #ifndef VP_KEYPOINT_H
34 #define VP_KEYPOINT_H
35 
36 #include <visp3/core/vpConfig.h>
37 
38 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_CALIB3D) && defined(HAVE_OPENCV_FEATURES2D)) || \
39  ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_3D) && defined(HAVE_OPENCV_FEATURES))
40 
41 #include <algorithm> // std::transform
42 #include <float.h> // DBL_MAX
43 #include <fstream> // std::ofstream
44 #include <limits>
45 #include <map> // std::map
46 #include <numeric> // std::accumulate
47 #include <stdlib.h> // srand, rand
48 #include <time.h> // time
49 #include <vector> // std::vector
50 
51 #include <visp3/core/vpDisplay.h>
52 #include <visp3/core/vpImageConvert.h>
53 #include <visp3/core/vpPixelMeterConversion.h>
54 #include <visp3/core/vpPlane.h>
55 #include <visp3/core/vpPoint.h>
56 #include <visp3/vision/vpBasicKeyPoint.h>
57 #include <visp3/vision/vpPose.h>
58 #ifdef VISP_HAVE_MODULE_IO
59 #include <visp3/io/vpImageIo.h>
60 #endif
61 #include <visp3/core/vpConvert.h>
62 #include <visp3/core/vpCylinder.h>
63 #include <visp3/core/vpMeterPixelConversion.h>
64 #include <visp3/core/vpPolygon.h>
65 #include <visp3/vision/vpXmlConfigParserKeyPoint.h>
66 
67 #include <opencv2/core/core.hpp>
68 
69 #if defined(HAVE_OPENCV_FEATURES2D)
70 #include <opencv2/features2d/features2d.hpp>
71 #endif
72 
73 #if defined(HAVE_OPENCV_XFEATURES2D)
74 #include <opencv2/xfeatures2d.hpp>
75 #endif
76 
77 #if defined(HAVE_OPENCV_IMGPROC)
78 #include <opencv2/imgproc/imgproc.hpp>
79 #endif
80 
81 #if defined(HAVE_OPENCV_NONFREE)
82 #include <opencv2/nonfree/nonfree.hpp>
83 #endif
84 
85 BEGIN_VISP_NAMESPACE
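/*
 * A minimal usage sketch (assuming the ViSP io module is available and that
 * "reference.png" / "current.png" are placeholder file names): build the
 * reference keypoints from one grayscale image, then match a second image
 * against it.
 *
 *   #include <iostream>
 *   #include <visp3/io/vpImageIo.h>
 *   #include <visp3/vision/vpKeyPoint.h>
 *
 *   int main()
 *   {
 *     vpImage<unsigned char> Iref, Icur;
 *     vpImageIo::read(Iref, "reference.png");
 *     vpImageIo::read(Icur, "current.png");
 *
 *     vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
 *     unsigned int nbRef = keypoint.buildReference(Iref);  // detect + extract on the reference image
 *     unsigned int nbMatches = keypoint.matchPoint(Icur);  // detect, extract and match on the current image
 *     std::cout << nbMatches << " matches out of " << nbRef << " reference keypoints" << std::endl;
 *     return 0;
 *   }
 */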
266 class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint
267 {
268 public:
271  {
272  constantFactorDistanceThreshold,
274  stdDistanceThreshold,
276  ratioDistanceThreshold,
279  stdAndRatioDistanceThreshold,
281  noFilterMatching
282  };
283 
286  {
287  detectionThreshold,
289  detectionScore
292  };
293 
295  typedef enum
296  {
 297  jpgImageFormat,
 298  pngImageFormat,
 299  ppmImageFormat,
 300  pgmImageFormat
301  } vpImageFormatType;
302 
305  {
306 #if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
307 # if defined(HAVE_OPENCV_FEATURES)
308  DETECTOR_FAST,
309  DETECTOR_GFTT,
310  DETECTOR_MSER,
311  DETECTOR_ORB,
312  DETECTOR_SIFT,
313  DETECTOR_SimpleBlob,
314 # endif
315 # if defined(HAVE_OPENCV_XFEATURES2D)
316  DETECTOR_AGAST,
317  DETECTOR_AKAZE,
318  DETECTOR_BRISK,
319  DETECTOR_KAZE,
320  DETECTOR_MSD,
321  DETECTOR_STAR,
322 # endif
323 # if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
324  DETECTOR_SURF,
325 # endif
326 #else // OpenCV < 5.0.0
327 # if defined(HAVE_OPENCV_FEATURES2D)
 328  DETECTOR_BRISK,      //!< BRISK detector
 329  DETECTOR_FAST,       //!< FAST detector
 330  DETECTOR_GFTT,       //!< GFTT detector
 331  DETECTOR_MSER,       //!< MSER detector
 332  DETECTOR_ORB,        //!< ORB detector
 333  DETECTOR_SimpleBlob, //!< SimpleBlob detector
 334 # if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
 335  DETECTOR_AGAST,      //!< AGAST detector
 336  DETECTOR_AKAZE,      //!< AKAZE detector
 337  DETECTOR_KAZE,       //!< KAZE detector
 338 # endif
339 # endif
340 # if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
341  DETECTOR_MSD,
342 # endif
343 # if ((VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)) && defined(HAVE_OPENCV_FEATURES2D)
344  DETECTOR_SIFT,
345 # endif
346 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
347  DETECTOR_STAR,
348 # endif
349 # if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
350  DETECTOR_SURF,
351 # endif
352 #endif
353 
354  DETECTOR_TYPE_SIZE
355  };
356 
359  {
360 #if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
361 # if defined(HAVE_OPENCV_FEATURES)
362  DESCRIPTOR_ORB,
363  DESCRIPTOR_SIFT,
364 # endif
365 # if defined(HAVE_OPENCV_XFEATURES2D)
366  DESCRIPTOR_AKAZE,
367  DESCRIPTOR_BRISK,
368  DESCRIPTOR_BoostDesc,
369  DESCRIPTOR_BRIEF,
370  DESCRIPTOR_DAISY,
371  DESCRIPTOR_FREAK,
372  DESCRIPTOR_KAZE,
373  DESCRIPTOR_LATCH,
374  DESCRIPTOR_VGG,
375 # endif
376 # if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
377  DESCRIPTOR_SURF,
378 # endif
379 #else // opencv < 5.0.0
380 # if defined(HAVE_OPENCV_FEATURES2D)
 381  DESCRIPTOR_BRISK, //!< BRISK descriptor
 382  DESCRIPTOR_ORB,   //!< ORB descriptor
 383 # if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
 384  DESCRIPTOR_AKAZE, //!< AKAZE descriptor
 385  DESCRIPTOR_KAZE,  //!< KAZE descriptor
 386 # endif
387 # endif
388 # if defined(HAVE_OPENCV_XFEATURES2D)
 389  DESCRIPTOR_BRIEF, //!< BRIEF descriptor
 390  DESCRIPTOR_DAISY, //!< DAISY descriptor
 391  DESCRIPTOR_FREAK, //!< FREAK descriptor
 392  DESCRIPTOR_LATCH, //!< LATCH descriptor
 393 # endif
394 # if ((VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)) && defined(HAVE_OPENCV_FEATURES2D)
395  DESCRIPTOR_SIFT,
396 # endif
397 # if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
398  DESCRIPTOR_SURF,
399 # endif
400 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
401  DESCRIPTOR_BoostDesc,
402  DESCRIPTOR_VGG,
403 # endif
404 #endif
405 
406  DESCRIPTOR_TYPE_SIZE
407  };
408 
418  vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
419  const std::string &matcherName, const vpFilterMatchingType &filterType = ratioDistanceThreshold);
420 
430  vpKeyPoint(const std::string &detectorName = "ORB", const std::string &extractorName = "ORB",
431  const std::string &matcherName = "BruteForce-Hamming",
432  const vpFilterMatchingType &filterType = ratioDistanceThreshold);
433 
443  vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
444  const std::string &matcherName = "BruteForce",
445  const vpFilterMatchingType &filterType = ratioDistanceThreshold);
446 
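/*
 * The detector, extractor and matcher can be chosen either through the enums
 * above or through their OpenCV names (availability depends on how OpenCV was
 * built). A sketch of both forms:
 *
 *   vpKeyPoint byEnum(vpKeyPoint::DETECTOR_ORB, vpKeyPoint::DESCRIPTOR_ORB,
 *                     "BruteForce-Hamming", vpKeyPoint::ratioDistanceThreshold);
 *   vpKeyPoint byName("FAST", "ORB", "BruteForce-Hamming");
 */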
453  unsigned int buildReference(const vpImage<unsigned char> &I);
454 
464  unsigned int buildReference(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
465  unsigned int width);
466 
474  unsigned int buildReference(const vpImage<unsigned char> &I, const vpRect &rectangle);
475 
487  unsigned int buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
488  std::vector<cv::Point3f> &points3f, bool append = false, int class_id = -1);
489 
503  unsigned int buildReference(const vpImage<unsigned char> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
504  const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
505  bool append = false, int class_id = -1);
506 
513  unsigned int buildReference(const vpImage<vpRGBa> &I_color);
514 
524  unsigned int buildReference(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
525  unsigned int width);
526 
534  unsigned int buildReference(const vpImage<vpRGBa> &I_color, const vpRect &rectangle);
535 
547  unsigned int buildReference(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &trainKeyPoints,
548  std::vector<cv::Point3f> &points3f, bool append = false, int class_id = -1);
549 
562  unsigned int buildReference(const vpImage<vpRGBa> &I_color, const std::vector<cv::KeyPoint> &trainKeyPoints,
563  const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
564  bool append = false, int class_id = -1);
565 
581  static void compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi, const vpCameraParameters &cam,
582  const vpHomogeneousMatrix &cMo, cv::Point3f &point);
583 
599  static void compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi, const vpCameraParameters &cam,
600  const vpHomogeneousMatrix &cMo, vpPoint &point);
601 
618  static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
619  std::vector<cv::KeyPoint> &candidates,
620  const std::vector<vpPolygon> &polygons,
621  const std::vector<std::vector<vpPoint> > &roisPt,
622  std::vector<cv::Point3f> &points, cv::Mat *descriptors = nullptr);
623 
640  static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
641  std::vector<vpImagePoint> &candidates,
642  const std::vector<vpPolygon> &polygons,
643  const std::vector<std::vector<vpPoint> > &roisPt,
644  std::vector<vpPoint> &points, cv::Mat *descriptors = nullptr);
645 
661  static void
662  compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
663  std::vector<cv::KeyPoint> &candidates, const std::vector<vpCylinder> &cylinders,
664  const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
665  std::vector<cv::Point3f> &points, cv::Mat *descriptors = nullptr);
666 
682  static void
683  compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
684  std::vector<vpImagePoint> &candidates, const std::vector<vpCylinder> &cylinders,
685  const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
686  std::vector<vpPoint> &points, cv::Mat *descriptors = nullptr);
687 
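/*
 * Sketch of a training stage with 3D information, assuming `keypoint` is a
 * configured vpKeyPoint, `Iref` a reference image, `cMo_ref` the known object
 * pose in that image, `cam` the camera parameters, and `polygons` / `roisPt`
 * descriptions of the visible object faces (all these names are illustrative).
 * The detected keypoints lying on the faces are kept, lifted to 3D and stored
 * as the reference used later for pose estimation:
 *
 *   std::vector<cv::KeyPoint> trainKeyPoints;
 *   keypoint.detect(Iref, trainKeyPoints);
 *
 *   std::vector<cv::Point3f> points3f;
 *   vpKeyPoint::compute3DForPointsInPolygons(cMo_ref, cam, trainKeyPoints,
 *                                            polygons, roisPt, points3f);
 *   keypoint.buildReference(Iref, trainKeyPoints, points3f);
 */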
701  bool computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
702  const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
703  double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = nullptr);
704 
717  bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
718  double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = nullptr);
719 
733  bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
734  std::vector<unsigned int> &inlierIndex, double &elapsedTime,
735  bool (*func)(const vpHomogeneousMatrix &) = nullptr);
736 
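/*
 * Pose computation can also be called directly on already matched 2D-3D
 * correspondences. Sketch, assuming `keypoint` is a vpKeyPoint instance,
 * `imagePoints` (std::vector<cv::Point2f>) and `objectPoints`
 * (std::vector<cv::Point3f>) are filled, and `cam` holds the camera intrinsics:
 *
 *   vpHomogeneousMatrix cMo;
 *   std::vector<int> inlierIndex;
 *   double elapsedTime = 0.;
 *   bool success = keypoint.computePose(imagePoints, objectPoints, cam, cMo,
 *                                       inlierIndex, elapsedTime);
 */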
745  void createImageMatching(vpImage<unsigned char> &IRef, vpImage<unsigned char> &ICurrent,
746  vpImage<unsigned char> &IMatching);
747 
757  void createImageMatching(vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching);
758 
767  void createImageMatching(vpImage<unsigned char> &IRef, vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);
768 
778  void createImageMatching(vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);
779 
787  void detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints,
788  const vpRect &rectangle = vpRect());
789 
797  void detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle = vpRect());
798 
806  void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask = cv::Mat());
807 
816  void detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
817  const vpRect &rectangle = vpRect());
818 
827  void detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
828  const vpRect &rectangle = vpRect());
829 
838  void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
839  const cv::Mat &mask = cv::Mat());
840 
855  void detectExtractAffine(const vpImage<unsigned char> &I, std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
856  std::vector<cv::Mat> &listOfDescriptors,
857  std::vector<vpImage<unsigned char> > *listOfAffineI = nullptr);
858 
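/*
 * Sketch of the affine-simulation (ASIFT-like) interface, assuming `keypoint`
 * is a vpKeyPoint instance and `I` a loaded grayscale image;
 * setUseAffineDetection(true) enables the same affine simulation inside the
 * matching pipeline:
 *
 *   std::vector<std::vector<cv::KeyPoint> > listOfKeypoints;
 *   std::vector<cv::Mat> listOfDescriptors;
 *   keypoint.detectExtractAffine(I, listOfKeypoints, listOfDescriptors);
 */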
866  void display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size = 3);
867 
875  void display(const vpImage<unsigned char> &ICurrent, unsigned int size = 3, const vpColor &color = vpColor::green);
876 
884  void display(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, unsigned int size = 3);
885 
893  void display(const vpImage<vpRGBa> &ICurrent, unsigned int size = 3, const vpColor &color = vpColor::green);
894 
906  void displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching, unsigned int crossSize,
907  unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
908 
920  void displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
921  const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
922  unsigned int crossSize = 3, unsigned int lineThickness = 1);
923 
935  void displayMatching(const vpImage<unsigned char> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
936  unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
937 
949  void displayMatching(const vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
950  unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
951 
963  void displayMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching,
964  const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
965  unsigned int crossSize = 3, unsigned int lineThickness = 1);
966 
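/*
 * Sketch of displaying the matches side by side, assuming `keypoint` has
 * already matched `Icur` against the reference `Iref`, and that a display
 * (vpDisplayX, vpDisplayGDI, ...) is initialized on IMatching once allocated:
 *
 *   vpImage<unsigned char> IMatching;
 *   keypoint.createImageMatching(Iref, Icur, IMatching); // allocate the side-by-side canvas
 *   keypoint.insertImageMatching(Iref, Icur, IMatching); // copy both images into it
 *   vpDisplay::display(IMatching);
 *   keypoint.displayMatching(Iref, IMatching, 3);        // crosses of 3 pixels and lines between matches
 *   vpDisplay::flush(IMatching);
 */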
977  void extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
978  std::vector<cv::Point3f> *trainPoints = nullptr);
979 
990  void extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
991  std::vector<cv::Point3f> *trainPoints = nullptr);
992 
1003  void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1004  std::vector<cv::Point3f> *trainPoints = nullptr);
1005 
1017  void extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1018  double &elapsedTime, std::vector<cv::Point3f> *trainPoints = nullptr);
1019 
1031  void extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1032  double &elapsedTime, std::vector<cv::Point3f> *trainPoints = nullptr);
1033 
1045  void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors, double &elapsedTime,
1046  std::vector<cv::Point3f> *trainPoints = nullptr);
1047 
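/*
 * Detection and extraction can also be called separately, for instance to
 * inspect the raw OpenCV keypoints and descriptors; `keypoint` is an assumed
 * vpKeyPoint instance and `I` a loaded grayscale image:
 *
 *   std::vector<cv::KeyPoint> keyPoints;
 *   cv::Mat descriptors;
 *   keypoint.detect(I, keyPoints);
 *   keypoint.extract(I, keyPoints, descriptors);
 *   std::cout << keyPoints.size() << " keypoints, descriptor size: "
 *             << descriptors.cols << std::endl;
 */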
 1057  inline vpMatrix getCovarianceMatrix() const
 1058  {
1059  if (!m_computeCovariance) {
 1060  std::cout << "Warning: The covariance matrix has not been computed. "
 1061  << "See setCovarianceComputation() to enable it."
1062  << std::endl;
1063  return vpMatrix();
1064  }
1065 
1066  if (m_computeCovariance && !m_useRansacVVS) {
 1067  std::cout << "Warning: The covariance matrix can only be computed "
 1068  << "with a Virtual Visual Servoing approach." << std::endl
 1069  << "Use setUseRansacVVS(true) to select a pose "
1070  << "estimation method based on a Virtual Visual Servoing approach." << std::endl;
1071  return vpMatrix();
1072  }
1073 
1074  return m_covarianceMatrix;
1075  }
1076 
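/*
 * The pose covariance is only available when both the covariance computation
 * and the Virtual Visual Servoing pose estimation are enabled; `keypoint` is
 * an assumed vpKeyPoint instance:
 *
 *   keypoint.setUseRansacVVS(true);
 *   keypoint.setCovarianceComputation(true);
 *   // ... after a successful matchPoint(I, cam, cMo) ...
 *   vpMatrix cov = keypoint.getCovarianceMatrix();
 */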
1082  inline double getDetectionTime() const { return m_detectionTime; }
1083 
1091  inline cv::Ptr<cv::FeatureDetector> getDetector(const vpFeatureDetectorType &type) const
1092  {
1093  std::map<vpFeatureDetectorType, std::string>::const_iterator it_name = m_mapOfDetectorNames.find(type);
1094  if (it_name == m_mapOfDetectorNames.end()) {
 1095  std::cerr << "Internal problem with the feature type and the corresponding name!" << std::endl;
       return cv::Ptr<cv::FeatureDetector>(); // avoid dereferencing an end() iterator below
 1096  }
1097 
1098  std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector =
1099  m_detectors.find(it_name->second);
1100  if (findDetector != m_detectors.end()) {
1101  return findDetector->second;
1102  }
1103 
1104  std::cerr << "Cannot find: " << it_name->second << std::endl;
1105  return cv::Ptr<cv::FeatureDetector>();
1106  }
1107 
1115  inline cv::Ptr<cv::FeatureDetector> getDetector(const std::string &name) const
1116  {
1117  std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector = m_detectors.find(name);
1118  if (findDetector != m_detectors.end()) {
1119  return findDetector->second;
1120  }
1121 
1122  std::cerr << "Cannot find: " << name << std::endl;
1123  return cv::Ptr<cv::FeatureDetector>();
1124  }
1125 
1129  inline std::map<vpFeatureDetectorType, std::string> getDetectorNames() const { return m_mapOfDetectorNames; }
1130 
1136  inline double getExtractionTime() const { return m_extractionTime; }
1137 
1145  inline cv::Ptr<cv::DescriptorExtractor> getExtractor(const vpFeatureDescriptorType &type) const
1146  {
1147  std::map<vpFeatureDescriptorType, std::string>::const_iterator it_name = m_mapOfDescriptorNames.find(type);
1148  if (it_name == m_mapOfDescriptorNames.end()) {
 1149  std::cerr << "Internal problem with the feature type and the corresponding name!" << std::endl;
       return cv::Ptr<cv::DescriptorExtractor>(); // avoid dereferencing an end() iterator below
 1150  }
1151 
1152  std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor =
1153  m_extractors.find(it_name->second);
1154  if (findExtractor != m_extractors.end()) {
1155  return findExtractor->second;
1156  }
1157 
1158  std::cerr << "Cannot find: " << it_name->second << std::endl;
1159  return cv::Ptr<cv::DescriptorExtractor>();
1160  }
1161 
1169  inline cv::Ptr<cv::DescriptorExtractor> getExtractor(const std::string &name) const
1170  {
1171  std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor = m_extractors.find(name);
1172  if (findExtractor != m_extractors.end()) {
1173  return findExtractor->second;
1174  }
1175 
1176  std::cerr << "Cannot find: " << name << std::endl;
1177  return cv::Ptr<cv::DescriptorExtractor>();
1178  }
1179 
1183  inline std::map<vpFeatureDescriptorType, std::string> getExtractorNames() const { return m_mapOfDescriptorNames; }
1184 
1190  inline vpImageFormatType getImageFormat() const { return m_imageFormat; }
1191 
1197  inline double getMatchingTime() const { return m_matchingTime; }
1198 
1204  inline cv::Ptr<cv::DescriptorMatcher> getMatcher() const { return m_matcher; }
1205 
1212  inline std::vector<cv::DMatch> getMatches() const { return m_filteredMatches; }
1213 
1221  inline std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > getMatchQueryToTrainKeyPoints() const
1222  {
 1223  std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > matchQueryToTrainKeyPoints;
       matchQueryToTrainKeyPoints.reserve(m_filteredMatches.size()); // reserve (not resize) before push_back
1224  for (size_t i = 0; i < m_filteredMatches.size(); i++) {
1225  matchQueryToTrainKeyPoints.push_back(
1226  std::pair<cv::KeyPoint, cv::KeyPoint>(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx],
1227  m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx]));
1228  }
1229  return matchQueryToTrainKeyPoints;
1230  }
1231 
1237  inline unsigned int getNbImages() const { return static_cast<unsigned int>(m_mapOfImages.size()); }
1238 
1246  void getObjectPoints(std::vector<cv::Point3f> &objectPoints) const;
1247 
1255  void getObjectPoints(std::vector<vpPoint> &objectPoints) const;
1256 
1262  inline double getPoseTime() const { return m_poseTime; }
1263 
1270  inline cv::Mat getQueryDescriptors() const { return m_queryDescriptors; }
1271 
1280  void getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints, bool matches = true) const;
1281 
1290  void getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints, bool matches = true) const;
1291 
1297  inline std::vector<vpImagePoint> getRansacInliers() const { return m_ransacInliers; }
1298 
1304  inline std::vector<vpImagePoint> getRansacOutliers() const { return m_ransacOutliers; }
1305 
1312  inline cv::Mat getTrainDescriptors() const { return m_trainDescriptors; }
1313 
1319  void getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const;
1320 
1326  void getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const;
1327 
1334  void getTrainPoints(std::vector<cv::Point3f> &points) const;
1335 
1342  void getTrainPoints(std::vector<vpPoint> &points) const;
1343 
1349  void initMatcher(const std::string &matcherName);
1350 
1359  void insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
1360  vpImage<unsigned char> &IMatching);
1361 
1369  void insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching);
1370 
1379  void insertImageMatching(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);
1380 
1388  void insertImageMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);
1389 
1395  void loadConfigFile(const std::string &configFile);
1396 
1405  void loadLearningData(const std::string &filename, bool binaryMode = false, bool append = false);
1406 
1415  void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector<cv::DMatch> &matches,
1416  double &elapsedTime);
1417 
1425  unsigned int matchPoint(const vpImage<unsigned char> &I);
1426 
1437  unsigned int matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
1438  unsigned int width);
1439 
1448  unsigned int matchPoint(const vpImage<unsigned char> &I, const vpRect &rectangle);
1449 
1458  unsigned int matchPoint(const std::vector<cv::KeyPoint> &queryKeyPoints, const cv::Mat &queryDescriptors);
1459 
 1472  bool matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
 1473  bool (*func)(const vpHomogeneousMatrix &) = nullptr, const vpRect &rectangle = vpRect());
1474 
 1490  bool matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
 1491  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = nullptr,
1492  const vpRect &rectangle = vpRect());
1493 
1514  bool matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox, vpImagePoint &centerOfGravity,
1515  const bool isPlanarObject = true, std::vector<vpImagePoint> *imPts1 = nullptr,
1516  std::vector<vpImagePoint> *imPts2 = nullptr, double *meanDescriptorDistance = nullptr,
1517  double *detectionScore = nullptr, const vpRect &rectangle = vpRect());
1518 
1538  bool matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
1539  double &error, double &elapsedTime, vpRect &boundingBox, vpImagePoint &centerOfGravity,
1540  bool (*func)(const vpHomogeneousMatrix &) = nullptr, const vpRect &rectangle = vpRect());
1541 
1549  unsigned int matchPoint(const vpImage<vpRGBa> &I_color);
1550 
1561  unsigned int matchPoint(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
1562  unsigned int width);
1563 
1572  unsigned int matchPoint(const vpImage<vpRGBa> &I_color, const vpRect &rectangle);
1573 
1586  bool matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
1587  bool (*func)(const vpHomogeneousMatrix &) = nullptr, const vpRect &rectangle = vpRect());
1588 
1604  bool matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
1605  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = nullptr,
1606  const vpRect &rectangle = vpRect());
1607 
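/*
 * Sketch of the on-line stage once a reference with 3D points has been built
 * (or loaded with loadLearningData()); `keypoint` is an assumed vpKeyPoint
 * instance, `I` the current grayscale frame and `cam` the camera parameters:
 *
 *   vpHomogeneousMatrix cMo;
 *   double error = 0., elapsedTime = 0.;
 *   if (keypoint.matchPoint(I, cam, cMo, error, elapsedTime)) {
 *     std::cout << "Pose found, residual error: " << error << std::endl;
 *   }
 *   else {
 *     std::cout << "Pose estimation failed" << std::endl;
 *   }
 */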
1611  void reset();
1612 
1621  void saveLearningData(const std::string &filename, bool binaryMode = false, bool saveTrainingImages = true);
1622 
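/*
 * Sketch of persisting the training data so that the reference building step
 * is done only once; `keypoint` is an assumed vpKeyPoint instance, the file
 * name and the use of the binary format are arbitrary choices:
 *
 *   keypoint.saveLearningData("object_learning_data.bin", true);  // binary mode
 *
 *   vpKeyPoint keypoint2("ORB", "ORB", "BruteForce-Hamming");
 *   keypoint2.loadLearningData("object_learning_data.bin", true); // same binary flag
 */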
1629  inline void setCovarianceComputation(const bool &flag)
1630  {
1631  m_computeCovariance = flag;
1632  if (!m_useRansacVVS) {
 1633  std::cout << "Warning: The covariance matrix can only be computed "
 1634  << "with a Virtual Visual Servoing approach." << std::endl
 1635  << "Use setUseRansacVVS(true) to select a pose "
1636  << "estimation method based on a Virtual "
1637  << "Visual Servoing approach." << std::endl;
1638  }
1639  }
1640 
1646  inline void setDetectionMethod(const vpDetectionMethodType &method) { m_detectionMethod = method; }
1647 
1653  inline void setDetector(const vpFeatureDetectorType &detectorType)
1654  {
1655  m_detectorNames.clear();
1656  m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
1657  m_detectors.clear();
1658  initDetector(m_mapOfDetectorNames[detectorType]);
1659  }
1660 
1666  inline void setDetector(const std::string &detectorName)
1667  {
1668  m_detectorNames.clear();
1669  m_detectorNames.push_back(detectorName);
1670  m_detectors.clear();
1671  initDetector(detectorName);
1672  }
1673 
1674 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1683  template <typename T1, typename T2, typename T3>
1684  inline void setDetectorParameter(const T1 detectorName, const T2 parameterName, const T3 value)
1685  {
1686  if (m_detectors.find(detectorName) != m_detectors.end()) {
1687  m_detectors[detectorName]->set(parameterName, value);
1688  }
1689  }
1690 #endif
1691 
1698  inline void setDetectors(const std::vector<std::string> &detectorNames)
1699  {
1700  m_detectorNames.clear();
1701  m_detectors.clear();
1702  m_detectorNames = detectorNames;
1703  initDetectors(m_detectorNames);
1704  }
1705 
1711  inline void setExtractor(const vpFeatureDescriptorType &extractorType)
1712  {
1713  m_extractorNames.clear();
1714  m_extractorNames.push_back(m_mapOfDescriptorNames[extractorType]);
1715  m_extractors.clear();
1716  initExtractor(m_mapOfDescriptorNames[extractorType]);
1717  }
1718 
1725  inline void setExtractor(const std::string &extractorName)
1726  {
1727  m_extractorNames.clear();
1728  m_extractorNames.push_back(extractorName);
1729  m_extractors.clear();
1730  initExtractor(extractorName);
1731  }
1732 
1733 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1742  template <typename T1, typename T2, typename T3>
1743  inline void setExtractorParameter(const T1 extractorName, const T2 parameterName, const T3 value)
1744  {
1745  if (m_extractors.find(extractorName) != m_extractors.end()) {
1746  m_extractors[extractorName]->set(parameterName, value);
1747  }
1748  }
1749 #endif
1750 
1757  inline void setExtractors(const std::vector<std::string> &extractorNames)
1758  {
1759  m_extractorNames.clear();
1760  m_extractorNames = extractorNames;
1761  m_extractors.clear();
1762  initExtractors(m_extractorNames);
1763  }
1764 
1770  inline void setImageFormat(const vpImageFormatType &imageFormat) { m_imageFormat = imageFormat; }
1771 
1787  inline void setMatcher(const std::string &matcherName)
1788  {
1789  m_matcherName = matcherName;
1790  initMatcher(m_matcherName);
1791  }
1792 
1798  void setMaxFeatures(int maxFeatures) { m_maxFeatures = maxFeatures; }
1799 
1815  inline void setFilterMatchingType(const vpFilterMatchingType &filterType)
1816  {
1817  m_filterType = filterType;
1818 
 1819  // Use k-nearest neighbors (knn) to retrieve the two best matches for a
 1820  // keypoint, so this is only useful for the ratio-based filtering methods
1821  if (filterType == ratioDistanceThreshold || filterType == stdAndRatioDistanceThreshold) {
1822  m_useKnn = true;
1823 
1824 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1825  if (m_matcher != nullptr && m_matcherName == "BruteForce") {
1826  // if a matcher is already initialized, disable the crossCheck
1827  // because it will not work with knnMatch
1828  m_matcher->set("crossCheck", false);
1829  }
1830 #endif
1831  }
1832  else {
1833  m_useKnn = false;
1834 
1835 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1836  if (m_matcher != nullptr && m_matcherName == "BruteForce") {
1837  // if a matcher is already initialized, set the crossCheck mode if
1838  // necessary
1839  m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
1840  }
1841 #endif
1842  }
1843  }
1844 
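/*
 * Example of switching the filtering strategy; with a ratio-based filter the
 * knn matching is used internally and the ratio can be tuned (`keypoint` is an
 * assumed vpKeyPoint instance, 0.85 an arbitrary value in ]0 ; 1]):
 *
 *   keypoint.setFilterMatchingType(vpKeyPoint::ratioDistanceThreshold);
 *   keypoint.setMatchingRatioThreshold(0.85);
 */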
1851  inline void setMatchingFactorThreshold(const double factor)
1852  {
1853  if (factor > 0.0) {
1854  m_matchingFactorThreshold = factor;
1855  }
1856  else {
1857  throw vpException(vpException::badValue, "The factor must be positive.");
1858  }
1859  }
1860 
1866  inline void setMatchingRatioThreshold(double ratio)
1867  {
1868  if (ratio > 0.0 && (ratio < 1.0 || std::fabs(ratio - 1.0) < std::numeric_limits<double>::epsilon())) {
1869  m_matchingRatioThreshold = ratio;
1870  }
1871  else {
1872  throw vpException(vpException::badValue, "The ratio must be in the interval ]0 ; 1].");
1873  }
1874  }
1875 
1882  inline void setRansacConsensusPercentage(double percentage)
1883  {
1884  if (percentage > 0.0 &&
1885  (percentage < 100.0 || std::fabs(percentage - 100.0) < std::numeric_limits<double>::epsilon())) {
1886  m_ransacConsensusPercentage = percentage;
1887  }
1888  else {
1889  throw vpException(vpException::badValue, "The percentage must be in the interval ]0 ; 100].");
1890  }
1891  }
1892 
1896  inline void setRansacFilterFlag(const vpPose::RANSAC_FILTER_FLAGS &flag) { m_ransacFilterFlag = flag; }
1897 
1904  inline void setRansacIteration(int nbIter)
1905  {
1906  if (nbIter > 0) {
1907  m_nbRansacIterations = nbIter;
1908  }
1909  else {
1910  throw vpException(vpException::badValue, "The number of iterations must be greater than zero.");
1911  }
1912  }
1913 
1919  inline void setRansacParallel(bool parallel) { m_ransacParallel = parallel; }
1920 
1927  inline void setRansacParallelNbThreads(unsigned int nthreads) { m_ransacParallelNbThreads = nthreads; }
1928 
1936  inline void setRansacReprojectionError(double reprojectionError)
1937  {
1938  if (reprojectionError > 0.0) {
1939  m_ransacReprojectionError = reprojectionError;
1940  }
1941  else {
1942  throw vpException(vpException::badValue, "The Ransac reprojection "
1943  "threshold must be positive "
1944  "as we deal with distance.");
1945  }
1946  }
1947 
1953  inline void setRansacMinInlierCount(int minCount)
1954  {
1955  if (minCount > 0) {
1956  m_nbRansacMinInlierCount = minCount;
1957  }
1958  else {
1959  throw vpException(vpException::badValue, "The minimum number of inliers must be greater than zero.");
1960  }
1961  }
1962 
1969  inline void setRansacThreshold(double threshold)
1970  {
1971  if (threshold > 0.0) {
1972  m_ransacThreshold = threshold;
1973  }
1974  else {
1975  throw vpException(vpException::badValue, "The Ransac threshold must be positive as we deal with distance.");
1976  }
1977  }
1978 
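/*
 * Example of tuning the RANSAC pose estimation; `keypoint` is an assumed
 * vpKeyPoint instance and the values are arbitrary, to be adapted to the scene:
 *
 *   keypoint.setRansacIteration(500);
 *   keypoint.setRansacReprojectionError(6.0); // pixel threshold, OpenCV-based pose
 *   keypoint.setRansacThreshold(0.001);       // residual threshold, ViSP VVS-based pose
 *   keypoint.setRansacMinInlierCount(20);
 *   keypoint.setUseRansacConsensusPercentage(true);
 *   keypoint.setRansacConsensusPercentage(20.0);
 */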
1986  inline void setUseAffineDetection(bool useAffine) { m_useAffineDetection = useAffine; }
1987 
1988 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1995  inline void setUseBruteForceCrossCheck(bool useCrossCheck)
1996  {
 1997  // Only available with BruteForce and with k=1 (i.e. not used with a
 1998  // ratioDistanceThreshold method)
1999  if (m_matcher != nullptr && !m_useKnn && m_matcherName == "BruteForce") {
2000  m_matcher->set("crossCheck", useCrossCheck);
2001  }
2002  else if (m_matcher != nullptr && m_useKnn && m_matcherName == "BruteForce") {
 2003  std::cout << "Warning: you are trying to set the crossCheck parameter with a "
 2004  << "BruteForce matcher, but knn is enabled"
 2005  << " (the filtering method uses a ratio constraint)" << std::endl;
2006  }
2007  }
2008 #endif
2009 
2016  inline void setUseMatchTrainToQuery(bool useMatchTrainToQuery) { m_useMatchTrainToQuery = useMatchTrainToQuery; }
2017 
2025  inline void setUseRansacConsensusPercentage(bool usePercentage) { m_useConsensusPercentage = usePercentage; }
2026 
2034  inline void setUseRansacVVS(bool ransacVVS) { m_useRansacVVS = ransacVVS; }
2035 
2042  inline void setUseSingleMatchFilter(bool singleMatchFilter) { m_useSingleMatchFilter = singleMatchFilter; }
2043 
2044 private:
2047  bool m_computeCovariance;
2049  vpMatrix m_covarianceMatrix;
2051  int m_currentImageId;
2054  vpDetectionMethodType m_detectionMethod;
2056  double m_detectionScore;
2059  double m_detectionThreshold;
2061  double m_detectionTime;
2063  std::vector<std::string> m_detectorNames;
 2066  // Map of feature detectors, with a key based upon the detector name.
2067  std::map<std::string, cv::Ptr<cv::FeatureDetector> > m_detectors;
2069  double m_extractionTime;
2071  std::vector<std::string> m_extractorNames;
 2074  // Map of descriptor extractors, with a key based upon the extractor name.
2075  std::map<std::string, cv::Ptr<cv::DescriptorExtractor> > m_extractors;
2077  std::vector<cv::DMatch> m_filteredMatches;
2079  vpFilterMatchingType m_filterType;
2081  vpImageFormatType m_imageFormat;
2084  std::vector<std::vector<cv::DMatch> > m_knnMatches;
2086  std::map<vpFeatureDescriptorType, std::string> m_mapOfDescriptorNames;
2088  std::map<vpFeatureDetectorType, std::string> m_mapOfDetectorNames;
2091  std::map<int, int> m_mapOfImageId;
2094  std::map<int, vpImage<unsigned char> > m_mapOfImages;
2097  cv::Ptr<cv::DescriptorMatcher> m_matcher;
2099  std::string m_matcherName;
2101  std::vector<cv::DMatch> m_matches;
2103  double m_matchingFactorThreshold;
2105  double m_matchingRatioThreshold;
2107  double m_matchingTime;
2109  std::vector<std::pair<cv::KeyPoint, cv::Point3f> > m_matchRansacKeyPointsToPoints;
2111  int m_nbRansacIterations;
2113  int m_nbRansacMinInlierCount;
2116  std::vector<cv::Point3f> m_objectFilteredPoints;
2118  double m_poseTime;
2121  cv::Mat m_queryDescriptors;
2123  std::vector<cv::KeyPoint> m_queryFilteredKeyPoints;
2125  std::vector<cv::KeyPoint> m_queryKeyPoints;
2128  double m_ransacConsensusPercentage;
2130  vpPose::RANSAC_FILTER_FLAGS m_ransacFilterFlag;
2132  std::vector<vpImagePoint> m_ransacInliers;
2134  std::vector<vpImagePoint> m_ransacOutliers;
2136  bool m_ransacParallel;
2138  unsigned int m_ransacParallelNbThreads;
2141  double m_ransacReprojectionError;
2144  double m_ransacThreshold;
 2147  // Train descriptors (computed on the keypoints detected in the train images).
2148  cv::Mat m_trainDescriptors;
2150  std::vector<cv::KeyPoint> m_trainKeyPoints;
2153  std::vector<cv::Point3f> m_trainPoints;
2156  std::vector<vpPoint> m_trainVpPoints;
2159  bool m_useAffineDetection;
2160 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2164  bool m_useBruteForceCrossCheck;
2165 #endif
2168  bool m_useConsensusPercentage;
2170  bool m_useKnn;
2175  bool m_useMatchTrainToQuery;
2177  bool m_useRansacVVS;
2180  bool m_useSingleMatchFilter;
2184  int m_maxFeatures;
2185 
2194  void affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai);
2195 
2210  double computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
2211  const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est);
2212 
2216  void filterMatches();
2217 
2222  void init();
2223 
2229  void initDetector(const std::string &detectorNames);
2230 
2237  void initDetectors(const std::vector<std::string> &detectorNames);
2238 
2244  void initExtractor(const std::string &extractorName);
2245 
2252  void initExtractors(const std::vector<std::string> &extractorNames);
2253 
2257  void initFeatureNames();
2258 
2259  inline size_t myKeypointHash(const cv::KeyPoint &kp)
2260  {
2261  size_t _Val = 2166136261U, scale = 16777619U;
2262  Cv32suf u;
2263  u.f = kp.pt.x;
2264  _Val = (scale * _Val) ^ u.u;
2265  u.f = kp.pt.y;
2266  _Val = (scale * _Val) ^ u.u;
2267  u.f = kp.size;
2268  _Val = (scale * _Val) ^ u.u;
 2269  // As the keypoint angle can be computed for certain types of keypoints only
 2270  // when extracting the corresponding descriptor, the angle field is not
 2271  // taken into account for the hash
2272  // u.f = kp.angle; _Val = (scale * _Val) ^ u.u;
2273  u.f = kp.response;
2274  _Val = (scale * _Val) ^ u.u;
2275  _Val = (scale * _Val) ^ ((size_t)kp.octave);
2276  _Val = (scale * _Val) ^ ((size_t)kp.class_id);
2277  return _Val;
2278  }
2279 
2280 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2281  /*
2282  * Adapts a detector to detect points over multiple levels of a Gaussian
2283  * pyramid. Useful for detectors that are not inherently scaled.
2284  * From OpenCV 2.4.11 source code.
2285  */
2286  class PyramidAdaptedFeatureDetector : public cv::FeatureDetector
2287  {
2288  public:
2289  // maxLevel - The 0-based index of the last pyramid layer
2290  PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &detector, int maxLevel = 2);
2291 
2292  // TODO implement read/write
2293  virtual bool empty() const;
2294 
2295  protected:
2296  virtual void detect(cv::InputArray image, CV_OUT std::vector<cv::KeyPoint> &keypoints,
2297  cv::InputArray mask = cv::noArray());
2298  virtual void detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
2299  const cv::Mat &mask = cv::Mat()) const;
2300 
2301  cv::Ptr<cv::FeatureDetector> m_detector;
2302  int m_maxLevel;
2303  };
2304 
2305  /*
 2306  * A class to filter a vector of keypoints.
 2307  * Because it is currently difficult to provide a convenient interface for
 2308  * all usage scenarios of a keypoint filter class, it only provides the few
 2309  * static methods needed for now.
2310  */
2311  class KeyPointsFilter
2312  {
2313  public:
2314  KeyPointsFilter() { }
2315 
2316  /*
 2317  * Remove keypoints within borderSize pixels of the image border.
2318  */
2319  static void runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize, int borderSize);
2320  /*
2321  * Remove keypoints of sizes out of range.
2322  */
2323  static void runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize = FLT_MAX);
2324  /*
 2325  * Remove keypoints for which the corresponding mask pixel is zero.
2326  */
2327  static void runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask);
2328  /*
2329  * Remove duplicated keypoints.
2330  */
2331  static void removeDuplicated(std::vector<cv::KeyPoint> &keypoints);
2332 
2333  /*
2334  * Retain the specified number of the best keypoints (according to the
2335  * response)
2336  */
2337  static void retainBest(std::vector<cv::KeyPoint> &keypoints, int npoints);
2338  };
2339 
2340 #endif
2341 };
2342 END_VISP_NAMESPACE
2343 #endif
2344 #endif