ViSP  2.10.0
vpKeyPoint.h
1 /****************************************************************************
2  *
3  * This file is part of the ViSP software.
4  * Copyright (C) 2005 - 2014 by INRIA. All rights reserved.
5  *
6  * This software is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * ("GPL") version 2 as published by the Free Software Foundation.
9  * See the file LICENSE.txt at the root directory of this source
10  * distribution for additional information about the GNU GPL.
11  *
12  * For using ViSP with software that can not be combined with the GNU
13  * GPL, please contact INRIA about acquiring a ViSP Professional
14  * Edition License.
15  *
16  * See http://www.irisa.fr/lagadic/visp/visp.html for more information.
17  *
18  * This software was developed at:
19  * INRIA Rennes - Bretagne Atlantique
20  * Campus Universitaire de Beaulieu
21  * 35042 Rennes Cedex
22  * France
23  * http://www.irisa.fr/lagadic
24  *
25  * If you have questions regarding the use of this file, please contact
26  * INRIA at visp@inria.fr
27  *
28  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
29  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
30  *
31  * Description:
32  * Key point functionalities.
33  *
34  * Authors:
35  * Souriya Trinh
36  *
37  *****************************************************************************/
38 #ifndef __vpKeyPoint_h__
39 #define __vpKeyPoint_h__
40 
41 #include <algorithm> // std::transform
42 #include <vector> // std::vector
43 #include <stdlib.h> // srand, rand
44 #include <time.h> // time
45 #include <fstream> // std::ofstream
46 #include <numeric> // std::accumulate
47 #include <float.h> // DBL_MAX
48 #include <map> // std::map
49 #include <limits>
50 
51 #include <visp/vpConfig.h>
52 #include <visp/vpBasicKeyPoint.h>
53 #include <visp/vpImageConvert.h>
54 #include <visp/vpPoint.h>
55 #include <visp/vpDisplay.h>
56 #include <visp/vpPlane.h>
57 #include <visp/vpPixelMeterConversion.h>
58 #include <visp/vpMbEdgeTracker.h>
59 #include <visp/vpIoTools.h>
60 #include <visp/vpPose.h>
61 #include <visp/vpImageIo.h>
62 #include <visp/vpPolygon.h>
63 #include <visp/vpXmlConfigParserKeyPoint.h>
64 #include <visp/vpConvert.h>
65 
66 // Require at least OpenCV >= 2.1.1
67 #if (VISP_HAVE_OPENCV_VERSION >= 0x020101)
68 
69 #include <opencv2/features2d/features2d.hpp>
70 #include <opencv2/calib3d/calib3d.hpp>
71 
72 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) // OpenCV >= 3.0.0
73 # include <opencv2/xfeatures2d.hpp>
74 #elif defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
75 # include <opencv2/nonfree/nonfree.hpp>
76 #endif
77 
78 #ifdef VISP_HAVE_XML2
79 # include <libxml/xmlwriter.h>
80 #endif
81 
212 class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint {
213 
214 public:
215 
217  typedef enum {
222  noFilterMatching
223  } vpFilterMatchingType;
224 
226  typedef enum {
228  detectionScore
230  } vpDetectionMethodType;
231 
232 
233  vpKeyPoint(const std::string &detectorName="ORB", const std::string &extractorName="ORB",
234  const std::string &matcherName="BruteForce-Hamming", const vpFilterMatchingType &filterType=ratioDistanceThreshold);
235  vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
236  const std::string &matcherName="BruteForce", const vpFilterMatchingType &filterType=ratioDistanceThreshold);
237 
238  unsigned int buildReference(const vpImage<unsigned char> &I);
239  unsigned int buildReference(const vpImage<unsigned char> &I,
240  const vpImagePoint &iP, const unsigned int height, const unsigned int width);
241  unsigned int buildReference(const vpImage<unsigned char> &I, const vpRect& rectangle);
242 
243  void buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoint,
244  std::vector<cv::Point3f> &points3f, bool append=false);
245  void buildReference(const vpImage<unsigned char> &I, const std::vector<cv::KeyPoint> &trainKeyPoint,
246  const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f, bool append=false);
247 
248  static void compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi,
249  const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point);
250 
251  static void compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi,
252  const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, vpPoint &point);
253 
254  static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
255  std::vector<cv::KeyPoint> &candidate, std::vector<vpPolygon> &polygons, std::vector<std::vector<vpPoint> > &roisPt,
256  std::vector<cv::Point3f> &points, cv::Mat *descriptors=NULL);
257 
258  static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
259  std::vector<vpImagePoint> &candidate, std::vector<vpPolygon> &polygons, std::vector<std::vector<vpPoint> > &roisPt,
260  std::vector<vpPoint> &points, cv::Mat *descriptors=NULL);
261 
262  bool computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
263  const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex, double &elapsedTime,
264  bool (*func)(vpHomogeneousMatrix *)=NULL);
265 
266  bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
267  std::vector<vpPoint> &inliers, double &elapsedTime, bool (*func)(vpHomogeneousMatrix *)=NULL);
268 
269  void createImageMatching(vpImage<unsigned char> &IRef, vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching);
270  void createImageMatching(vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching);
271 
272  void detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
273  const vpRect& rectangle=vpRect());
274  void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
275  const cv::Mat &mask=cv::Mat());
276 
277  void detectExtractAffine(const vpImage<unsigned char> &I, std::vector<std::vector<cv::KeyPoint> >& listOfKeypoints,
278  std::vector<cv::Mat>& listOfDescriptors,
279  std::vector<vpImage<unsigned char> > *listOfAffineI=NULL);
280 
281  void display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size=3);
282  void display(const vpImage<unsigned char> &ICurrent, unsigned int size=3, const vpColor &color=vpColor::green);
283 
284  void displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching,
285  unsigned int crossSize, unsigned int lineThickness=1,
286  const vpColor &color=vpColor::green);
287  void displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
288  const std::vector<vpImagePoint> &ransacInliers=std::vector<vpImagePoint>(), unsigned int crossSize=3,
289  unsigned int lineThickness=1);
290 
291  void extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors, double &elapsedTime);
292  void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors, double &elapsedTime);
293 
301  inline vpMatrix getCovarianceMatrix() const {
302  if(!m_computeCovariance) {
303  std::cout << "Warning : The covariance matrix has not been computed. See setCovarianceComputation() to do it." << std::endl;
304  return vpMatrix();
305  }
306 
307  if(m_computeCovariance && !m_useRansacVVS) {
308  std::cout << "Warning : The covariance matrix can only be computed with a Virtual Visual Servoing approach." << std::endl
309  << "Use setUseRansacVVS(true) to choose to use a pose estimation method based on a Virtual Visual Servoing approach."
310  << std::endl;
311  return vpMatrix();
312  }
313 
314  return m_covarianceMatrix;
315  }
316 
322  inline double getDetectionTime() const {
323  return m_detectionTime;
324  }
325 
331  inline double getExtractionTime() const {
332  return m_extractionTime;
333  }
334 
340  inline double getMatchingTime() const {
341  return m_matchingTime;
342  }
343 
349  inline std::vector<cv::DMatch> getMatches() const {
350  return m_filteredMatches;
351  }
352 
358  inline std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > getMatchQueryToTrainKeyPoints() const {
359  return m_matchQueryToTrainKeyPoints;
360  }
361 
367  inline unsigned int getNbImages() const {
368  return static_cast<unsigned int>(m_mapOfImages.size());
369  }
370 
371  void getObjectPoints(std::vector<cv::Point3f> &objectPoints) const;
372  void getObjectPoints(std::vector<vpPoint> &objectPoints) const;
373 
379  inline double getPoseTime() const {
380  return m_poseTime;
381  }
382 
388  inline cv::Mat getQueryDescriptors() const {
389  return m_queryDescriptors;
390  }
391 
392  void getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const;
393  void getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints) const;
394 
400  inline std::vector<vpImagePoint> getRansacInliers() const {
401  return m_ransacInliers;
402  }
403 
409  inline std::vector<vpImagePoint> getRansacOutliers() const {
410  return m_ransacOutliers;
411  }
412 
418  inline cv::Mat getTrainDescriptors() const {
419  return m_trainDescriptors;
420  }
421 
422  void getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const;
423  void getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const;
424 
425  void getTrainPoints(std::vector<cv::Point3f> &points) const;
426  void getTrainPoints(std::vector<vpPoint> &points) const;
427 
428  void initMatcher(const std::string &matcherName);
429 
430  void insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
431  vpImage<unsigned char> &IMatching);
432  void insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching);
433 
434 #ifdef VISP_HAVE_XML2
435  void loadConfigFile(const std::string &configFile);
436 #endif
437 
438  void loadLearningData(const std::string &filename, const bool binaryMode=false, const bool append=false);
439 
440  void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors,
441  std::vector<cv::DMatch> &matches, double &elapsedTime);
442 
443  unsigned int matchPoint(const vpImage<unsigned char> &I);
444  unsigned int matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP,
445  const unsigned int height, const unsigned int width);
446  unsigned int matchPoint(const vpImage<unsigned char> &I, const vpRect& rectangle);
447 
448  bool matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
449  double &error, double &elapsedTime, bool (*func)(vpHomogeneousMatrix *)=NULL);
450 
451  bool matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox, vpImagePoint &centerOfGravity,
452  const bool isPlanarObject=true, std::vector<vpImagePoint> *imPts1=NULL,
453  std::vector<vpImagePoint> *imPts2=NULL, double *meanDescriptorDistance=NULL,
454  double *detectionScore=NULL);
455 
456  bool matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
457  double &error, double &elapsedTime, vpRect &boundingBox, vpImagePoint &centerOfGravity,
458  bool (*func)(vpHomogeneousMatrix *)=NULL);
459 
460  void saveLearningData(const std::string &filename, const bool binaryMode=false, const bool saveTrainingImages=true);
461 
467  inline void setCovarianceComputation(const bool& flag) {
468  m_computeCovariance = flag;
469  if(!m_useRansacVVS) {
470  std::cout << "Warning : The covariance matrix can only be computed with a Virtual Visual Servoing approach." << std::endl
471  << "Use setUseRansacVVS(true) to choose to use a pose estimation method based on a Virtual "
472  "Visual Servoing approach." << std::endl;
473  }
474  }
475 
481  inline void setDetectionMethod(const vpDetectionMethodType &method) {
482  m_detectionMethod = method;
483  }
484 
490  inline void setDetector(const std::string &detectorName) {
491  m_detectorNames.clear();
492  m_detectorNames.push_back(detectorName);
493  m_detectors.clear();
494  initDetector(detectorName);
495  }
496 
504  template<typename T1, typename T2, typename T3> inline void setDetectorParameter(const T1 detectorName,
505  const T2 parameterName, const T3 value) {
506  if(m_detectors.find(detectorName) != m_detectors.end()) {
507  m_detectors[detectorName]->set(parameterName, value);
508  }
509  }
510 
516  inline void setDetectors(const std::vector<std::string> &detectorNames) {
517  m_detectorNames.clear();
518  m_detectors.clear();
519  m_detectorNames = detectorNames;
520  initDetectors(m_detectorNames);
521  }
522 
528  inline void setExtractor(const std::string &extractorName) {
529  m_extractorNames.clear();
530  m_extractorNames.push_back(extractorName);
531  m_extractors.clear();
532  initExtractor(extractorName);
533  }
534 
542  template<typename T1, typename T2, typename T3> inline void setExtractorParameter(const T1 extractorName,
543  const T2 parameterName, const T3 value) {
544  if(m_extractors.find(extractorName) != m_extractors.end()) {
545  m_extractors[extractorName]->set(parameterName, value);
546  }
547  }
548 
554  inline void setExtractors(const std::vector<std::string> &extractorNames) {
555  m_extractorNames.clear();
556  m_extractorNames = extractorNames;
557  m_extractors.clear();
558  initExtractors(m_extractorNames);
559  }
560 
575  inline void setMatcher(const std::string &matcherName) {
576  m_matcherName = matcherName;
577  initMatcher(m_matcherName);
578  }
579 
591  inline void setFilterMatchingType(const vpFilterMatchingType &filterType) {
592  m_filterType = filterType;
593 
594  //Use k-nearest neighbors (knn) to retrieve the two best matches for a keypoint,
595  //which is needed only for the ratioDistanceThreshold and stdAndRatioDistanceThreshold methods
596  if(filterType == ratioDistanceThreshold || filterType == stdAndRatioDistanceThreshold) {
597  m_useKnn = true;
598  } else {
599  m_useKnn = false;
600  }
601  }
602 
608  inline void setMatchingFactorThreshold(const double factor) {
609  if(factor > 0.0) {
610  m_matchingFactorThreshold = factor;
611  } else {
612  throw vpException(vpException::badValue, "The factor must be positive.");
613  }
614  }
615 
621  inline void setMatchingRatioThreshold(const double ratio) {
622  if(ratio > 0.0 && (ratio < 1.0 || std::fabs(ratio - 1.0) < std::numeric_limits<double>::epsilon())) {
623  m_matchingRatioThreshold = ratio;
624  } else {
625  throw vpException(vpException::badValue, "The ratio must be in the interval ]0 ; 1].");
626  }
627  }
628 
634  inline void setRansacConsensusPercentage(const double percentage) {
635  if(percentage > 0.0 && (percentage < 100.0 || std::fabs(percentage - 100.0) < std::numeric_limits<double>::epsilon())) {
636  m_ransacConsensusPercentage = percentage;
637  } else {
638  throw vpException(vpException::badValue, "The percentage must be in the interval ]0 ; 100].");
639  }
640  }
641 
647  inline void setRansacIteration(const int nbIter) {
648  if(nbIter > 0) {
649  m_nbRansacIterations = nbIter;
650  } else {
651  throw vpException(vpException::badValue, "The number of iterations must be greater than zero.");
652  }
653  }
654 
660  inline void setRansacReprojectionError(const double reprojectionError) {
661  if(reprojectionError > 0.0) {
662  m_ransacReprojectionError = reprojectionError;
663  } else {
664  throw vpException(vpException::badValue, "The Ransac reprojection threshold must be positive as we deal with distance.");
665  }
666  }
667 
673  inline void setRansacMinInlierCount(const int minCount) {
674  if(minCount > 0) {
675  m_nbRansacMinInlierCount = minCount;
676  } else {
677  throw vpException(vpException::badValue, "The minimum number of inliers must be greater than zero.");
678  }
679  }
680 
686  inline void setRansacThreshold(const double threshold) {
687  if(threshold > 0.0) {
688  m_ransacThreshold = threshold;
689  } else {
690  throw vpException(vpException::badValue, "The Ransac threshold must be positive as we deal with distance.");
691  }
692  }
693 
699  inline void setUseAffineDetection(const bool useAffine) {
700  m_useAffineDetection = useAffine;
701  }
702 
703 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400)
704 
709  inline void setUseBruteForceCrossCheck(const bool useCrossCheck) {
710  //Only available with BruteForce and with k=1 (i.e. not used with a ratioDistanceThreshold method)
711  if(m_matcher != NULL && !m_useKnn && m_matcherName == "BruteForce") {
712  m_matcher->set("crossCheck", useCrossCheck);
713  }
714  }
715 #endif
716 
723  inline void setUseRansacConsensusPercentage(const bool usePercentage) {
724  m_useConsensusPercentage = usePercentage;
725  }
726 
732  inline void setUseRansacVVS(const bool ransacVVS) {
733  m_useRansacVVS = ransacVVS;
734  }
735 
736 private:
738  bool m_computeCovariance;
740  vpMatrix m_covarianceMatrix;
742  int m_currentImageId;
744  vpDetectionMethodType m_detectionMethod;
746  double m_detectionScore;
748  double m_detectionThreshold;
750  double m_detectionTime;
752  std::vector<std::string> m_detectorNames;
754  // with a key based upon the detector name.
755  std::map<std::string, cv::Ptr<cv::FeatureDetector> > m_detectors;
757  double m_extractionTime;
759  std::vector<std::string> m_extractorNames;
761  // with a key based upon the extractor name.
762  std::map<std::string, cv::Ptr<cv::DescriptorExtractor> > m_extractors;
764  std::vector<cv::DMatch> m_filteredMatches;
766  vpFilterMatchingType m_filterType;
768  std::vector<std::vector<cv::DMatch> > m_knnMatches;
770  std::map<int, int> m_mapOfImageId;
772  std::map<int, vpImage<unsigned char> > m_mapOfImages;
774  cv::Ptr<cv::DescriptorMatcher> m_matcher;
776  std::string m_matcherName;
778  std::vector<cv::DMatch> m_matches;
780  double m_matchingFactorThreshold;
782  double m_matchingRatioThreshold;
784  double m_matchingTime;
786  std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > m_matchQueryToTrainKeyPoints;
788  std::vector<std::pair<cv::KeyPoint, cv::Point3f> > m_matchRansacKeyPointsToPoints;
790  std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > m_matchRansacQueryToTrainKeyPoints;
792  int m_nbRansacIterations;
794  int m_nbRansacMinInlierCount;
796  std::vector<cv::Point3f> m_objectFilteredPoints;
798  double m_poseTime;
801  cv::Mat m_queryDescriptors;
803  std::vector<cv::KeyPoint> m_queryFilteredKeyPoints;
805  std::vector<cv::KeyPoint> m_queryKeyPoints;
807  double m_ransacConsensusPercentage;
809  std::vector<vpImagePoint> m_ransacInliers;
811  std::vector<vpImagePoint> m_ransacOutliers;
813  double m_ransacReprojectionError;
815  double m_ransacThreshold;
817  //detected in the train images).
818  cv::Mat m_trainDescriptors;
820  std::vector<cv::KeyPoint> m_trainKeyPoints;
822  std::vector<cv::Point3f> m_trainPoints;
824  std::vector<vpPoint> m_trainVpPoints;
826  bool m_useAffineDetection;
829  bool m_useBruteForceCrossCheck;
831  bool m_useConsensusPercentage;
833  bool m_useKnn;
835  bool m_useRansacVVS;
836 
837 
838  void affineSkew(double tilt, double phi, cv::Mat& img, cv::Mat& mask, cv::Mat& Ai);
839 
840  double computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
841  const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est);
842 
843  void filterMatches();
844 
845  void init();
846  void initDetector(const std::string &detectorNames);
847  void initDetectors(const std::vector<std::string> &detectorNames);
848 
849  void initExtractor(const std::string &extractorName);
850  void initExtractors(const std::vector<std::string> &extractorNames);
851 
852 //TODO: Try to implement a pyramidal feature detection
853 //#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
854 // void pyramidFeatureDetection(cv::Ptr<cv::FeatureDetector> &detector, const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat& mask,
855 // const int maxLevel=2);
856 // void runByPixelsMask(std::vector<cv::KeyPoint>& keypoints, const cv::Mat& mask);
857 //#endif
858 };
859 
860 #endif
861 #endif
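
A minimal usage sketch, assuming OpenCV is available and that two grayscale images named reference.pgm and current.pgm exist on disk (the file names are placeholders). The constructor defaults declared above select an ORB detector, an ORB extractor and a BruteForce-Hamming matcher with ratio-test filtering; buildReference() learns the keypoints of the reference image and matchPoint() detects, extracts and matches keypoints in the current image:

#include <iostream>
#include <visp/vpConfig.h>
#include <visp/vpImage.h>
#include <visp/vpImageIo.h>
#include <visp/vpKeyPoint.h>

int main()
{
#if (VISP_HAVE_OPENCV_VERSION >= 0x020101)
  vpImage<unsigned char> Iref, Icur;
  vpImageIo::read(Iref, "reference.pgm"); // placeholder file names
  vpImageIo::read(Icur, "current.pgm");

  // Default parameters: ORB detector/extractor, BruteForce-Hamming matcher, ratioDistanceThreshold filtering
  vpKeyPoint keypoint;

  unsigned int nbRef   = keypoint.buildReference(Iref); // detect + extract on the reference image
  unsigned int nbMatch = keypoint.matchPoint(Icur);     // detect + extract + match on the current image

  std::cout << nbRef << " reference keypoints, " << nbMatch << " matches" << std::endl;
  std::cout << "times: " << keypoint.getDetectionTime() << " (detection), "
            << keypoint.getExtractionTime() << " (extraction), "
            << keypoint.getMatchingTime() << " (matching)" << std::endl;
#endif
  return 0;
}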
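Continuing the sketch above, the image-matching helpers build a side-by-side view of the reference and current images in which the matches can be drawn. A display (e.g. vpDisplayX) must be attached to IMatching before the drawing calls; this step is assumed and only hinted at in comments:

  vpImage<unsigned char> IMatching;
  keypoint.createImageMatching(Iref, Icur, IMatching); // allocate an image large enough to hold both views
  keypoint.insertImageMatching(Iref, Icur, IMatching); // copy the reference and current images side by side

  // vpDisplayX d(IMatching);      // display attachment assumed, omitted here
  // vpDisplay::display(IMatching);
  keypoint.displayMatching(Iref, IMatching, 3 /*crossSize*/, 1 /*lineThickness*/);
  // vpDisplay::flush(IMatching);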
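The learned reference data (train keypoints, descriptors and, by default, the training images) can be written to disk with saveLearningData() and restored later with loadLearningData(). A sketch, still reusing Iref and Icur from the first example and a placeholder file name:

  vpKeyPoint learner;
  learner.buildReference(Iref);
  learner.saveLearningData("learning_data.bin", true);   // binary mode; training images saved by default

  vpKeyPoint detector;
  detector.loadLearningData("learning_data.bin", true);  // binary mode, do not append to existing data
  detector.matchPoint(Icur);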
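The detector, extractor, matcher and match-filtering method can also be changed after construction through the setters above. A sketch; the "FAST" and "ORB" names are standard OpenCV feature names and the "threshold" parameter passed to setDetectorParameter() follows the OpenCV 2.x cv::Algorithm::set() convention, so treat these values as illustrative:

  vpKeyPoint keypoint;
  keypoint.setDetector("FAST");
  keypoint.setExtractor("ORB");
  keypoint.setMatcher("BruteForce-Hamming");
  keypoint.setFilterMatchingType(vpKeyPoint::ratioDistanceThreshold); // enables knn matching with k = 2
  keypoint.setMatchingRatioThreshold(0.8);
  keypoint.setDetectorParameter("FAST", "threshold", 30);             // forwarded to the underlying cv::FeatureDetector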
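When the reference has been built with known 3D coordinates (see the buildReference() overloads taking points3f), matchPoint() can also estimate the pose cMo of the object, optionally refined by Virtual Visual Servoing and with a covariance matrix. A sketch of a plausible configuration; the numeric values are arbitrary examples and cam is assumed to hold calibrated intrinsic parameters:

  keypoint.setUseRansacVVS(true);           // pose refined with a Virtual Visual Servoing approach
  keypoint.setCovarianceComputation(true);  // only meaningful together with the VVS pose estimation
  keypoint.setRansacIteration(200);
  keypoint.setRansacThreshold(0.001);
  keypoint.setRansacMinInlierCount(15);
  keypoint.setUseRansacConsensusPercentage(false);

  vpCameraParameters cam;                   // assumed calibrated elsewhere
  vpHomogeneousMatrix cMo;
  double error, elapsedTime;
  if (keypoint.matchPoint(Icur, cam, cMo, error, elapsedTime)) {
    vpMatrix covariance = keypoint.getCovarianceMatrix();
    std::cout << "Estimated pose:\n" << cMo << std::endl;
  }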