38 #ifndef _vpKeyPoint_h_
39 #define _vpKeyPoint_h_
51 #include <visp3/core/vpConfig.h>
52 #include <visp3/core/vpDisplay.h>
53 #include <visp3/core/vpImageConvert.h>
54 #include <visp3/core/vpPixelMeterConversion.h>
55 #include <visp3/core/vpPlane.h>
56 #include <visp3/core/vpPoint.h>
57 #include <visp3/vision/vpBasicKeyPoint.h>
58 #include <visp3/vision/vpPose.h>
59 #ifdef VISP_HAVE_MODULE_IO
60 #include <visp3/io/vpImageIo.h>
62 #include <visp3/core/vpConvert.h>
63 #include <visp3/core/vpCylinder.h>
64 #include <visp3/core/vpMeterPixelConversion.h>
65 #include <visp3/core/vpPolygon.h>
66 #include <visp3/vision/vpXmlConfigParserKeyPoint.h>
69 #if (VISP_HAVE_OPENCV_VERSION >= 0x020101)
71 #include <opencv2/calib3d/calib3d.hpp>
72 #include <opencv2/features2d/features2d.hpp>
73 #include <opencv2/imgproc/imgproc.hpp>
75 #if (VISP_HAVE_OPENCV_VERSION >= 0x040000)
76 #include <opencv2/imgproc.hpp>
77 #include <opencv2/imgproc/imgproc_c.h>
80 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
81 #include <opencv2/xfeatures2d.hpp>
82 #elif defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && \
83 (VISP_HAVE_OPENCV_VERSION < 0x030000)
84 #include <opencv2/nonfree/nonfree.hpp>
228 constantFactorDistanceThreshold,
230 stdDistanceThreshold,
232 ratioDistanceThreshold,
235 stdAndRatioDistanceThreshold,
259 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
266 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
269 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
270 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
273 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
276 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
281 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
290 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
293 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
297 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
298 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
301 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
304 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
307 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
312 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
314 DESCRIPTOR_BoostDesc,
320 vpKeyPoint(
const vpFeatureDetectorType &detectorType,
const vpFeatureDescriptorType &descriptorType,
321 const std::string &matcherName,
const vpFilterMatchingType &filterType = ratioDistanceThreshold);
322 vpKeyPoint(
const std::string &detectorName =
"ORB",
const std::string &extractorName =
"ORB",
323 const std::string &matcherName =
"BruteForce-Hamming",
324 const vpFilterMatchingType &filterType = ratioDistanceThreshold);
325 vpKeyPoint(
const std::vector<std::string> &detectorNames,
const std::vector<std::string> &extractorNames,
326 const std::string &matcherName =
"BruteForce",
327 const vpFilterMatchingType &filterType = ratioDistanceThreshold);
335 std::vector<cv::Point3f> &points3f,
bool append =
false,
int class_id = -1);
337 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
338 bool append =
false,
int class_id = -1);
346 std::vector<cv::Point3f> &points3f,
bool append =
false,
int class_id = -1);
348 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
349 bool append =
false,
int class_id = -1);
351 static void compute3D(
const cv::KeyPoint &candidate,
const std::vector<vpPoint> &roi,
const vpCameraParameters &cam,
358 std::vector<cv::KeyPoint> &candidates,
359 const std::vector<vpPolygon> &polygons,
360 const std::vector<std::vector<vpPoint> > &roisPt,
361 std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);
364 std::vector<vpImagePoint> &candidates,
365 const std::vector<vpPolygon> &polygons,
366 const std::vector<std::vector<vpPoint> > &roisPt,
367 std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);
371 std::vector<cv::KeyPoint> &candidates,
const std::vector<vpCylinder> &cylinders,
372 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
373 std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);
377 std::vector<vpImagePoint> &candidates,
const std::vector<vpCylinder> &cylinders,
378 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
379 std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);
381 bool computePose(
const std::vector<cv::Point2f> &imagePoints,
const std::vector<cv::Point3f> &objectPoints,
385 bool computePose(
const std::vector<vpPoint> &objectVpPoints,
vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
388 bool computePose(
const std::vector<vpPoint> &objectVpPoints,
vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
389 std::vector<unsigned int> &inlierIndex,
double &elapsedTime,
402 void detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
const cv::Mat &mask = cv::Mat());
405 void detect(
const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
407 void detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
408 const cv::Mat &mask = cv::Mat());
410 void detectExtractAffine(
const vpImage<unsigned char> &I, std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
411 std::vector<cv::Mat> &listOfDescriptors,
422 const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
423 unsigned int crossSize = 3,
unsigned int lineThickness = 1);
429 const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
430 unsigned int crossSize = 3,
unsigned int lineThickness = 1);
433 std::vector<cv::Point3f> *trainPoints = NULL);
434 void extract(
const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
435 std::vector<cv::Point3f> *trainPoints = NULL);
436 void extract(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
437 std::vector<cv::Point3f> *trainPoints = NULL);
439 double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
440 void extract(
const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
441 double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
442 void extract(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
double &elapsedTime,
443 std::vector<cv::Point3f> *trainPoints = NULL);
456 if (!m_computeCovariance) {
457 std::cout <<
"Warning : The covariance matrix has not been computed. "
458 "See setCovarianceComputation() to do it."
463 if (m_computeCovariance && !m_useRansacVVS) {
464 std::cout <<
"Warning : The covariance matrix can only be computed "
465 "with a Virtual Visual Servoing approach."
467 <<
"Use setUseRansacVVS(true) to choose to use a pose "
468 "estimation method based on a Virtual Visual Servoing "
474 return m_covarianceMatrix;
493 std::map<vpFeatureDetectorType, std::string>::const_iterator it_name = m_mapOfDetectorNames.find(type);
494 if (it_name == m_mapOfDetectorNames.end()) {
495 std::cerr <<
"Internal problem with the feature type and the "
496 "corresponding name!"
500 std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector =
501 m_detectors.find(it_name->second);
502 if (findDetector != m_detectors.end()) {
503 return findDetector->second;
506 std::cerr <<
"Cannot find: " << it_name->second << std::endl;
507 return cv::Ptr<cv::FeatureDetector>();
517 inline cv::Ptr<cv::FeatureDetector>
getDetector(
const std::string &name)
const
519 std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector = m_detectors.find(name);
520 if (findDetector != m_detectors.end()) {
521 return findDetector->second;
524 std::cerr <<
"Cannot find: " << name << std::endl;
525 return cv::Ptr<cv::FeatureDetector>();
531 inline std::map<vpFeatureDetectorType, std::string>
getDetectorNames()
const {
return m_mapOfDetectorNames; }
549 std::map<vpFeatureDescriptorType, std::string>::const_iterator it_name = m_mapOfDescriptorNames.find(type);
550 if (it_name == m_mapOfDescriptorNames.end()) {
551 std::cerr <<
"Internal problem with the feature type and the "
552 "corresponding name!"
556 std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor =
557 m_extractors.find(it_name->second);
558 if (findExtractor != m_extractors.end()) {
559 return findExtractor->second;
562 std::cerr <<
"Cannot find: " << it_name->second << std::endl;
563 return cv::Ptr<cv::DescriptorExtractor>();
573 inline cv::Ptr<cv::DescriptorExtractor>
getExtractor(
const std::string &name)
const
575 std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor = m_extractors.find(name);
576 if (findExtractor != m_extractors.end()) {
577 return findExtractor->second;
580 std::cerr <<
"Cannot find: " << name << std::endl;
581 return cv::Ptr<cv::DescriptorExtractor>();
587 inline std::map<vpFeatureDescriptorType, std::string>
getExtractorNames()
const {
return m_mapOfDescriptorNames; }
608 inline cv::Ptr<cv::DescriptorMatcher>
getMatcher()
const {
return m_matcher; }
616 inline std::vector<cv::DMatch>
getMatches()
const {
return m_filteredMatches; }
627 std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > matchQueryToTrainKeyPoints(m_filteredMatches.size());
628 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
629 matchQueryToTrainKeyPoints.push_back(
630 std::pair<cv::KeyPoint, cv::KeyPoint>(m_queryFilteredKeyPoints[(
size_t)m_filteredMatches[i].queryIdx],
631 m_trainKeyPoints[(
size_t)m_filteredMatches[i].trainIdx]));
633 return matchQueryToTrainKeyPoints;
641 inline unsigned int getNbImages()
const {
return static_cast<unsigned int>(m_mapOfImages.size()); }
643 void getObjectPoints(std::vector<cv::Point3f> &objectPoints)
const;
644 void getObjectPoints(std::vector<vpPoint> &objectPoints)
const;
661 void getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints,
bool matches =
true)
const;
662 void getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints,
bool matches =
true)
const;
686 void getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints)
const;
687 void getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints)
const;
689 void getTrainPoints(std::vector<cv::Point3f> &points)
const;
690 void getTrainPoints(std::vector<vpPoint> &points)
const;
692 void initMatcher(
const std::string &matcherName);
701 void loadConfigFile(
const std::string &configFile);
703 void loadLearningData(
const std::string &filename,
bool binaryMode =
false,
bool append =
false);
705 void match(
const cv::Mat &trainDescriptors,
const cv::Mat &queryDescriptors, std::vector<cv::DMatch> &matches,
706 double &elapsedTime);
713 unsigned int matchPoint(
const std::vector<cv::KeyPoint> &queryKeyPoints,
const cv::Mat &queryDescriptors);
721 const bool isPlanarObject =
true, std::vector<vpImagePoint> *imPts1 = NULL,
722 std::vector<vpImagePoint> *imPts2 = NULL,
double *meanDescriptorDistance = NULL,
723 double *detectionScore = NULL,
const vpRect &rectangle =
vpRect());
726 double &error,
double &elapsedTime,
vpRect &boundingBox,
vpImagePoint ¢erOfGravity,
742 void saveLearningData(
const std::string &filename,
bool binaryMode =
false,
bool saveTrainingImages =
true);
752 m_computeCovariance = flag;
753 if (!m_useRansacVVS) {
754 std::cout <<
"Warning : The covariance matrix can only be computed "
755 "with a Virtual Visual Servoing approach."
757 <<
"Use setUseRansacVVS(true) to choose to use a pose "
758 "estimation method based on a Virtual "
759 "Visual Servoing approach."
778 m_detectorNames.clear();
779 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
781 initDetector(m_mapOfDetectorNames[detectorType]);
791 m_detectorNames.clear();
792 m_detectorNames.push_back(detectorName);
794 initDetector(detectorName);
797 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
806 template <
typename T1,
typename T2,
typename T3>
807 inline void setDetectorParameter(
const T1 detectorName,
const T2 parameterName,
const T3 value)
809 if (m_detectors.find(detectorName) != m_detectors.end()) {
810 m_detectors[detectorName]->set(parameterName, value);
821 inline void setDetectors(
const std::vector<std::string> &detectorNames)
823 m_detectorNames.clear();
825 m_detectorNames = detectorNames;
826 initDetectors(m_detectorNames);
836 m_extractorNames.clear();
837 m_extractorNames.push_back(m_mapOfDescriptorNames[extractorType]);
838 m_extractors.clear();
839 initExtractor(m_mapOfDescriptorNames[extractorType]);
850 m_extractorNames.clear();
851 m_extractorNames.push_back(extractorName);
852 m_extractors.clear();
853 initExtractor(extractorName);
856 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
865 template <
typename T1,
typename T2,
typename T3>
866 inline void setExtractorParameter(
const T1 extractorName,
const T2 parameterName,
const T3 value)
868 if (m_extractors.find(extractorName) != m_extractors.end()) {
869 m_extractors[extractorName]->set(parameterName, value);
882 m_extractorNames.clear();
883 m_extractorNames = extractorNames;
884 m_extractors.clear();
885 initExtractors(m_extractorNames);
912 m_matcherName = matcherName;
913 initMatcher(m_matcherName);
940 m_filterType = filterType;
944 if (filterType == ratioDistanceThreshold || filterType == stdAndRatioDistanceThreshold) {
947 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
948 if (m_matcher != NULL && m_matcherName ==
"BruteForce") {
951 m_matcher->set(
"crossCheck",
false);
957 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
958 if (m_matcher != NULL && m_matcherName ==
"BruteForce") {
961 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
976 m_matchingFactorThreshold = factor;
989 if (ratio > 0.0 && (ratio < 1.0 || std::fabs(ratio - 1.0) < std::numeric_limits<double>::epsilon())) {
990 m_matchingRatioThreshold = ratio;
1004 if (percentage > 0.0 &&
1005 (percentage < 100.0 || std::fabs(percentage - 100.0) < std::numeric_limits<double>::epsilon())) {
1006 m_ransacConsensusPercentage = percentage;
1026 m_nbRansacIterations = nbIter;
1056 if (reprojectionError > 0.0) {
1057 m_ransacReprojectionError = reprojectionError;
1060 "threshold must be positive "
1061 "as we deal with distance.");
1073 m_nbRansacMinInlierCount = minCount;
1087 if (threshold > 0.0) {
1088 m_ransacThreshold = threshold;
1103 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1110 inline void setUseBruteForceCrossCheck(
bool useCrossCheck)
1114 if (m_matcher != NULL && !m_useKnn && m_matcherName ==
"BruteForce") {
1115 m_matcher->set(
"crossCheck", useCrossCheck);
1116 }
else if (m_matcher != NULL && m_useKnn && m_matcherName ==
"BruteForce") {
1117 std::cout <<
"Warning, you try to set the crossCheck parameter with a "
1118 "BruteForce matcher but knn is enabled";
1119 std::cout <<
" (the filtering method uses a ratio constraint)" << std::endl;
1161 bool m_computeCovariance;
1165 int m_currentImageId;
1168 vpDetectionMethodType m_detectionMethod;
1170 double m_detectionScore;
1173 double m_detectionThreshold;
1175 double m_detectionTime;
1177 std::vector<std::string> m_detectorNames;
1181 std::map<std::string, cv::Ptr<cv::FeatureDetector> > m_detectors;
1183 double m_extractionTime;
1185 std::vector<std::string> m_extractorNames;
1189 std::map<std::string, cv::Ptr<cv::DescriptorExtractor> > m_extractors;
1191 std::vector<cv::DMatch> m_filteredMatches;
1193 vpFilterMatchingType m_filterType;
1195 vpImageFormatType m_imageFormat;
1198 std::vector<std::vector<cv::DMatch> > m_knnMatches;
1200 std::map<vpFeatureDescriptorType, std::string> m_mapOfDescriptorNames;
1202 std::map<vpFeatureDetectorType, std::string> m_mapOfDetectorNames;
1205 std::map<int, int> m_mapOfImageId;
1208 std::map<int, vpImage<unsigned char> > m_mapOfImages;
1211 cv::Ptr<cv::DescriptorMatcher> m_matcher;
1213 std::string m_matcherName;
1215 std::vector<cv::DMatch> m_matches;
1217 double m_matchingFactorThreshold;
1219 double m_matchingRatioThreshold;
1221 double m_matchingTime;
1223 std::vector<std::pair<cv::KeyPoint, cv::Point3f> > m_matchRansacKeyPointsToPoints;
1225 int m_nbRansacIterations;
1227 int m_nbRansacMinInlierCount;
1230 std::vector<cv::Point3f> m_objectFilteredPoints;
1235 cv::Mat m_queryDescriptors;
1237 std::vector<cv::KeyPoint> m_queryFilteredKeyPoints;
1239 std::vector<cv::KeyPoint> m_queryKeyPoints;
1242 double m_ransacConsensusPercentage;
1246 std::vector<vpImagePoint> m_ransacInliers;
1248 std::vector<vpImagePoint> m_ransacOutliers;
1250 bool m_ransacParallel;
1252 unsigned int m_ransacParallelNbThreads;
1255 double m_ransacReprojectionError;
1258 double m_ransacThreshold;
1262 cv::Mat m_trainDescriptors;
1264 std::vector<cv::KeyPoint> m_trainKeyPoints;
1267 std::vector<cv::Point3f> m_trainPoints;
1270 std::vector<vpPoint> m_trainVpPoints;
1273 bool m_useAffineDetection;
1274 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1278 bool m_useBruteForceCrossCheck;
1282 bool m_useConsensusPercentage;
1289 bool m_useMatchTrainToQuery;
1291 bool m_useRansacVVS;
1294 bool m_useSingleMatchFilter;
1300 void affineSkew(
double tilt,
double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai);
1302 double computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
1305 void filterMatches();
1308 void initDetector(
const std::string &detectorNames);
1309 void initDetectors(
const std::vector<std::string> &detectorNames);
1311 void initExtractor(
const std::string &extractorName);
1312 void initExtractors(
const std::vector<std::string> &extractorNames);
1314 void initFeatureNames();
1316 inline size_t myKeypointHash(
const cv::KeyPoint &kp)
1318 size_t _Val = 2166136261U, scale = 16777619U;
1321 _Val = (scale * _Val) ^ u.u;
1323 _Val = (scale * _Val) ^ u.u;
1325 _Val = (scale * _Val) ^ u.u;
1331 _Val = (scale * _Val) ^ u.u;
1332 _Val = (scale * _Val) ^ ((
size_t)kp.octave);
1333 _Val = (scale * _Val) ^ ((
size_t)kp.class_id);
1337 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
1343 class PyramidAdaptedFeatureDetector :
public cv::FeatureDetector
1347 PyramidAdaptedFeatureDetector(
const cv::Ptr<cv::FeatureDetector> &detector,
int maxLevel = 2);
1350 virtual bool empty()
const;
1353 virtual void detect(cv::InputArray image, CV_OUT std::vector<cv::KeyPoint> &keypoints,
1354 cv::InputArray mask = cv::noArray());
1355 virtual void detectImpl(
const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
1356 const cv::Mat &mask = cv::Mat())
const;
1358 cv::Ptr<cv::FeatureDetector> detector;
1368 class KeyPointsFilter
1371 KeyPointsFilter() {}
1376 static void runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
int borderSize);
1380 static void runByKeypointSize(std::vector<cv::KeyPoint> &keypoints,
float minSize,
float maxSize = FLT_MAX);
1384 static void runByPixelsMask(std::vector<cv::KeyPoint> &keypoints,
const cv::Mat &mask);
1388 static void removeDuplicated(std::vector<cv::KeyPoint> &keypoints);
1394 static void retainBest(std::vector<cv::KeyPoint> &keypoints,
int npoints);
Class that defines what a keypoint is. This class provides all the basic elements to implement classe...
virtual unsigned int buildReference(const vpImage< unsigned char > &I)=0
virtual unsigned int matchPoint(const vpImage< unsigned char > &I)=0
virtual void display(const vpImage< unsigned char > &Iref, const vpImage< unsigned char > &Icurrent, unsigned int size=3)=0
Generic class defining intrinsic camera parameters.
Class to define RGB colors available for display functionalities.
static const vpColor green
Error that can be emitted by ViSP classes.
@ badValue
Used to indicate that a value is not in the allowed range.
Implementation of an homogeneous matrix and operations on such kind of matrices.
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Class that allows keypoints detection (and descriptors extraction) and matching thanks to OpenCV libr...
double getDetectionTime() const
std::vector< vpImagePoint > getRansacInliers() const
void setMatchingFactorThreshold(const double factor)
void setRansacConsensusPercentage(double percentage)
cv::Ptr< cv::DescriptorMatcher > getMatcher() const
void setRansacParallel(bool parallel)
void setRansacReprojectionError(double reprojectionError)
void setExtractor(const std::string &extractorName)
void setUseSingleMatchFilter(bool singleMatchFilter)
void setFilterMatchingType(const vpFilterMatchingType &filterType)
void setRansacParallelNbThreads(unsigned int nthreads)
double getExtractionTime() const
void setUseRansacVVS(bool ransacVVS)
void setDetectors(const std::vector< std::string > &detectorNames)
void setExtractors(const std::vector< std::string > &extractorNames)
cv::Mat getTrainDescriptors() const
void setExtractor(const vpFeatureDescriptorType &extractorType)
void setImageFormat(const vpImageFormatType &imageFormat)
void setRansacThreshold(double threshold)
void setRansacMinInlierCount(int minCount)
void setRansacFilterFlag(const vpPose::RANSAC_FILTER_FLAGS &flag)
double getPoseTime() const
std::map< vpFeatureDetectorType, std::string > getDetectorNames() const
unsigned int getNbImages() const
double getMatchingTime() const
std::vector< vpImagePoint > getRansacOutliers() const
std::vector< cv::DMatch > getMatches() const
std::map< vpFeatureDescriptorType, std::string > getExtractorNames() const
void setMatcher(const std::string &matcherName)
vpImageFormatType getImageFormat() const
cv::Ptr< cv::DescriptorExtractor > getExtractor(const std::string &name) const
void setUseAffineDetection(bool useAffine)
void setUseRansacConsensusPercentage(bool usePercentage)
void setMatchingRatioThreshold(double ratio)
void setCovarianceComputation(const bool &flag)
void setDetector(const vpFeatureDetectorType &detectorType)
cv::Ptr< cv::DescriptorExtractor > getExtractor(const vpFeatureDescriptorType &type) const
void setUseMatchTrainToQuery(bool useMatchTrainToQuery)
vpMatrix getCovarianceMatrix() const
void setDetectionMethod(const vpDetectionMethodType &method)
cv::Ptr< cv::FeatureDetector > getDetector(const std::string &name) const
void setDetector(const std::string &detectorName)
void setMaxFeatures(int maxFeatures)
std::vector< std::pair< cv::KeyPoint, cv::KeyPoint > > getMatchQueryToTrainKeyPoints() const
cv::Ptr< cv::FeatureDetector > getDetector(const vpFeatureDetectorType &type) const
void setRansacIteration(int nbIter)
cv::Mat getQueryDescriptors() const
Implementation of a matrix and operations on matrices.
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
Defines a rectangle in the plane.