33 #ifndef _vpKeyPoint_h_
34 #define _vpKeyPoint_h_
46 #include <visp3/core/vpConfig.h>
47 #include <visp3/core/vpDisplay.h>
48 #include <visp3/core/vpImageConvert.h>
49 #include <visp3/core/vpPixelMeterConversion.h>
50 #include <visp3/core/vpPlane.h>
51 #include <visp3/core/vpPoint.h>
52 #include <visp3/vision/vpBasicKeyPoint.h>
53 #include <visp3/vision/vpPose.h>
54 #ifdef VISP_HAVE_MODULE_IO
55 #include <visp3/io/vpImageIo.h>
57 #include <visp3/core/vpConvert.h>
58 #include <visp3/core/vpCylinder.h>
59 #include <visp3/core/vpMeterPixelConversion.h>
60 #include <visp3/core/vpPolygon.h>
61 #include <visp3/vision/vpXmlConfigParserKeyPoint.h>
64 #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D)
65 #include <opencv2/features2d/features2d.hpp>
66 #include <opencv2/imgproc/imgproc.hpp>
67 #include <opencv2/imgproc/imgproc_c.h>
69 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
70 #include <opencv2/xfeatures2d.hpp>
71 #elif defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && \
72 (VISP_HAVE_OPENCV_VERSION < 0x030000)
73 #include <opencv2/nonfree/nonfree.hpp>
217 constantFactorDistanceThreshold,
219 stdDistanceThreshold,
221 ratioDistanceThreshold,
224 stdAndRatioDistanceThreshold,
251 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
258 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
261 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
262 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
265 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
268 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
273 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
283 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
286 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
290 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
291 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
294 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
297 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
300 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
305 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
307 DESCRIPTOR_BoostDesc,
322 vpKeyPoint(
const vpFeatureDetectorType &detectorType,
const vpFeatureDescriptorType &descriptorType,
323 const std::string &matcherName,
const vpFilterMatchingType &filterType = ratioDistanceThreshold);
334 vpKeyPoint(
const std::string &detectorName =
"ORB",
const std::string &extractorName =
"ORB",
335 const std::string &matcherName =
"BruteForce-Hamming",
336 const vpFilterMatchingType &filterType = ratioDistanceThreshold);
347 vpKeyPoint(
const std::vector<std::string> &detectorNames,
const std::vector<std::string> &extractorNames,
348 const std::string &matcherName =
"BruteForce",
349 const vpFilterMatchingType &filterType = ratioDistanceThreshold);
392 std::vector<cv::Point3f> &points3f,
bool append =
false,
int class_id = -1);
408 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
409 bool append =
false,
int class_id = -1);
452 std::vector<cv::Point3f> &points3f,
bool append =
false,
int class_id = -1);
467 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
468 bool append =
false,
int class_id = -1);
485 static void compute3D(
const cv::KeyPoint &candidate,
const std::vector<vpPoint> &roi,
const vpCameraParameters &cam,
523 std::vector<cv::KeyPoint> &candidates,
524 const std::vector<vpPolygon> &polygons,
525 const std::vector<std::vector<vpPoint> > &roisPt,
526 std::vector<cv::Point3f> &points, cv::Mat *descriptors =
nullptr);
545 std::vector<vpImagePoint> &candidates,
546 const std::vector<vpPolygon> &polygons,
547 const std::vector<std::vector<vpPoint> > &roisPt,
548 std::vector<vpPoint> &points, cv::Mat *descriptors =
nullptr);
567 std::vector<cv::KeyPoint> &candidates,
const std::vector<vpCylinder> &cylinders,
568 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
569 std::vector<cv::Point3f> &points, cv::Mat *descriptors =
nullptr);
588 std::vector<vpImagePoint> &candidates,
const std::vector<vpCylinder> &cylinders,
589 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
590 std::vector<vpPoint> &points, cv::Mat *descriptors =
nullptr);
605 bool computePose(
const std::vector<cv::Point2f> &imagePoints,
const std::vector<cv::Point3f> &objectPoints,
621 bool computePose(
const std::vector<vpPoint> &objectVpPoints,
vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
637 bool computePose(
const std::vector<vpPoint> &objectVpPoints,
vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
638 std::vector<unsigned int> &inlierIndex,
double &elapsedTime,
710 void detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
const cv::Mat &mask = cv::Mat());
731 void detect(
const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
742 void detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
743 const cv::Mat &mask = cv::Mat());
759 void detectExtractAffine(
const vpImage<unsigned char> &I, std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
760 std::vector<cv::Mat> &listOfDescriptors,
825 const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
826 unsigned int crossSize = 3,
unsigned int lineThickness = 1);
868 const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
869 unsigned int crossSize = 3,
unsigned int lineThickness = 1);
882 std::vector<cv::Point3f> *trainPoints =
nullptr);
894 void extract(
const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
895 std::vector<cv::Point3f> *trainPoints =
nullptr);
907 void extract(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
908 std::vector<cv::Point3f> *trainPoints =
nullptr);
922 double &elapsedTime, std::vector<cv::Point3f> *trainPoints =
nullptr);
935 void extract(
const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
936 double &elapsedTime, std::vector<cv::Point3f> *trainPoints =
nullptr);
949 void extract(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
double &elapsedTime,
950 std::vector<cv::Point3f> *trainPoints =
nullptr);
963 if (!m_computeCovariance) {
964 std::cout <<
"Warning : The covariance matrix has not been computed. "
965 "See setCovarianceComputation() to do it."
970 if (m_computeCovariance && !m_useRansacVVS) {
971 std::cout <<
"Warning : The covariance matrix can only be computed "
972 "with a Virtual Visual Servoing approach."
974 <<
"Use setUseRansacVVS(true) to choose to use a pose "
975 "estimation method based on a Virtual Visual Servoing "
981 return m_covarianceMatrix;
1000 std::map<vpFeatureDetectorType, std::string>::const_iterator it_name = m_mapOfDetectorNames.find(type);
1001 if (it_name == m_mapOfDetectorNames.end()) {
1002 std::cerr <<
"Internal problem with the feature type and the "
1003 "corresponding name!"
1007 std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector =
1008 m_detectors.find(it_name->second);
1009 if (findDetector != m_detectors.end()) {
1010 return findDetector->second;
1013 std::cerr <<
"Cannot find: " << it_name->second << std::endl;
1014 return cv::Ptr<cv::FeatureDetector>();
1024 inline cv::Ptr<cv::FeatureDetector>
getDetector(
const std::string &name)
const
1026 std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector = m_detectors.find(name);
1027 if (findDetector != m_detectors.end()) {
1028 return findDetector->second;
1031 std::cerr <<
"Cannot find: " << name << std::endl;
1032 return cv::Ptr<cv::FeatureDetector>();
1038 inline std::map<vpFeatureDetectorType, std::string>
getDetectorNames()
const {
return m_mapOfDetectorNames; }
1056 std::map<vpFeatureDescriptorType, std::string>::const_iterator it_name = m_mapOfDescriptorNames.find(type);
1057 if (it_name == m_mapOfDescriptorNames.end()) {
1058 std::cerr <<
"Internal problem with the feature type and the "
1059 "corresponding name!"
1063 std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor =
1064 m_extractors.find(it_name->second);
1065 if (findExtractor != m_extractors.end()) {
1066 return findExtractor->second;
1069 std::cerr <<
"Cannot find: " << it_name->second << std::endl;
1070 return cv::Ptr<cv::DescriptorExtractor>();
1080 inline cv::Ptr<cv::DescriptorExtractor>
getExtractor(
const std::string &name)
const
1082 std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor = m_extractors.find(name);
1083 if (findExtractor != m_extractors.end()) {
1084 return findExtractor->second;
1087 std::cerr <<
"Cannot find: " << name << std::endl;
1088 return cv::Ptr<cv::DescriptorExtractor>();
1094 inline std::map<vpFeatureDescriptorType, std::string>
getExtractorNames()
const {
return m_mapOfDescriptorNames; }
1115 inline cv::Ptr<cv::DescriptorMatcher>
getMatcher()
const {
return m_matcher; }
1123 inline std::vector<cv::DMatch>
getMatches()
const {
return m_filteredMatches; }
1134 std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > matchQueryToTrainKeyPoints(m_filteredMatches.size());
1135 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
1136 matchQueryToTrainKeyPoints.push_back(
1137 std::pair<cv::KeyPoint, cv::KeyPoint>(m_queryFilteredKeyPoints[(
size_t)m_filteredMatches[i].queryIdx],
1138 m_trainKeyPoints[(
size_t)m_filteredMatches[i].trainIdx]));
1140 return matchQueryToTrainKeyPoints;
1148 inline unsigned int getNbImages()
const {
return static_cast<unsigned int>(m_mapOfImages.size()); }
1157 void getObjectPoints(std::vector<cv::Point3f> &objectPoints)
const;
1166 void getObjectPoints(std::vector<vpPoint> &objectPoints)
const;
1191 void getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints,
bool matches =
true)
const;
1201 void getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints,
bool matches =
true)
const;
1230 void getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints)
const;
1237 void getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints)
const;
1245 void getTrainPoints(std::vector<cv::Point3f> &points)
const;
1253 void getTrainPoints(std::vector<vpPoint> &points)
const;
1260 void initMatcher(
const std::string &matcherName);
1306 void loadConfigFile(
const std::string &configFile);
1316 void loadLearningData(
const std::string &filename,
bool binaryMode =
false,
bool append =
false);
1326 void match(
const cv::Mat &trainDescriptors,
const cv::Mat &queryDescriptors, std::vector<cv::DMatch> &matches,
1327 double &elapsedTime);
1349 unsigned int width);
1369 unsigned int matchPoint(
const std::vector<cv::KeyPoint> &queryKeyPoints,
const cv::Mat &queryDescriptors);
1426 const bool isPlanarObject =
true, std::vector<vpImagePoint> *imPts1 =
nullptr,
1427 std::vector<vpImagePoint> *imPts2 =
nullptr,
double *meanDescriptorDistance =
nullptr,
1428 double *detectionScore =
nullptr,
const vpRect &rectangle =
vpRect());
1450 double &error,
double &elapsedTime,
vpRect &boundingBox,
vpImagePoint ¢erOfGravity,
1473 unsigned int width);
1532 void saveLearningData(
const std::string &filename,
bool binaryMode =
false,
bool saveTrainingImages =
true);
1542 m_computeCovariance = flag;
1543 if (!m_useRansacVVS) {
1544 std::cout <<
"Warning : The covariance matrix can only be computed "
1545 "with a Virtual Visual Servoing approach."
1547 <<
"Use setUseRansacVVS(true) to choose to use a pose "
1548 "estimation method based on a Virtual "
1549 "Visual Servoing approach."
1568 m_detectorNames.clear();
1569 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
1570 m_detectors.clear();
1571 initDetector(m_mapOfDetectorNames[detectorType]);
1581 m_detectorNames.clear();
1582 m_detectorNames.push_back(detectorName);
1583 m_detectors.clear();
1584 initDetector(detectorName);
1587 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1596 template <
typename T1,
typename T2,
typename T3>
1597 inline void setDetectorParameter(
const T1 detectorName,
const T2 parameterName,
const T3 value)
1599 if (m_detectors.find(detectorName) != m_detectors.end()) {
1600 m_detectors[detectorName]->set(parameterName, value);
1613 m_detectorNames.clear();
1614 m_detectors.clear();
1615 m_detectorNames = detectorNames;
1616 initDetectors(m_detectorNames);
1626 m_extractorNames.clear();
1627 m_extractorNames.push_back(m_mapOfDescriptorNames[extractorType]);
1628 m_extractors.clear();
1629 initExtractor(m_mapOfDescriptorNames[extractorType]);
1640 m_extractorNames.clear();
1641 m_extractorNames.push_back(extractorName);
1642 m_extractors.clear();
1643 initExtractor(extractorName);
1646 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1655 template <
typename T1,
typename T2,
typename T3>
1656 inline void setExtractorParameter(
const T1 extractorName,
const T2 parameterName,
const T3 value)
1658 if (m_extractors.find(extractorName) != m_extractors.end()) {
1659 m_extractors[extractorName]->set(parameterName, value);
1672 m_extractorNames.clear();
1673 m_extractorNames = extractorNames;
1674 m_extractors.clear();
1675 initExtractors(m_extractorNames);
1702 m_matcherName = matcherName;
1703 initMatcher(m_matcherName);
1730 m_filterType = filterType;
1734 if (filterType == ratioDistanceThreshold || filterType == stdAndRatioDistanceThreshold) {
1737 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1738 if (m_matcher !=
nullptr && m_matcherName ==
"BruteForce") {
1741 m_matcher->set(
"crossCheck",
false);
1748 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1749 if (m_matcher !=
nullptr && m_matcherName ==
"BruteForce") {
1752 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
1767 m_matchingFactorThreshold = factor;
1781 if (ratio > 0.0 && (ratio < 1.0 || std::fabs(ratio - 1.0) < std::numeric_limits<double>::epsilon())) {
1782 m_matchingRatioThreshold = ratio;
1797 if (percentage > 0.0 &&
1798 (percentage < 100.0 || std::fabs(percentage - 100.0) < std::numeric_limits<double>::epsilon())) {
1799 m_ransacConsensusPercentage = percentage;
1820 m_nbRansacIterations = nbIter;
1851 if (reprojectionError > 0.0) {
1852 m_ransacReprojectionError = reprojectionError;
1856 "threshold must be positive "
1857 "as we deal with distance.");
1869 m_nbRansacMinInlierCount = minCount;
1884 if (threshold > 0.0) {
1885 m_ransacThreshold = threshold;
1901 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1908 inline void setUseBruteForceCrossCheck(
bool useCrossCheck)
1912 if (m_matcher !=
nullptr && !m_useKnn && m_matcherName ==
"BruteForce") {
1913 m_matcher->set(
"crossCheck", useCrossCheck);
1915 else if (m_matcher !=
nullptr && m_useKnn && m_matcherName ==
"BruteForce") {
1916 std::cout <<
"Warning, you try to set the crossCheck parameter with a "
1917 "BruteForce matcher but knn is enabled";
1918 std::cout <<
" (the filtering method uses a ratio constraint)" << std::endl;
1960 bool m_computeCovariance;
1964 int m_currentImageId;
1967 vpDetectionMethodType m_detectionMethod;
1969 double m_detectionScore;
1972 double m_detectionThreshold;
1974 double m_detectionTime;
1976 std::vector<std::string> m_detectorNames;
1980 std::map<std::string, cv::Ptr<cv::FeatureDetector> > m_detectors;
1982 double m_extractionTime;
1984 std::vector<std::string> m_extractorNames;
1988 std::map<std::string, cv::Ptr<cv::DescriptorExtractor> > m_extractors;
1990 std::vector<cv::DMatch> m_filteredMatches;
1992 vpFilterMatchingType m_filterType;
1994 vpImageFormatType m_imageFormat;
1997 std::vector<std::vector<cv::DMatch> > m_knnMatches;
1999 std::map<vpFeatureDescriptorType, std::string> m_mapOfDescriptorNames;
2001 std::map<vpFeatureDetectorType, std::string> m_mapOfDetectorNames;
2004 std::map<int, int> m_mapOfImageId;
2007 std::map<int, vpImage<unsigned char> > m_mapOfImages;
2010 cv::Ptr<cv::DescriptorMatcher> m_matcher;
2012 std::string m_matcherName;
2014 std::vector<cv::DMatch> m_matches;
2016 double m_matchingFactorThreshold;
2018 double m_matchingRatioThreshold;
2020 double m_matchingTime;
2022 std::vector<std::pair<cv::KeyPoint, cv::Point3f> > m_matchRansacKeyPointsToPoints;
2024 int m_nbRansacIterations;
2026 int m_nbRansacMinInlierCount;
2029 std::vector<cv::Point3f> m_objectFilteredPoints;
2034 cv::Mat m_queryDescriptors;
2036 std::vector<cv::KeyPoint> m_queryFilteredKeyPoints;
2038 std::vector<cv::KeyPoint> m_queryKeyPoints;
2041 double m_ransacConsensusPercentage;
2045 std::vector<vpImagePoint> m_ransacInliers;
2047 std::vector<vpImagePoint> m_ransacOutliers;
2049 bool m_ransacParallel;
2051 unsigned int m_ransacParallelNbThreads;
2054 double m_ransacReprojectionError;
2057 double m_ransacThreshold;
2061 cv::Mat m_trainDescriptors;
2063 std::vector<cv::KeyPoint> m_trainKeyPoints;
2066 std::vector<cv::Point3f> m_trainPoints;
2069 std::vector<vpPoint> m_trainVpPoints;
2072 bool m_useAffineDetection;
2073 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2077 bool m_useBruteForceCrossCheck;
2081 bool m_useConsensusPercentage;
2088 bool m_useMatchTrainToQuery;
2090 bool m_useRansacVVS;
2093 bool m_useSingleMatchFilter;
2107 void affineSkew(
double tilt,
double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai);
2123 double computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
2129 void filterMatches();
2142 void initDetector(
const std::string &detectorNames);
2150 void initDetectors(
const std::vector<std::string> &detectorNames);
2157 void initExtractor(
const std::string &extractorName);
2165 void initExtractors(
const std::vector<std::string> &extractorNames);
2170 void initFeatureNames();
2172 inline size_t myKeypointHash(
const cv::KeyPoint &kp)
2174 size_t _Val = 2166136261U, scale = 16777619U;
2177 _Val = (scale * _Val) ^ u.u;
2179 _Val = (scale * _Val) ^ u.u;
2181 _Val = (scale * _Val) ^ u.u;
2187 _Val = (scale * _Val) ^ u.u;
2188 _Val = (scale * _Val) ^ ((
size_t)kp.octave);
2189 _Val = (scale * _Val) ^ ((
size_t)kp.class_id);
2193 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2199 class PyramidAdaptedFeatureDetector :
public cv::FeatureDetector
2203 PyramidAdaptedFeatureDetector(
const cv::Ptr<cv::FeatureDetector> &detector,
int maxLevel = 2);
2206 virtual bool empty()
const;
2209 virtual void detect(cv::InputArray image, CV_OUT std::vector<cv::KeyPoint> &keypoints,
2210 cv::InputArray mask = cv::noArray());
2211 virtual void detectImpl(
const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
2212 const cv::Mat &mask = cv::Mat())
const;
2214 cv::Ptr<cv::FeatureDetector> m_detector;
2224 class KeyPointsFilter
2227 KeyPointsFilter() { }
2232 static void runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
int borderSize);
2236 static void runByKeypointSize(std::vector<cv::KeyPoint> &keypoints,
float minSize,
float maxSize = FLT_MAX);
2240 static void runByPixelsMask(std::vector<cv::KeyPoint> &keypoints,
const cv::Mat &mask);
2244 static void removeDuplicated(std::vector<cv::KeyPoint> &keypoints);
2250 static void retainBest(std::vector<cv::KeyPoint> &keypoints,
int npoints);
Class that defines what is a keypoint. This class provides all the basic elements to implement classe...
virtual unsigned int buildReference(const vpImage< unsigned char > &I)=0
virtual unsigned int matchPoint(const vpImage< unsigned char > &I)=0
virtual void display(const vpImage< unsigned char > &Iref, const vpImage< unsigned char > &Icurrent, unsigned int size=3)=0
Generic class defining intrinsic camera parameters.
Class to define RGB colors available for display functionalities.
static const vpColor green
Error that can be emitted by ViSP classes.
@ badValue
Used to indicate that a value is not in the allowed range.
Implementation of an homogeneous matrix and operations on such kind of matrices.
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Class that allows keypoints detection (and descriptors extraction) and matching thanks to the OpenCV library.
double getDetectionTime() const
std::vector< vpImagePoint > getRansacInliers() const
void setMatchingFactorThreshold(const double factor)
void setRansacConsensusPercentage(double percentage)
cv::Ptr< cv::DescriptorMatcher > getMatcher() const
void setRansacParallel(bool parallel)
void setRansacReprojectionError(double reprojectionError)
void setExtractor(const std::string &extractorName)
void setUseSingleMatchFilter(bool singleMatchFilter)
void setFilterMatchingType(const vpFilterMatchingType &filterType)
void setRansacParallelNbThreads(unsigned int nthreads)
double getExtractionTime() const
void setUseRansacVVS(bool ransacVVS)
void setDetectors(const std::vector< std::string > &detectorNames)
void setExtractors(const std::vector< std::string > &extractorNames)
cv::Mat getTrainDescriptors() const
@ DETECTOR_KAZE
KAZE detector.
@ DETECTOR_BRISK
BRISK detector.
@ DETECTOR_AKAZE
AKAZE detector.
@ DETECTOR_MSER
MSER detector.
@ DETECTOR_SURF
SURF detector.
@ DETECTOR_AGAST
AGAST detector.
@ DETECTOR_SIFT
SIFT detector.
@ DETECTOR_FAST
FAST detector.
@ DETECTOR_GFTT
GFTT detector.
@ DETECTOR_ORB
ORB detector.
@ DETECTOR_SimpleBlob
SimpleBlob detector.
void setExtractor(const vpFeatureDescriptorType &extractorType)
void setImageFormat(const vpImageFormatType &imageFormat)
void setRansacThreshold(double threshold)
void setRansacMinInlierCount(int minCount)
void setRansacFilterFlag(const vpPose::RANSAC_FILTER_FLAGS &flag)
double getPoseTime() const
std::map< vpFeatureDetectorType, std::string > getDetectorNames() const
unsigned int getNbImages() const
double getMatchingTime() const
std::vector< vpImagePoint > getRansacOutliers() const
@ DESCRIPTOR_AKAZE
AKAZE descriptor.
@ DESCRIPTOR_ORB
ORB descriptor.
@ DESCRIPTOR_KAZE
KAZE descriptor.
@ DESCRIPTOR_SURF
SURF descriptor.
@ DESCRIPTOR_BRISK
BRISK descriptor.
@ DESCRIPTOR_SIFT
SIFT descriptor.
std::vector< cv::DMatch > getMatches() const
std::map< vpFeatureDescriptorType, std::string > getExtractorNames() const
void setMatcher(const std::string &matcherName)
vpImageFormatType getImageFormat() const
cv::Ptr< cv::DescriptorExtractor > getExtractor(const std::string &name) const
void setUseAffineDetection(bool useAffine)
void setUseRansacConsensusPercentage(bool usePercentage)
void setMatchingRatioThreshold(double ratio)
void setCovarianceComputation(const bool &flag)
void setDetector(const vpFeatureDetectorType &detectorType)
cv::Ptr< cv::DescriptorExtractor > getExtractor(const vpFeatureDescriptorType &type) const
void setUseMatchTrainToQuery(bool useMatchTrainToQuery)
vpMatrix getCovarianceMatrix() const
void setDetectionMethod(const vpDetectionMethodType &method)
cv::Ptr< cv::FeatureDetector > getDetector(const std::string &name) const
void setDetector(const std::string &detectorName)
void setMaxFeatures(int maxFeatures)
std::vector< std::pair< cv::KeyPoint, cv::KeyPoint > > getMatchQueryToTrainKeyPoints() const
cv::Ptr< cv::FeatureDetector > getDetector(const vpFeatureDetectorType &type) const
void setRansacIteration(int nbIter)
cv::Mat getQueryDescriptors() const
Implementation of a matrix and operations on matrices.
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
Defines a rectangle in the plane.