38 #ifndef _vpKeyPoint_h_ 39 #define _vpKeyPoint_h_ 51 #include <visp3/core/vpConfig.h> 52 #include <visp3/core/vpDisplay.h> 53 #include <visp3/core/vpImageConvert.h> 54 #include <visp3/core/vpPixelMeterConversion.h> 55 #include <visp3/core/vpPlane.h> 56 #include <visp3/core/vpPoint.h> 57 #include <visp3/vision/vpBasicKeyPoint.h> 58 #include <visp3/vision/vpPose.h> 59 #ifdef VISP_HAVE_MODULE_IO 60 # include <visp3/io/vpImageIo.h> 62 #include <visp3/core/vpConvert.h> 63 #include <visp3/core/vpCylinder.h> 64 #include <visp3/core/vpMeterPixelConversion.h> 65 #include <visp3/core/vpPolygon.h> 66 #include <visp3/vision/vpXmlConfigParserKeyPoint.h> 69 #if (VISP_HAVE_OPENCV_VERSION >= 0x020101) 71 # include <opencv2/calib3d/calib3d.hpp> 72 # include <opencv2/features2d/features2d.hpp> 73 # include <opencv2/imgproc/imgproc.hpp> 75 # if (VISP_HAVE_OPENCV_VERSION >= 0x040000) // Require opencv >= 4.0.0 76 # include <opencv2/imgproc/imgproc_c.h> 77 # include <opencv2/imgproc.hpp> 80 # if defined(VISP_HAVE_OPENCV_XFEATURES2D) // OpenCV >= 3.0.0 81 # include <opencv2/xfeatures2d.hpp> 82 # elif defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && \ 83 (VISP_HAVE_OPENCV_VERSION < 0x030000) 84 # include <opencv2/nonfree/nonfree.hpp> 228 constantFactorDistanceThreshold,
230 stdDistanceThreshold,
232 ratioDistanceThreshold,
235 stdAndRatioDistanceThreshold,
259 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403) 266 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D)) 269 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \ 270 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400) 273 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) 276 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 281 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D) 290 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403) 293 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D)) 297 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \ 298 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400) 301 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) 304 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 307 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) 312 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D) 314 DESCRIPTOR_BoostDesc,
321 const std::string &matcherName,
const vpFilterMatchingType &filterType = ratioDistanceThreshold);
322 vpKeyPoint(
const std::string &detectorName =
"ORB",
const std::string &extractorName =
"ORB",
323 const std::string &matcherName =
"BruteForce-Hamming",
325 vpKeyPoint(
const std::vector<std::string> &detectorNames,
const std::vector<std::string> &extractorNames,
326 const std::string &matcherName =
"BruteForce",
335 std::vector<cv::Point3f> &points3f,
bool append =
false,
int class_id = -1);
337 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
338 bool append =
false,
int class_id = -1);
346 std::vector<cv::Point3f> &points3f,
bool append =
false,
int class_id = -1);
348 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
349 bool append =
false,
int class_id = -1);
351 static void compute3D(
const cv::KeyPoint &candidate,
const std::vector<vpPoint> &roi,
const vpCameraParameters &cam,
358 std::vector<cv::KeyPoint> &candidates,
359 const std::vector<vpPolygon> &polygons,
360 const std::vector<std::vector<vpPoint> > &roisPt,
361 std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);
364 std::vector<vpImagePoint> &candidates,
365 const std::vector<vpPolygon> &polygons,
366 const std::vector<std::vector<vpPoint> > &roisPt,
367 std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);
371 std::vector<cv::KeyPoint> &candidates,
const std::vector<vpCylinder> &cylinders,
372 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
373 std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);
377 std::vector<vpImagePoint> &candidates,
const std::vector<vpCylinder> &cylinders,
378 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
379 std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);
381 bool computePose(
const std::vector<cv::Point2f> &imagePoints,
const std::vector<cv::Point3f> &objectPoints,
385 bool computePose(
const std::vector<vpPoint> &objectVpPoints,
vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
388 bool computePose(
const std::vector<vpPoint> &objectVpPoints,
vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
389 std::vector<unsigned int> &inlierIndex,
double &elapsedTime,
402 void detect(
const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints,
404 void detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
const cv::Mat &mask = cv::Mat());
407 void detect(
const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
409 void detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
410 const cv::Mat &mask = cv::Mat());
412 void detectExtractAffine(
const vpImage<unsigned char> &I, std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
413 std::vector<cv::Mat> &listOfDescriptors,
424 const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
425 unsigned int crossSize = 3,
unsigned int lineThickness = 1);
431 const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
432 unsigned int crossSize = 3,
unsigned int lineThickness = 1);
435 std::vector<cv::Point3f> *trainPoints = NULL);
436 void extract(
const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
437 std::vector<cv::Point3f> *trainPoints = NULL);
438 void extract(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
439 std::vector<cv::Point3f> *trainPoints = NULL);
441 double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
442 void extract(
const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
443 double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
444 void extract(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
double &elapsedTime,
445 std::vector<cv::Point3f> *trainPoints = NULL);
458 if (!m_computeCovariance) {
459 std::cout <<
"Warning : The covariance matrix has not been computed. " 460 "See setCovarianceComputation() to do it." 465 if (m_computeCovariance && !m_useRansacVVS) {
466 std::cout <<
"Warning : The covariance matrix can only be computed " 467 "with a Virtual Visual Servoing approach." 469 <<
"Use setUseRansacVVS(true) to choose to use a pose " 470 "estimation method based on a Virtual Visual Servoing " 476 return m_covarianceMatrix;
495 std::map<vpFeatureDetectorType, std::string>::const_iterator it_name = m_mapOfDetectorNames.find(type);
496 if (it_name == m_mapOfDetectorNames.end()) {
497 std::cerr <<
"Internal problem with the feature type and the " 498 "corresponding name!" 502 std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector =
503 m_detectors.find(it_name->second);
504 if (findDetector != m_detectors.end()) {
505 return findDetector->second;
508 std::cerr <<
"Cannot find: " << it_name->second << std::endl;
509 return cv::Ptr<cv::FeatureDetector>();
519 inline cv::Ptr<cv::FeatureDetector>
getDetector(
const std::string &name)
const 521 std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector = m_detectors.find(name);
522 if (findDetector != m_detectors.end()) {
523 return findDetector->second;
526 std::cerr <<
"Cannot find: " << name << std::endl;
527 return cv::Ptr<cv::FeatureDetector>();
533 inline std::map<vpFeatureDetectorType, std::string>
getDetectorNames()
const {
return m_mapOfDetectorNames; }
551 std::map<vpFeatureDescriptorType, std::string>::const_iterator it_name = m_mapOfDescriptorNames.find(type);
552 if (it_name == m_mapOfDescriptorNames.end()) {
553 std::cerr <<
"Internal problem with the feature type and the " 554 "corresponding name!" 558 std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor =
559 m_extractors.find(it_name->second);
560 if (findExtractor != m_extractors.end()) {
561 return findExtractor->second;
564 std::cerr <<
"Cannot find: " << it_name->second << std::endl;
565 return cv::Ptr<cv::DescriptorExtractor>();
575 inline cv::Ptr<cv::DescriptorExtractor>
getExtractor(
const std::string &name)
const 577 std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor = m_extractors.find(name);
578 if (findExtractor != m_extractors.end()) {
579 return findExtractor->second;
582 std::cerr <<
"Cannot find: " << name << std::endl;
583 return cv::Ptr<cv::DescriptorExtractor>();
589 inline std::map<vpFeatureDescriptorType, std::string>
getExtractorNames()
const {
return m_mapOfDescriptorNames; }
610 inline cv::Ptr<cv::DescriptorMatcher>
getMatcher()
const {
return m_matcher; }
618 inline std::vector<cv::DMatch>
getMatches()
const {
return m_filteredMatches; }
629 std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > matchQueryToTrainKeyPoints(m_filteredMatches.size());
630 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
631 matchQueryToTrainKeyPoints.push_back(
632 std::pair<cv::KeyPoint, cv::KeyPoint>(m_queryFilteredKeyPoints[(
size_t)m_filteredMatches[i].queryIdx],
633 m_trainKeyPoints[(
size_t)m_filteredMatches[i].trainIdx]));
635 return matchQueryToTrainKeyPoints;
644 return static_cast<unsigned int>(m_mapOfImages.size());
647 void getObjectPoints(std::vector<cv::Point3f> &objectPoints)
const;
648 void getObjectPoints(std::vector<vpPoint> &objectPoints)
const;
665 void getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints,
bool matches =
true)
const;
666 void getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints,
bool matches =
true)
const;
690 void getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints)
const;
691 void getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints)
const;
693 void getTrainPoints(std::vector<cv::Point3f> &points)
const;
694 void getTrainPoints(std::vector<vpPoint> &points)
const;
696 void initMatcher(
const std::string &matcherName);
706 void loadConfigFile(
const std::string &configFile);
708 void loadLearningData(
const std::string &filename,
bool binaryMode =
false,
bool append =
false);
710 void match(
const cv::Mat &trainDescriptors,
const cv::Mat &queryDescriptors, std::vector<cv::DMatch> &matches,
711 double &elapsedTime);
718 unsigned int matchPoint(
const std::vector<cv::KeyPoint> &queryKeyPoints,
const cv::Mat &queryDescriptors);
726 const bool isPlanarObject =
true, std::vector<vpImagePoint> *imPts1 = NULL,
727 std::vector<vpImagePoint> *imPts2 = NULL,
double *meanDescriptorDistance = NULL,
728 double *detectionScore = NULL,
const vpRect &rectangle =
vpRect());
731 double &error,
double &elapsedTime,
vpRect &boundingBox,
vpImagePoint ¢erOfGravity,
747 void saveLearningData(
const std::string &filename,
bool binaryMode =
false,
748 bool saveTrainingImages =
true);
758 m_computeCovariance = flag;
759 if (!m_useRansacVVS) {
760 std::cout <<
"Warning : The covariance matrix can only be computed " 761 "with a Virtual Visual Servoing approach." 763 <<
"Use setUseRansacVVS(true) to choose to use a pose " 764 "estimation method based on a Virtual " 765 "Visual Servoing approach." 784 m_detectorNames.clear();
785 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
787 initDetector(m_mapOfDetectorNames[detectorType]);
797 m_detectorNames.clear();
798 m_detectorNames.push_back(detectorName);
800 initDetector(detectorName);
803 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) 812 template <
typename T1,
typename T2,
typename T3>
813 inline void setDetectorParameter(
const T1 detectorName,
const T2 parameterName,
const T3 value)
815 if (m_detectors.find(detectorName) != m_detectors.end()) {
816 m_detectors[detectorName]->set(parameterName, value);
827 inline void setDetectors(
const std::vector<std::string> &detectorNames)
829 m_detectorNames.clear();
831 m_detectorNames = detectorNames;
832 initDetectors(m_detectorNames);
842 m_extractorNames.clear();
843 m_extractorNames.push_back(m_mapOfDescriptorNames[extractorType]);
844 m_extractors.clear();
845 initExtractor(m_mapOfDescriptorNames[extractorType]);
856 m_extractorNames.clear();
857 m_extractorNames.push_back(extractorName);
858 m_extractors.clear();
859 initExtractor(extractorName);
862 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) 871 template <
typename T1,
typename T2,
typename T3>
872 inline void setExtractorParameter(
const T1 extractorName,
const T2 parameterName,
const T3 value)
874 if (m_extractors.find(extractorName) != m_extractors.end()) {
875 m_extractors[extractorName]->set(parameterName, value);
888 m_extractorNames.clear();
889 m_extractorNames = extractorNames;
890 m_extractors.clear();
891 initExtractors(m_extractorNames);
899 inline void setImageFormat(
const vpImageFormatType &imageFormat) { m_imageFormat = imageFormat; }
918 m_matcherName = matcherName;
919 initMatcher(m_matcherName);
929 m_maxFeatures = maxFeatures;
949 m_filterType = filterType;
953 if (filterType == ratioDistanceThreshold || filterType == stdAndRatioDistanceThreshold) {
956 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) 957 if (m_matcher != NULL && m_matcherName ==
"BruteForce") {
960 m_matcher->set(
"crossCheck",
false);
966 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) 967 if (m_matcher != NULL && m_matcherName ==
"BruteForce") {
970 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
985 m_matchingFactorThreshold = factor;
998 if (ratio > 0.0 && (ratio < 1.0 || std::fabs(ratio - 1.0) < std::numeric_limits<double>::epsilon())) {
999 m_matchingRatioThreshold = ratio;
1013 if (percentage > 0.0 &&
1014 (percentage < 100.0 || std::fabs(percentage - 100.0) < std::numeric_limits<double>::epsilon())) {
1015 m_ransacConsensusPercentage = percentage;
1026 m_ransacFilterFlag = flag;
1038 m_nbRansacIterations = nbIter;
1051 m_ransacParallel = parallel;
1062 m_ransacParallelNbThreads = nthreads;
1074 if (reprojectionError > 0.0) {
1075 m_ransacReprojectionError = reprojectionError;
1078 "threshold must be positive " 1079 "as we deal with distance.");
1091 m_nbRansacMinInlierCount = minCount;
1105 if (threshold > 0.0) {
1106 m_ransacThreshold = threshold;
1121 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) 1128 inline void setUseBruteForceCrossCheck(
bool useCrossCheck)
1132 if (m_matcher != NULL && !m_useKnn && m_matcherName ==
"BruteForce") {
1133 m_matcher->set(
"crossCheck", useCrossCheck);
1134 }
else if (m_matcher != NULL && m_useKnn && m_matcherName ==
"BruteForce") {
1135 std::cout <<
"Warning, you try to set the crossCheck parameter with a " 1136 "BruteForce matcher but knn is enabled";
1137 std::cout <<
" (the filtering method uses a ratio constraint)" << std::endl;
1150 m_useMatchTrainToQuery = useMatchTrainToQuery;
1182 bool m_computeCovariance;
1186 int m_currentImageId;
1191 double m_detectionScore;
1194 double m_detectionThreshold;
1196 double m_detectionTime;
1198 std::vector<std::string> m_detectorNames;
1202 std::map<std::string, cv::Ptr<cv::FeatureDetector> > m_detectors;
1204 double m_extractionTime;
1206 std::vector<std::string> m_extractorNames;
1210 std::map<std::string, cv::Ptr<cv::DescriptorExtractor> > m_extractors;
1212 std::vector<cv::DMatch> m_filteredMatches;
1216 vpImageFormatType m_imageFormat;
1219 std::vector<std::vector<cv::DMatch> > m_knnMatches;
1221 std::map<vpFeatureDescriptorType, std::string> m_mapOfDescriptorNames;
1223 std::map<vpFeatureDetectorType, std::string> m_mapOfDetectorNames;
1226 std::map<int, int> m_mapOfImageId;
1229 std::map<int, vpImage<unsigned char> > m_mapOfImages;
1232 cv::Ptr<cv::DescriptorMatcher> m_matcher;
1234 std::string m_matcherName;
1236 std::vector<cv::DMatch> m_matches;
1238 double m_matchingFactorThreshold;
1240 double m_matchingRatioThreshold;
1242 double m_matchingTime;
1244 std::vector<std::pair<cv::KeyPoint, cv::Point3f> > m_matchRansacKeyPointsToPoints;
1246 int m_nbRansacIterations;
1248 int m_nbRansacMinInlierCount;
1251 std::vector<cv::Point3f> m_objectFilteredPoints;
1256 cv::Mat m_queryDescriptors;
1258 std::vector<cv::KeyPoint> m_queryFilteredKeyPoints;
1260 std::vector<cv::KeyPoint> m_queryKeyPoints;
1263 double m_ransacConsensusPercentage;
1267 std::vector<vpImagePoint> m_ransacInliers;
1269 std::vector<vpImagePoint> m_ransacOutliers;
1271 bool m_ransacParallel;
1273 unsigned int m_ransacParallelNbThreads;
1276 double m_ransacReprojectionError;
1279 double m_ransacThreshold;
1283 cv::Mat m_trainDescriptors;
1285 std::vector<cv::KeyPoint> m_trainKeyPoints;
1288 std::vector<cv::Point3f> m_trainPoints;
1291 std::vector<vpPoint> m_trainVpPoints;
1294 bool m_useAffineDetection;
1295 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) 1296 bool m_useBruteForceCrossCheck;
1301 bool m_useConsensusPercentage;
1310 bool m_useMatchTrainToQuery;
1312 bool m_useRansacVVS;
1315 bool m_useSingleMatchFilter;
1321 void affineSkew(
double tilt,
double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai);
1323 double computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
1326 void filterMatches();
1329 void initDetector(
const std::string &detectorNames);
1330 void initDetectors(
const std::vector<std::string> &detectorNames);
1332 void initExtractor(
const std::string &extractorName);
1333 void initExtractors(
const std::vector<std::string> &extractorNames);
1335 void initFeatureNames();
1337 inline size_t myKeypointHash(
const cv::KeyPoint &kp)
1339 size_t _Val = 2166136261U, scale = 16777619U;
1342 _Val = (scale * _Val) ^ u.u;
1344 _Val = (scale * _Val) ^ u.u;
1346 _Val = (scale * _Val) ^ u.u;
1352 _Val = (scale * _Val) ^ u.u;
1353 _Val = (scale * _Val) ^ ((
size_t)kp.octave);
1354 _Val = (scale * _Val) ^ ((
size_t)kp.class_id);
1358 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 1364 class PyramidAdaptedFeatureDetector :
public cv::FeatureDetector
1368 PyramidAdaptedFeatureDetector(
const cv::Ptr<cv::FeatureDetector> &detector,
int maxLevel = 2);
1371 virtual bool empty()
const;
1374 virtual void detect(cv::InputArray image, CV_OUT std::vector<cv::KeyPoint> &keypoints,
1375 cv::InputArray mask = cv::noArray());
1376 virtual void detectImpl(
const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
1377 const cv::Mat &mask = cv::Mat())
const;
1379 cv::Ptr<cv::FeatureDetector> detector;
1389 class KeyPointsFilter
1392 KeyPointsFilter() {}
1397 static void runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
int borderSize);
1401 static void runByKeypointSize(std::vector<cv::KeyPoint> &keypoints,
float minSize,
float maxSize = FLT_MAX);
1405 static void runByPixelsMask(std::vector<cv::KeyPoint> &keypoints,
const cv::Mat &mask);
1409 static void removeDuplicated(std::vector<cv::KeyPoint> &keypoints);
1415 static void retainBest(std::vector<cv::KeyPoint> &keypoints,
int npoints);
Used to indicate that a value is not in the allowed range.
Implementation of a matrix and operations on matrices.
class that defines what is a Keypoint. This class provides all the basic elements to implement classe...
void setUseMatchTrainToQuery(bool useMatchTrainToQuery)
void setUseRansacConsensusPercentage(bool usePercentage)
void setRansacIteration(int nbIter)
Implementation of an homogeneous matrix and operations on such kind of matrices.
void setRansacMinInlierCount(int minCount)
std::vector< std::pair< cv::KeyPoint, cv::KeyPoint > > getMatchQueryToTrainKeyPoints() const
void setExtractor(const std::string &extractorName)
Class to define RGB colors available for display functionalities.
std::map< vpFeatureDetectorType, std::string > getDetectorNames() const
error that can be emitted by ViSP classes.
void setDetectors(const std::vector< std::string > &detectorNames)
void setMaxFeatures(int maxFeatures)
void setRansacParallel(bool parallel)
static const vpColor green
cv::Ptr< cv::DescriptorExtractor > getExtractor(const vpFeatureDescriptorType &type) const
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
cv::Mat getQueryDescriptors() const
void setExtractors(const std::vector< std::string > &extractorNames)
void setMatcher(const std::string &matcherName)
cv::Ptr< cv::DescriptorMatcher > getMatcher() const
void setRansacFilterFlag(const vpPose::RANSAC_FILTER_FLAGS &flag)
virtual unsigned int buildReference(const vpImage< unsigned char > &I)=0
double getDetectionTime() const
Generic class defining intrinsic camera parameters.
void setDetector(const vpFeatureDetectorType &detectorType)
void setUseAffineDetection(bool useAffine)
double getMatchingTime() const
void setRansacConsensusPercentage(double percentage)
void setDetectionMethod(const vpDetectionMethodType &method)
void setMatchingRatioThreshold(double ratio)
void setUseSingleMatchFilter(bool singleMatchFilter)
unsigned int getNbImages() const
cv::Ptr< cv::FeatureDetector > getDetector(const std::string &name) const
vpMatrix getCovarianceMatrix() const
void setImageFormat(const vpImageFormatType &imageFormat)
virtual void display(const vpImage< unsigned char > &Iref, const vpImage< unsigned char > &Icurrent, unsigned int size=3)=0
void setRansacParallelNbThreads(unsigned int nthreads)
void setUseRansacVVS(bool ransacVVS)
virtual unsigned int matchPoint(const vpImage< unsigned char > &I)=0
vpImageFormatType getImageFormat() const
std::map< vpFeatureDescriptorType, std::string > getExtractorNames() const
Class that allows keypoints detection (and descriptors extraction) and matching thanks to OpenCV libr...
std::vector< vpImagePoint > getRansacOutliers() const
cv::Mat getTrainDescriptors() const
double getExtractionTime() const
double getPoseTime() const
void setDetector(const std::string &detectorName)
Defines a rectangle in the plane.
cv::Ptr< cv::DescriptorExtractor > getExtractor(const std::string &name) const
void setRansacThreshold(double threshold)
void setRansacReprojectionError(double reprojectionError)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
std::vector< vpImagePoint > getRansacInliers() const
void setFilterMatchingType(const vpFilterMatchingType &filterType)
void setMatchingFactorThreshold(const double factor)
cv::Ptr< cv::FeatureDetector > getDetector(const vpFeatureDetectorType &type) const
void setExtractor(const vpFeatureDescriptorType &extractorType)
std::vector< cv::DMatch > getMatches() const
void setCovarianceComputation(const bool &flag)