34 #include <visp3/core/vpConfig.h>
37 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_CALIB3D) && defined(HAVE_OPENCV_FEATURES2D)) || \
38 ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_3D) && defined(HAVE_OPENCV_FEATURES))
43 #include <visp3/core/vpIoTools.h>
44 #include <visp3/vision/vpKeyPoint.h>
46 #if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
47 #include <opencv2/3d.hpp>
48 #include <opencv2/features.hpp>
51 #if (VISP_HAVE_OPENCV_VERSION <0x050000)
52 #include <opencv2/calib3d/calib3d.hpp>
55 #if defined(HAVE_OPENCV_XFEATURES2D)
56 #include <opencv2/xfeatures2d.hpp>
59 #if defined(VISP_HAVE_PUGIXML)
60 #include <pugixml.hpp>
65 #ifndef DOXYGEN_SHOULD_SKIP_THIS
// NOTE(review): this region is an elided extraction — the helper bodies below are
// only partially visible (original lines 66-68, 70, 72-77, 79+ are missing).
// Presumably selects one cv::DMatch (the best candidate) from a k-NN match list
// when it is non-empty — TODO confirm against the complete source.
69 inline cv::DMatch knnToDMatch(
const std::vector<cv::DMatch> &knnMatches)
71 if (knnMatches.size() > 0) {
// Presumably converts a matched (cv::KeyPoint, cv::Point3f) pair into a
// vpImagePoint for display/RANSAC bookkeeping — body not visible here.
78 inline vpImagePoint matchRansacToVpImage(
const std::pair<cv::KeyPoint, cv::Point3f> &pair)
// vpKeyPoint constructor (enum-typed detector/descriptor variant) — member
// initializer list establishing the class defaults: score-based detection (0.15),
// detection threshold 100, matching factor 2.0, ratio 0.85, 200 RANSAC iterations,
// min 100 inliers, 20% consensus, 6.0 px reprojection error, 0.01 RANSAC threshold.
// NOTE(review): the constructor signature and parts of the body (original lines
// 85-88, 101, 104-106, 109+) are missing from this extraction.
89 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
90 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
91 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
92 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
93 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
94 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
95 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
96 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
97 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
98 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
99 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
100 m_useBruteForceCrossCheck(true),
102 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
103 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
// Resolve the enum identifiers to detector/descriptor name strings via the
// internal lookup maps.
107 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
108 m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);
// vpKeyPoint constructor (string-named detector/extractor variant) — identical
// defaults to the enum-typed constructor, but takes the detector/extractor by
// name directly. NOTE(review): signature and parts of the body (original lines
// 110-114, 127, 130-132, 135+) are missing from this extraction.
115 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
116 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
117 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
118 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
119 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
120 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
121 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
122 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
123 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
124 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
125 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
126 m_useBruteForceCrossCheck(true),
128 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
129 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
// Register the caller-supplied detector/extractor names.
133 m_detectorNames.push_back(detectorName);
134 m_extractorNames.push_back(extractorName);
// vpKeyPoint constructor (vector-of-names variant) — same defaults again, but the
// detector and extractor name lists are taken wholesale from the caller
// (m_detectorNames(detectorNames), m_extractorNames(extractorNames)).
// NOTE(review): signature and body tail (original lines 136-140, 154, 157+) are
// missing from this extraction.
141 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
142 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
143 m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
144 m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
145 m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
146 m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
147 m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
148 m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0),
149 m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(), m_ransacParallel(false),
150 m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01), m_trainDescriptors(),
151 m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
152 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
153 m_useBruteForceCrossCheck(true),
155 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
156 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
// Simulate an affine view of `img` for affine-invariant (ASIFT-style) detection:
// rotate by `phi` (degrees or radians — not determinable from the visible lines,
// TODO confirm), then compress the x-axis by `tilt`; `mask` tracks valid pixels
// and `Ai` receives the inverse affine transform to map detections back.
// NOTE(review): extraction gaps — original lines 163-166, 168, 170-171, 173-176,
// 178, 188, 191, 193-194, 200-201, 204-205, 207 are missing (including the
// computation of c and s and of w/h), so read the intervening logic cautiously.
162 void vpKeyPoint::affineSkew(
double tilt,
double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
// Start with a full-image valid mask and an identity 2x3 affine matrix.
167 mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));
169 cv::Mat A = cv::Mat::eye(2, 3, CV_32F);
// Rotation step: only applied when phi is non-zero (epsilon comparison).
172 if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
177 A = (cv::Mat_<float>(2, 2) << c, -s, s, c);
// Rotate the four image corners to find the bounding box of the rotated image,
// then rebuild A so the rotated content is shifted into that box's origin.
179 cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
180 cv::Mat tcorners = corners * A.t();
181 cv::Mat tcorners_x, tcorners_y;
182 tcorners.col(0).copyTo(tcorners_x);
183 tcorners.col(1).copyTo(tcorners_y);
184 std::vector<cv::Mat> channels;
185 channels.push_back(tcorners_x);
186 channels.push_back(tcorners_y);
187 cv::merge(channels, tcorners);
189 cv::Rect rect = cv::boundingRect(tcorners);
190 A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
192 cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
// Tilt step: anti-alias with a Gaussian blur (sigma proportional to the tilt)
// before subsampling the x-axis by 1/tilt, then fold the scale into A's row 0.
195 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
196 double s = 0.8 * sqrt(tilt * tilt - 1);
197 cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
198 cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
199 A.row(0) = A.row(0) / tilt;
// If any warp was applied, warp the mask the same way so out-of-frame pixels
// are excluded from detection.
202 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
203 std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
206 cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
// Export the inverse transform so keypoints found in the warped image can be
// mapped back to the original image frame.
208 cv::invertAffineTransform(A, Ai);
// buildReference(image[, rectangle]) body fragment — resets all training state,
// runs detection+extraction (optionally over multiple affine-simulated views when
// m_useAffineDetection is set), records which training image each keypoint came
// from, feeds the descriptors to the matcher, and returns the keypoint count.
// NOTE(review): the function signature and several statements (original lines
// 209-230, 235, 239-243, 248-250, 252-254, 256-257, 259-262, 265-268, 271-273,
// 275-282, 284) are missing from this extraction.
231 m_trainPoints.clear();
232 m_mapOfImageId.clear();
233 m_mapOfImages.clear();
234 m_currentImageId = 1;
236 if (m_useAffineDetection) {
237 std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
238 std::vector<cv::Mat> listOfTrainDescriptors;
// Flatten the per-view keypoint lists into the single training set.
244 m_trainKeyPoints.clear();
245 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
246 it != listOfTrainKeyPoints.end(); ++it) {
247 m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
// Concatenate per-view descriptor matrices: first matrix is copied, the rest
// are appended row-wise.
251 for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
255 it->copyTo(m_trainDescriptors);
258 m_trainDescriptors.push_back(*it);
// Non-affine path: single detect + extract pass on the input image.
263 detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
264 extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
// Tag every training keypoint with the current image id so matches can be
// traced back to their source training image.
269 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
270 m_mapOfImageId[it->class_id] = m_currentImageId;
274 m_mapOfImages[m_currentImageId] = I;
// Hand the training descriptors to the matcher as a one-element train set.
283 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
285 return static_cast<unsigned int>(m_trainKeyPoints.size());
// buildReference(I, trainKeyPoints, points3f, append, class_id) fragment —
// extracts descriptors for caller-supplied keypoints, and if the extractor
// dropped some keypoints, filters points3f to keep only the 3D points whose
// keypoint survived (matched via a keypoint hash). Delegates to the full
// buildReference overload. NOTE(review): signature head and several lines
// (original lines 286-294, 296, 298, 300, 302, 304-306, 308, 310, 312-313,
// 318-321, 323-324) are missing from this extraction.
295 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
297 cv::Mat trainDescriptors;
// Keep a copy of the input keypoints to detect which ones extraction removed.
299 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
301 extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
303 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
// Map each original keypoint's hash to its index so surviving keypoints can
// recover their corresponding 3D point.
307 std::map<size_t, size_t> mapOfKeypointHashes;
309 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
311 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
314 std::vector<cv::Point3f> trainPoints_tmp;
315 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
316 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
317 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
322 points3f = trainPoints_tmp;
325 return (
buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
// Color-image twin of the preceding buildReference overload: same
// extract-then-filter-points3f logic, operating on I_color and forwarding to the
// color buildReference overload. NOTE(review): same extraction gaps as the
// grayscale version (original lines 326-328, 330, 332, 334, 336, 338-340, 342,
// 344, 346-347, 352-355, 357-358 missing).
329 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
331 cv::Mat trainDescriptors;
// Snapshot the keypoints before extraction may drop some of them.
333 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
335 extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
337 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
// Hash-indexed lookup to re-associate surviving keypoints with their 3D points.
341 std::map<size_t, size_t> mapOfKeypointHashes;
343 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
345 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
348 std::vector<cv::Point3f> trainPoints_tmp;
349 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
350 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
351 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
356 points3f = trainPoints_tmp;
359 return (
buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id));
// Core buildReference overload: takes precomputed keypoints, descriptors and 3D
// points. When not appending, resets all training state; optionally overrides
// every keypoint's class_id; records image-id provenance; appends keypoints,
// descriptors and 3D points to the training set; refreshes the matcher; returns
// the total training keypoint count. NOTE(review): the leading signature line
// and several statements (original lines 360-363, 367-368, 374-377, 379,
// 383-387, 389, 391-393, 395-396, 398, 400-401, 403, 405-411, 413-415) are
// missing from this extraction — in particular the append/no-append branching is
// not fully visible.
364 const std::vector<cv::KeyPoint> &trainKeyPoints,
365 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
366 bool append,
int class_id)
369 m_trainPoints.clear();
370 m_mapOfImageId.clear();
371 m_mapOfImages.clear();
372 m_currentImageId = 0;
373 m_trainKeyPoints.clear();
// Work on a copy so the caller's keypoints are not mutated by the class_id
// override below.
378 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
380 if (class_id != -1) {
381 for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
382 it->class_id = class_id;
// Record which training image each keypoint belongs to.
388 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
390 m_mapOfImageId[it->class_id] = m_currentImageId;
394 m_mapOfImages[m_currentImageId] = I;
397 m_trainKeyPoints.insert(m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
// First batch copies the descriptor matrix; later batches append row-wise.
399 trainDescriptors.copyTo(m_trainDescriptors);
402 m_trainDescriptors.push_back(trainDescriptors);
404 this->m_trainPoints.insert(m_trainPoints.end(), points3f.begin(), points3f.end());
// Re-register the (possibly grown) descriptor set with the matcher.
412 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
416 return static_cast<unsigned int>(m_trainKeyPoints.size());
// Color-image wrapper fragment: forwards to the grayscale core overload using
// the internal m_I buffer (presumably filled from the color input on a missing
// line — original lines 417-419, 422-423 are not visible; TODO confirm).
420 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
421 bool append,
int class_id)
424 return (
buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
// compute3D overload fragments (cv::KeyPoint and vpImagePoint variants): build a
// plane from the first three ROI points, back-project the 2D candidate onto that
// plane to get its camera-frame coordinates, then move it to the object frame
// with cMo.inverse(). NOTE(review): both function signatures and most of the
// back-projection code (original lines 425-430, 432-437, 440-449, 452-457,
// 459-464, 467-476, 478-481) are missing from this extraction.
431 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
// Plane supporting the polygonal face, defined by three of its vertices.
438 vpPlane Po(pts[0], pts[1], pts[2]);
439 double xc = 0.0, yc = 0.0;
// Camera frame -> object frame.
450 point_obj = cMo.
inverse() * point_cam;
451 point = cv::Point3f((
float)point_obj[0], (
float)point_obj[1], (
float)point_obj[2]);
// Second overload (vpImagePoint output) — same plane construction and
// back-projection scheme.
458 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
465 vpPlane Po(pts[0], pts[1], pts[2]);
466 double xc = 0.0, yc = 0.0;
477 point_obj = cMo.
inverse() * point_cam;
// compute3DForPointsInPolygons (cv::KeyPoint flavor) fragment: for each visible
// polygon, keep the candidate keypoints lying inside it, compute their 3D object
// coordinates from the polygon's ROI points, and (optionally) keep the matching
// descriptor rows. Candidates are erased from the working list once assigned so
// each keypoint is attributed to at most one polygon. NOTE(review): the
// signature head and several statements (original lines 478-481, 486, 488-493,
// 497-499, 503, 508, 510, 513-515, 517-523, 526-529) are missing from this
// extraction.
482 std::vector<cv::KeyPoint> &candidates,
483 const std::vector<vpPolygon> &polygons,
484 const std::vector<std::vector<vpPoint> > &roisPt,
485 std::vector<cv::Point3f> &points, cv::Mat *descriptors)
487 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
// Pair each candidate with its original index so descriptor rows can be
// recovered after reordering/erasure.
494 std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
495 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
496 pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
500 std::vector<vpPolygon> polygons_tmp = polygons;
501 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
502 std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
504 while (it2 != pairOfCandidatesToCheck.end()) {
// vpImagePoint is (i, j) = (row, col), hence (pt.y, pt.x).
505 imPt.
set_ij(it2->first.pt.y, it2->first.pt.x);
506 if (it1->isInside(imPt)) {
507 candidates.push_back(it2->first);
509 points.push_back(pt);
511 if (descriptors !=
nullptr) {
512 desc.push_back(descriptors->row((
int)it2->second));
// Remove the assigned candidate so later polygons cannot claim it again.
516 it2 = pairOfCandidatesToCheck.erase(it2);
524 if (descriptors !=
nullptr) {
525 desc.copyTo(*descriptors);
// vpImagePoint flavor of compute3DForPointsInPolygons: same polygon-membership
// filtering and 3D computation as the cv::KeyPoint version, producing vpPoint
// outputs. NOTE(review): extraction gaps as in the keypoint variant (original
// lines 534, 536-540, 544-546, 550, 554, 556, 559-561, 563-572 missing),
// including the trailing descriptor copy-back.
530 std::vector<vpImagePoint> &candidates,
531 const std::vector<vpPolygon> &polygons,
532 const std::vector<std::vector<vpPoint> > &roisPt,
533 std::vector<vpPoint> &points, cv::Mat *descriptors)
535 std::vector<vpImagePoint> candidatesToCheck = candidates;
// Index-tagged copies so descriptor rows can be matched after erasure.
541 std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
542 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
543 pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
547 std::vector<vpPolygon> polygons_tmp = polygons;
548 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
549 std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
551 while (it2 != pairOfCandidatesToCheck.end()) {
552 if (it1->isInside(it2->first)) {
553 candidates.push_back(it2->first);
555 points.push_back(pt);
557 if (descriptors !=
nullptr) {
558 desc.push_back(descriptors->row((
int)it2->second));
// Each candidate is consumed by the first polygon that contains it.
562 it2 = pairOfCandidatesToCheck.erase(it2);
// Cylinder variant (cv::KeyPoint flavor) fragment: for each candidate keypoint,
// scan every cylinder's image ROIs; when the keypoint lies on a cylinder, invert
// the perspective projection using the cylinder's depth (computeZ), transform
// the camera-frame point to the object frame via cMo.inverse(), and emit the 3D
// point (plus the matching descriptor row, if requested). NOTE(review): the
// signature head and many statements (original lines 576, 578-582, 587-588,
// 591-592, 594, 596-598, 600, 602, 604, 607-609, 611-612, 614, 617-625) are
// missing from this extraction.
573 std::vector<cv::KeyPoint> &candidates,
const std::vector<vpCylinder> &cylinders,
574 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
575 std::vector<cv::Point3f> &points, cv::Mat *descriptors)
577 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
583 size_t cpt_keypoint = 0;
584 for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
585 ++it1, cpt_keypoint++) {
586 size_t cpt_cylinder = 0;
// Iterate over each cylinder's set of image-space ROIs.
589 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
590 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
593 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
595 candidates.push_back(*it1);
// (xm, ym): normalized image coordinates of the keypoint (set on a missing
// line — presumably via pixel-to-meter conversion; TODO confirm).
599 double xm = 0.0, ym = 0.0;
601 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
// Reject invalid depths (NaN or non-positive).
603 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
605 point_cam[0] = xm * Z;
606 point_cam[1] = ym * Z;
610 point_obj = cMo.
inverse() * point_cam;
613 points.push_back(cv::Point3f((
float)pt.
get_oX(), (
float)pt.
get_oY(), (
float)pt.
get_oZ()));
615 if (descriptors !=
nullptr) {
616 desc.push_back(descriptors->row((
int)cpt_keypoint));
626 if (descriptors !=
nullptr) {
627 desc.copyTo(*descriptors);
// vpImagePoint flavor of the cylinder variant: identical depth-recovery and
// frame-transform scheme, emitting vpPoint outputs instead of cv::Point3f.
// NOTE(review): same class of extraction gaps as the keypoint flavor (original
// lines 635, 637-641, 646-647, 650-651, 653, 655-657, 659, 661, 663, 666-668,
// 670-671, 673, 676-684 missing).
632 std::vector<vpImagePoint> &candidates,
const std::vector<vpCylinder> &cylinders,
633 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
634 std::vector<vpPoint> &points, cv::Mat *descriptors)
636 std::vector<vpImagePoint> candidatesToCheck = candidates;
642 size_t cpt_keypoint = 0;
643 for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
644 ++it1, cpt_keypoint++) {
645 size_t cpt_cylinder = 0;
// Walk each cylinder's image-space ROIs.
648 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
649 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
652 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
654 candidates.push_back(*it1);
// Normalized coordinates filled on a line missing from this extraction.
658 double xm = 0.0, ym = 0.0;
660 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
662 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
664 point_cam[0] = xm * Z;
665 point_cam[1] = ym * Z;
669 point_obj = cMo.
inverse() * point_cam;
672 points.push_back(pt);
674 if (descriptors !=
nullptr) {
675 desc.push_back(descriptors->row((
int)cpt_keypoint));
685 if (descriptors !=
nullptr) {
686 desc.copyTo(*descriptors);
// computePose (OpenCV backend) fragment: estimates the pose from 2D/3D point
// correspondences with cv::solvePnPRansac, then converts the resulting
// rvec/tvec into a ViSP pose (vpTranslationVector + vpThetaUVector).
// Distortion is assumed already removed (zero distCoeffs). Requires at least 4
// correspondences. NOTE(review): several statements (original lines 687-695,
// 697, 699-704, 706-714, 716-717, 719, 722-723, 725-743, 747-748, 751-752,
// 755-757, 760-761, 763+) are missing from this extraction — including the
// camera-matrix construction and the post-check callback invocation.
696 if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
698 std::cerr <<
"Not enough points to compute the pose (at least 4 points "
705 cv::Mat cameraMatrix =
// Zero distortion: the input image points are expected to be undistorted.
715 cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);
// OpenCV >= 3.0 signature takes confidence/inlier-output parameters and a
// solver flag; the pre-3.0 branch below uses the inlier-count form instead.
718 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
720 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
721 (
float)m_ransacReprojectionError,
724 inlierIndex, cv::SOLVEPNP_ITERATIVE);
744 int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
// Optionally derive the consensus size from a percentage of the filtered
// query keypoints rather than an absolute count.
745 if (m_useConsensusPercentage) {
746 nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
749 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
750 (
float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
// OpenCV failures are reported but treated as a non-fatal pose failure.
753 catch (cv::Exception &e) {
754 std::cerr << e.what() << std::endl;
// Convert the OpenCV rvec/tvec (Rodrigues angle-axis + translation) into the
// ViSP homogeneous pose representation.
758 vpTranslationVector translationVec(tvec.at<
double>(0), tvec.at<
double>(1), tvec.at<
double>(2));
759 vpThetaUVector thetaUVector(rvec.at<
double>(0), rvec.at<
double>(1), rvec.at<
double>(2));
// Optional user-supplied pose-validation callback.
762 if (func !=
nullptr) {
// computePose (ViSP backend) fragments: the first overload forwards to the
// second with a local inlier-index vector; the second runs ViSP's RANSAC pose
// estimation (vpPose, set up on lines missing here) over the object points,
// optionally sizing the consensus from a percentage of the filtered query
// keypoints, and can compute the pose covariance. NOTE(review): large extraction
// gaps (original lines 764-775, 777-778, 781-783, 785-788, 790-798, 800-802,
// 807-815, 817-822, 824-827, 829-845) — the actual vpPose calls are not visible.
776 std::vector<vpPoint> &inliers,
double &elapsedTime,
779 std::vector<unsigned int> inlierIndex;
780 return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
// Full overload: also returns the indices of the RANSAC inliers.
784 std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
double &elapsedTime,
// Pose from fewer than 4 points is not attempted.
789 if (objectVpPoints.size() < 4) {
799 for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
803 unsigned int nbInlierToReachConsensus = (
unsigned int)m_nbRansacMinInlierCount;
804 if (m_useConsensusPercentage) {
805 nbInlierToReachConsensus =
806 (
unsigned int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
816 bool isRansacPoseEstimationOk =
false;
// Covariance of the estimated pose, computed only on demand.
823 if (m_computeCovariance) {
828 std::cerr <<
"e=" << e.
what() << std::endl;
846 return isRansacPoseEstimationOk;
// Mean 2D reprojection error (in pixels) of the matched keypoints under the
// estimated pose: each 3D point is projected (projection code on lines missing
// from this extraction — original lines 847-848, 850-851, 853-857, 859-860,
// 863-866, 868, 870-871) and compared to its detected keypoint location with a
// Euclidean pixel distance; the function returns the average of those
// distances. The matchKeyPoints.size() == 0 guard's action is not visible here.
849 double vpKeyPoint::computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
852 if (matchKeyPoints.size() == 0) {
858 std::vector<double> errors(matchKeyPoints.size());
861 for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
862 it != matchKeyPoints.end(); ++it, cpt++) {
// (u, v): projected pixel coordinates, filled on a line not visible here.
867 double u = 0.0, v = 0.0;
869 errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
// Mean over all matches (note: divides by size — relies on the empty-input
// guard above to avoid division by zero).
872 return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
// createImageMatching (grayscale) fragment: sizes the side-by-side "matching"
// canvas for the mosaic view. The grid is nbImgSqrt x nbImgSqrt tiles (square
// root of training-image count + 1 for the current image, grown by one column
// if the square is too small), and each tile is as large as the biggest image
// involved. NOTE(review): the function signature and the final allocation
// (original lines 873-898, 900, 903-912, 914-915, 917, 919-924, 928, 931-932,
// 935-945) are missing from this extraction.
899 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
901 if (m_mapOfImages.empty()) {
902 std::cerr <<
"There is no training image loaded !" << std::endl;
913 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
916 unsigned int nbWidth = nbImgSqrt;
918 unsigned int nbHeight = nbImgSqrt;
// A non-perfect square needs one extra column (or row — the adjusted variable
// is on a missing line) to fit all images.
921 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Tile size: maximum dimensions over the current image and every training image.
925 unsigned int maxW = ICurrent.
getWidth();
926 unsigned int maxH = ICurrent.
getHeight();
927 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
929 if (maxW < it->second.getWidth()) {
930 maxW = it->second.getWidth();
933 if (maxH < it->second.getHeight()) {
934 maxH = it->second.getHeight();
// Color twin of createImageMatching: identical mosaic-grid sizing logic (the
// training images map is still grayscale; only the output canvas differs on
// lines missing here). NOTE(review): same extraction gaps as the grayscale
// version (original lines 947, 950-959, 961-962, 964, 966-971, 975, 978-979,
// 982-991 missing).
946 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
948 if (m_mapOfImages.empty()) {
949 std::cerr <<
"There is no training image loaded !" << std::endl;
// Square-ish grid of training images plus the current image.
960 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
963 unsigned int nbWidth = nbImgSqrt;
965 unsigned int nbHeight = nbImgSqrt;
968 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Tile size: maximum over all involved images.
972 unsigned int maxW = ICurrent.
getWidth();
973 unsigned int maxH = ICurrent.
getHeight();
974 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
976 if (maxW < it->second.getWidth()) {
977 maxW = it->second.getWidth();
980 if (maxH < it->second.getHeight()) {
981 maxH = it->second.getHeight();
// detect() overload family fragment. The thin wrappers forward to the
// mask-based core; the rectangle overloads rasterize a vpRect into a CV_8U
// mask (255 inside the rectangle, 0 outside; full-255 mask when no rectangle),
// handling the cv::FILLED / CV_FILLED rename across OpenCV versions. The core
// overload runs every configured detector over the image and concatenates the
// keypoints. NOTE(review): wrapper signatures and several statements (original
// lines 993-997, 999-1000, 1002-1003, 1005-1011, 1013-1014, 1017, 1019, 1021,
// 1023-1024, 1026-1027, 1029-1035, 1037-1038, 1041, 1043, 1045, 1047-1048,
// 1050-1051, 1053-1054, 1057-1060, 1064, 1067+) are missing from this
// extraction.
992 detect(I, keyPoints, elapsedTime, rectangle);
998 detect(I_color, keyPoints, elapsedTime, rectangle);
1001 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
const cv::Mat &mask)
1004 detect(matImg, keyPoints, elapsedTime, mask);
// Grayscale rectangle overload: build the detection mask.
1012 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1015 #if VISP_HAVE_OPENCV_VERSION >= 0x030000
1016 int filled = cv::FILLED;
1018 int filled = CV_FILLED;
1020 cv::Point leftTop((
int)rectangle.
getLeft(), (
int)rectangle.
getTop()),
1022 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), filled);
// No rectangle: detect everywhere.
1025 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1028 detect(matImg, keyPoints, elapsedTime, mask);
// Color rectangle overload: same mask construction.
1036 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1039 #if VISP_HAVE_OPENCV_VERSION >= 0x030000
1040 int filled = cv::FILLED;
1042 int filled = CV_FILLED;
1044 cv::Point leftTop((
int)rectangle.
getLeft(), (
int)rectangle.
getTop()),
1046 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), filled);
1049 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1052 detect(matImg, keyPoints, elapsedTime, mask);
// Core: run each configured cv::FeatureDetector and merge the results.
1055 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
1056 const cv::Mat &mask)
1061 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
1062 it != m_detectors.end(); ++it) {
1063 std::vector<cv::KeyPoint> kp;
1065 it->second->detect(matImg, kp, mask);
1066 keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
// display() overload fragments: convert the filtered matches' keypoints to
// vpImagePoint lists and iterate them for on-screen drawing. The drawing calls
// themselves are on lines missing from this extraction (original lines
// 1068-1073, 1075, 1077-1078, 1080-1086, 1088, 1090-1091, 1093-1099,
// 1101-1102, 1104-1109, 1111-1112, 1114-1118).
1074 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1076 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1079 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1087 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1089 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1092 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1100 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1103 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1110 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1113 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
// displayMatching() side-by-side overloads (three image-type variants) fragment:
// for each filtered match, draw a line from the train keypoint (left image) to
// the query keypoint (right image, shifted by the reference image width). When
// no fixed color is given, each match gets a pseudo-random color seeded from
// the current time. NOTE(review): signatures and the actual draw calls
// (original lines 1120-1121, 1123-1124, 1126, 1128-1130, 1132, 1134-1135,
// 1139-1145, 1147-1148, 1150-1151, 1153, 1155-1157, 1159, 1161-1162,
// 1166-1172, 1174-1175, 1177-1178, 1180, 1182-1184, 1186, 1188-1189,
// 1193-1200) are missing from this extraction.
1119 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
// NOTE(review): time-seeded rand() makes match colors non-deterministic between
// runs — display-only, so presumably intentional.
1122 srand((
unsigned int)time(
nullptr));
1125 std::vector<vpImagePoint> queryImageKeyPoints;
1127 std::vector<vpImagePoint> trainImageKeyPoints;
1131 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1133 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1136 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
// Right-hand endpoint offset horizontally by the reference image width.
1137 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1138 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
// Second image-type variant — same drawing scheme.
1146 unsigned int lineThickness,
const vpColor &color)
1149 srand((
unsigned int)time(
nullptr));
1152 std::vector<vpImagePoint> queryImageKeyPoints;
1154 std::vector<vpImagePoint> trainImageKeyPoints;
1158 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1160 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1163 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1164 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1165 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
// Third image-type variant — same drawing scheme.
1173 unsigned int lineThickness,
const vpColor &color)
1176 srand((
unsigned int)time(
nullptr));
1179 std::vector<vpImagePoint> queryImageKeyPoints;
1181 std::vector<vpImagePoint> trainImageKeyPoints;
1185 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1187 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1190 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1191 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1192 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
// displayMatching() mosaic overload (grayscale) fragment: lays training images
// out on a square-ish grid with the current image in the median cell, offsets
// every training keypoint into its image's tile, draws the query keypoints,
// RANSAC inliers and outliers over the median tile, and connects each filtered
// match from the median tile to the corresponding training tile. Images placed
// at or after the median index are shifted by one cell to skip it.
// NOTE(review): signatures and many statements (original lines 1203, 1205,
// 1207-1211, 1213-1220, 1223, 1225-1227, 1229-1230, 1232, 1234, 1237-1238,
// 1241-1245, 1250, 1254-1257, 1259-1260, 1264-1269, 1272-1275, 1277-1280,
// 1282-1286, 1291-1294, 1296-1297, 1300, 1305-1313) are missing from this
// extraction.
1201 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1202 unsigned int lineThickness)
1204 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1206 std::cerr <<
"There is no training image loaded !" << std::endl;
// Grid dimensioning — same scheme as createImageMatching.
1212 int nbImg = (int)(m_mapOfImages.size() + 1);
1221 int nbWidth = nbImgSqrt;
1222 int nbHeight = nbImgSqrt;
1224 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Map each training image id to its grid index, and find the largest tile.
1228 std::map<int, int> mapOfImageIdIndex;
1231 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1233 mapOfImageIdIndex[it->first] = cpt;
1235 if (maxW < it->second.getWidth()) {
1236 maxW = it->second.getWidth();
1239 if (maxH < it->second.getHeight()) {
1240 maxH = it->second.getHeight();
// The current image occupies the median cell of the grid.
1246 int medianI = nbHeight / 2;
1247 int medianJ = nbWidth / 2;
1248 int medianIndex = medianI * nbWidth + medianJ;
1249 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1251 int current_class_id_index = 0;
// Training images at/after the median index shift one cell to make room for
// the current image.
1252 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1253 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1258 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1261 int indexI = current_class_id_index / nbWidth;
1262 int indexJ = current_class_id_index - (indexI * nbWidth);
1263 topLeftCorner.
set_ij((
int)maxH * indexI, (int)maxW * indexJ);
// Query keypoints / inliers / outliers are drawn inside the median tile.
1270 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1271 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1276 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1281 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
// Match lines: median tile (query) -> training image tile (train).
1287 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1288 int current_class_id = 0;
1289 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1290 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1295 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1298 int indexI = current_class_id / nbWidth;
1299 int indexJ = current_class_id - (indexI * nbWidth);
1301 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1302 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1303 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1304 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
// Color twin of the mosaic displayMatching overload — identical grid layout,
// median-cell placement and keypoint-offset logic as the grayscale version.
// NOTE(review): this fragment is truncated by the extraction (original lines
// 1316, 1318, 1320-1324, 1326-1333, 1336, 1338-1340, 1342-1343, 1345, 1347,
// 1350-1351, 1354-1358, 1363, 1367-1370, 1372-1373, 1377-1382, 1385-1388,
// 1390-1393, 1395-1399 missing) and cuts off mid-statement at original line
// 1402 — the remainder of the function is outside this view.
1314 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1315 unsigned int lineThickness)
1317 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1319 std::cerr <<
"There is no training image loaded !" << std::endl;
// Grid dimensioning — same scheme as createImageMatching.
1325 int nbImg = (int)(m_mapOfImages.size() + 1);
1334 int nbWidth = nbImgSqrt;
1335 int nbHeight = nbImgSqrt;
1337 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Grid index per training image id, plus the maximum tile size.
1341 std::map<int, int> mapOfImageIdIndex;
1344 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1346 mapOfImageIdIndex[it->first] = cpt;
1348 if (maxW < it->second.getWidth()) {
1349 maxW = it->second.getWidth();
1352 if (maxH < it->second.getHeight()) {
1353 maxH = it->second.getHeight();
// Current image sits in the median grid cell.
1359 int medianI = nbHeight / 2;
1360 int medianJ = nbWidth / 2;
1361 int medianIndex = medianI * nbWidth + medianJ;
1362 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1364 int current_class_id_index = 0;
1365 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1366 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1371 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1374 int indexI = current_class_id_index / nbWidth;
1375 int indexJ = current_class_id_index - (indexI * nbWidth);
1376 topLeftCorner.
set_ij((
int)maxH * indexI, (int)maxW * indexJ);
// Query keypoints / inliers / outliers in the median tile.
1383 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1384 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1389 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1394 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1400 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1401 int current_class_id = 0;
1402 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1403 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1408 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1411 int indexI = current_class_id / nbWidth;
1412 int indexJ = current_class_id - (indexI * nbWidth);
1414 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1415 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1416 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1417 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
1427 std::vector<cv::Point3f> *trainPoints)
1430 extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1434 std::vector<cv::Point3f> *trainPoints)
1437 extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
1441 std::vector<cv::Point3f> *trainPoints)
1444 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1448 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1452 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1456 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1460 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1464 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1469 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1470 itd != m_extractors.end(); ++itd) {
1474 if (trainPoints !=
nullptr && !trainPoints->empty()) {
1477 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1480 itd->second->compute(matImg, keyPoints, descriptors);
1482 if (keyPoints.size() != keyPoints_tmp.size()) {
1486 std::map<size_t, size_t> mapOfKeypointHashes;
1488 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1490 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1493 std::vector<cv::Point3f> trainPoints_tmp;
1494 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1495 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1496 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1501 *trainPoints = trainPoints_tmp;
1506 itd->second->compute(matImg, keyPoints, descriptors);
1512 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1516 itd->second->compute(matImg, keyPoints, desc);
1518 if (keyPoints.size() != keyPoints_tmp.size()) {
1522 std::map<size_t, size_t> mapOfKeypointHashes;
1524 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1526 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1529 std::vector<cv::Point3f> trainPoints_tmp;
1530 cv::Mat descriptors_tmp;
1531 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1532 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1533 if (trainPoints !=
nullptr && !trainPoints->empty()) {
1534 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1537 if (!descriptors.empty()) {
1538 descriptors_tmp.push_back(descriptors.row((
int)mapOfKeypointHashes[myKeypointHash(*it)]));
1543 if (trainPoints !=
nullptr) {
1545 *trainPoints = trainPoints_tmp;
1548 descriptors_tmp.copyTo(descriptors);
1552 if (descriptors.empty()) {
1553 desc.copyTo(descriptors);
1556 cv::hconcat(descriptors, desc, descriptors);
1561 if (keyPoints.size() != (
size_t)descriptors.rows) {
1562 std::cerr <<
"keyPoints.size() != (size_t) descriptors.rows" << std::endl;
// ---------------------------------------------------------------------------
// vpKeyPoint::filterMatches()
// Filters the raw matcher output into the "filtered" members used downstream:
//   m_filteredMatches, m_objectFilteredPoints, m_queryFilteredKeyPoints.
// Two filtering paths are visible below: a k-NN path (per-match min-distance /
// mean / stddev statistics plus a ratio test on the two nearest neighbours)
// and a plain-match path (distance <= threshold). An optional final pass
// (m_useSingleMatchFilter) keeps only train keypoints matched exactly once.
// NOTE(review): this chunk is an extraction with dropped source lines (the
// embedded original line numbers jump), so several branches are visibly
// truncated; the code below is kept byte-identical to the extract.
// ---------------------------------------------------------------------------
1567 void vpKeyPoint::filterMatches()
// Accumulators for matches that survive filtering.
1569 std::vector<cv::KeyPoint> queryKpts;
1570 std::vector<cv::Point3f> trainPts;
1571 std::vector<cv::DMatch> m;
// --- k-NN branch: gather best-neighbour distances to build statistics. ---
1577 double min_dist = DBL_MAX;
1579 std::vector<double> distance_vec(m_knnMatches.size());
1582 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
1583 double dist = m_knnMatches[i][0].distance;
1585 distance_vec[i] = dist;
1587 if (dist < min_dist) {
// Mean of best distances; normalized by the number of query descriptors.
1594 mean /= m_queryDescriptors.rows;
// Standard deviation via sum of squares; threshold = min distance + stddev.
1597 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1598 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1599 double threshold = min_dist + stdev;
// Second pass: keep matches passing the ratio test (best/second-best) and
// distance criterion; record the surviving keypoints and 3D train points.
1601 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
1602 if (m_knnMatches[i].size() >= 2) {
1605 float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
1610 double dist = m_knnMatches[i][0].distance;
1613 m.push_back(cv::DMatch((
int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
1615 if (!m_trainPoints.empty()) {
1616 trainPts.push_back(m_trainPoints[(
size_t)m_knnMatches[i][0].trainIdx]);
1618 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_knnMatches[i][0].queryIdx]);
// --- plain-match branch: same statistics computed over m_matches. ---
1627 double min_dist = DBL_MAX;
1629 std::vector<double> distance_vec(m_matches.size());
1630 for (
size_t i = 0; i < m_matches.size(); i++) {
1631 double dist = m_matches[i].distance;
1633 distance_vec[i] = dist;
1635 if (dist < min_dist) {
1642 mean /= m_queryDescriptors.rows;
1644 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1645 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
// Keep matches whose distance is below the (gap-truncated) threshold.
1655 for (
size_t i = 0; i < m_matches.size(); i++) {
1656 if (m_matches[i].distance <= threshold) {
1657 m.push_back(cv::DMatch((
int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
1659 if (!m_trainPoints.empty()) {
1660 trainPts.push_back(m_trainPoints[(
size_t)m_matches[i].trainIdx]);
1662 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_matches[i].queryIdx]);
// Optional uniqueness filter: count how many times each train index was
// matched and keep only train keypoints matched exactly once.
1667 if (m_useSingleMatchFilter) {
1670 std::vector<cv::DMatch> mTmp;
1671 std::vector<cv::Point3f> trainPtsTmp;
1672 std::vector<cv::KeyPoint> queryKptsTmp;
1674 std::map<int, int> mapOfTrainIdx;
// First pass: occurrence count per train index.
1676 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1677 mapOfTrainIdx[it->trainIdx]++;
// Second pass: re-index the surviving matches (queryIdx is rebuilt
// sequentially from the size of the kept-keypoint list).
1681 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1682 if (mapOfTrainIdx[it->trainIdx] == 1) {
1683 mTmp.push_back(cv::DMatch((
int)queryKptsTmp.size(), it->trainIdx, it->distance));
1685 if (!m_trainPoints.empty()) {
1686 trainPtsTmp.push_back(m_trainPoints[(
size_t)it->trainIdx]);
1688 queryKptsTmp.push_back(queryKpts[(
size_t)it->queryIdx]);
// Publish filtered results (single-match-filtered variant).
1692 m_filteredMatches = mTmp;
1693 m_objectFilteredPoints = trainPtsTmp;
1694 m_queryFilteredKeyPoints = queryKptsTmp;
// Publish filtered results (no single-match filter).
1697 m_filteredMatches = m;
1698 m_objectFilteredPoints = trainPts;
1699 m_queryFilteredKeyPoints = queryKpts;
1705 objectPoints = m_objectFilteredPoints;
1716 keyPoints = m_queryFilteredKeyPoints;
1719 keyPoints = m_queryKeyPoints;
// ---------------------------------------------------------------------------
// vpKeyPoint::init()
// One-time setup: on OpenCV 2.4.x builds with the nonfree module, loads the
// nonfree algorithms (required for SURF on that version line), then
// instantiates the configured detectors and extractors from their name lists.
// NOTE(review): lines are missing from this extract (original numbering
// jumps); code kept byte-identical.
// ---------------------------------------------------------------------------
1741 void vpKeyPoint::init()
1744 #if defined(HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
// cv::initModule_nonfree() must succeed for SURF to be usable on OpenCV 2.x.
1746 if (!cv::initModule_nonfree()) {
1747 std::cerr <<
"Cannot init module non free, SURF cannot be used." << std::endl;
// Build every configured detector / extractor by name.
1757 initDetectors(m_detectorNames);
1758 initExtractors(m_extractorNames);
// ---------------------------------------------------------------------------
// vpKeyPoint::initDetector(detectorName)
// Instantiates one OpenCV feature detector and stores it in m_detectors.
// On OpenCV < 3.0 the factory cv::FeatureDetector::create() is used directly;
// on newer versions each supported detector name is dispatched explicitly.
// A "Pyramid" prefix in the name selects a PyramidAdaptedFeatureDetector
// wrapper (usePyramid); the name without the prefix selects the raw detector.
// Throws (via the ss_msg paths) when the detector cannot be created for the
// compiled OpenCV version.
// NOTE(review): this extract drops many source lines (else branches, throw
// statements, closing braces); code kept byte-identical.
// ---------------------------------------------------------------------------
1762 void vpKeyPoint::initDetector(
const std::string &detectorName)
1764 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
// OpenCV 2.x: name-based factory; report failure with the version in hex.
1765 m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
1767 if (m_detectors[detectorName] ==
nullptr) {
1768 std::stringstream ss_msg;
1769 ss_msg <<
"Fail to initialize the detector: " << detectorName
1770 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV >= 3.0: detect an optional "Pyramid" prefix and strip it; the
// remaining name selects the concrete detector below.
1774 std::string detectorNameTmp = detectorName;
1775 std::string pyramid =
"Pyramid";
1776 std::size_t pos = detectorName.find(pyramid);
1777 bool usePyramid =
false;
1778 if (pos != std::string::npos) {
1779 detectorNameTmp = detectorName.substr(pos + pyramid.size());
// --- SIFT ---
1783 if (detectorNameTmp ==
"SIFT") {
1784 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && ((VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)) && defined(HAVE_OPENCV_FEATURES2D)) \
1785 || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
1786 # if (VISP_HAVE_OPENCV_VERSION >= 0x040500)
1787 cv::Ptr<cv::FeatureDetector> siftDetector = cv::SiftFeatureDetector::create();
1789 m_detectors[detectorNameTmp] = siftDetector;
1792 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1794 # elif (VISP_HAVE_OPENCV_VERSION >= 0x030411)
// m_maxFeatures > 0 caps the number of detected keypoints.
1796 cv::Ptr<cv::FeatureDetector> siftDetector;
1797 if (m_maxFeatures > 0) {
1798 siftDetector = cv::SIFT::create(m_maxFeatures);
1801 siftDetector = cv::SIFT::create();
1804 cv::Ptr<cv::FeatureDetector> siftDetector;
1805 siftDetector = cv::xfeatures2d::SIFT::create();
1808 m_detectors[detectorNameTmp] = siftDetector;
1811 std::cerr <<
"You should not use SIFT with Pyramid feature detection!" << std::endl;
1812 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1815 std::stringstream ss_msg;
1816 ss_msg <<
"Failed to initialize the SIFT 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- SURF (nonfree; needs xfeatures2d) ---
1820 else if (detectorNameTmp ==
"SURF") {
1821 #if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
1822 cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
1824 m_detectors[detectorNameTmp] = surfDetector;
1827 std::cerr <<
"You should not use SURF with Pyramid feature detection!" << std::endl;
1828 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
1831 std::stringstream ss_msg;
1832 ss_msg <<
"Failed to initialize the SURF 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- FAST ---
1836 else if (detectorNameTmp ==
"FAST") {
1837 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
1838 cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
1840 m_detectors[detectorNameTmp] = fastDetector;
1843 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1846 std::stringstream ss_msg;
1847 ss_msg <<
"Failed to initialize the FAST 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- MSER (variable reused under the name "fastDetector") ---
1851 else if (detectorNameTmp ==
"MSER") {
1852 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
1853 cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
1855 m_detectors[detectorNameTmp] = fastDetector;
1858 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1861 std::stringstream ss_msg;
1862 ss_msg <<
"Failed to initialize the MSER 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- ORB (honours m_maxFeatures like SIFT) ---
1866 else if (detectorNameTmp ==
"ORB") {
1867 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
1868 cv::Ptr<cv::FeatureDetector> orbDetector;
1869 if (m_maxFeatures > 0) {
1870 orbDetector = cv::ORB::create(m_maxFeatures);
1873 orbDetector = cv::ORB::create();
1876 m_detectors[detectorNameTmp] = orbDetector;
1879 std::cerr <<
"You should not use ORB with Pyramid feature detection!" << std::endl;
1880 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
1883 std::stringstream ss_msg;
1884 ss_msg <<
"Failed to initialize the ORB 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- BRISK (moved into xfeatures2d in OpenCV 5) ---
1888 else if (detectorNameTmp ==
"BRISK") {
1889 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
1890 #if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
1891 cv::Ptr<cv::FeatureDetector> briskDetector = cv::xfeatures2d::BRISK::create();
1893 cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
1896 m_detectors[detectorNameTmp] = briskDetector;
1899 std::cerr <<
"You should not use BRISK with Pyramid feature detection!" << std::endl;
1900 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
1903 std::stringstream ss_msg;
1905 ss_msg <<
"Failed to initialize the BRISK 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- KAZE (moved into xfeatures2d in OpenCV 5) ---
1909 else if (detectorNameTmp ==
"KAZE") {
1910 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
1911 #if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
1912 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::xfeatures2d::KAZE::create();
1914 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
1917 m_detectors[detectorNameTmp] = kazeDetector;
1920 std::cerr <<
"You should not use KAZE with Pyramid feature detection!" << std::endl;
1921 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
1924 std::stringstream ss_msg;
1925 ss_msg <<
"Failed to initialize the KAZE 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- AKAZE (moved into xfeatures2d in OpenCV 5) ---
1929 else if (detectorNameTmp ==
"AKAZE") {
1930 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
1931 #if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
1932 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::xfeatures2d::AKAZE::create();
1934 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
1937 m_detectors[detectorNameTmp] = akazeDetector;
1940 std::cerr <<
"You should not use AKAZE with Pyramid feature detection!" << std::endl;
1941 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
1944 std::stringstream ss_msg;
1945 ss_msg <<
"Failed to initialize the AKAZE 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- GFTT (good features to track) ---
1949 else if (detectorNameTmp ==
"GFTT") {
1950 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
1951 cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
1953 m_detectors[detectorNameTmp] = gfttDetector;
1956 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
1959 std::stringstream ss_msg;
1960 ss_msg <<
"Failed to initialize the GFTT 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- SimpleBlob ---
1964 else if (detectorNameTmp ==
"SimpleBlob") {
1965 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
1966 cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
1968 m_detectors[detectorNameTmp] = simpleBlobDetector;
1971 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
1974 std::stringstream ss_msg;
1975 ss_msg <<
"Failed to initialize the SimpleBlob 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- STAR (xfeatures2d StarDetector) ---
1979 else if (detectorNameTmp ==
"STAR") {
1980 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))) \
1981 || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
1982 cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
1984 m_detectors[detectorNameTmp] = starDetector;
1987 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
1990 std::stringstream ss_msg;
1991 ss_msg <<
"Failed to initialize the STAR 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- AGAST ---
1995 else if (detectorNameTmp ==
"AGAST") {
1996 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
1997 #if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
1998 cv::Ptr<cv::FeatureDetector> agastDetector = cv::xfeatures2d::AgastFeatureDetector::create();
2000 cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
2003 m_detectors[detectorNameTmp] = agastDetector;
2006 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
2009 std::stringstream ss_msg;
2010 ss_msg <<
// NOTE(review): this is the AGAST branch but the message says "STAR" —
// looks like a copy-paste error in the original; fix the string upstream.
"Failed to initialize the STAR 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- MSD (xfeatures2d, OpenCV >= 3.1) ---
2014 else if (detectorNameTmp ==
"MSD") {
2015 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2016 cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
2018 m_detectors[detectorNameTmp] = msdDetector;
2021 std::cerr <<
"You should not use MSD with Pyramid feature detection!" << std::endl;
2022 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
2025 std::stringstream ss_msg;
2026 ss_msg <<
"Failed to initialize the MSD 2D features detector with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// Fallback: unknown detector name.
2031 std::cerr <<
"The detector:" << detectorNameTmp <<
" is not available." << std::endl;
// Final sanity check: confirm the map entry is non-empty (keyed on the
// stripped name for the plain detector, the full name for Pyramid*).
2034 bool detectorInitialized =
false;
2037 detectorInitialized = !m_detectors[detectorNameTmp].empty();
2041 detectorInitialized = !m_detectors[detectorName].empty();
2044 if (!detectorInitialized) {
2045 std::stringstream ss_msg;
2046 ss_msg <<
"Fail to initialize the detector: " << detectorNameTmp
2047 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// ---------------------------------------------------------------------------
// vpKeyPoint::initDetectors(detectorNames)
// Instantiates every detector in the given name list (the loop body — the
// per-name initDetector() call — is among the lines dropped by this extract).
// ---------------------------------------------------------------------------
2053 void vpKeyPoint::initDetectors(
const std::vector<std::string> &detectorNames)
2055 for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
// ---------------------------------------------------------------------------
// vpKeyPoint::initExtractor(extractorName)
// Instantiates one OpenCV descriptor extractor and stores it in m_extractors.
// OpenCV 2.x uses the name-based factory; newer versions dispatch explicitly
// per supported name. On failure the ss_msg paths report the OpenCV version.
// On OpenCV 2.4.x the SURF extractor is switched to extended (128-float)
// descriptors at the end.
// NOTE(review): lines are missing from this extract (else branches, throws,
// closing braces); code kept byte-identical.
// ---------------------------------------------------------------------------
2060 void vpKeyPoint::initExtractor(
const std::string &extractorName)
2062 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
// OpenCV 2.x: name-based factory.
2063 m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
// --- SIFT ---
2065 if (extractorName ==
"SIFT") {
2066 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && ((VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
2068 # if (VISP_HAVE_OPENCV_VERSION >= 0x030411)
2069 m_extractors[extractorName] = cv::SIFT::create();
2071 m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
2074 std::stringstream ss_msg;
2075 ss_msg <<
"Fail to initialize the SIFT 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- SURF (nonfree): hessianThreshold=100, 4 octaves, 3 layers, extended ---
2079 else if (extractorName ==
"SURF") {
2080 #if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
2082 m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3,
true);
2084 std::stringstream ss_msg;
2085 ss_msg <<
"Fail to initialize the SURF 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- ORB ---
2089 else if (extractorName ==
"ORB") {
2090 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
2091 m_extractors[extractorName] = cv::ORB::create();
2093 std::stringstream ss_msg;
2094 ss_msg <<
"Fail to initialize the ORB 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- BRISK (moved into xfeatures2d in OpenCV 5) ---
2098 else if (extractorName ==
"BRISK") {
2099 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
2100 #if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
2101 m_extractors[extractorName] = cv::xfeatures2d::BRISK::create();
2103 m_extractors[extractorName] = cv::BRISK::create();
2106 std::stringstream ss_msg;
2107 ss_msg <<
"Fail to initialize the BRISK 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- FREAK (xfeatures2d) ---
2111 else if (extractorName ==
"FREAK") {
2112 #if defined(HAVE_OPENCV_XFEATURES2D)
2113 m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2115 std::stringstream ss_msg;
2116 ss_msg <<
"Fail to initialize the FREAK 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- BRIEF (xfeatures2d) ---
2120 else if (extractorName ==
"BRIEF") {
2121 #if defined(HAVE_OPENCV_XFEATURES2D)
2122 m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2124 std::stringstream ss_msg;
2125 ss_msg <<
"Fail to initialize the BRIEF 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- KAZE (moved into xfeatures2d in OpenCV 5) ---
2129 else if (extractorName ==
"KAZE") {
2130 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
2131 #if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
2132 m_extractors[extractorName] = cv::xfeatures2d::KAZE::create();
2134 m_extractors[extractorName] = cv::KAZE::create();
2137 std::stringstream ss_msg;
2138 ss_msg <<
"Fail to initialize the KAZE 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- AKAZE (moved into xfeatures2d in OpenCV 5) ---
2142 else if (extractorName ==
"AKAZE") {
2143 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
2144 #if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
2145 m_extractors[extractorName] = cv::xfeatures2d::AKAZE::create();
2147 m_extractors[extractorName] = cv::AKAZE::create();
2150 std::stringstream ss_msg;
2151 ss_msg <<
"Fail to initialize the AKAZE 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- DAISY (xfeatures2d) ---
2155 else if (extractorName ==
"DAISY") {
2156 #if defined(HAVE_OPENCV_XFEATURES2D)
2157 m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2159 std::stringstream ss_msg;
2160 ss_msg <<
"Fail to initialize the DAISY 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- LATCH (xfeatures2d) ---
2164 else if (extractorName ==
"LATCH") {
2165 #if defined(HAVE_OPENCV_XFEATURES2D)
2166 m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2168 std::stringstream ss_msg;
2169 ss_msg <<
"Fail to initialize the LATCH 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- VGG (xfeatures2d, OpenCV >= 3.2) ---
2173 else if (extractorName ==
"VGG") {
2174 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(HAVE_OPENCV_XFEATURES2D)
2175 m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2177 std::stringstream ss_msg;
2178 ss_msg <<
"Fail to initialize the VGG 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// --- BoostDesc (xfeatures2d, OpenCV >= 3.2) ---
2182 else if (extractorName ==
"BoostDesc") {
2183 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(HAVE_OPENCV_XFEATURES2D)
2184 m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2186 std::stringstream ss_msg;
2187 ss_msg <<
"Fail to initialize the BoostDesc 2D features extractor with OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION;
// Fallback: unknown extractor name.
2192 std::cerr <<
"The extractor:" << extractorName <<
" is not available." << std::endl;
// Final sanity check on the created entry.
2196 if (!m_extractors[extractorName]) {
2197 std::stringstream ss_msg;
2198 ss_msg <<
"Fail to initialize the extractor: " << extractorName
2199 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV 2.4.x only: switch SURF to extended (128-element) descriptors.
2203 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2204 if (extractorName ==
"SURF") {
2206 m_extractors[extractorName]->set(
"extended", 1);
// ---------------------------------------------------------------------------
// vpKeyPoint::initExtractors(extractorNames)
// Instantiates every extractor in the list (the per-name initExtractor() call
// inside the first loop is among the lines dropped by this extract), then
// scans the created extractors to check they all produce the same descriptor
// type (mixing types — e.g. CV_8U binary with CV_32F float — would break the
// single matcher; the handling branch is also truncated here).
// ---------------------------------------------------------------------------
2211 void vpKeyPoint::initExtractors(
const std::vector<std::string> &extractorNames)
2213 for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
// Determine the common descriptor type across all configured extractors.
2217 int descriptorType = CV_32F;
2218 bool firstIteration =
true;
2219 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2220 it != m_extractors.end(); ++it) {
2221 if (firstIteration) {
2222 firstIteration =
false;
2223 descriptorType = it->second->descriptorType();
// Mismatch with the first extractor's type: inconsistent configuration.
2226 if (descriptorType != it->second->descriptorType()) {
// ---------------------------------------------------------------------------
// vpKeyPoint::initFeatureNames()
// Populates the enum-to-name lookup tables m_mapOfDetectorNames and
// m_mapOfDescriptorNames, guarded by the same OpenCV-module #if conditions
// used when the corresponding detector/extractor is actually constructed.
// NOTE(review): most map-assignment lines were dropped by this extract —
// only STAR/MSD/SIFT/SURF detector names and SIFT/SURF/VGG/BoostDesc
// descriptor names remain visible; code kept byte-identical.
// ---------------------------------------------------------------------------
2233 void vpKeyPoint::initFeatureNames()
2236 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
2243 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
2245 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2251 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))) \
2252 || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
2253 m_mapOfDetectorNames[DETECTOR_STAR] =
"STAR";
2255 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2256 m_mapOfDetectorNames[DETECTOR_MSD] =
"MSD";
2258 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && ((VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)) && defined(HAVE_OPENCV_FEATURES2D)) \
2259 || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
2260 m_mapOfDetectorNames[DETECTOR_SIFT] =
"SIFT";
2262 #if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
2263 m_mapOfDetectorNames[DETECTOR_SURF] =
"SURF";
// Descriptor (extractor) names below, guarded the same way.
2266 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
2269 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
2272 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2276 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && ((VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)) && defined(HAVE_OPENCV_FEATURES2D)) \
2277 || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
2278 m_mapOfDescriptorNames[DESCRIPTOR_SIFT] =
"SIFT";
2280 #if defined(OPENCV_ENABLE_NONFREE) && defined(HAVE_OPENCV_XFEATURES2D)
2281 m_mapOfDescriptorNames[DESCRIPTOR_SURF] =
"SURF";
2283 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_XFEATURES2D))
2284 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2289 #if defined(HAVE_OPENCV_XFEATURES2D)
2293 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(HAVE_OPENCV_XFEATURES2D)
2294 m_mapOfDescriptorNames[DESCRIPTOR_VGG] =
"VGG";
2295 m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] =
"BoostDesc";
2301 int descriptorType = CV_32F;
2302 bool firstIteration =
true;
2303 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2304 it != m_extractors.end(); ++it) {
2305 if (firstIteration) {
2306 firstIteration =
false;
2307 descriptorType = it->second->descriptorType();
2310 if (descriptorType != it->second->descriptorType()) {
2316 if (matcherName ==
"FlannBased") {
2317 if (m_extractors.empty()) {
2318 std::cout <<
"Warning: No extractor initialized, by default use "
2319 "floating values (CV_32F) "
2320 "for descriptor type !"
2324 if (descriptorType == CV_8U) {
2325 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2326 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2328 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::LshIndexParams(12, 20, 2));
2332 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2333 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2335 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::KDTreeIndexParams());
2340 m_matcher = cv::DescriptorMatcher::create(matcherName);
2343 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2344 if (m_matcher !=
nullptr && !m_useKnn && matcherName ==
"BruteForce") {
2345 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
2350 std::stringstream ss_msg;
2351 ss_msg <<
"Fail to initialize the matcher: " << matcherName
2352 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
2361 IMatching.
insert(IRef, topLeftCorner);
2363 IMatching.
insert(ICurrent, topLeftCorner);
2370 IMatching.
insert(IRef, topLeftCorner);
2372 IMatching.
insert(ICurrent, topLeftCorner);
2379 int nbImg = (int)(m_mapOfImages.size() + 1);
2381 if (m_mapOfImages.empty()) {
2382 std::cerr <<
"There is no training image loaded !" << std::endl;
2393 int nbWidth = nbImgSqrt;
2394 int nbHeight = nbImgSqrt;
2396 if (nbImgSqrt * nbImgSqrt < nbImg) {
2401 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2403 if (maxW < it->second.getWidth()) {
2404 maxW = it->second.getWidth();
2407 if (maxH < it->second.getHeight()) {
2408 maxH = it->second.getHeight();
2414 int medianI = nbHeight / 2;
2415 int medianJ = nbWidth / 2;
2416 int medianIndex = medianI * nbWidth + medianJ;
2419 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2421 int local_cpt = cpt;
2422 if (cpt >= medianIndex) {
2427 int indexI = local_cpt / nbWidth;
2428 int indexJ = local_cpt - (indexI * nbWidth);
2429 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2431 IMatching.
insert(it->second, topLeftCorner);
2434 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2435 IMatching.
insert(ICurrent, topLeftCorner);
2443 int nbImg = (int)(m_mapOfImages.size() + 1);
2445 if (m_mapOfImages.empty()) {
2446 std::cerr <<
"There is no training image loaded !" << std::endl;
2459 int nbWidth = nbImgSqrt;
2460 int nbHeight = nbImgSqrt;
2462 if (nbImgSqrt * nbImgSqrt < nbImg) {
2467 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2469 if (maxW < it->second.getWidth()) {
2470 maxW = it->second.getWidth();
2473 if (maxH < it->second.getHeight()) {
2474 maxH = it->second.getHeight();
2480 int medianI = nbHeight / 2;
2481 int medianJ = nbWidth / 2;
2482 int medianIndex = medianI * nbWidth + medianJ;
2485 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2487 int local_cpt = cpt;
2488 if (cpt >= medianIndex) {
2493 int indexI = local_cpt / nbWidth;
2494 int indexJ = local_cpt - (indexI * nbWidth);
2495 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2499 IMatching.
insert(IRef, topLeftCorner);
2502 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2503 IMatching.
insert(ICurrent, topLeftCorner);
2509 #if defined(VISP_HAVE_PUGIXML)
2514 m_detectorNames.clear();
2515 m_extractorNames.clear();
2516 m_detectors.clear();
2517 m_extractors.clear();
2519 std::cout <<
" *********** Parsing XML for configuration for vpKeyPoint "
2522 xmlp.
parse(configFile);
2584 int startClassId = 0;
2585 int startImageId = 0;
2587 m_trainKeyPoints.clear();
2588 m_trainPoints.clear();
2589 m_mapOfImageId.clear();
2590 m_mapOfImages.clear();
2594 for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
2595 if (startClassId < it->first) {
2596 startClassId = it->first;
2601 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2603 if (startImageId < it->first) {
2604 startImageId = it->first;
2611 if (!parent.empty()) {
2616 std::ifstream file(filename.c_str(), std::ifstream::binary);
2617 if (!file.is_open()) {
2625 #if !defined(VISP_HAVE_MODULE_IO)
2627 std::cout <<
"Warning: The learning file contains image data that will "
2628 "not be loaded as visp_io module "
2629 "is not available !"
2634 for (
int i = 0; i < nbImgs; i++) {
2642 char *path =
new char[length + 1];
2644 for (
int cpt = 0; cpt < length; cpt++) {
2646 file.read((
char *)(&c),
sizeof(c));
2649 path[length] =
'\0';
2652 #ifdef VISP_HAVE_MODULE_IO
2661 m_mapOfImages[
id + startImageId] = I;
2669 int have3DInfoInt = 0;
2671 bool have3DInfo = have3DInfoInt != 0;
2682 int descriptorType = 5;
2685 cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2686 for (
int i = 0; i < nRows; i++) {
2688 float u, v, size, angle, response;
2689 int octave, class_id, image_id;
2698 cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
2699 m_trainKeyPoints.push_back(keyPoint);
2701 if (image_id != -1) {
2702 #ifdef VISP_HAVE_MODULE_IO
2704 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2714 m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
2717 for (
int j = 0; j < nCols; j++) {
2719 switch (descriptorType) {
2721 unsigned char value;
2722 file.read((
char *)(&value),
sizeof(value));
2723 trainDescriptorsTmp.at<
unsigned char>(i, j) = value;
2728 file.read((
char *)(&value),
sizeof(value));
2729 trainDescriptorsTmp.at<
char>(i, j) = value;
2733 unsigned short int value;
2735 trainDescriptorsTmp.at<
unsigned short int>(i, j) = value;
2741 trainDescriptorsTmp.at<
short int>(i, j) = value;
2747 trainDescriptorsTmp.at<
int>(i, j) = value;
2753 trainDescriptorsTmp.at<
float>(i, j) = value;
2759 trainDescriptorsTmp.at<
double>(i, j) = value;
2765 trainDescriptorsTmp.at<
float>(i, j) = value;
2771 if (!append || m_trainDescriptors.empty()) {
2772 trainDescriptorsTmp.copyTo(m_trainDescriptors);
2775 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2781 #if defined(VISP_HAVE_PUGIXML)
2782 pugi::xml_document doc;
2785 if (!doc.load_file(filename.c_str())) {
2789 pugi::xml_node root_element = doc.document_element();
2791 int descriptorType = CV_32F;
2792 int nRows = 0, nCols = 0;
2795 cv::Mat trainDescriptorsTmp;
2797 for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node;
2798 first_level_node = first_level_node.next_sibling()) {
2800 std::string name(first_level_node.name());
2801 if (first_level_node.type() == pugi::node_element && name ==
"TrainingImageInfo") {
2803 for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node;
2804 image_info_node = image_info_node.next_sibling()) {
2805 name = std::string(image_info_node.name());
2807 if (name ==
"trainImg") {
2809 int id = image_info_node.attribute(
"image_id").as_int();
2812 #ifdef VISP_HAVE_MODULE_IO
2813 std::string path(image_info_node.text().as_string());
2823 m_mapOfImages[
id + startImageId] = I;
2828 else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorsInfo") {
2829 for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
2830 descriptors_info_node = descriptors_info_node.next_sibling()) {
2831 if (descriptors_info_node.type() == pugi::node_element) {
2832 name = std::string(descriptors_info_node.name());
2834 if (name ==
"nrows") {
2835 nRows = descriptors_info_node.text().as_int();
2837 else if (name ==
"ncols") {
2838 nCols = descriptors_info_node.text().as_int();
2840 else if (name ==
"type") {
2841 descriptorType = descriptors_info_node.text().as_int();
2846 trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2848 else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorInfo") {
2849 double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
2850 int octave = 0, class_id = 0, image_id = 0;
2851 double oX = 0.0, oY = 0.0, oZ = 0.0;
2853 std::stringstream ss;
2855 for (pugi::xml_node point_node = first_level_node.first_child(); point_node;
2856 point_node = point_node.next_sibling()) {
2857 if (point_node.type() == pugi::node_element) {
2858 name = std::string(point_node.name());
2862 u = point_node.text().as_double();
2864 else if (name ==
"v") {
2865 v = point_node.text().as_double();
2867 else if (name ==
"size") {
2868 size = point_node.text().as_double();
2870 else if (name ==
"angle") {
2871 angle = point_node.text().as_double();
2873 else if (name ==
"response") {
2874 response = point_node.text().as_double();
2876 else if (name ==
"octave") {
2877 octave = point_node.text().as_int();
2879 else if (name ==
"class_id") {
2880 class_id = point_node.text().as_int();
2881 cv::KeyPoint keyPoint(cv::Point2f((
float)u, (
float)v), (
float)size, (
float)angle, (
float)response, octave,
2882 (class_id + startClassId));
2883 m_trainKeyPoints.push_back(keyPoint);
2885 else if (name ==
"image_id") {
2886 image_id = point_node.text().as_int();
2887 if (image_id != -1) {
2888 #ifdef VISP_HAVE_MODULE_IO
2890 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2894 else if (name ==
"oX") {
2895 oX = point_node.text().as_double();
2897 else if (name ==
"oY") {
2898 oY = point_node.text().as_double();
2900 else if (name ==
"oZ") {
2901 oZ = point_node.text().as_double();
2902 m_trainPoints.push_back(cv::Point3f((
float)oX, (
float)oY, (
float)oZ));
2904 else if (name ==
"desc") {
2907 for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
2908 descriptor_value_node = descriptor_value_node.next_sibling()) {
2910 if (descriptor_value_node.type() == pugi::node_element) {
2912 std::string parseStr(descriptor_value_node.text().as_string());
2917 switch (descriptorType) {
2922 trainDescriptorsTmp.at<
unsigned char>(i, j) = (
unsigned char)parseValue;
2929 trainDescriptorsTmp.at<
char>(i, j) = (
char)parseValue;
2933 ss >> trainDescriptorsTmp.at<
unsigned short int>(i, j);
2937 ss >> trainDescriptorsTmp.at<
short int>(i, j);
2941 ss >> trainDescriptorsTmp.at<
int>(i, j);
2945 ss >> trainDescriptorsTmp.at<
float>(i, j);
2949 ss >> trainDescriptorsTmp.at<
double>(i, j);
2953 ss >> trainDescriptorsTmp.at<
float>(i, j);
2958 std::cerr <<
"Error when converting:" << ss.str() << std::endl;
2971 if (!append || m_trainDescriptors.empty()) {
2972 trainDescriptorsTmp.copyTo(m_trainDescriptors);
2975 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2988 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
2994 m_currentImageId = (int)m_mapOfImages.size();
2998 std::vector<cv::DMatch> &matches,
double &elapsedTime)
3003 m_knnMatches.clear();
3005 if (m_useMatchTrainToQuery) {
3006 std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
3009 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3010 matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
3012 for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
3013 it1 != knnMatchesTmp.end(); ++it1) {
3014 std::vector<cv::DMatch> tmp;
3015 for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
3016 tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
3018 m_knnMatches.push_back(tmp);
3021 matches.resize(m_knnMatches.size());
3022 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3026 m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
3027 matches.resize(m_knnMatches.size());
3028 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3034 if (m_useMatchTrainToQuery) {
3035 std::vector<cv::DMatch> matchesTmp;
3037 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3038 matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
3040 for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
3041 matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
3046 m_matcher->match(queryDescriptors, matches);
3070 if (m_trainDescriptors.empty()) {
3071 std::cerr <<
"Reference is empty." << std::endl;
3073 std::cerr <<
"Reference is not computed." << std::endl;
3075 std::cerr <<
"Matching is not possible." << std::endl;
3080 if (m_useAffineDetection) {
3081 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3082 std::vector<cv::Mat> listOfQueryDescriptors;
3088 m_queryKeyPoints.clear();
3089 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3090 it != listOfQueryKeyPoints.end(); ++it) {
3091 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3095 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3099 it->copyTo(m_queryDescriptors);
3102 m_queryDescriptors.push_back(*it);
3107 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3108 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3111 return matchPoint(m_queryKeyPoints, m_queryDescriptors);
3116 m_queryKeyPoints = queryKeyPoints;
3117 m_queryDescriptors = queryDescriptors;
3119 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3122 m_queryFilteredKeyPoints.clear();
3123 m_objectFilteredPoints.clear();
3124 m_filteredMatches.clear();
3129 if (m_useMatchTrainToQuery) {
3131 m_queryFilteredKeyPoints.clear();
3132 m_filteredMatches.clear();
3133 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3134 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3135 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3139 m_queryFilteredKeyPoints = m_queryKeyPoints;
3140 m_filteredMatches = m_matches;
3143 if (!m_trainPoints.empty()) {
3144 m_objectFilteredPoints.clear();
3148 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3150 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3159 return static_cast<unsigned int>(m_filteredMatches.size());
3171 double error, elapsedTime;
3172 return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3178 double error, elapsedTime;
3179 return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
3187 if (m_trainDescriptors.empty()) {
3188 std::cerr <<
"Reference is empty." << std::endl;
3190 std::cerr <<
"Reference is not computed." << std::endl;
3192 std::cerr <<
"Matching is not possible." << std::endl;
3197 if (m_useAffineDetection) {
3198 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3199 std::vector<cv::Mat> listOfQueryDescriptors;
3205 m_queryKeyPoints.clear();
3206 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3207 it != listOfQueryKeyPoints.end(); ++it) {
3208 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3212 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3216 it->copyTo(m_queryDescriptors);
3219 m_queryDescriptors.push_back(*it);
3224 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3225 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3228 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3230 elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3233 m_queryFilteredKeyPoints.clear();
3234 m_objectFilteredPoints.clear();
3235 m_filteredMatches.clear();
3240 if (m_useMatchTrainToQuery) {
3242 m_queryFilteredKeyPoints.clear();
3243 m_filteredMatches.clear();
3244 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3245 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3246 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3250 m_queryFilteredKeyPoints = m_queryKeyPoints;
3251 m_filteredMatches = m_matches;
3254 if (!m_trainPoints.empty()) {
3255 m_objectFilteredPoints.clear();
3259 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3261 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3273 m_ransacInliers.clear();
3274 m_ransacOutliers.clear();
3276 if (m_useRansacVVS) {
3277 std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3281 for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3282 it != m_objectFilteredPoints.end(); ++it, cpt++) {
3286 vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3288 double x = 0.0, y = 0.0;
3293 objectVpPoints[cpt] = pt;
3296 std::vector<vpPoint> inliers;
3297 std::vector<unsigned int> inlierIndex;
3299 bool res =
computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3301 std::map<unsigned int, bool> mapOfInlierIndex;
3302 m_matchRansacKeyPointsToPoints.clear();
3304 for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3305 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3306 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3307 mapOfInlierIndex[*it] =
true;
3310 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3311 if (mapOfInlierIndex.find((
unsigned int)i) == mapOfInlierIndex.end()) {
3312 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3316 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3318 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3319 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3320 m_ransacInliers.begin(), matchRansacToVpImage);
3322 elapsedTime += m_poseTime;
3327 std::vector<cv::Point2f> imageFilteredPoints;
3328 cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3329 std::vector<int> inlierIndex;
3330 bool res =
computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3332 std::map<int, bool> mapOfInlierIndex;
3333 m_matchRansacKeyPointsToPoints.clear();
3335 for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3336 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3337 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3338 mapOfInlierIndex[*it] =
true;
3341 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3342 if (mapOfInlierIndex.find((
int)i) == mapOfInlierIndex.end()) {
3343 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3347 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3349 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3350 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3351 m_ransacInliers.begin(), matchRansacToVpImage);
3353 elapsedTime += m_poseTime;
3364 return (
matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
3368 vpImagePoint ¢erOfGravity,
const bool isPlanarObject,
3369 std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3370 double *meanDescriptorDistance,
double *detectionScore,
const vpRect &rectangle)
3372 if (imPts1 !=
nullptr && imPts2 !=
nullptr) {
3379 double meanDescriptorDistanceTmp = 0.0;
3380 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3381 meanDescriptorDistanceTmp += (double)it->distance;
3384 meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3385 double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3387 if (meanDescriptorDistance !=
nullptr) {
3388 *meanDescriptorDistance = meanDescriptorDistanceTmp;
3394 if (m_filteredMatches.size() >= 4) {
3396 std::vector<cv::Point2f> points1(m_filteredMatches.size());
3398 std::vector<cv::Point2f> points2(m_filteredMatches.size());
3400 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3401 points1[i] = cv::Point2f(m_trainKeyPoints[(
size_t)m_filteredMatches[i].trainIdx].pt);
3402 points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(
size_t)m_filteredMatches[i].queryIdx].pt);
3405 std::vector<vpImagePoint> inliers;
3406 if (isPlanarObject) {
3407 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3408 cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3410 cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3413 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3415 cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3416 realPoint.at<
double>(0, 0) = points1[i].x;
3417 realPoint.at<
double>(1, 0) = points1[i].y;
3418 realPoint.at<
double>(2, 0) = 1.f;
3420 cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3421 double err_x = (reprojectedPoint.at<
double>(0, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].x;
3422 double err_y = (reprojectedPoint.at<
double>(1, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].y;
3423 double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3425 if (reprojectionError < 6.0) {
3426 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3427 if (imPts1 !=
nullptr) {
3428 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3431 if (imPts2 !=
nullptr) {
3432 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3437 else if (m_filteredMatches.size() >= 8) {
3438 cv::Mat fundamentalInliers;
3439 cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3441 for (
size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
3442 if (fundamentalInliers.at<uchar>((
int)i, 0)) {
3443 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3445 if (imPts1 !=
nullptr) {
3446 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3449 if (imPts2 !=
nullptr) {
3450 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3456 if (!inliers.empty()) {
3463 double meanU = 0.0, meanV = 0.0;
3464 for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
3465 meanU += it->get_u();
3466 meanV += it->get_v();
3469 meanU /= (double)inliers.size();
3470 meanV /= (double)inliers.size();
3472 centerOfGravity.
set_u(meanU);
3473 centerOfGravity.
set_v(meanV);
3482 return meanDescriptorDistanceTmp < m_detectionThreshold;
3485 return score > m_detectionScore;
3494 bool isMatchOk =
matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3499 std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
3501 for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
3505 modelImagePoints[cpt] = imPt;
3514 double meanU = 0.0, meanV = 0.0;
3515 for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
3516 meanU += it->get_u();
3517 meanV += it->get_v();
3520 meanU /= (double)m_ransacInliers.size();
3521 meanV /= (double)m_ransacInliers.size();
3523 centerOfGravity.
set_u(meanU);
3524 centerOfGravity.
set_v(meanV);
3531 std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
3532 std::vector<cv::Mat> &listOfDescriptors,
3538 listOfKeypoints.clear();
3539 listOfDescriptors.clear();
3541 for (
int tl = 1; tl < 6; tl++) {
3542 double t = pow(2, 0.5 * tl);
3543 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3544 std::vector<cv::KeyPoint> keypoints;
3545 cv::Mat descriptors;
3547 cv::Mat timg, mask, Ai;
3550 affineSkew(t, phi, timg, mask, Ai);
3553 if (listOfAffineI !=
nullptr) {
3555 bitwise_and(mask, timg, img_disp);
3558 listOfAffineI->push_back(tI);
3562 cv::bitwise_and(mask, timg, img_disp);
3563 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE);
3564 cv::imshow(
"Skew", img_disp);
3568 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3569 it != m_detectors.end(); ++it) {
3570 std::vector<cv::KeyPoint> kp;
3571 it->second->detect(timg, kp, mask);
3572 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3576 extract(timg, keypoints, descriptors, elapsedTime);
3578 for (
unsigned int i = 0; i < keypoints.size(); i++) {
3579 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3580 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3581 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3582 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
3585 listOfKeypoints.push_back(keypoints);
3586 listOfDescriptors.push_back(descriptors);
3595 std::vector<std::pair<double, int> > listOfAffineParams;
3596 for (
int tl = 1; tl < 6; tl++) {
3597 double t = pow(2, 0.5 * tl);
3598 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3599 listOfAffineParams.push_back(std::pair<double, int>(t, phi));
3603 listOfKeypoints.resize(listOfAffineParams.size());
3604 listOfDescriptors.resize(listOfAffineParams.size());
3606 if (listOfAffineI !=
nullptr) {
3607 listOfAffineI->resize(listOfAffineParams.size());
3610 #ifdef VISP_HAVE_OPENMP
3611 #pragma omp parallel for
3613 for (
int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
3614 std::vector<cv::KeyPoint> keypoints;
3615 cv::Mat descriptors;
3617 cv::Mat timg, mask, Ai;
3620 affineSkew(listOfAffineParams[(
size_t)cpt].first, listOfAffineParams[(
size_t)cpt].second, timg, mask, Ai);
3622 if (listOfAffineI !=
nullptr) {
3624 bitwise_and(mask, timg, img_disp);
3627 (*listOfAffineI)[(size_t)cpt] = tI;
3632 cv::bitwise_and(mask, timg, img_disp);
3633 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE);
3634 cv::imshow(
"Skew", img_disp);
3638 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3639 it != m_detectors.end(); ++it) {
3640 std::vector<cv::KeyPoint> kp;
3641 it->second->detect(timg, kp, mask);
3642 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3646 extract(timg, keypoints, descriptors, elapsedTime);
3648 for (
size_t i = 0; i < keypoints.size(); i++) {
3649 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3650 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3651 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3652 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
3655 listOfKeypoints[(size_t)cpt] = keypoints;
3656 listOfDescriptors[(size_t)cpt] = descriptors;
3669 m_computeCovariance =
false;
3671 m_currentImageId = 0;
3673 m_detectionScore = 0.15;
3674 m_detectionThreshold = 100.0;
3675 m_detectionTime = 0.0;
3676 m_detectorNames.clear();
3677 m_detectors.clear();
3678 m_extractionTime = 0.0;
3679 m_extractorNames.clear();
3680 m_extractors.clear();
3681 m_filteredMatches.clear();
3684 m_knnMatches.clear();
3685 m_mapOfImageId.clear();
3686 m_mapOfImages.clear();
3687 m_matcher = cv::Ptr<cv::DescriptorMatcher>();
3688 m_matcherName =
"BruteForce-Hamming";
3690 m_matchingFactorThreshold = 2.0;
3691 m_matchingRatioThreshold = 0.85;
3692 m_matchingTime = 0.0;
3693 m_matchRansacKeyPointsToPoints.clear();
3694 m_nbRansacIterations = 200;
3695 m_nbRansacMinInlierCount = 100;
3696 m_objectFilteredPoints.clear();
3698 m_queryDescriptors = cv::Mat();
3699 m_queryFilteredKeyPoints.clear();
3700 m_queryKeyPoints.clear();
3701 m_ransacConsensusPercentage = 20.0;
3703 m_ransacInliers.clear();
3704 m_ransacOutliers.clear();
3705 m_ransacParallel =
true;
3706 m_ransacParallelNbThreads = 0;
3707 m_ransacReprojectionError = 6.0;
3708 m_ransacThreshold = 0.01;
3709 m_trainDescriptors = cv::Mat();
3710 m_trainKeyPoints.clear();
3711 m_trainPoints.clear();
3712 m_trainVpPoints.clear();
3713 m_useAffineDetection =
false;
3714 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
3715 m_useBruteForceCrossCheck =
true;
3717 m_useConsensusPercentage =
false;
3719 m_useMatchTrainToQuery =
false;
3720 m_useRansacVVS =
true;
3721 m_useSingleMatchFilter =
true;
3723 m_detectorNames.push_back(
"ORB");
3724 m_extractorNames.push_back(
"ORB");
3732 if (!parent.empty()) {
3736 std::map<int, std::string> mapOfImgPath;
3737 if (saveTrainingImages) {
3738 #ifdef VISP_HAVE_MODULE_IO
3740 unsigned int cpt = 0;
3742 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3748 std::stringstream ss;
3749 ss <<
"train_image_" << std::setfill(
'0') << std::setw(3) << cpt;
3751 switch (m_imageFormat) {
3773 std::string imgFilename = ss.str();
3774 mapOfImgPath[it->first] = imgFilename;
3775 vpImageIo::write(it->second, parent + (!parent.empty() ?
"/" :
"") + imgFilename);
3778 std::cout <<
"Warning: in vpKeyPoint::saveLearningData() training images "
3779 "are not saved because "
3780 "visp_io module is not available !"
3785 bool have3DInfo = m_trainPoints.size() > 0;
3786 if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
3792 std::ofstream file(filename.c_str(), std::ofstream::binary);
3793 if (!file.is_open()) {
3798 int nbImgs = (int)mapOfImgPath.size();
3801 #ifdef VISP_HAVE_MODULE_IO
3802 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3808 std::string path = it->second;
3809 int length = (int)path.length();
3812 for (
int cpt = 0; cpt < length; cpt++) {
3813 file.write((
char *)(&path[(
size_t)cpt]),
sizeof(path[(
size_t)cpt]));
3819 int have3DInfoInt = have3DInfo ? 1 : 0;
3822 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3823 int descriptorType = m_trainDescriptors.type();
3834 for (
int i = 0; i < nRows; i++) {
3835 unsigned int i_ = (
unsigned int)i;
3837 float u = m_trainKeyPoints[i_].pt.x;
3841 float v = m_trainKeyPoints[i_].pt.y;
3845 float size = m_trainKeyPoints[i_].size;
3849 float angle = m_trainKeyPoints[i_].angle;
3853 float response = m_trainKeyPoints[i_].response;
3857 int octave = m_trainKeyPoints[i_].octave;
3861 int class_id = m_trainKeyPoints[i_].class_id;
3865 #ifdef VISP_HAVE_MODULE_IO
3866 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3867 int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
3876 float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
3887 for (
int j = 0; j < nCols; j++) {
3889 switch (descriptorType) {
3891 file.write((
char *)(&m_trainDescriptors.at<
unsigned char>(i, j)),
3892 sizeof(m_trainDescriptors.at<
unsigned char>(i, j)));
3896 file.write((
char *)(&m_trainDescriptors.at<
char>(i, j)),
sizeof(m_trainDescriptors.at<
char>(i, j)));
3929 #if defined(VISP_HAVE_PUGIXML)
3930 pugi::xml_document doc;
3931 pugi::xml_node node = doc.append_child(pugi::node_declaration);
3932 node.append_attribute(
"version") =
"1.0";
3933 node.append_attribute(
"encoding") =
"UTF-8";
3939 pugi::xml_node root_node = doc.append_child(
"LearningData");
3942 pugi::xml_node image_node = root_node.append_child(
"TrainingImageInfo");
3944 #ifdef VISP_HAVE_MODULE_IO
3945 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3946 pugi::xml_node image_info_node = image_node.append_child(
"trainImg");
3947 image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
3948 std::stringstream ss;
3950 image_info_node.append_attribute(
"image_id") = ss.str().c_str();
3955 pugi::xml_node descriptors_info_node = root_node.append_child(
"DescriptorsInfo");
3957 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3958 int descriptorType = m_trainDescriptors.type();
3961 descriptors_info_node.append_child(
"nrows").append_child(pugi::node_pcdata).text() = nRows;
3964 descriptors_info_node.append_child(
"ncols").append_child(pugi::node_pcdata).text() = nCols;
3967 descriptors_info_node.append_child(
"type").append_child(pugi::node_pcdata).text() = descriptorType;
3969 for (
int i = 0; i < nRows; i++) {
3970 unsigned int i_ = (
unsigned int)i;
3971 pugi::xml_node descriptor_node = root_node.append_child(
"DescriptorInfo");
3973 descriptor_node.append_child(
"u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
3974 descriptor_node.append_child(
"v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
3975 descriptor_node.append_child(
"size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
3976 descriptor_node.append_child(
"angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
3977 descriptor_node.append_child(
"response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
3978 descriptor_node.append_child(
"octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
3979 descriptor_node.append_child(
"class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
3981 #ifdef VISP_HAVE_MODULE_IO
3982 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3983 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() =
3984 ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
3986 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() = -1;
3990 descriptor_node.append_child(
"oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
3991 descriptor_node.append_child(
"oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
3992 descriptor_node.append_child(
"oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
3995 pugi::xml_node desc_node = descriptor_node.append_child(
"desc");
3997 for (
int j = 0; j < nCols; j++) {
3998 switch (descriptorType) {
4004 int val_tmp = m_trainDescriptors.at<
unsigned char>(i, j);
4005 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
4013 int val_tmp = m_trainDescriptors.at<
char>(i, j);
4014 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
4018 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4019 m_trainDescriptors.at<
unsigned short int>(i, j);
4023 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
short int>(i, j);
4027 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
int>(i, j);
4031 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
float>(i, j);
4035 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
double>(i, j);
4045 doc.save_file(filename.c_str(), PUGIXML_TEXT(
" "), pugi::format_default, pugi::encoding_utf8);
4052 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
4053 #ifndef DOXYGEN_SHOULD_SKIP_THIS
4055 struct KeypointResponseGreaterThanThreshold
4057 KeypointResponseGreaterThanThreshold(
float _value) : value(_value) { }
4058 inline bool operator()(
const cv::KeyPoint &kpt)
const {
return kpt.response >= value; }
4062 struct KeypointResponseGreater
4064 inline bool operator()(
const cv::KeyPoint &kp1,
const cv::KeyPoint &kp2)
const {
return kp1.response > kp2.response; }
4068 void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints,
int n_points)
4072 if (n_points >= 0 && keypoints.size() > (
size_t)n_points) {
4073 if (n_points == 0) {
4079 std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
4081 float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
4084 std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4085 keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
4088 keypoints.resize((
size_t)(new_end - keypoints.begin()));
4094 RoiPredicate(
const cv::Rect &_r) : r(_r) { }
4096 bool operator()(
const cv::KeyPoint &keyPt)
const {
return !r.contains(keyPt.pt); }
4101 void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4104 if (borderSize > 0) {
4105 if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4108 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4109 RoiPredicate(cv::Rect(
4110 cv::Point(borderSize, borderSize),
4111 cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4116 struct SizePredicate
4118 SizePredicate(
float _minSize,
float _maxSize) : minSize(_minSize), maxSize(_maxSize) { }
4120 bool operator()(
const cv::KeyPoint &keyPt)
const
4122 float size = keyPt.size;
4123 return (size < minSize) || (size > maxSize);
4126 float minSize, maxSize;
4129 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints,
float minSize,
float maxSize)
4131 CV_Assert(minSize >= 0);
4132 CV_Assert(maxSize >= 0);
4133 CV_Assert(minSize <= maxSize);
4135 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4141 MaskPredicate(
const cv::Mat &_mask) : mask(_mask) { }
4142 bool operator()(
const cv::KeyPoint &key_pt)
const
4144 return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
4151 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints,
const cv::Mat &mask)
4156 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4159 struct KeyPoint_LessThan
4161 KeyPoint_LessThan(
const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) { }
4162 bool operator()(
size_t i,
size_t j)
const
4164 const cv::KeyPoint &kp1 = (*kp)[ i];
4165 const cv::KeyPoint &kp2 = (*kp)[ j];
4167 std::numeric_limits<float>::epsilon())) {
4169 return kp1.pt.x < kp2.pt.x;
4173 std::numeric_limits<float>::epsilon())) {
4175 return kp1.pt.y < kp2.pt.y;
4179 std::numeric_limits<float>::epsilon())) {
4181 return kp1.size > kp2.size;
4185 std::numeric_limits<float>::epsilon())) {
4187 return kp1.angle < kp2.angle;
4191 std::numeric_limits<float>::epsilon())) {
4193 return kp1.response > kp2.response;
4196 if (kp1.octave != kp2.octave) {
4197 return kp1.octave > kp2.octave;
4200 if (kp1.class_id != kp2.class_id) {
4201 return kp1.class_id > kp2.class_id;
4206 const std::vector<cv::KeyPoint> *kp;
4209 void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4211 size_t i, j, n = keypoints.size();
4212 std::vector<size_t> kpidx(n);
4213 std::vector<uchar> mask(n, (uchar)1);
4215 for (i = 0; i < n; i++) {
4218 std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4219 for (i = 1, j = 0; i < n; i++) {
4220 cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4221 cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4224 if (!
vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4225 !
vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4226 !
vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4227 !
vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4235 for (i = j = 0; i < n; i++) {
4238 keypoints[j] = keypoints[i];
4243 keypoints.resize(j);
4249 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(
const cv::Ptr<cv::FeatureDetector> &detector,
4251 : m_detector(detector), m_maxLevel(maxLevel)
4254 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty()
const
4256 return m_detector.empty() || (cv::FeatureDetector *)m_detector->empty();
4259 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4260 CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4262 detectImpl(image.getMat(), keypoints, mask.getMat());
4265 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(
const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4266 const cv::Mat &mask)
const
4268 cv::Mat src = image;
4269 cv::Mat src_mask = mask;
4271 cv::Mat dilated_mask;
4272 if (!mask.empty()) {
4273 cv::dilate(mask, dilated_mask, cv::Mat());
4274 cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4275 mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4276 dilated_mask = mask255;
4279 for (
int l = 0, multiplier = 1; l <= m_maxLevel; ++l, multiplier *= 2) {
4281 std::vector<cv::KeyPoint> new_pts;
4282 m_detector->detect(src, new_pts, src_mask);
4283 std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4284 for (; it != end; ++it) {
4285 it->pt.x *= multiplier;
4286 it->pt.y *= multiplier;
4287 it->size *= multiplier;
4290 keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4293 if (l < m_maxLevel) {
4299 #if (VISP_HAVE_OPENCV_VERSION >= 0x050000)
4300 resize(dilated_mask, src_mask, src.size(), 0, 0, cv::INTER_AREA);
4302 resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
4308 vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4315 #elif !defined(VISP_BUILD_SHARED_LIBS)
// No-op symbol emitted so static builds without the required OpenCV modules
// still produce a non-empty translation unit (avoids linker warnings).
// Dropped the stray ';' after the body: it formed an empty declaration that
// triggers -Wpedantic warnings.
void dummy_vpKeyPoint() { }
std::vector< unsigned int > m_matchedReferencePoints
std::vector< vpImagePoint > m_currentImagePointsList
bool m_reference_computed
std::vector< vpImagePoint > m_referenceImagePointsList
Generic class defining intrinsic camera parameters.
Implementation of column vector and the associated operations.
Class to define RGB colors available for display functionalities.
static const vpColor none
static const vpColor green
static void convertFromOpenCV(const cv::KeyPoint &from, vpImagePoint &to)
static void displayCircle(const vpImage< unsigned char > &I, const vpImageCircle &circle, const vpColor &color, bool fill=false, unsigned int thickness=1)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
error that can be emitted by ViSP classes.
const char * what() const
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpHomogeneousMatrix inverse() const
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
static void write(const vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
void set_ij(double ii, double jj)
unsigned int getWidth() const
void insert(const vpImage< Type > &src, const vpImagePoint &topLeft)
unsigned int getHeight() const
unsigned int matchPoint(const vpImage< unsigned char > &I)
void getTrainKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpCylinder > &cylinders, const std::vector< std::vector< std::vector< vpImagePoint > > > &vectorOfCylinderRois, std::vector< cv::Point3f > &points, cv::Mat *descriptors=nullptr)
void detectExtractAffine(const vpImage< unsigned char > &I, std::vector< std::vector< cv::KeyPoint > > &listOfKeypoints, std::vector< cv::Mat > &listOfDescriptors, std::vector< vpImage< unsigned char > > *listOfAffineI=nullptr)
void initMatcher(const std::string &matcherName)
vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType, const std::string &matcherName, const vpFilterMatchingType &filterType=ratioDistanceThreshold)
void display(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, unsigned int size=3)
void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector< cv::DMatch > &matches, double &elapsedTime)
static void compute3D(const cv::KeyPoint &candidate, const std::vector< vpPoint > &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
@ DETECTOR_KAZE
KAZE detector.
@ DETECTOR_BRISK
BRISK detector.
@ DETECTOR_AKAZE
AKAZE detector.
@ DETECTOR_MSER
MSER detector.
@ DETECTOR_AGAST
AGAST detector.
@ DETECTOR_FAST
FAST detector.
@ DETECTOR_GFTT
GFTT detector.
@ DETECTOR_ORB
ORB detector.
@ DETECTOR_SimpleBlob
SimpleBlob detector.
void createImageMatching(vpImage< unsigned char > &IRef, vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
void loadLearningData(const std::string &filename, bool binaryMode=false, bool append=false)
bool computePose(const std::vector< cv::Point2f > &imagePoints, const std::vector< cv::Point3f > &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector< int > &inlierIndex, double &elapsedTime, bool(*func)(const vpHomogeneousMatrix &)=nullptr)
void extract(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, cv::Mat &descriptors, std::vector< cv::Point3f > *trainPoints=nullptr)
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=nullptr)
bool matchPointAndDetect(const vpImage< unsigned char > &I, vpRect &boundingBox, vpImagePoint &centerOfGravity, const bool isPlanarObject=true, std::vector< vpImagePoint > *imPts1=nullptr, std::vector< vpImagePoint > *imPts2=nullptr, double *meanDescriptorDistance=nullptr, double *detectionScore=nullptr, const vpRect &rectangle=vpRect())
@ DESCRIPTOR_LATCH
LATCH descriptor.
@ DESCRIPTOR_AKAZE
AKAZE descriptor.
@ DESCRIPTOR_BRIEF
BRIEF descriptor.
@ DESCRIPTOR_FREAK
FREAK descriptor.
@ DESCRIPTOR_ORB
ORB descriptor.
@ DESCRIPTOR_KAZE
KAZE descriptor.
@ DESCRIPTOR_DAISY
DAISY descriptor.
@ DESCRIPTOR_BRISK
BRISK descriptor.
void getQueryKeyPoints(std::vector< cv::KeyPoint > &keyPoints, bool matches=true) const
void saveLearningData(const std::string &filename, bool binaryMode=false, bool saveTrainingImages=true)
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
void displayMatching(const vpImage< unsigned char > &IRef, vpImage< unsigned char > &IMatching, unsigned int crossSize, unsigned int lineThickness=1, const vpColor &color=vpColor::green)
unsigned int buildReference(const vpImage< unsigned char > &I)
void loadConfigFile(const std::string &configFile)
void getTrainPoints(std::vector< cv::Point3f > &points) const
void getObjectPoints(std::vector< cv::Point3f > &objectPoints) const
void insertImageMatching(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
static bool isNaN(double value)
static bool equal(double x, double y, double threshold=0.001)
static int round(double x)
Implementation of a matrix and operations on matrices.
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
This class defines the container for a plane geometrical structure.
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
double get_oX() const
Get the point oX coordinate in the object frame.
void set_x(double x)
Set the point x coordinate in the image plane.
double get_y() const
Get the point y coordinate in the image plane.
double get_oZ() const
Get the point oZ coordinate in the object frame.
void set_oY(double oY)
Set the point oY coordinate in the object frame.
double get_x() const
Get the point x coordinate in the image plane.
void set_oZ(double oZ)
Set the point oZ coordinate in the object frame.
void set_oX(double oX)
Set the point oX coordinate in the object frame.
double get_oY() const
Get the point oY coordinate in the object frame.
void setWorldCoordinates(double oX, double oY, double oZ)
void set_y(double y)
Set the point y coordinate in the image plane.
Defines a generic 2D polygon.
vpRect getBoundingBox() const
bool isInside(const vpImagePoint &iP, const PointInPolygonMethod &method=PnPolyRayCasting) const
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
void setRansacMaxTrials(const int &rM)
void addPoint(const vpPoint &P)
void setRansacNbInliersToReachConsensus(const unsigned int &nbC)
vpMatrix getCovarianceMatrix() const
void setCovarianceComputation(const bool &flag)
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, FuncCheckValidityPose func=nullptr)
std::vector< unsigned int > getRansacInlierIndex() const
void setRansacFilterFlag(const RANSAC_FILTER_FLAGS &flag)
@ NO_FILTER
No filter is applied.
void setUseParallelRansac(bool use)
std::vector< vpPoint > getRansacInliers() const
void setNbParallelRansacThreads(int nb)
void setRansacThreshold(const double &t)
Defines a rectangle in the plane.
Implementation of a rotation vector as axis-angle minimal representation.
Class that consider the case of a translation vector.
std::string getDetectorName() const
double getMatchingRatioThreshold() const
std::string getExtractorName() const
void parse(const std::string &filename)
double getRansacReprojectionError() const
bool getUseRansacVVSPoseEstimation() const
double getMatchingFactorThreshold() const
int getNbRansacMinInlierCount() const
bool getUseRansacConsensusPercentage() const
std::string getMatcherName() const
int getNbRansacIterations() const
double getRansacConsensusPercentage() const
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
vpMatchingMethodEnum getMatchingMethod() const
double getRansacThreshold() const
VISP_EXPORT double measureTimeMs()