37 #include <visp3/core/vpIoTools.h>
38 #include <visp3/vision/vpKeyPoint.h>
40 #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D)
42 #include <pugixml.hpp>
// Helper: collapse a k-NN match list to a single cv::DMatch — presumably the
// best (first) neighbor when the list is non-empty. TODO confirm against the
// missing body (interior lines are absent from this chunk).
47 inline cv::DMatch knnToDMatch(
const std::vector<cv::DMatch> &knnMatches)
49 if (knnMatches.size() > 0) {
// Helper: convert a (cv::KeyPoint, cv::Point3f) RANSAC match pair into a
// vpImagePoint. Body not visible in this chunk (extraction gap).
56 inline vpImagePoint matchRansacToVpImage(
const std::pair<cv::KeyPoint, cv::Point3f> &pair)
// Constructor (enum detector/descriptor types): initializes every member to
// its documented default (RANSAC iterations 200, min inliers 100, matching
// ratio 0.85, reprojection error 6.0, etc.), then registers the single
// detector/extractor names resolved from the enum lookup maps.
// NOTE(review): the signature line and parts of the body are missing from
// this chunk (extraction gaps).
65 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
66 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
67 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
68 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
69 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
70 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
71 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
72 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
73 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
74 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
// Brute-force cross-check member only exists for OpenCV 2.4.x builds.
75 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
76 m_useBruteForceCrossCheck(true),
78 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
79 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
// Resolve the enum types to their string names via the lookup maps.
83 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
84 m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);
// Constructor (string detector/extractor names): same member defaults as the
// enum-based constructor; stores the caller-supplied names directly.
// NOTE(review): signature line and parts of the body missing (extraction gaps).
91 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
92 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
93 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
94 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
95 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
96 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
97 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
98 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
99 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
100 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
// Brute-force cross-check member only exists for OpenCV 2.4.x builds.
101 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
102 m_useBruteForceCrossCheck(true),
104 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
105 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
109 m_detectorNames.push_back(detectorName);
110 m_extractorNames.push_back(extractorName);
// Constructor (vectors of detector/extractor names): unlike the other two
// overloads, the name lists are initialized directly in the member-init list
// (m_detectorNames(detectorNames), m_extractorNames(extractorNames)).
// NOTE(review): signature line and parts of the body missing (extraction gaps).
117 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
118 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
119 m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
120 m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
121 m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
122 m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
123 m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
124 m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0),
125 m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(), m_ransacParallel(false),
126 m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01), m_trainDescriptors(),
127 m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
// Brute-force cross-check member only exists for OpenCV 2.4.x builds.
128 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
129 m_useBruteForceCrossCheck(true),
131 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
132 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
// Simulates an affine camera viewpoint change (ASIFT-style): applies an
// in-plane rotation `phi` and a directional tilt `tilt` to `img`, fills
// `mask` with the valid (non-border) region, and returns the inverse affine
// transform in `Ai` so detected keypoints can be mapped back.
// NOTE(review): several interior lines are missing from this chunk.
138 void vpKeyPoint::affineSkew(
double tilt,
double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
143 mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));
145 cv::Mat A = cv::Mat::eye(2, 3, CV_32F);
// Rotation branch — only when phi is meaningfully non-zero.
148 if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
153 A = (cv::Mat_<float>(2, 2) << c, -s, s, c);
155 cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
156 cv::Mat tcorners = corners * A.t();
157 cv::Mat tcorners_x, tcorners_y;
158 tcorners.col(0).copyTo(tcorners_x);
159 tcorners.col(1).copyTo(tcorners_y);
160 std::vector<cv::Mat> channels;
161 channels.push_back(tcorners_x);
162 channels.push_back(tcorners_y);
163 cv::merge(channels, tcorners);
// Bounding rect of the rotated corners yields the translation (-rect.x,
// -rect.y) that keeps the warped image inside the output canvas.
165 cv::Rect rect = cv::boundingRect(tcorners);
166 A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
168 cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
// Tilt branch — anti-alias blur (sigma = 0.8*sqrt(t^2-1)) then subsample
// along x by 1/tilt; the transform's first row is rescaled to match.
171 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
172 double s = 0.8 * sqrt(tilt * tilt - 1);
173 cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
174 cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
175 A.row(0) = A.row(0) / tilt;
// If any warp was applied, propagate it to the mask (nearest-neighbor keeps
// the mask binary).
178 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
179 std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
182 cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
184 cv::invertAffineTransform(A, Ai);
// buildReference(I, rectangle): resets the training database (points, image
// maps, current image id) and rebuilds it from image I — either via the
// multi-view affine detection path or a single detect+extract pass — then
// feeds the train descriptors to the matcher. Returns the number of train
// keypoints. NOTE(review): signature and several interior lines missing.
208 m_trainPoints.clear();
209 m_mapOfImageId.clear();
210 m_mapOfImages.clear();
211 m_currentImageId = 1;
// Affine path: detect/extract on multiple simulated views, then flatten the
// per-view keypoint/descriptor lists into the train sets.
213 if (m_useAffineDetection) {
214 std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
215 std::vector<cv::Mat> listOfTrainDescriptors;
221 m_trainKeyPoints.clear();
222 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
223 it != listOfTrainKeyPoints.end(); ++it) {
224 m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
// First descriptor matrix replaces m_trainDescriptors; the rest are appended.
228 for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
232 it->copyTo(m_trainDescriptors);
235 m_trainDescriptors.push_back(*it);
// Non-affine path: one detect + extract over the (optional) rectangle ROI.
240 detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
241 extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
// Record which training image each keypoint's class_id belongs to.
246 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
247 m_mapOfImageId[it->class_id] = m_currentImageId;
251 m_mapOfImages[m_currentImageId] = I;
// Hand the train descriptors to the matcher (single-Mat train set).
260 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
262 return static_cast<unsigned int>(m_trainKeyPoints.size());
// buildReference(I, trainKeyPoints, points3f, append, class_id): extracts
// descriptors for caller-supplied keypoints with known 3D coordinates. If
// extraction dropped some keypoints, points3f is re-filtered via a keypoint
// hash map so 3D points stay aligned with the surviving keypoints, then the
// descriptor-based overload is called. NOTE(review): fragmentary lines.
272 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
274 cv::Mat trainDescriptors;
// Keep a copy to detect keypoints removed by the extractor.
276 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
278 extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
280 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
// Map keypoint hash -> original index to recover each survivor's 3D point.
284 std::map<size_t, size_t> mapOfKeypointHashes;
286 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
288 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
291 std::vector<cv::Point3f> trainPoints_tmp;
292 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
293 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
294 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
299 points3f = trainPoints_tmp;
302 return (
buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
// Color-image variant of buildReference(I, trainKeyPoints, points3f, ...):
// identical keypoint/3D-point re-alignment logic, operating on I_color.
// NOTE(review): fragmentary lines (extraction gaps).
306 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
308 cv::Mat trainDescriptors;
// Keep a copy to detect keypoints removed by the extractor.
310 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
312 extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
314 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
// Map keypoint hash -> original index to recover each survivor's 3D point.
318 std::map<size_t, size_t> mapOfKeypointHashes;
320 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
322 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
325 std::vector<cv::Point3f> trainPoints_tmp;
326 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
327 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
328 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
333 points3f = trainPoints_tmp;
336 return (
buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id));
// Core buildReference(I, keypoints, descriptors, points3f, append, class_id):
// when not appending, clears the whole training database (note
// m_currentImageId = 0 here vs 1 in the detect-based overload); optionally
// overrides every keypoint's class_id; records image-id bookkeeping; then
// merges keypoints/descriptors/3D points into the train sets and re-adds the
// descriptors to the matcher. NOTE(review): interior lines missing.
341 const std::vector<cv::KeyPoint> &trainKeyPoints,
342 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
343 bool append,
int class_id)
346 m_trainPoints.clear();
347 m_mapOfImageId.clear();
348 m_mapOfImages.clear();
349 m_currentImageId = 0;
350 m_trainKeyPoints.clear();
// Work on a copy so the caller's keypoints keep their class_id.
355 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
// class_id == -1 means "keep existing ids"; otherwise stamp all keypoints.
357 if (class_id != -1) {
358 for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
359 it->class_id = class_id;
365 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
367 m_mapOfImageId[it->class_id] = m_currentImageId;
371 m_mapOfImages[m_currentImageId] = I;
374 m_trainKeyPoints.insert(m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
// Replace descriptors when rebuilding; append rows otherwise.
376 trainDescriptors.copyTo(m_trainDescriptors);
379 m_trainDescriptors.push_back(trainDescriptors);
381 this->m_trainPoints.insert(m_trainPoints.end(), points3f.begin(), points3f.end());
389 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
393 return static_cast<unsigned int>(m_trainKeyPoints.size());
// Color wrapper: presumably converts I_color into the gray member m_I before
// delegating to the gray-image core overload — TODO confirm (conversion line
// is missing from this chunk).
397 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
398 bool append,
int class_id)
401 return (
buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
// compute3D (cv::KeyPoint variant), fragment: fits a plane through the first
// three ROI points, back-projects the keypoint onto it in the camera frame,
// then transforms the 3D point to the object frame via cMo.inverse().
// NOTE(review): signature and most of the body missing from this chunk.
408 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
415 vpPlane Po(pts[0], pts[1], pts[2]);
416 double xc = 0.0, yc = 0.0;
427 point_obj = cMo.
inverse() * point_cam;
428 point = cv::Point3f((
float)point_obj[0], (
float)point_obj[1], (
float)point_obj[2]);
// compute3D (vpImagePoint variant), fragment: same plane-intersection
// back-projection as the cv::KeyPoint variant, returning a vpPoint instead.
// NOTE(review): signature and most of the body missing from this chunk.
435 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
442 vpPlane Po(pts[0], pts[1], pts[2]);
443 double xc = 0.0, yc = 0.0;
454 point_obj = cMo.
inverse() * point_cam;
// compute3DForPointsInPolygons (cv::KeyPoint): for each candidate keypoint,
// finds the first model polygon containing it, computes its 3D object-frame
// coordinate, and optionally keeps the matching descriptor row. Candidates
// are paired with their original index so descriptor rows stay aligned, and
// matched candidates are erased so each keypoint is assigned at most once.
// NOTE(review): interior lines missing (e.g. the `pt` computation).
459 std::vector<cv::KeyPoint> &candidates,
460 const std::vector<vpPolygon> &polygons,
461 const std::vector<std::vector<vpPoint> > &roisPt,
462 std::vector<cv::Point3f> &points, cv::Mat *descriptors)
464 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
// Pair each candidate with its original index (for descriptor row lookup).
471 std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
472 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
473 pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
477 std::vector<vpPolygon> polygons_tmp = polygons;
478 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
479 std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
481 while (it2 != pairOfCandidatesToCheck.end()) {
// Keypoint pt is (x, y); vpImagePoint is (i=row, j=col), hence (y, x).
482 imPt.
set_ij(it2->first.pt.y, it2->first.pt.x);
483 if (it1->isInside(imPt)) {
484 candidates.push_back(it2->first);
486 points.push_back(pt);
488 if (descriptors !=
nullptr) {
489 desc.push_back(descriptors->row((
int)it2->second));
// Erase matched candidates so they are not tested against later polygons.
493 it2 = pairOfCandidatesToCheck.erase(it2);
501 if (descriptors !=
nullptr) {
502 desc.copyTo(*descriptors);
// compute3DForPointsInPolygons (vpImagePoint): same polygon-membership /
// 3D-point / descriptor-filter logic as the cv::KeyPoint overload, but the
// candidates are already vpImagePoint so no (y, x) swap is needed.
// NOTE(review): interior lines missing (the `pt` computation, trailing copy).
507 std::vector<vpImagePoint> &candidates,
508 const std::vector<vpPolygon> &polygons,
509 const std::vector<std::vector<vpPoint> > &roisPt,
510 std::vector<vpPoint> &points, cv::Mat *descriptors)
512 std::vector<vpImagePoint> candidatesToCheck = candidates;
// Pair each candidate with its original index (for descriptor row lookup).
518 std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
519 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
520 pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
524 std::vector<vpPolygon> polygons_tmp = polygons;
525 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
526 std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
528 while (it2 != pairOfCandidatesToCheck.end()) {
529 if (it1->isInside(it2->first)) {
530 candidates.push_back(it2->first);
532 points.push_back(pt);
534 if (descriptors !=
nullptr) {
535 desc.push_back(descriptors->row((
int)it2->second));
// Erase matched candidates so they are not tested against later polygons.
539 it2 = pairOfCandidatesToCheck.erase(it2);
// compute3DForPointsOnCylinders (cv::KeyPoint): for each candidate keypoint,
// scans every cylinder's image-plane ROIs; on a hit, computes the depth Z on
// the cylinder, back-projects to the camera frame, transforms to the object
// frame with cMo.inverse(), and (optionally) keeps the descriptor row.
// NOTE(review): interior lines missing (ROI hit test, pixel->meter conv.).
551 const std::vector<vpCylinder> &cylinders,
552 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
553 cv::Mat *descriptors)
555 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
561 size_t cpt_keypoint = 0;
562 for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
563 ++it1, cpt_keypoint++) {
564 size_t cpt_cylinder = 0;
// Outer: cylinders; inner: that cylinder's list of image ROIs.
567 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
568 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
571 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
573 candidates.push_back(*it1);
577 double xm = 0.0, ym = 0.0;
579 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
// Guard against invalid/behind-camera depth before back-projecting.
581 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
583 point_cam[0] = xm * Z;
584 point_cam[1] = ym * Z;
588 point_obj = cMo.
inverse() * point_cam;
591 points.push_back(cv::Point3f((
float)pt.
get_oX(), (
float)pt.
get_oY(), (
float)pt.
get_oZ()));
593 if (descriptors !=
nullptr) {
594 desc.push_back(descriptors->row((
int)cpt_keypoint));
604 if (descriptors !=
nullptr) {
605 desc.copyTo(*descriptors);
// compute3DForPointsOnCylinders (vpImagePoint): same cylinder back-projection
// as the cv::KeyPoint overload, producing vpPoint results instead of
// cv::Point3f. NOTE(review): interior lines missing (ROI hit test, etc.).
611 const std::vector<vpCylinder> &cylinders,
612 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
613 cv::Mat *descriptors)
615 std::vector<vpImagePoint> candidatesToCheck = candidates;
621 size_t cpt_keypoint = 0;
622 for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
623 ++it1, cpt_keypoint++) {
624 size_t cpt_cylinder = 0;
// Outer: cylinders; inner: that cylinder's list of image ROIs.
627 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
628 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
631 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
633 candidates.push_back(*it1);
637 double xm = 0.0, ym = 0.0;
639 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
// Guard against invalid/behind-camera depth before back-projecting.
641 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
643 point_cam[0] = xm * Z;
644 point_cam[1] = ym * Z;
648 point_obj = cMo.
inverse() * point_cam;
651 points.push_back(pt);
653 if (descriptors !=
nullptr) {
654 desc.push_back(descriptors->row((
int)cpt_keypoint));
664 if (descriptors !=
nullptr) {
665 desc.copyTo(*descriptors);
// computePose (OpenCV path): estimates cMo from 2D/3D correspondences with
// cv::solvePnPRansac. Requires at least 4 matched point pairs. The OpenCV
// >= 3.0 call uses SOLVEPNP_ITERATIVE; the legacy branch converts the
// consensus percentage into an absolute inlier count when enabled. The
// resulting rvec/tvec are converted to ViSP theta-u / translation vectors.
// NOTE(review): interior lines missing (camera matrix fill, cMo build,
// optional `func` validation callback body).
675 if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
677 std::cerr <<
"Not enough points to compute the pose (at least 4 points "
684 cv::Mat cameraMatrix =
// No lens distortion is modeled: points are assumed already undistorted.
694 cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);
697 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
699 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
700 (
float)m_ransacReprojectionError,
703 inlierIndex, cv::SOLVEPNP_ITERATIVE);
// Legacy (OpenCV 2.4.x) API: takes an absolute inlier-count target.
723 int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
724 if (m_useConsensusPercentage) {
725 nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
728 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
729 (
float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
// OpenCV failures are reported but do not propagate as exceptions.
732 catch (cv::Exception &e) {
733 std::cerr << e.what() << std::endl;
// Convert Rodrigues rvec / tvec into ViSP pose representation.
737 vpTranslationVector translationVec(tvec.at<
double>(0), tvec.at<
double>(1), tvec.at<
double>(2));
738 vpThetaUVector thetaUVector(rvec.at<
double>(0), rvec.at<
double>(1), rvec.at<
double>(2));
// Optional user callback to validate the estimated pose.
741 if (func !=
nullptr) {
// Convenience overload: forwards to the full computePose with a throwaway
// inlier-index vector.
755 std::vector<vpPoint> &inliers,
double &elapsedTime,
758 std::vector<unsigned int> inlierIndex;
759 return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
// computePose (ViSP RANSAC path): pose estimation from vpPoint
// correspondences using vpPose RANSAC; needs at least 4 points. Optionally
// converts the consensus percentage into an absolute inlier-count target and
// can compute the pose covariance. Returns whether RANSAC succeeded.
// NOTE(review): interior lines missing (vpPose setup, RANSAC invocation).
763 std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
double &elapsedTime,
768 if (objectVpPoints.size() < 4) {
778 for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
782 unsigned int nbInlierToReachConsensus = (
unsigned int)m_nbRansacMinInlierCount;
783 if (m_useConsensusPercentage) {
784 nbInlierToReachConsensus =
785 (
unsigned int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
795 bool isRansacPoseEstimationOk =
false;
802 if (m_computeCovariance) {
// Estimation failures are logged, not rethrown.
807 std::cerr <<
"e=" << e.
what() << std::endl;
825 return isRansacPoseEstimationOk;
// Mean reprojection error (pixels) of the matched keypoints under the
// estimated pose: each 3D point is projected and compared to its detected
// 2D location; returns the average Euclidean distance.
// NOTE(review): interior lines missing (projection / meter-to-pixel step
// producing u, v; early-return value for the empty case).
828 double vpKeyPoint::computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
// Empty match list: avoid dividing by zero below.
831 if (matchKeyPoints.size() == 0) {
837 std::vector<double> errors(matchKeyPoints.size());
840 for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
841 it != matchKeyPoints.end(); ++it, cpt++) {
846 double u = 0.0, v = 0.0;
848 errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
851 return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
// createImageMatching (gray): sizes the side-by-side matching canvas. Lays
// out the N training images plus the current image on a near-square grid
// (ceil(sqrt(N+1)) columns), using the maximum image dimensions as the cell
// size. NOTE(review): canvas allocation lines missing from this chunk.
878 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
880 if (m_mapOfImages.empty()) {
881 std::cerr <<
"There is no training image loaded !" << std::endl;
892 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
895 unsigned int nbWidth = nbImgSqrt;
897 unsigned int nbHeight = nbImgSqrt;
// sqrt rounding can undershoot; widen the grid by one column if needed.
900 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Cell size = max dimensions over the current image and all train images.
904 unsigned int maxW = ICurrent.
getWidth();
905 unsigned int maxH = ICurrent.
getHeight();
906 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
908 if (maxW < it->second.getWidth()) {
909 maxW = it->second.getWidth();
912 if (maxH < it->second.getHeight()) {
913 maxH = it->second.getHeight();
// createImageMatching (color): same grid-layout computation as the gray
// variant (training images are stored as grayscale in m_mapOfImages).
// NOTE(review): canvas allocation lines missing from this chunk.
925 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
927 if (m_mapOfImages.empty()) {
928 std::cerr <<
"There is no training image loaded !" << std::endl;
939 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
942 unsigned int nbWidth = nbImgSqrt;
944 unsigned int nbHeight = nbImgSqrt;
// sqrt rounding can undershoot; widen the grid by one column if needed.
947 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Cell size = max dimensions over the current image and all train images.
951 unsigned int maxW = ICurrent.
getWidth();
952 unsigned int maxH = ICurrent.
getHeight();
953 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
955 if (maxW < it->second.getWidth()) {
956 maxW = it->second.getWidth();
959 if (maxH < it->second.getHeight()) {
960 maxH = it->second.getHeight();
// Convenience detect() wrappers (fragments): forward to the overloads that
// also report the elapsed detection time. Signatures missing from this chunk.
971 detect(I, keyPoints, elapsedTime, rectangle);
977 detect(I_color, keyPoints, elapsedTime, rectangle);
// detect(cv::Mat, mask) wrapper: forwards to the timed core overload,
// discarding the elapsed time (local declaration not visible in this chunk).
980 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
const cv::Mat &mask)
983 detect(matImg, keyPoints, elapsedTime, mask);
// detect (gray image + vpRect): builds a binary mask from the rectangle —
// white inside the ROI, black elsewhere; a full-white mask when no rectangle
// is given — then runs the timed core detect on the converted cv::Mat.
// NOTE(review): signature, conversion, and the rightBottom line are missing.
991 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
994 cv::Point leftTop((
int)rectangle.
getLeft(), (
int)rectangle.
getTop()),
// CV_FILLED paints the whole ROI white (detection allowed there only).
996 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
999 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1002 detect(matImg, keyPoints, elapsedTime, mask);
// detect (color image + vpRect): identical mask construction to the gray
// variant before delegating to the timed core detect.
// NOTE(review): signature, conversion, and the rightBottom line are missing.
1010 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1013 cv::Point leftTop((
int)rectangle.
getLeft(), (
int)rectangle.
getTop()),
// CV_FILLED paints the whole ROI white (detection allowed there only).
1015 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1018 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1021 detect(matImg, keyPoints, elapsedTime, mask);
// Core detect: runs every configured detector over the masked image and
// concatenates all resulting keypoints into `keyPoints`. Timing bookkeeping
// lines are missing from this chunk (elapsedTime is presumably accumulated
// around the loop — TODO confirm).
1024 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
1025 const cv::Mat &mask)
1030 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
1031 it != m_detectors.end(); ++it) {
1032 std::vector<cv::KeyPoint> kp;
1034 it->second->detect(matImg, kp, mask);
1035 keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
// display() overload fragments: four overloads that convert the filtered
// matches' train/query keypoints to vpImagePoint lists and presumably draw
// them on the reference/current images — drawing lines are missing from this
// chunk (extraction gaps), so only the conversion loops are visible.
1043 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1045 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1048 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1056 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1058 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1061 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1069 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1072 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1079 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1082 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
// displayMatching (IRef | ICurrent side by side): draws each filtered match
// as a line from the train keypoint (left, reference image) to the query
// keypoint (right, offset by IRef width). A random color per match is used
// when the caller passes vpColor::none — srand(time) makes colors differ per
// run. NOTE(review): signature and drawing calls missing from this chunk.
1088 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
1091 srand((
unsigned int)time(
nullptr));
1094 std::vector<vpImagePoint> queryImageKeyPoints;
1096 std::vector<vpImagePoint> trainImageKeyPoints;
1100 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1102 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1105 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
// Right endpoint: query keypoint shifted by the reference image width.
1106 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1107 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
// displayMatching overload (second image variant): same match-line drawing
// as the first overload — random per-match color unless one is supplied,
// right endpoint offset by the reference image width.
// NOTE(review): signature and drawing calls missing from this chunk.
1115 unsigned int lineThickness,
const vpColor &color)
1118 srand((
unsigned int)time(
nullptr));
1121 std::vector<vpImagePoint> queryImageKeyPoints;
1123 std::vector<vpImagePoint> trainImageKeyPoints;
1127 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1129 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1132 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
// Right endpoint: query keypoint shifted by the reference image width.
1133 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1134 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
// displayMatching overload (third image variant): identical structure to the
// previous two overloads. NOTE(review): signature and drawing calls missing
// from this chunk.
1142 unsigned int lineThickness,
const vpColor &color)
1145 srand((
unsigned int)time(
nullptr));
1148 std::vector<vpImagePoint> queryImageKeyPoints;
1150 std::vector<vpImagePoint> trainImageKeyPoints;
1154 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1156 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1159 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
// Right endpoint: query keypoint shifted by the reference image width.
1160 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1161 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
// displayMatching with RANSAC inliers (gray mosaic variant): draws all
// training images on a grid around the current image (placed in the median
// cell), overlays train/query keypoints, RANSAC inliers/outliers, and a line
// per filtered match from the current image's cell to the train image's
// cell. Requires a previously-built training database.
// NOTE(review): many drawing calls are missing from this chunk.
1170 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1171 unsigned int lineThickness)
1173 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1175 std::cerr <<
"There is no training image loaded !" << std::endl;
// Grid layout: N training images + 1 current image, near-square grid.
1181 int nbImg = (int)(m_mapOfImages.size() + 1);
1190 int nbWidth = nbImgSqrt;
1191 int nbHeight = nbImgSqrt;
1193 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Map image id -> grid index, and find the max cell dimensions.
1197 std::map<int, int> mapOfImageIdIndex;
1200 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1202 mapOfImageIdIndex[it->first] = cpt;
1204 if (maxW < it->second.getWidth()) {
1205 maxW = it->second.getWidth();
1208 if (maxH < it->second.getHeight()) {
1209 maxH = it->second.getHeight();
// The current image occupies the median grid cell; training images before
// it keep their index, those after shift by one.
1215 int medianI = nbHeight / 2;
1216 int medianJ = nbWidth / 2;
1217 int medianIndex = medianI * nbWidth + medianJ;
1218 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1220 int current_class_id_index = 0;
1221 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1222 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1227 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1230 int indexI = current_class_id_index / nbWidth;
1231 int indexJ = current_class_id_index - (indexI * nbWidth);
1232 topLeftCorner.
set_ij((
int)maxH * indexI, (int)maxW * indexJ);
// Query keypoints / inliers / outliers are drawn relative to the current
// image's (median) cell origin.
1239 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1240 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1245 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1250 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
// Match lines: start in the median (current) cell, end in the train
// keypoint's cell (same skip-the-median index adjustment as above).
1256 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1257 int current_class_id = 0;
1258 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1259 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1264 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1267 int indexI = current_class_id / nbWidth;
1268 int indexJ = current_class_id - (indexI * nbWidth);
1270 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1271 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1272 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1273 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
// vpKeyPoint::displayMatching — builds/annotates a mosaic view: training images laid
// out on a nbWidth x nbHeight grid with the current image in the median cell, then
// overlays keypoints, RANSAC inliers/outliers and match lines.
// NOTE(review): the signature head (return type / first parameters) is not present in
// this extraction; lines below carry baked-in original line numbers and hard wraps —
// code tokens are reproduced byte-identically, only comments are added.
1283 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1284 unsigned int lineThickness)
// Nothing can be displayed without at least one loaded training image.
1286 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1288 std::cerr <<
"There is no training image loaded !" << std::endl;
// Total grid cells = number of training images + 1 (the current image).
1294 int nbImg = (int)(m_mapOfImages.size() + 1);
1303 int nbWidth = nbImgSqrt;
1304 int nbHeight = nbImgSqrt;
// Grow the grid when nbImg is not a perfect square.
1306 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Map each training image id to a dense grid index and track the largest cell size.
1310 std::map<int, int> mapOfImageIdIndex;
1313 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1315 mapOfImageIdIndex[it->first] = cpt;
1317 if (maxW < it->second.getWidth()) {
1318 maxW = it->second.getWidth();
1321 if (maxH < it->second.getHeight()) {
1322 maxH = it->second.getHeight();
// The current image occupies the median cell of the mosaic.
1328 int medianI = nbHeight / 2;
1329 int medianJ = nbWidth / 2;
1330 int medianIndex = medianI * nbWidth + medianJ;
// Draw each train keypoint in the cell of the image it belongs to; indices at or past
// the median are shifted by one to leave the median cell free for the current image.
1331 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1333 int current_class_id_index = 0;
1334 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1335 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1340 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1343 int indexI = current_class_id_index / nbWidth;
1344 int indexJ = current_class_id_index - (indexI * nbWidth);
1345 topLeftCorner.
set_ij((
int)maxH * indexI, (int)maxW * indexJ);
// Overlay query keypoints, then RANSAC inliers and outliers, all in the median cell.
1352 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1353 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1358 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1363 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
// Draw one line per filtered match: from the query point (median cell) to the matched
// train point in its own grid cell.
1369 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1370 int current_class_id = 0;
1371 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1372 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1377 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1380 int indexI = current_class_id / nbWidth;
1381 int indexJ = current_class_id - (indexI * nbWidth);
// vpImagePoint is (i=row, j=col): OpenCV pt.y maps to i and pt.x maps to j.
1383 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1384 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1385 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1386 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
// Five convenience overloads of vpKeyPoint::extract — each forwards to the core
// extract(cv::Mat, keyPoints, descriptors, elapsedTime, trainPoints) implementation.
// NOTE(review): the signature heads and the image-to-cv::Mat conversion lines are
// absent from this extraction (gaps in the embedded numbering); code tokens below are
// kept byte-identical.
1396 std::vector<cv::Point3f> *trainPoints)
1399 extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
// Color-image variant forwarding the vpRGBa image.
1403 std::vector<cv::Point3f> *trainPoints)
1406 extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
// cv::Mat variant — matImg presumably produced by a conversion in the missing lines.
1410 std::vector<cv::Point3f> *trainPoints)
1413 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
// Variants that also report the extraction time through elapsedTime.
1417 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1421 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1425 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1429 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
// Core vpKeyPoint::extract — runs every configured descriptor extractor over matImg.
// Extractors may silently drop keypoints; when that happens, keypoint hashes are used
// to keep trainPoints (and previously computed descriptor rows) aligned with the
// surviving keypoints. Multiple extractors have their descriptors concatenated
// horizontally (cv::hconcat).
1433 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1438 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1439 itd != m_extractors.end(); ++itd) {
// First-extractor path when 3D train points must be kept in sync with keypoints.
1443 if (trainPoints !=
nullptr && !trainPoints->empty()) {
// Snapshot the keypoint list so removed keypoints can be detected afterwards.
1446 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1449 itd->second->compute(matImg, keyPoints, descriptors);
1451 if (keyPoints.size() != keyPoints_tmp.size()) {
// Hash each original keypoint to recover the index of every surviving keypoint.
1455 std::map<size_t, size_t> mapOfKeypointHashes;
1457 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1459 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1462 std::vector<cv::Point3f> trainPoints_tmp;
1463 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1464 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1465 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
// Replace with the filtered set so 3D points still match keypoints one-to-one.
1470 *trainPoints = trainPoints_tmp;
// Simple path: no 3D points to maintain.
1475 itd->second->compute(matImg, keyPoints, descriptors);
// Subsequent-extractor path: compute into a temporary and re-align existing data.
1481 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1485 itd->second->compute(matImg, keyPoints, desc);
1487 if (keyPoints.size() != keyPoints_tmp.size()) {
1491 std::map<size_t, size_t> mapOfKeypointHashes;
1493 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1495 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1498 std::vector<cv::Point3f> trainPoints_tmp;
1499 cv::Mat descriptors_tmp;
1500 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1501 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1502 if (trainPoints !=
nullptr && !trainPoints->empty()) {
1503 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
// Keep only descriptor rows of keypoints that survived this extractor.
1506 if (!descriptors.empty()) {
1507 descriptors_tmp.push_back(descriptors.row((
int)mapOfKeypointHashes[myKeypointHash(*it)]));
1512 if (trainPoints !=
nullptr) {
1514 *trainPoints = trainPoints_tmp;
1517 descriptors_tmp.copyTo(descriptors);
// Merge this extractor's descriptors with those computed so far.
1521 if (descriptors.empty()) {
1522 desc.copyTo(descriptors);
1525 cv::hconcat(descriptors, desc, descriptors);
// Sanity check: one descriptor row per keypoint is expected after all extractors.
1530 if (keyPoints.size() != (
size_t)descriptors.rows) {
1531 std::cerr <<
"keyPoints.size() != (size_t) descriptors.rows" << std::endl;
// vpKeyPoint::filterMatches — filters the raw (knn or plain) matches into
// m_filteredMatches / m_objectFilteredPoints / m_queryFilteredKeyPoints.
// Two regimes are visible: knn matches filtered by a ratio test (and a
// distance-threshold statistic), plain matches filtered by a
// min_dist + stdev threshold; optionally a single-match filter removes
// train points matched more than once.
// NOTE(review): interior lines are missing from this extraction (gaps in the
// embedded numbering); code tokens are reproduced byte-identically.
1536 void vpKeyPoint::filterMatches()
1538 std::vector<cv::KeyPoint> queryKpts;
1539 std::vector<cv::Point3f> trainPts;
1540 std::vector<cv::DMatch> m;
// --- knn branch: gather best-match distances to derive statistics. ---
1546 double min_dist = DBL_MAX;
1548 std::vector<double> distance_vec(m_knnMatches.size());
1551 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
1552 double dist = m_knnMatches[i][0].distance;
1554 distance_vec[i] = dist;
1556 if (dist < min_dist) {
1563 mean /= m_queryDescriptors.rows;
// stdev via E[x^2] - E[x]^2 on the distance vector.
1566 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1567 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1568 double threshold = min_dist + stdev;
// Keep knn matches that pass the ratio test (best/second-best distance).
1570 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
1571 if (m_knnMatches[i].size() >= 2) {
1574 float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
1579 double dist = m_knnMatches[i][0].distance;
// queryIdx is re-based on the compacted queryKpts list being built.
1582 m.push_back(cv::DMatch((
int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
1584 if (!m_trainPoints.empty()) {
1585 trainPts.push_back(m_trainPoints[(
size_t)m_knnMatches[i][0].trainIdx]);
1587 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_knnMatches[i][0].queryIdx]);
// --- plain-match branch: same statistics on m_matches. ---
1596 double min_dist = DBL_MAX;
1598 std::vector<double> distance_vec(m_matches.size());
1599 for (
size_t i = 0; i < m_matches.size(); i++) {
1600 double dist = m_matches[i].distance;
1602 distance_vec[i] = dist;
1604 if (dist < min_dist) {
1611 mean /= m_queryDescriptors.rows;
1613 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1614 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
// Keep matches whose distance is under the computed threshold.
1624 for (
size_t i = 0; i < m_matches.size(); i++) {
1625 if (m_matches[i].distance <= threshold) {
1626 m.push_back(cv::DMatch((
int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
1628 if (!m_trainPoints.empty()) {
1629 trainPts.push_back(m_trainPoints[(
size_t)m_matches[i].trainIdx]);
1631 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_matches[i].queryIdx]);
// Optional second pass: drop train indices matched by more than one query keypoint.
1636 if (m_useSingleMatchFilter) {
1639 std::vector<cv::DMatch> mTmp;
1640 std::vector<cv::Point3f> trainPtsTmp;
1641 std::vector<cv::KeyPoint> queryKptsTmp;
// Count occurrences of each train index.
1643 std::map<int, int> mapOfTrainIdx;
1645 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1646 mapOfTrainIdx[it->trainIdx]++;
// Keep only uniquely matched train points, re-basing queryIdx again.
1650 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1651 if (mapOfTrainIdx[it->trainIdx] == 1) {
1652 mTmp.push_back(cv::DMatch((
int)queryKptsTmp.size(), it->trainIdx, it->distance));
1654 if (!m_trainPoints.empty()) {
1655 trainPtsTmp.push_back(m_trainPoints[(
size_t)it->trainIdx]);
1657 queryKptsTmp.push_back(queryKpts[(
size_t)it->queryIdx]);
1661 m_filteredMatches = mTmp;
1662 m_objectFilteredPoints = trainPtsTmp;
1663 m_queryFilteredKeyPoints = queryKptsTmp;
// No single-match filtering: publish the threshold/ratio-filtered sets directly.
1666 m_filteredMatches = m;
1667 m_objectFilteredPoints = trainPts;
1668 m_queryFilteredKeyPoints = queryKpts;
// Accessor fragments (surrounding signatures missing from this extraction):
// presumably getObjectPoints — returns the RANSAC-filtered 3D object points.
1674 objectPoints = m_objectFilteredPoints;
// presumably getQueryKeyPoints(matches=true/false) — returns filtered or raw
// query keypoints depending on a flag in the missing lines — TODO confirm.
1685 keyPoints = m_queryFilteredKeyPoints;
1688 keyPoints = m_queryKeyPoints;
// vpKeyPoint::init — one-time setup: on legacy OpenCV 2.4.x with the nonfree module,
// initializes it (required for SIFT/SURF), then builds the configured detectors and
// extractors from their name lists.
1710 void vpKeyPoint::init()
1713 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
// Non-fatal: only SIFT/SURF become unavailable if the nonfree module fails to load.
1715 if (!cv::initModule_nonfree()) {
1716 std::cerr <<
"Cannot init module non free, SIFT or SURF cannot be used." << std::endl;
1726 initDetectors(m_detectorNames);
1727 initExtractors(m_extractorNames);
// vpKeyPoint::initDetector — creates the OpenCV feature detector matching
// detectorName and stores it in m_detectors. On OpenCV < 3 the factory
// cv::FeatureDetector::create handles everything (including the "Pyramid" prefix);
// on OpenCV >= 3 each detector is created explicitly, and a leading "Pyramid"
// prefix wraps the detector in PyramidAdaptedFeatureDetector.
// Throws (via ss_msg, in lines missing from this extraction) when creation fails.
1731 void vpKeyPoint::initDetector(
const std::string &detectorName)
1733 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
1734 m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
1736 if (m_detectors[detectorName] ==
nullptr) {
1737 std::stringstream ss_msg;
1738 ss_msg <<
"Fail to initialize the detector: " << detectorName
1739 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV >= 3 path: strip an optional "Pyramid" prefix and remember it.
1743 std::string detectorNameTmp = detectorName;
1744 std::string pyramid =
"Pyramid";
1745 std::size_t pos = detectorName.find(pyramid);
1746 bool usePyramid =
false;
1747 if (pos != std::string::npos) {
1748 detectorNameTmp = detectorName.substr(pos + pyramid.size());
// One branch per supported detector name.
1752 if (detectorNameTmp ==
"SIFT") {
1753 #if (VISP_HAVE_OPENCV_VERSION >= 0x040500)
1754 cv::Ptr<cv::FeatureDetector> siftDetector = cv::SiftFeatureDetector::create();
1756 m_detectors[detectorNameTmp] = siftDetector;
1759 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1762 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
1763 (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1765 cv::Ptr<cv::FeatureDetector> siftDetector;
// m_maxFeatures caps the number of retained features when positive.
1766 if (m_maxFeatures > 0) {
1767 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1768 siftDetector = cv::SIFT::create(m_maxFeatures);
1770 siftDetector = cv::xfeatures2d::SIFT::create(m_maxFeatures);
1774 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1775 siftDetector = cv::SIFT::create();
1777 siftDetector = cv::xfeatures2d::SIFT::create();
1781 m_detectors[detectorNameTmp] = siftDetector;
// Pyramid wrapping is discouraged for SIFT (already scale-invariant) but honored.
1784 std::cerr <<
"You should not use SIFT with Pyramid feature detection!" << std::endl;
1785 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1788 std::stringstream ss_msg;
1789 ss_msg <<
"Fail to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1790 <<
" was not build with xFeatures2d module.";
1795 else if (detectorNameTmp ==
"SURF") {
1796 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1797 cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
1799 m_detectors[detectorNameTmp] = surfDetector;
1802 std::cerr <<
"You should not use SURF with Pyramid feature detection!" << std::endl;
1803 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
1806 std::stringstream ss_msg;
1807 ss_msg <<
"Fail to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1808 <<
" was not build with xFeatures2d module.";
1812 else if (detectorNameTmp ==
"FAST") {
1813 cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
1815 m_detectors[detectorNameTmp] = fastDetector;
1818 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1821 else if (detectorNameTmp ==
"MSER") {
// Variable is named fastDetector but holds an MSER detector here.
1822 cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
1824 m_detectors[detectorNameTmp] = fastDetector;
1827 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1830 else if (detectorNameTmp ==
"ORB") {
1831 cv::Ptr<cv::FeatureDetector> orbDetector;
1832 if (m_maxFeatures > 0) {
1833 orbDetector = cv::ORB::create(m_maxFeatures);
1836 orbDetector = cv::ORB::create();
1839 m_detectors[detectorNameTmp] = orbDetector;
1842 std::cerr <<
"You should not use ORB with Pyramid feature detection!" << std::endl;
1843 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
1846 else if (detectorNameTmp ==
"BRISK") {
1847 cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
1849 m_detectors[detectorNameTmp] = briskDetector;
1852 std::cerr <<
"You should not use BRISK with Pyramid feature detection!" << std::endl;
1853 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
1856 else if (detectorNameTmp ==
"KAZE") {
1857 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
1859 m_detectors[detectorNameTmp] = kazeDetector;
1862 std::cerr <<
"You should not use KAZE with Pyramid feature detection!" << std::endl;
1863 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
1866 else if (detectorNameTmp ==
"AKAZE") {
1867 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
1869 m_detectors[detectorNameTmp] = akazeDetector;
1872 std::cerr <<
"You should not use AKAZE with Pyramid feature detection!" << std::endl;
1873 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
1876 else if (detectorNameTmp ==
"GFTT") {
1877 cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
1879 m_detectors[detectorNameTmp] = gfttDetector;
1882 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
1885 else if (detectorNameTmp ==
"SimpleBlob") {
1886 cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
1888 m_detectors[detectorNameTmp] = simpleBlobDetector;
1891 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
1894 else if (detectorNameTmp ==
"STAR") {
1895 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1896 cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
1898 m_detectors[detectorNameTmp] = starDetector;
1901 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
1904 std::stringstream ss_msg;
1905 ss_msg <<
"Fail to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1906 <<
" was not build with xFeatures2d module.";
1910 else if (detectorNameTmp ==
"AGAST") {
1911 cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
1913 m_detectors[detectorNameTmp] = agastDetector;
1916 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
1919 else if (detectorNameTmp ==
"MSD") {
// MSD requires OpenCV >= 3.1 AND the xfeatures2d contrib module.
1920 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
1921 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
1922 cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
1924 m_detectors[detectorNameTmp] = msdDetector;
1927 std::cerr <<
"You should not use MSD with Pyramid feature detection!" << std::endl;
1928 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
1931 std::stringstream ss_msg;
1932 ss_msg <<
"Fail to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1933 <<
" was not build with xFeatures2d module.";
1937 std::stringstream ss_msg;
1938 ss_msg <<
"Feature " << detectorName <<
" is not available in OpenCV version: " << std::hex
1939 << VISP_HAVE_OPENCV_VERSION <<
" (require >= OpenCV 3.1).";
// Fallback: the requested name matched no supported detector.
1943 std::cerr <<
"The detector:" << detectorNameTmp <<
" is not available." << std::endl;
// Verify that a detector was actually stored under the relevant key.
1946 bool detectorInitialized =
false;
1949 detectorInitialized = !m_detectors[detectorNameTmp].empty();
1953 detectorInitialized = !m_detectors[detectorName].empty();
1956 if (!detectorInitialized) {
1957 std::stringstream ss_msg;
1958 ss_msg <<
"Fail to initialize the detector: " << detectorNameTmp
1959 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// vpKeyPoint::initDetectors — initializes one detector per name in the list
// (the loop body, which presumably calls initDetector(*it), is in lines missing
// from this extraction — TODO confirm against the full file).
1965 void vpKeyPoint::initDetectors(
const std::vector<std::string> &detectorNames)
1967 for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
// vpKeyPoint::initExtractor — creates the OpenCV descriptor extractor matching
// extractorName and stores it in m_extractors. OpenCV < 3 uses the string factory;
// OpenCV >= 3 selects each extractor explicitly, reporting (via ss_msg in missing
// lines) when the build lacks the required contrib module or OpenCV version.
1972 void vpKeyPoint::initExtractor(
const std::string &extractorName)
1974 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
1975 m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
1977 if (extractorName ==
"SIFT") {
1978 #if (VISP_HAVE_OPENCV_VERSION >= 0x040500)
1979 m_extractors[extractorName] = cv::SIFT::create();
1981 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
1982 (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1984 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1985 m_extractors[extractorName] = cv::SIFT::create();
1987 m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
1990 std::stringstream ss_msg;
1991 ss_msg <<
"Fail to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1992 <<
" was not build with xFeatures2d module.";
1997 else if (extractorName ==
"SURF") {
1998 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
// SURF with the "extended" flag: 128-element descriptors instead of 64.
2000 m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3,
true);
2002 std::stringstream ss_msg;
2003 ss_msg <<
"Fail to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2004 <<
" was not build with xFeatures2d module.";
2008 else if (extractorName ==
"ORB") {
2009 m_extractors[extractorName] = cv::ORB::create();
2011 else if (extractorName ==
"BRISK") {
2012 m_extractors[extractorName] = cv::BRISK::create();
2014 else if (extractorName ==
"FREAK") {
2015 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2016 m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2018 std::stringstream ss_msg;
2019 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2020 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2024 else if (extractorName ==
"BRIEF") {
2025 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2026 m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2028 std::stringstream ss_msg;
2029 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2030 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2034 else if (extractorName ==
"KAZE") {
2035 m_extractors[extractorName] = cv::KAZE::create();
2037 else if (extractorName ==
"AKAZE") {
2038 m_extractors[extractorName] = cv::AKAZE::create();
2040 else if (extractorName ==
"DAISY") {
2041 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2042 m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2044 std::stringstream ss_msg;
2045 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2046 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2050 else if (extractorName ==
"LATCH") {
2051 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2052 m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2054 std::stringstream ss_msg;
2055 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2056 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2060 else if (extractorName ==
"LUCID") {
// LUCID creation line is absent from this extraction (gap 2061->2066).
2061 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2066 std::stringstream ss_msg;
2067 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2068 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2072 else if (extractorName ==
"VGG") {
// VGG requires OpenCV >= 3.2 AND xfeatures2d.
2073 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2074 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2075 m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2077 std::stringstream ss_msg;
2078 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2079 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2083 std::stringstream ss_msg;
2084 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2085 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
2089 else if (extractorName ==
"BoostDesc") {
// BoostDesc has the same OpenCV >= 3.2 + xfeatures2d requirement as VGG.
2090 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2091 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2092 m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2094 std::stringstream ss_msg;
2095 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2096 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2100 std::stringstream ss_msg;
2101 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2102 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
// Unknown extractor name.
2107 std::cerr <<
"The extractor:" << extractorName <<
" is not available." << std::endl;
// Final validity check on the stored pointer.
2111 if (!m_extractors[extractorName]) {
2112 std::stringstream ss_msg;
2113 ss_msg <<
"Fail to initialize the extractor: " << extractorName
2114 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Legacy OpenCV 2.4.x: force SURF into extended (128-float) descriptor mode.
2118 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2119 if (extractorName ==
"SURF") {
2121 m_extractors[extractorName]->set(
"extended", 1);
// vpKeyPoint::initExtractors — initializes every named extractor, then scans the
// resulting set to determine a common descriptor type (used later for matcher
// configuration); the mismatch branch is in lines missing from this extraction.
2126 void vpKeyPoint::initExtractors(
const std::vector<std::string> &extractorNames)
2128 for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
// Default descriptor type when no extractor is configured.
2132 int descriptorType = CV_32F;
2133 bool firstIteration =
true;
2134 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2135 it != m_extractors.end(); ++it) {
2136 if (firstIteration) {
2137 firstIteration =
false;
2138 descriptorType = it->second->descriptorType();
// All configured extractors are expected to share the same descriptor type.
2141 if (descriptorType != it->second->descriptorType()) {
// vpKeyPoint::initFeatureNames — populates m_mapOfDetectorNames and
// m_mapOfDescriptorNames with the detector/descriptor enum -> OpenCV name strings
// that the current OpenCV build supports (guarded by version/contrib-module macros).
2148 void vpKeyPoint::initFeatureNames()
2151 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
// STAR detector exists in OpenCV 2.x core or via the xfeatures2d contrib module.
2158 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2159 m_mapOfDetectorNames[DETECTOR_STAR] =
"STAR";
2161 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2162 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2165 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2168 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
// MSD detector needs OpenCV >= 3.1 with xfeatures2d.
2173 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2174 m_mapOfDetectorNames[DETECTOR_MSD] =
"MSD";
// Descriptor name registrations follow the same version gating.
2178 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2181 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2182 m_mapOfDescriptorNames[DESCRIPTOR_FREAK] =
"FREAK";
2183 m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] =
"BRIEF";
2185 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2186 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2189 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2192 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2195 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2196 m_mapOfDescriptorNames[DESCRIPTOR_DAISY] =
"DAISY";
2197 m_mapOfDescriptorNames[DESCRIPTOR_LATCH] =
"LATCH";
// VGG and BoostDesc require OpenCV >= 3.2 with xfeatures2d.
2200 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2201 m_mapOfDescriptorNames[DESCRIPTOR_VGG] =
"VGG";
2202 m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] =
"BoostDesc";
// Body of vpKeyPoint::initMatcher (signature line missing from this extraction):
// determines the common descriptor type of the configured extractors, then builds
// the descriptor matcher — FlannBased gets an LSH index for binary (CV_8U)
// descriptors and a KD-tree for float descriptors; any other name goes through
// cv::DescriptorMatcher::create. Fails via ss_msg when the matcher is invalid.
2209 int descriptorType = CV_32F;
2210 bool firstIteration =
true;
2211 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2212 it != m_extractors.end(); ++it) {
2213 if (firstIteration) {
2214 firstIteration =
false;
2215 descriptorType = it->second->descriptorType();
// All extractors are expected to agree on the descriptor type.
2218 if (descriptorType != it->second->descriptorType()) {
2224 if (matcherName ==
"FlannBased") {
2225 if (m_extractors.empty()) {
2226 std::cout <<
"Warning: No extractor initialized, by default use "
2227 "floating values (CV_32F) "
2228 "for descriptor type !"
// Binary descriptors: FLANN needs an LSH index (KD-tree assumes floats).
2232 if (descriptorType == CV_8U) {
2233 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2234 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2236 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::LshIndexParams(12, 20, 2));
2240 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2241 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2243 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::KDTreeIndexParams());
// Any other matcher name is resolved by the OpenCV string factory.
2248 m_matcher = cv::DescriptorMatcher::create(matcherName);
// Legacy 2.4.x BruteForce: cross-check is only usable without knn matching.
2251 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2252 if (m_matcher !=
nullptr && !m_useKnn && matcherName ==
"BruteForce") {
2253 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
2258 std::stringstream ss_msg;
2259 ss_msg <<
"Fail to initialize the matcher: " << matcherName
2260 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Fragments of the two-image vpKeyPoint::insertImageMatching overloads (signatures
// and topLeftCorner setup are in lines missing from this extraction): the reference
// image and the current image are inserted side by side into IMatching.
2269 IMatching.
insert(IRef, topLeftCorner);
2271 IMatching.
insert(ICurrent, topLeftCorner);
// Second (presumably color) overload with the same layout — TODO confirm.
2278 IMatching.
insert(IRef, topLeftCorner);
2280 IMatching.
insert(ICurrent, topLeftCorner);
// Body of a multi-reference vpKeyPoint::insertImageMatching overload (signature
// missing from this extraction): pastes every training image into a square-ish
// mosaic grid, skipping the median cell, where the current image is inserted last.
2287 int nbImg = (int)(m_mapOfImages.size() + 1);
2289 if (m_mapOfImages.empty()) {
2290 std::cerr <<
"There is no training image loaded !" << std::endl;
2301 int nbWidth = nbImgSqrt;
2302 int nbHeight = nbImgSqrt;
// Grow the grid when nbImg is not a perfect square.
2304 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Find the maximum training-image dimensions: each grid cell is maxW x maxH.
2309 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2311 if (maxW < it->second.getWidth()) {
2312 maxW = it->second.getWidth();
2315 if (maxH < it->second.getHeight()) {
2316 maxH = it->second.getHeight();
// The median cell is reserved for the current image.
2322 int medianI = nbHeight / 2;
2323 int medianJ = nbWidth / 2;
2324 int medianIndex = medianI * nbWidth + medianJ;
2327 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
// Shift indices at/after the median cell by one to skip it.
2329 int local_cpt = cpt;
2330 if (cpt >= medianIndex) {
2335 int indexI = local_cpt / nbWidth;
2336 int indexJ = local_cpt - (indexI * nbWidth);
2337 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2339 IMatching.
insert(it->second, topLeftCorner);
// Finally insert the current image into the median cell.
2342 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2343 IMatching.
insert(ICurrent, topLeftCorner);
// Body of the second multi-reference vpKeyPoint::insertImageMatching overload
// (signature missing from this extraction). Same mosaic layout as the previous
// overload; here the visible insertion uses IRef for the grid cells — TODO confirm
// the exact source image against the full file.
2351 int nbImg = (int)(m_mapOfImages.size() + 1);
2353 if (m_mapOfImages.empty()) {
2354 std::cerr <<
"There is no training image loaded !" << std::endl;
2367 int nbWidth = nbImgSqrt;
2368 int nbHeight = nbImgSqrt;
// Grow the grid when nbImg is not a perfect square.
2370 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Each grid cell is sized by the largest training image.
2375 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2377 if (maxW < it->second.getWidth()) {
2378 maxW = it->second.getWidth();
2381 if (maxH < it->second.getHeight()) {
2382 maxH = it->second.getHeight();
// Median cell reserved for the current image.
2388 int medianI = nbHeight / 2;
2389 int medianJ = nbWidth / 2;
2390 int medianIndex = medianI * nbWidth + medianJ;
2393 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
// Skip the median cell by shifting later indices by one.
2395 int local_cpt = cpt;
2396 if (cpt >= medianIndex) {
2401 int indexI = local_cpt / nbWidth;
2402 int indexJ = local_cpt - (indexI * nbWidth);
2403 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2407 IMatching.
insert(IRef, topLeftCorner);
// Insert the current image into the median cell.
2410 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2411 IMatching.
insert(ICurrent, topLeftCorner);
// Fragment of vpKeyPoint::loadConfigFile (signature missing from this extraction):
// resets the current detector/extractor configuration, then parses the XML
// configuration file through the xmlp parser object.
2421 m_detectorNames.clear();
2422 m_extractorNames.clear();
2423 m_detectors.clear();
2424 m_extractors.clear();
2426 std::cout <<
" *********** Parsing XML for configuration for vpKeyPoint "
// Parse the file; parser construction/error handling is in the missing lines.
2429 xmlp.
parse(configFile);
2487 int startClassId = 0;
2488 int startImageId = 0;
2490 m_trainKeyPoints.clear();
2491 m_trainPoints.clear();
2492 m_mapOfImageId.clear();
2493 m_mapOfImages.clear();
2497 for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
2498 if (startClassId < it->first) {
2499 startClassId = it->first;
2504 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2506 if (startImageId < it->first) {
2507 startImageId = it->first;
2514 if (!parent.empty()) {
2519 std::ifstream file(filename.c_str(), std::ifstream::binary);
2520 if (!file.is_open()) {
2528 #if !defined(VISP_HAVE_MODULE_IO)
2530 std::cout <<
"Warning: The learning file contains image data that will "
2531 "not be loaded as visp_io module "
2532 "is not available !"
2537 for (
int i = 0; i < nbImgs; i++) {
2545 char *path =
new char[length + 1];
2547 for (
int cpt = 0; cpt < length; cpt++) {
2549 file.read((
char *)(&c),
sizeof(c));
2552 path[length] =
'\0';
2555 #ifdef VISP_HAVE_MODULE_IO
2564 m_mapOfImages[
id + startImageId] = I;
2572 int have3DInfoInt = 0;
2574 bool have3DInfo = have3DInfoInt != 0;
2585 int descriptorType = 5;
2588 cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2589 for (
int i = 0; i < nRows; i++) {
2591 float u, v, size, angle, response;
2592 int octave, class_id, image_id;
2601 cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
2602 m_trainKeyPoints.push_back(keyPoint);
2604 if (image_id != -1) {
2605 #ifdef VISP_HAVE_MODULE_IO
2607 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2617 m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
2620 for (
int j = 0; j < nCols; j++) {
2622 switch (descriptorType) {
2624 unsigned char value;
2625 file.read((
char *)(&value),
sizeof(value));
2626 trainDescriptorsTmp.at<
unsigned char>(i, j) = value;
2631 file.read((
char *)(&value),
sizeof(value));
2632 trainDescriptorsTmp.at<
char>(i, j) = value;
2636 unsigned short int value;
2638 trainDescriptorsTmp.at<
unsigned short int>(i, j) = value;
2644 trainDescriptorsTmp.at<
short int>(i, j) = value;
2650 trainDescriptorsTmp.at<
int>(i, j) = value;
2656 trainDescriptorsTmp.at<
float>(i, j) = value;
2662 trainDescriptorsTmp.at<
double>(i, j) = value;
2668 trainDescriptorsTmp.at<
float>(i, j) = value;
2674 if (!append || m_trainDescriptors.empty()) {
2675 trainDescriptorsTmp.copyTo(m_trainDescriptors);
2678 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2684 pugi::xml_document doc;
2687 if (!doc.load_file(filename.c_str())) {
2691 pugi::xml_node root_element = doc.document_element();
2693 int descriptorType = CV_32F;
2694 int nRows = 0, nCols = 0;
2697 cv::Mat trainDescriptorsTmp;
2699 for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node;
2700 first_level_node = first_level_node.next_sibling()) {
2702 std::string name(first_level_node.name());
2703 if (first_level_node.type() == pugi::node_element && name ==
"TrainingImageInfo") {
2705 for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node;
2706 image_info_node = image_info_node.next_sibling()) {
2707 name = std::string(image_info_node.name());
2709 if (name ==
"trainImg") {
2711 int id = image_info_node.attribute(
"image_id").as_int();
2714 #ifdef VISP_HAVE_MODULE_IO
2715 std::string path(image_info_node.text().as_string());
2725 m_mapOfImages[
id + startImageId] = I;
2730 else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorsInfo") {
2731 for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
2732 descriptors_info_node = descriptors_info_node.next_sibling()) {
2733 if (descriptors_info_node.type() == pugi::node_element) {
2734 name = std::string(descriptors_info_node.name());
2736 if (name ==
"nrows") {
2737 nRows = descriptors_info_node.text().as_int();
2739 else if (name ==
"ncols") {
2740 nCols = descriptors_info_node.text().as_int();
2742 else if (name ==
"type") {
2743 descriptorType = descriptors_info_node.text().as_int();
2748 trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2750 else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorInfo") {
2751 double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
2752 int octave = 0, class_id = 0, image_id = 0;
2753 double oX = 0.0, oY = 0.0, oZ = 0.0;
2755 std::stringstream ss;
2757 for (pugi::xml_node point_node = first_level_node.first_child(); point_node;
2758 point_node = point_node.next_sibling()) {
2759 if (point_node.type() == pugi::node_element) {
2760 name = std::string(point_node.name());
2764 u = point_node.text().as_double();
2766 else if (name ==
"v") {
2767 v = point_node.text().as_double();
2769 else if (name ==
"size") {
2770 size = point_node.text().as_double();
2772 else if (name ==
"angle") {
2773 angle = point_node.text().as_double();
2775 else if (name ==
"response") {
2776 response = point_node.text().as_double();
2778 else if (name ==
"octave") {
2779 octave = point_node.text().as_int();
2781 else if (name ==
"class_id") {
2782 class_id = point_node.text().as_int();
2783 cv::KeyPoint keyPoint(cv::Point2f((
float)u, (
float)v), (
float)size, (
float)angle, (
float)response, octave,
2784 (class_id + startClassId));
2785 m_trainKeyPoints.push_back(keyPoint);
2787 else if (name ==
"image_id") {
2788 image_id = point_node.text().as_int();
2789 if (image_id != -1) {
2790 #ifdef VISP_HAVE_MODULE_IO
2792 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2796 else if (name ==
"oX") {
2797 oX = point_node.text().as_double();
2799 else if (name ==
"oY") {
2800 oY = point_node.text().as_double();
2802 else if (name ==
"oZ") {
2803 oZ = point_node.text().as_double();
2804 m_trainPoints.push_back(cv::Point3f((
float)oX, (
float)oY, (
float)oZ));
2806 else if (name ==
"desc") {
2809 for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
2810 descriptor_value_node = descriptor_value_node.next_sibling()) {
2812 if (descriptor_value_node.type() == pugi::node_element) {
2814 std::string parseStr(descriptor_value_node.text().as_string());
2819 switch (descriptorType) {
2824 trainDescriptorsTmp.at<
unsigned char>(i, j) = (
unsigned char)parseValue;
2831 trainDescriptorsTmp.at<
char>(i, j) = (
char)parseValue;
2835 ss >> trainDescriptorsTmp.at<
unsigned short int>(i, j);
2839 ss >> trainDescriptorsTmp.at<
short int>(i, j);
2843 ss >> trainDescriptorsTmp.at<
int>(i, j);
2847 ss >> trainDescriptorsTmp.at<
float>(i, j);
2851 ss >> trainDescriptorsTmp.at<
double>(i, j);
2855 ss >> trainDescriptorsTmp.at<
float>(i, j);
2860 std::cerr <<
"Error when converting:" << ss.str() << std::endl;
2873 if (!append || m_trainDescriptors.empty()) {
2874 trainDescriptorsTmp.copyTo(m_trainDescriptors);
2877 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2887 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
2893 m_currentImageId = (int)m_mapOfImages.size();
2897 std::vector<cv::DMatch> &matches,
double &elapsedTime)
2902 m_knnMatches.clear();
2904 if (m_useMatchTrainToQuery) {
2905 std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
2908 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
2909 matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
2911 for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
2912 it1 != knnMatchesTmp.end(); ++it1) {
2913 std::vector<cv::DMatch> tmp;
2914 for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
2915 tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
2917 m_knnMatches.push_back(tmp);
2920 matches.resize(m_knnMatches.size());
2921 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
2925 m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
2926 matches.resize(m_knnMatches.size());
2927 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
2933 if (m_useMatchTrainToQuery) {
2934 std::vector<cv::DMatch> matchesTmp;
2936 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
2937 matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
2939 for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
2940 matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
2945 m_matcher->match(queryDescriptors, matches);
2969 if (m_trainDescriptors.empty()) {
2970 std::cerr <<
"Reference is empty." << std::endl;
2972 std::cerr <<
"Reference is not computed." << std::endl;
2974 std::cerr <<
"Matching is not possible." << std::endl;
2979 if (m_useAffineDetection) {
2980 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
2981 std::vector<cv::Mat> listOfQueryDescriptors;
2987 m_queryKeyPoints.clear();
2988 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
2989 it != listOfQueryKeyPoints.end(); ++it) {
2990 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
2994 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
2998 it->copyTo(m_queryDescriptors);
3001 m_queryDescriptors.push_back(*it);
3006 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3007 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3010 return matchPoint(m_queryKeyPoints, m_queryDescriptors);
3015 m_queryKeyPoints = queryKeyPoints;
3016 m_queryDescriptors = queryDescriptors;
3018 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3021 m_queryFilteredKeyPoints.clear();
3022 m_objectFilteredPoints.clear();
3023 m_filteredMatches.clear();
3028 if (m_useMatchTrainToQuery) {
3030 m_queryFilteredKeyPoints.clear();
3031 m_filteredMatches.clear();
3032 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3033 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3034 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3038 m_queryFilteredKeyPoints = m_queryKeyPoints;
3039 m_filteredMatches = m_matches;
3042 if (!m_trainPoints.empty()) {
3043 m_objectFilteredPoints.clear();
3047 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3049 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3058 return static_cast<unsigned int>(m_filteredMatches.size());
3070 double error, elapsedTime;
3071 return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3077 double error, elapsedTime;
3078 return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
3086 if (m_trainDescriptors.empty()) {
3087 std::cerr <<
"Reference is empty." << std::endl;
3089 std::cerr <<
"Reference is not computed." << std::endl;
3091 std::cerr <<
"Matching is not possible." << std::endl;
3096 if (m_useAffineDetection) {
3097 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3098 std::vector<cv::Mat> listOfQueryDescriptors;
3104 m_queryKeyPoints.clear();
3105 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3106 it != listOfQueryKeyPoints.end(); ++it) {
3107 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3111 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3115 it->copyTo(m_queryDescriptors);
3118 m_queryDescriptors.push_back(*it);
3123 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3124 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3127 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3129 elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3132 m_queryFilteredKeyPoints.clear();
3133 m_objectFilteredPoints.clear();
3134 m_filteredMatches.clear();
3139 if (m_useMatchTrainToQuery) {
3141 m_queryFilteredKeyPoints.clear();
3142 m_filteredMatches.clear();
3143 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3144 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3145 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3149 m_queryFilteredKeyPoints = m_queryKeyPoints;
3150 m_filteredMatches = m_matches;
3153 if (!m_trainPoints.empty()) {
3154 m_objectFilteredPoints.clear();
3158 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3160 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3172 m_ransacInliers.clear();
3173 m_ransacOutliers.clear();
3175 if (m_useRansacVVS) {
3176 std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3180 for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3181 it != m_objectFilteredPoints.end(); ++it, cpt++) {
3185 vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3187 double x = 0.0, y = 0.0;
3192 objectVpPoints[cpt] = pt;
3195 std::vector<vpPoint> inliers;
3196 std::vector<unsigned int> inlierIndex;
3198 bool res =
computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3200 std::map<unsigned int, bool> mapOfInlierIndex;
3201 m_matchRansacKeyPointsToPoints.clear();
3203 for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3204 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3205 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3206 mapOfInlierIndex[*it] =
true;
3209 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3210 if (mapOfInlierIndex.find((
unsigned int)i) == mapOfInlierIndex.end()) {
3211 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3215 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3217 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3218 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3219 m_ransacInliers.begin(), matchRansacToVpImage);
3221 elapsedTime += m_poseTime;
3226 std::vector<cv::Point2f> imageFilteredPoints;
3227 cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3228 std::vector<int> inlierIndex;
3229 bool res =
computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3231 std::map<int, bool> mapOfInlierIndex;
3232 m_matchRansacKeyPointsToPoints.clear();
3234 for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3235 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3236 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3237 mapOfInlierIndex[*it] =
true;
3240 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3241 if (mapOfInlierIndex.find((
int)i) == mapOfInlierIndex.end()) {
3242 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3246 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3248 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3249 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3250 m_ransacInliers.begin(), matchRansacToVpImage);
3252 elapsedTime += m_poseTime;
3263 return (
matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
3267 vpImagePoint ¢erOfGravity,
const bool isPlanarObject,
3268 std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3269 double *meanDescriptorDistance,
double *detectionScore,
const vpRect &rectangle)
3271 if (imPts1 !=
nullptr && imPts2 !=
nullptr) {
3278 double meanDescriptorDistanceTmp = 0.0;
3279 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3280 meanDescriptorDistanceTmp += (double)it->distance;
3283 meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3284 double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3286 if (meanDescriptorDistance !=
nullptr) {
3287 *meanDescriptorDistance = meanDescriptorDistanceTmp;
3293 if (m_filteredMatches.size() >= 4) {
3295 std::vector<cv::Point2f> points1(m_filteredMatches.size());
3297 std::vector<cv::Point2f> points2(m_filteredMatches.size());
3299 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3300 points1[i] = cv::Point2f(m_trainKeyPoints[(
size_t)m_filteredMatches[i].trainIdx].pt);
3301 points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(
size_t)m_filteredMatches[i].queryIdx].pt);
3304 std::vector<vpImagePoint> inliers;
3305 if (isPlanarObject) {
3306 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3307 cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3309 cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3312 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3314 cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3315 realPoint.at<
double>(0, 0) = points1[i].x;
3316 realPoint.at<
double>(1, 0) = points1[i].y;
3317 realPoint.at<
double>(2, 0) = 1.f;
3319 cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3320 double err_x = (reprojectedPoint.at<
double>(0, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].x;
3321 double err_y = (reprojectedPoint.at<
double>(1, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].y;
3322 double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3324 if (reprojectionError < 6.0) {
3325 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3326 if (imPts1 !=
nullptr) {
3327 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3330 if (imPts2 !=
nullptr) {
3331 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3336 else if (m_filteredMatches.size() >= 8) {
3337 cv::Mat fundamentalInliers;
3338 cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3340 for (
size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
3341 if (fundamentalInliers.at<uchar>((
int)i, 0)) {
3342 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3344 if (imPts1 !=
nullptr) {
3345 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3348 if (imPts2 !=
nullptr) {
3349 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3355 if (!inliers.empty()) {
3362 double meanU = 0.0, meanV = 0.0;
3363 for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
3364 meanU += it->get_u();
3365 meanV += it->get_v();
3368 meanU /= (double)inliers.size();
3369 meanV /= (double)inliers.size();
3371 centerOfGravity.
set_u(meanU);
3372 centerOfGravity.
set_v(meanV);
3381 return meanDescriptorDistanceTmp < m_detectionThreshold;
3384 return score > m_detectionScore;
3393 bool isMatchOk =
matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3398 std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
3400 for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
3404 modelImagePoints[cpt] = imPt;
3413 double meanU = 0.0, meanV = 0.0;
3414 for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
3415 meanU += it->get_u();
3416 meanV += it->get_v();
3419 meanU /= (double)m_ransacInliers.size();
3420 meanV /= (double)m_ransacInliers.size();
3422 centerOfGravity.
set_u(meanU);
3423 centerOfGravity.
set_v(meanV);
3430 std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
3431 std::vector<cv::Mat> &listOfDescriptors,
3437 listOfKeypoints.clear();
3438 listOfDescriptors.clear();
3440 for (
int tl = 1; tl < 6; tl++) {
3441 double t = pow(2, 0.5 * tl);
3442 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3443 std::vector<cv::KeyPoint> keypoints;
3444 cv::Mat descriptors;
3446 cv::Mat timg, mask, Ai;
3449 affineSkew(t, phi, timg, mask, Ai);
3452 if (listOfAffineI !=
nullptr) {
3454 bitwise_and(mask, timg, img_disp);
3457 listOfAffineI->push_back(tI);
3461 cv::bitwise_and(mask, timg, img_disp);
3462 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE);
3463 cv::imshow(
"Skew", img_disp);
3467 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3468 it != m_detectors.end(); ++it) {
3469 std::vector<cv::KeyPoint> kp;
3470 it->second->detect(timg, kp, mask);
3471 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3475 extract(timg, keypoints, descriptors, elapsedTime);
3477 for (
unsigned int i = 0; i < keypoints.size(); i++) {
3478 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3479 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3480 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3481 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
3484 listOfKeypoints.push_back(keypoints);
3485 listOfDescriptors.push_back(descriptors);
3494 std::vector<std::pair<double, int> > listOfAffineParams;
3495 for (
int tl = 1; tl < 6; tl++) {
3496 double t = pow(2, 0.5 * tl);
3497 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3498 listOfAffineParams.push_back(std::pair<double, int>(t, phi));
3502 listOfKeypoints.resize(listOfAffineParams.size());
3503 listOfDescriptors.resize(listOfAffineParams.size());
3505 if (listOfAffineI !=
nullptr) {
3506 listOfAffineI->resize(listOfAffineParams.size());
3509 #ifdef VISP_HAVE_OPENMP
3510 #pragma omp parallel for
3512 for (
int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
3513 std::vector<cv::KeyPoint> keypoints;
3514 cv::Mat descriptors;
3516 cv::Mat timg, mask, Ai;
3519 affineSkew(listOfAffineParams[(
size_t)cpt].first, listOfAffineParams[(
size_t)cpt].second, timg, mask, Ai);
3521 if (listOfAffineI !=
nullptr) {
3523 bitwise_and(mask, timg, img_disp);
3526 (*listOfAffineI)[(size_t)cpt] = tI;
3531 cv::bitwise_and(mask, timg, img_disp);
3532 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE);
3533 cv::imshow(
"Skew", img_disp);
3537 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3538 it != m_detectors.end(); ++it) {
3539 std::vector<cv::KeyPoint> kp;
3540 it->second->detect(timg, kp, mask);
3541 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3545 extract(timg, keypoints, descriptors, elapsedTime);
3547 for (
size_t i = 0; i < keypoints.size(); i++) {
3548 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3549 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3550 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3551 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
3554 listOfKeypoints[(size_t)cpt] = keypoints;
3555 listOfDescriptors[(size_t)cpt] = descriptors;
3568 m_computeCovariance =
false;
3570 m_currentImageId = 0;
3572 m_detectionScore = 0.15;
3573 m_detectionThreshold = 100.0;
3574 m_detectionTime = 0.0;
3575 m_detectorNames.clear();
3576 m_detectors.clear();
3577 m_extractionTime = 0.0;
3578 m_extractorNames.clear();
3579 m_extractors.clear();
3580 m_filteredMatches.clear();
3583 m_knnMatches.clear();
3584 m_mapOfImageId.clear();
3585 m_mapOfImages.clear();
3586 m_matcher = cv::Ptr<cv::DescriptorMatcher>();
3587 m_matcherName =
"BruteForce-Hamming";
3589 m_matchingFactorThreshold = 2.0;
3590 m_matchingRatioThreshold = 0.85;
3591 m_matchingTime = 0.0;
3592 m_matchRansacKeyPointsToPoints.clear();
3593 m_nbRansacIterations = 200;
3594 m_nbRansacMinInlierCount = 100;
3595 m_objectFilteredPoints.clear();
3597 m_queryDescriptors = cv::Mat();
3598 m_queryFilteredKeyPoints.clear();
3599 m_queryKeyPoints.clear();
3600 m_ransacConsensusPercentage = 20.0;
3602 m_ransacInliers.clear();
3603 m_ransacOutliers.clear();
3604 m_ransacParallel =
true;
3605 m_ransacParallelNbThreads = 0;
3606 m_ransacReprojectionError = 6.0;
3607 m_ransacThreshold = 0.01;
3608 m_trainDescriptors = cv::Mat();
3609 m_trainKeyPoints.clear();
3610 m_trainPoints.clear();
3611 m_trainVpPoints.clear();
3612 m_useAffineDetection =
false;
3613 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
3614 m_useBruteForceCrossCheck =
true;
3616 m_useConsensusPercentage =
false;
3618 m_useMatchTrainToQuery =
false;
3619 m_useRansacVVS =
true;
3620 m_useSingleMatchFilter =
true;
3622 m_detectorNames.push_back(
"ORB");
3623 m_extractorNames.push_back(
"ORB");
3631 if (!parent.empty()) {
3635 std::map<int, std::string> mapOfImgPath;
3636 if (saveTrainingImages) {
3637 #ifdef VISP_HAVE_MODULE_IO
3639 unsigned int cpt = 0;
3641 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3647 std::stringstream ss;
3648 ss <<
"train_image_" << std::setfill(
'0') << std::setw(3) << cpt;
3650 switch (m_imageFormat) {
3672 std::string imgFilename = ss.str();
3673 mapOfImgPath[it->first] = imgFilename;
3674 vpImageIo::write(it->second, parent + (!parent.empty() ?
"/" :
"") + imgFilename);
3677 std::cout <<
"Warning: in vpKeyPoint::saveLearningData() training images "
3678 "are not saved because "
3679 "visp_io module is not available !"
3684 bool have3DInfo = m_trainPoints.size() > 0;
3685 if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
3691 std::ofstream file(filename.c_str(), std::ofstream::binary);
3692 if (!file.is_open()) {
3697 int nbImgs = (int)mapOfImgPath.size();
3700 #ifdef VISP_HAVE_MODULE_IO
3701 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3707 std::string path = it->second;
3708 int length = (int)path.length();
3711 for (
int cpt = 0; cpt < length; cpt++) {
3712 file.write((
char *)(&path[(
size_t)cpt]),
sizeof(path[(
size_t)cpt]));
3718 int have3DInfoInt = have3DInfo ? 1 : 0;
3721 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3722 int descriptorType = m_trainDescriptors.type();
3733 for (
int i = 0; i < nRows; i++) {
3734 unsigned int i_ = (
unsigned int)i;
3736 float u = m_trainKeyPoints[i_].pt.x;
3740 float v = m_trainKeyPoints[i_].pt.y;
3744 float size = m_trainKeyPoints[i_].size;
3748 float angle = m_trainKeyPoints[i_].angle;
3752 float response = m_trainKeyPoints[i_].response;
3756 int octave = m_trainKeyPoints[i_].octave;
3760 int class_id = m_trainKeyPoints[i_].class_id;
3764 #ifdef VISP_HAVE_MODULE_IO
3765 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3766 int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
3775 float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
3786 for (
int j = 0; j < nCols; j++) {
3788 switch (descriptorType) {
3790 file.write((
char *)(&m_trainDescriptors.at<
unsigned char>(i, j)),
3791 sizeof(m_trainDescriptors.at<
unsigned char>(i, j)));
3795 file.write((
char *)(&m_trainDescriptors.at<
char>(i, j)),
sizeof(m_trainDescriptors.at<
char>(i, j)));
3828 pugi::xml_document doc;
3829 pugi::xml_node node = doc.append_child(pugi::node_declaration);
3830 node.append_attribute(
"version") =
"1.0";
3831 node.append_attribute(
"encoding") =
"UTF-8";
3837 pugi::xml_node root_node = doc.append_child(
"LearningData");
3840 pugi::xml_node image_node = root_node.append_child(
"TrainingImageInfo");
3842 #ifdef VISP_HAVE_MODULE_IO
3843 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3844 pugi::xml_node image_info_node = image_node.append_child(
"trainImg");
3845 image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
3846 std::stringstream ss;
3848 image_info_node.append_attribute(
"image_id") = ss.str().c_str();
3853 pugi::xml_node descriptors_info_node = root_node.append_child(
"DescriptorsInfo");
3855 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3856 int descriptorType = m_trainDescriptors.type();
3859 descriptors_info_node.append_child(
"nrows").append_child(pugi::node_pcdata).text() = nRows;
3862 descriptors_info_node.append_child(
"ncols").append_child(pugi::node_pcdata).text() = nCols;
3865 descriptors_info_node.append_child(
"type").append_child(pugi::node_pcdata).text() = descriptorType;
3867 for (
int i = 0; i < nRows; i++) {
3868 unsigned int i_ = (
unsigned int)i;
3869 pugi::xml_node descriptor_node = root_node.append_child(
"DescriptorInfo");
3871 descriptor_node.append_child(
"u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
3872 descriptor_node.append_child(
"v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
3873 descriptor_node.append_child(
"size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
3874 descriptor_node.append_child(
"angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
3875 descriptor_node.append_child(
"response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
3876 descriptor_node.append_child(
"octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
3877 descriptor_node.append_child(
"class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
3879 #ifdef VISP_HAVE_MODULE_IO
3880 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3881 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() =
3882 ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
3884 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() = -1;
3888 descriptor_node.append_child(
"oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
3889 descriptor_node.append_child(
"oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
3890 descriptor_node.append_child(
"oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
3893 pugi::xml_node desc_node = descriptor_node.append_child(
"desc");
3895 for (
int j = 0; j < nCols; j++) {
3896 switch (descriptorType) {
3902 int val_tmp = m_trainDescriptors.at<
unsigned char>(i, j);
3903 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
3911 int val_tmp = m_trainDescriptors.at<
char>(i, j);
3912 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
3916 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
3917 m_trainDescriptors.at<
unsigned short int>(i, j);
3921 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
short int>(i, j);
3925 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
int>(i, j);
3929 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
float>(i, j);
3933 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
double>(i, j);
3943 doc.save_file(filename.c_str(), PUGIXML_TEXT(
" "), pugi::format_default, pugi::encoding_utf8);
3947 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
3948 #ifndef DOXYGEN_SHOULD_SKIP_THIS
// Predicate functor: returns true for keypoints whose response is at least
// a fixed threshold. Used by KeyPointsFilter::retainBest() below to keep all
// keypoints that tie with the n-th strongest response after nth_element.
// NOTE(review): the member declaration (float value;) and closing brace appear
// to lie on elided lines of this extract.
3950 struct KeypointResponseGreaterThanThreshold
// Captures the response threshold at construction time.
3952 KeypointResponseGreaterThanThreshold(
float _value) : value(_value) { }
// True when the keypoint's detector response meets or exceeds the threshold.
3953 inline bool operator()(
const cv::KeyPoint &kpt)
const {
return kpt.response >= value; }
// Strict-weak-ordering comparator: sorts keypoints by DESCENDING detector
// response (strongest first). Used as the comparator for std::nth_element in
// KeyPointsFilter::retainBest() below.
3957 struct KeypointResponseGreater
// True when kp1 has a strictly stronger response than kp2.
3959 inline bool operator()(
const cv::KeyPoint &kp1,
const cv::KeyPoint &kp2)
const {
return kp1.response > kp2.response; }
// Keep only the n_points keypoints with the strongest detector response,
// plus any extra keypoints that tie exactly with the n-th strongest one.
// Mirrors OpenCV's cv::KeyPointsFilter::retainBest.
// \param keypoints : in/out list of keypoints, truncated in place.
// \param n_points : maximum number of keypoints to keep.
3963 void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints,
int n_points)
// Nothing to do when n_points is negative or we already have few enough points.
3967 if (n_points >= 0 && keypoints.size() > (
size_t)n_points) {
// n_points == 0 is a special case (body on elided lines — presumably clears
// the vector and returns; TODO confirm against the full source).
3968 if (n_points == 0) {
// Partially sort so the first n_points entries are the strongest responses;
// elements beyond that position are in unspecified order.
3974 std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
// Response of the weakest keypoint we intend to keep; keypoints past the
// cut that share this exact response are "ambiguous" and are kept too.
3976 float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
// Move every tail keypoint whose response >= ambiguous_response to the front
// of the tail, so the kept range is contiguous.
3979 std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
3980 keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
// Drop everything after the last kept (tied) keypoint.
3983 keypoints.resize((
size_t)(new_end - keypoints.begin()));
3989 RoiPredicate(
const cv::Rect &_r) : r(_r) { }
3991 bool operator()(
const cv::KeyPoint &keyPt)
const {
return !r.contains(keyPt.pt); }
// Remove keypoints lying within borderSize pixels of the image border, i.e.
// keep only those strictly inside the centered rectangle
// [borderSize, width-borderSize) x [borderSize, height-borderSize).
// Mirrors OpenCV's cv::KeyPointsFilter::runByImageBorder.
// \param keypoints : in/out list of keypoints, filtered in place.
// \param imageSize : size of the image the keypoints were detected in.
// \param borderSize : border margin in pixels; <= 0 disables filtering.
3996 void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
3999 if (borderSize > 0) {
// Degenerate case: the border swallows the whole image (handling on an
// elided line — presumably clears the vector; TODO confirm).
4000 if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
// Erase-remove idiom: RoiPredicate returns true (remove) for keypoints
// outside the inner rectangle.
4003 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4004 RoiPredicate(cv::Rect(
4005 cv::Point(borderSize, borderSize),
4006 cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
// Predicate functor for runByKeypointSize(): returns true (remove) for
// keypoints whose size falls outside the inclusive range [minSize, maxSize].
4011 struct SizePredicate
// Captures the allowed size bounds at construction time.
4013 SizePredicate(
float _minSize,
float _maxSize) : minSize(_minSize), maxSize(_maxSize) { }
// True when the keypoint's scale/size lies outside [minSize, maxSize].
4015 bool operator()(
const cv::KeyPoint &keyPt)
const
4017 float size = keyPt.size;
4018 return (size < minSize) || (size > maxSize);
// Inclusive bounds on the admissible keypoint size.
4021 float minSize, maxSize;
// Remove keypoints whose size is outside the inclusive range
// [minSize, maxSize]. Mirrors OpenCV's cv::KeyPointsFilter::runByKeypointSize.
// \param keypoints : in/out list of keypoints, filtered in place.
// \param minSize : minimum admissible keypoint size (>= 0).
// \param maxSize : maximum admissible keypoint size (>= minSize).
4024 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints,
float minSize,
float maxSize)
// Validate the bounds up front; CV_Assert aborts on violation.
4026 CV_Assert(minSize >= 0);
4027 CV_Assert(maxSize >= 0);
4028 CV_Assert(minSize <= maxSize);
// Erase-remove idiom with the SizePredicate defined above.
4030 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4036 MaskPredicate(
const cv::Mat &_mask) : mask(_mask) { }
4037 bool operator()(
const cv::KeyPoint &key_pt)
const
4039 return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
// Remove keypoints located where the mask is zero: MaskPredicate (defined on
// preceding lines) reads mask at the keypoint position rounded to the nearest
// pixel and flags the keypoint for removal when that value is 0.
// Mirrors OpenCV's cv::KeyPointsFilter::runByPixelsMask.
// \param keypoints : in/out list of keypoints, filtered in place.
// \param mask : 8-bit mask; zero pixels mark regions whose keypoints are dropped.
4046 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints,
const cv::Mat &mask)
4051 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4054 struct KeyPoint_LessThan
4056 KeyPoint_LessThan(
const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) { }
4057 bool operator()(
size_t i,
size_t j)
const
4059 const cv::KeyPoint &kp1 = (*kp)[ i];
4060 const cv::KeyPoint &kp2 = (*kp)[ j];
4062 std::numeric_limits<float>::epsilon())) {
4064 return kp1.pt.x < kp2.pt.x;
4068 std::numeric_limits<float>::epsilon())) {
4070 return kp1.pt.y < kp2.pt.y;
4074 std::numeric_limits<float>::epsilon())) {
4076 return kp1.size > kp2.size;
4080 std::numeric_limits<float>::epsilon())) {
4082 return kp1.angle < kp2.angle;
4086 std::numeric_limits<float>::epsilon())) {
4088 return kp1.response > kp2.response;
4091 if (kp1.octave != kp2.octave) {
4092 return kp1.octave > kp2.octave;
4095 if (kp1.class_id != kp2.class_id) {
4096 return kp1.class_id > kp2.class_id;
4101 const std::vector<cv::KeyPoint> *kp;
4104 void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4106 size_t i, j, n = keypoints.size();
4107 std::vector<size_t> kpidx(n);
4108 std::vector<uchar> mask(n, (uchar)1);
4110 for (i = 0; i < n; i++) {
4113 std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4114 for (i = 1, j = 0; i < n; i++) {
4115 cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4116 cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4119 if (!
vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4120 !
vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4121 !
vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4122 !
vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4130 for (i = j = 0; i < n; i++) {
4133 keypoints[j] = keypoints[i];
4138 keypoints.resize(j);
4144 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(
const cv::Ptr<cv::FeatureDetector> &detector,
4146 : m_detector(detector), m_maxLevel(maxLevel)
4149 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty()
const
4151 return m_detector.empty() || (cv::FeatureDetector *)m_detector->empty();
4154 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4155 CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4157 detectImpl(image.getMat(), keypoints, mask.getMat());
4160 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(
const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4161 const cv::Mat &mask)
const
4163 cv::Mat src = image;
4164 cv::Mat src_mask = mask;
4166 cv::Mat dilated_mask;
4167 if (!mask.empty()) {
4168 cv::dilate(mask, dilated_mask, cv::Mat());
4169 cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4170 mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4171 dilated_mask = mask255;
4174 for (
int l = 0, multiplier = 1; l <= m_maxLevel; ++l, multiplier *= 2) {
4176 std::vector<cv::KeyPoint> new_pts;
4177 m_detector->detect(src, new_pts, src_mask);
4178 std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4179 for (; it != end; ++it) {
4180 it->pt.x *= multiplier;
4181 it->pt.y *= multiplier;
4182 it->size *= multiplier;
4185 keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4188 if (l < m_maxLevel) {
4194 resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
4199 vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
#elif !defined(VISP_BUILD_SHARED_LIBS)
// Work around to avoid warning: libvisp_vision.a(vpKeyPoint.cpp.o) has no symbols
void dummy_vpKeyPoint() { };
std::vector< unsigned int > m_matchedReferencePoints
std::vector< vpImagePoint > m_currentImagePointsList
bool m_reference_computed
std::vector< vpImagePoint > m_referenceImagePointsList
Generic class defining intrinsic camera parameters.
Implementation of column vector and the associated operations.
Class to define RGB colors available for display functionalities.
static const vpColor none
static const vpColor green
static void convertFromOpenCV(const cv::KeyPoint &from, vpImagePoint &to)
static void displayCircle(const vpImage< unsigned char > &I, const vpImageCircle &circle, const vpColor &color, bool fill=false, unsigned int thickness=1)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
error that can be emitted by ViSP classes.
@ badValue
Used to indicate that a value is not in the allowed range.
const char * what() const
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpHomogeneousMatrix inverse() const
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
static void write(const vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
void set_ij(double ii, double jj)
unsigned int getWidth() const
void insert(const vpImage< Type > &src, const vpImagePoint &topLeft)
unsigned int getHeight() const
unsigned int matchPoint(const vpImage< unsigned char > &I)
void getTrainKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpCylinder > &cylinders, const std::vector< std::vector< std::vector< vpImagePoint > > > &vectorOfCylinderRois, std::vector< cv::Point3f > &points, cv::Mat *descriptors=nullptr)
void detectExtractAffine(const vpImage< unsigned char > &I, std::vector< std::vector< cv::KeyPoint > > &listOfKeypoints, std::vector< cv::Mat > &listOfDescriptors, std::vector< vpImage< unsigned char > > *listOfAffineI=nullptr)
void initMatcher(const std::string &matcherName)
void display(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, unsigned int size=3)
void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector< cv::DMatch > &matches, double &elapsedTime)
static void compute3D(const cv::KeyPoint &candidate, const std::vector< vpPoint > &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
@ DETECTOR_KAZE
KAZE detector.
@ DETECTOR_BRISK
BRISK detector.
@ DETECTOR_AKAZE
AKAZE detector.
@ DETECTOR_MSER
MSER detector.
@ DETECTOR_SURF
SURF detector.
@ DETECTOR_AGAST
AGAST detector.
@ DETECTOR_SIFT
SIFT detector.
@ DETECTOR_FAST
FAST detector.
@ DETECTOR_GFTT
GFTT detector.
@ DETECTOR_ORB
ORB detector.
@ DETECTOR_SimpleBlob
SimpleBlob detector.
void createImageMatching(vpImage< unsigned char > &IRef, vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
void loadLearningData(const std::string &filename, bool binaryMode=false, bool append=false)
bool computePose(const std::vector< cv::Point2f > &imagePoints, const std::vector< cv::Point3f > &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector< int > &inlierIndex, double &elapsedTime, bool(*func)(const vpHomogeneousMatrix &)=nullptr)
void extract(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, cv::Mat &descriptors, std::vector< cv::Point3f > *trainPoints=nullptr)
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=nullptr)
bool matchPointAndDetect(const vpImage< unsigned char > &I, vpRect &boundingBox, vpImagePoint ¢erOfGravity, const bool isPlanarObject=true, std::vector< vpImagePoint > *imPts1=nullptr, std::vector< vpImagePoint > *imPts2=nullptr, double *meanDescriptorDistance=nullptr, double *detectionScore=nullptr, const vpRect &rectangle=vpRect())
@ DESCRIPTOR_AKAZE
AKAZE descriptor.
@ DESCRIPTOR_ORB
ORB descriptor.
@ DESCRIPTOR_KAZE
KAZE descriptor.
@ DESCRIPTOR_SURF
SURF descriptor.
@ DESCRIPTOR_BRISK
BRISK descriptor.
@ DESCRIPTOR_SIFT
SIFT descriptor.
void getQueryKeyPoints(std::vector< cv::KeyPoint > &keyPoints, bool matches=true) const
void saveLearningData(const std::string &filename, bool binaryMode=false, bool saveTrainingImages=true)
vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType, const std::string &matcherName, const vpFilterMatchingType &filterType=ratioDistanceThreshold)
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
void displayMatching(const vpImage< unsigned char > &IRef, vpImage< unsigned char > &IMatching, unsigned int crossSize, unsigned int lineThickness=1, const vpColor &color=vpColor::green)
unsigned int buildReference(const vpImage< unsigned char > &I)
void loadConfigFile(const std::string &configFile)
void getTrainPoints(std::vector< cv::Point3f > &points) const
void getObjectPoints(std::vector< cv::Point3f > &objectPoints) const
void insertImageMatching(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
static bool isNaN(double value)
static bool equal(double x, double y, double threshold=0.001)
static int round(double x)
Implementation of a matrix and operations on matrices.
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
This class defines the container for a plane geometrical structure.
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
double get_oX() const
Get the point oX coordinate in the object frame.
void set_x(double x)
Set the point x coordinate in the image plane.
double get_y() const
Get the point y coordinate in the image plane.
double get_oZ() const
Get the point oZ coordinate in the object frame.
void set_oY(double oY)
Set the point oY coordinate in the object frame.
double get_x() const
Get the point x coordinate in the image plane.
void set_oZ(double oZ)
Set the point oZ coordinate in the object frame.
void set_oX(double oX)
Set the point oX coordinate in the object frame.
double get_oY() const
Get the point oY coordinate in the object frame.
void setWorldCoordinates(double oX, double oY, double oZ)
void set_y(double y)
Set the point y coordinate in the image plane.
Defines a generic 2D polygon.
vpRect getBoundingBox() const
bool isInside(const vpImagePoint &iP, const PointInPolygonMethod &method=PnPolyRayCasting) const
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
void setRansacMaxTrials(const int &rM)
void addPoint(const vpPoint &P)
void setRansacNbInliersToReachConsensus(const unsigned int &nbC)
vpMatrix getCovarianceMatrix() const
void setCovarianceComputation(const bool &flag)
std::vector< unsigned int > getRansacInlierIndex() const
void setRansacFilterFlag(const RANSAC_FILTER_FLAGS &flag)
@ NO_FILTER
No filter is applied.
void setUseParallelRansac(bool use)
std::vector< vpPoint > getRansacInliers() const
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, bool(*func)(const vpHomogeneousMatrix &)=nullptr)
void setNbParallelRansacThreads(int nb)
void setRansacThreshold(const double &t)
Defines a rectangle in the plane.
Implementation of a rotation vector as axis-angle minimal representation.
Class that consider the case of a translation vector.
std::string getDetectorName() const
double getMatchingRatioThreshold() const
std::string getExtractorName() const
void parse(const std::string &filename)
double getRansacReprojectionError() const
bool getUseRansacVVSPoseEstimation() const
double getMatchingFactorThreshold() const
int getNbRansacMinInlierCount() const
bool getUseRansacConsensusPercentage() const
std::string getMatcherName() const
int getNbRansacIterations() const
double getRansacConsensusPercentage() const
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
vpMatchingMethodEnum getMatchingMethod() const
double getRansacThreshold() const
VISP_EXPORT double measureTimeMs()