37 #include <visp3/core/vpIoTools.h>
38 #include <visp3/vision/vpKeyPoint.h>
40 #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D)
42 #if defined(VISP_HAVE_PUGIXML)
43 #include <pugixml.hpp>
49 inline cv::DMatch knnToDMatch(
const std::vector<cv::DMatch> &knnMatches)
51 if (knnMatches.size() > 0) {
58 inline vpImagePoint matchRansacToVpImage(
const std::pair<cv::KeyPoint, cv::Point3f> &pair)
67 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
68 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
69 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
70 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
71 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
72 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
73 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
74 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
75 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
76 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
77 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
78 m_useBruteForceCrossCheck(true),
80 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
81 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
85 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
86 m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);
93 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
94 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
95 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
96 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
97 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
98 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
99 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
100 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
101 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
102 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
103 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
104 m_useBruteForceCrossCheck(true),
106 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
107 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
111 m_detectorNames.push_back(detectorName);
112 m_extractorNames.push_back(extractorName);
119 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
120 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
121 m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
122 m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
123 m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
124 m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
125 m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
126 m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0),
127 m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(), m_ransacParallel(false),
128 m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01), m_trainDescriptors(),
129 m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
130 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
131 m_useBruteForceCrossCheck(true),
133 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
134 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
140 void vpKeyPoint::affineSkew(
double tilt,
double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
145 mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));
147 cv::Mat A = cv::Mat::eye(2, 3, CV_32F);
150 if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
155 A = (cv::Mat_<float>(2, 2) << c, -s, s, c);
157 cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
158 cv::Mat tcorners = corners * A.t();
159 cv::Mat tcorners_x, tcorners_y;
160 tcorners.col(0).copyTo(tcorners_x);
161 tcorners.col(1).copyTo(tcorners_y);
162 std::vector<cv::Mat> channels;
163 channels.push_back(tcorners_x);
164 channels.push_back(tcorners_y);
165 cv::merge(channels, tcorners);
167 cv::Rect rect = cv::boundingRect(tcorners);
168 A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
170 cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
173 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
174 double s = 0.8 * sqrt(tilt * tilt - 1);
175 cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
176 cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
177 A.row(0) = A.row(0) / tilt;
180 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
181 std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
184 cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
186 cv::invertAffineTransform(A, Ai);
210 m_trainPoints.clear();
211 m_mapOfImageId.clear();
212 m_mapOfImages.clear();
213 m_currentImageId = 1;
215 if (m_useAffineDetection) {
216 std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
217 std::vector<cv::Mat> listOfTrainDescriptors;
223 m_trainKeyPoints.clear();
224 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
225 it != listOfTrainKeyPoints.end(); ++it) {
226 m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
230 for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
234 it->copyTo(m_trainDescriptors);
237 m_trainDescriptors.push_back(*it);
242 detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
243 extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
248 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
249 m_mapOfImageId[it->class_id] = m_currentImageId;
253 m_mapOfImages[m_currentImageId] = I;
262 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
264 return static_cast<unsigned int>(m_trainKeyPoints.size());
274 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
276 cv::Mat trainDescriptors;
278 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
280 extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
282 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
286 std::map<size_t, size_t> mapOfKeypointHashes;
288 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
290 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
293 std::vector<cv::Point3f> trainPoints_tmp;
294 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
295 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
296 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
301 points3f = trainPoints_tmp;
304 return (
buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
308 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
310 cv::Mat trainDescriptors;
312 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
314 extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
316 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
320 std::map<size_t, size_t> mapOfKeypointHashes;
322 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
324 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
327 std::vector<cv::Point3f> trainPoints_tmp;
328 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
329 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
330 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
335 points3f = trainPoints_tmp;
338 return (
buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id));
343 const std::vector<cv::KeyPoint> &trainKeyPoints,
344 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
345 bool append,
int class_id)
348 m_trainPoints.clear();
349 m_mapOfImageId.clear();
350 m_mapOfImages.clear();
351 m_currentImageId = 0;
352 m_trainKeyPoints.clear();
357 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
359 if (class_id != -1) {
360 for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
361 it->class_id = class_id;
367 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
369 m_mapOfImageId[it->class_id] = m_currentImageId;
373 m_mapOfImages[m_currentImageId] = I;
376 m_trainKeyPoints.insert(m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
378 trainDescriptors.copyTo(m_trainDescriptors);
381 m_trainDescriptors.push_back(trainDescriptors);
383 this->m_trainPoints.insert(m_trainPoints.end(), points3f.begin(), points3f.end());
391 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
395 return static_cast<unsigned int>(m_trainKeyPoints.size());
399 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
400 bool append,
int class_id)
403 return (
buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
410 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
417 vpPlane Po(pts[0], pts[1], pts[2]);
418 double xc = 0.0, yc = 0.0;
429 point_obj = cMo.
inverse() * point_cam;
430 point = cv::Point3f((
float)point_obj[0], (
float)point_obj[1], (
float)point_obj[2]);
437 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
444 vpPlane Po(pts[0], pts[1], pts[2]);
445 double xc = 0.0, yc = 0.0;
456 point_obj = cMo.
inverse() * point_cam;
461 std::vector<cv::KeyPoint> &candidates,
462 const std::vector<vpPolygon> &polygons,
463 const std::vector<std::vector<vpPoint> > &roisPt,
464 std::vector<cv::Point3f> &points, cv::Mat *descriptors)
466 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
473 std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
474 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
475 pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
479 std::vector<vpPolygon> polygons_tmp = polygons;
480 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
481 std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
483 while (it2 != pairOfCandidatesToCheck.end()) {
484 imPt.
set_ij(it2->first.pt.y, it2->first.pt.x);
485 if (it1->isInside(imPt)) {
486 candidates.push_back(it2->first);
488 points.push_back(pt);
490 if (descriptors !=
nullptr) {
491 desc.push_back(descriptors->row((
int)it2->second));
495 it2 = pairOfCandidatesToCheck.erase(it2);
503 if (descriptors !=
nullptr) {
504 desc.copyTo(*descriptors);
509 std::vector<vpImagePoint> &candidates,
510 const std::vector<vpPolygon> &polygons,
511 const std::vector<std::vector<vpPoint> > &roisPt,
512 std::vector<vpPoint> &points, cv::Mat *descriptors)
514 std::vector<vpImagePoint> candidatesToCheck = candidates;
520 std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
521 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
522 pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
526 std::vector<vpPolygon> polygons_tmp = polygons;
527 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
528 std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
530 while (it2 != pairOfCandidatesToCheck.end()) {
531 if (it1->isInside(it2->first)) {
532 candidates.push_back(it2->first);
534 points.push_back(pt);
536 if (descriptors !=
nullptr) {
537 desc.push_back(descriptors->row((
int)it2->second));
541 it2 = pairOfCandidatesToCheck.erase(it2);
553 const std::vector<vpCylinder> &cylinders,
554 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
555 cv::Mat *descriptors)
557 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
563 size_t cpt_keypoint = 0;
564 for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
565 ++it1, cpt_keypoint++) {
566 size_t cpt_cylinder = 0;
569 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
570 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
573 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
575 candidates.push_back(*it1);
579 double xm = 0.0, ym = 0.0;
581 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
583 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
585 point_cam[0] = xm * Z;
586 point_cam[1] = ym * Z;
590 point_obj = cMo.
inverse() * point_cam;
593 points.push_back(cv::Point3f((
float)pt.
get_oX(), (
float)pt.
get_oY(), (
float)pt.
get_oZ()));
595 if (descriptors !=
nullptr) {
596 desc.push_back(descriptors->row((
int)cpt_keypoint));
606 if (descriptors !=
nullptr) {
607 desc.copyTo(*descriptors);
613 const std::vector<vpCylinder> &cylinders,
614 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
615 cv::Mat *descriptors)
617 std::vector<vpImagePoint> candidatesToCheck = candidates;
623 size_t cpt_keypoint = 0;
624 for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
625 ++it1, cpt_keypoint++) {
626 size_t cpt_cylinder = 0;
629 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
630 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
633 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
635 candidates.push_back(*it1);
639 double xm = 0.0, ym = 0.0;
641 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
643 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
645 point_cam[0] = xm * Z;
646 point_cam[1] = ym * Z;
650 point_obj = cMo.
inverse() * point_cam;
653 points.push_back(pt);
655 if (descriptors !=
nullptr) {
656 desc.push_back(descriptors->row((
int)cpt_keypoint));
666 if (descriptors !=
nullptr) {
667 desc.copyTo(*descriptors);
677 if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
679 std::cerr <<
"Not enough points to compute the pose (at least 4 points "
686 cv::Mat cameraMatrix =
696 cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);
699 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
701 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
702 (
float)m_ransacReprojectionError,
705 inlierIndex, cv::SOLVEPNP_ITERATIVE);
725 int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
726 if (m_useConsensusPercentage) {
727 nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
730 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
731 (
float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
734 catch (cv::Exception &e) {
735 std::cerr << e.what() << std::endl;
739 vpTranslationVector translationVec(tvec.at<
double>(0), tvec.at<
double>(1), tvec.at<
double>(2));
740 vpThetaUVector thetaUVector(rvec.at<
double>(0), rvec.at<
double>(1), rvec.at<
double>(2));
743 if (func !=
nullptr) {
757 std::vector<vpPoint> &inliers,
double &elapsedTime,
760 std::vector<unsigned int> inlierIndex;
761 return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
765 std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
double &elapsedTime,
770 if (objectVpPoints.size() < 4) {
780 for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
784 unsigned int nbInlierToReachConsensus = (
unsigned int)m_nbRansacMinInlierCount;
785 if (m_useConsensusPercentage) {
786 nbInlierToReachConsensus =
787 (
unsigned int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
797 bool isRansacPoseEstimationOk =
false;
804 if (m_computeCovariance) {
809 std::cerr <<
"e=" << e.
what() << std::endl;
827 return isRansacPoseEstimationOk;
830 double vpKeyPoint::computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
833 if (matchKeyPoints.size() == 0) {
839 std::vector<double> errors(matchKeyPoints.size());
842 for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
843 it != matchKeyPoints.end(); ++it, cpt++) {
848 double u = 0.0, v = 0.0;
850 errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
853 return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
880 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
882 if (m_mapOfImages.empty()) {
883 std::cerr <<
"There is no training image loaded !" << std::endl;
894 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
897 unsigned int nbWidth = nbImgSqrt;
899 unsigned int nbHeight = nbImgSqrt;
902 if (nbImgSqrt * nbImgSqrt < nbImg) {
906 unsigned int maxW = ICurrent.
getWidth();
907 unsigned int maxH = ICurrent.
getHeight();
908 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
910 if (maxW < it->second.getWidth()) {
911 maxW = it->second.getWidth();
914 if (maxH < it->second.getHeight()) {
915 maxH = it->second.getHeight();
927 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
929 if (m_mapOfImages.empty()) {
930 std::cerr <<
"There is no training image loaded !" << std::endl;
941 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
944 unsigned int nbWidth = nbImgSqrt;
946 unsigned int nbHeight = nbImgSqrt;
949 if (nbImgSqrt * nbImgSqrt < nbImg) {
953 unsigned int maxW = ICurrent.
getWidth();
954 unsigned int maxH = ICurrent.
getHeight();
955 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
957 if (maxW < it->second.getWidth()) {
958 maxW = it->second.getWidth();
961 if (maxH < it->second.getHeight()) {
962 maxH = it->second.getHeight();
973 detect(I, keyPoints, elapsedTime, rectangle);
979 detect(I_color, keyPoints, elapsedTime, rectangle);
982 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
const cv::Mat &mask)
985 detect(matImg, keyPoints, elapsedTime, mask);
993 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
996 cv::Point leftTop((
int)rectangle.
getLeft(), (
int)rectangle.
getTop()),
998 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1001 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1004 detect(matImg, keyPoints, elapsedTime, mask);
1012 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1015 cv::Point leftTop((
int)rectangle.
getLeft(), (
int)rectangle.
getTop()),
1017 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1020 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1023 detect(matImg, keyPoints, elapsedTime, mask);
1026 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
1027 const cv::Mat &mask)
1032 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
1033 it != m_detectors.end(); ++it) {
1034 std::vector<cv::KeyPoint> kp;
1036 it->second->detect(matImg, kp, mask);
1037 keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
1045 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1047 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1050 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1058 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1060 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1063 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1071 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1074 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1081 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1084 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1090 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
1093 srand((
unsigned int)time(
nullptr));
1096 std::vector<vpImagePoint> queryImageKeyPoints;
1098 std::vector<vpImagePoint> trainImageKeyPoints;
1102 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1104 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1107 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1108 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1109 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
1117 unsigned int lineThickness,
const vpColor &color)
1120 srand((
unsigned int)time(
nullptr));
1123 std::vector<vpImagePoint> queryImageKeyPoints;
1125 std::vector<vpImagePoint> trainImageKeyPoints;
1129 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1131 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1134 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1135 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1136 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
1144 unsigned int lineThickness,
const vpColor &color)
1147 srand((
unsigned int)time(
nullptr));
1150 std::vector<vpImagePoint> queryImageKeyPoints;
1152 std::vector<vpImagePoint> trainImageKeyPoints;
1156 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1158 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1161 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1162 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1163 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
1172 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1173 unsigned int lineThickness)
1175 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1177 std::cerr <<
"There is no training image loaded !" << std::endl;
1183 int nbImg = (int)(m_mapOfImages.size() + 1);
1192 int nbWidth = nbImgSqrt;
1193 int nbHeight = nbImgSqrt;
1195 if (nbImgSqrt * nbImgSqrt < nbImg) {
1199 std::map<int, int> mapOfImageIdIndex;
1202 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1204 mapOfImageIdIndex[it->first] = cpt;
1206 if (maxW < it->second.getWidth()) {
1207 maxW = it->second.getWidth();
1210 if (maxH < it->second.getHeight()) {
1211 maxH = it->second.getHeight();
1217 int medianI = nbHeight / 2;
1218 int medianJ = nbWidth / 2;
1219 int medianIndex = medianI * nbWidth + medianJ;
1220 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1222 int current_class_id_index = 0;
1223 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1224 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1229 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1232 int indexI = current_class_id_index / nbWidth;
1233 int indexJ = current_class_id_index - (indexI * nbWidth);
1234 topLeftCorner.
set_ij((
int)maxH * indexI, (int)maxW * indexJ);
1241 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1242 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1247 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1252 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1258 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1259 int current_class_id = 0;
1260 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1261 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1266 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1269 int indexI = current_class_id / nbWidth;
1270 int indexJ = current_class_id - (indexI * nbWidth);
1272 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1273 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1274 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1275 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
1285 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1286 unsigned int lineThickness)
1288 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1290 std::cerr <<
"There is no training image loaded !" << std::endl;
1296 int nbImg = (int)(m_mapOfImages.size() + 1);
1305 int nbWidth = nbImgSqrt;
1306 int nbHeight = nbImgSqrt;
1308 if (nbImgSqrt * nbImgSqrt < nbImg) {
1312 std::map<int, int> mapOfImageIdIndex;
1315 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1317 mapOfImageIdIndex[it->first] = cpt;
1319 if (maxW < it->second.getWidth()) {
1320 maxW = it->second.getWidth();
1323 if (maxH < it->second.getHeight()) {
1324 maxH = it->second.getHeight();
1330 int medianI = nbHeight / 2;
1331 int medianJ = nbWidth / 2;
1332 int medianIndex = medianI * nbWidth + medianJ;
1333 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1335 int current_class_id_index = 0;
1336 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1337 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1342 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1345 int indexI = current_class_id_index / nbWidth;
1346 int indexJ = current_class_id_index - (indexI * nbWidth);
1347 topLeftCorner.
set_ij((
int)maxH * indexI, (int)maxW * indexJ);
1354 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1355 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1360 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1365 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1371 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1372 int current_class_id = 0;
1373 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1374 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1379 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1382 int indexI = current_class_id / nbWidth;
1383 int indexJ = current_class_id - (indexI * nbWidth);
1385 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1386 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1387 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1388 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
1398 std::vector<cv::Point3f> *trainPoints)
1401 extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1405 std::vector<cv::Point3f> *trainPoints)
1408 extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
1412 std::vector<cv::Point3f> *trainPoints)
1415 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1419 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1423 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1427 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1431 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1435 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1440 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1441 itd != m_extractors.end(); ++itd) {
1445 if (trainPoints !=
nullptr && !trainPoints->empty()) {
1448 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1451 itd->second->compute(matImg, keyPoints, descriptors);
1453 if (keyPoints.size() != keyPoints_tmp.size()) {
1457 std::map<size_t, size_t> mapOfKeypointHashes;
1459 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1461 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1464 std::vector<cv::Point3f> trainPoints_tmp;
1465 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1466 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1467 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1472 *trainPoints = trainPoints_tmp;
1477 itd->second->compute(matImg, keyPoints, descriptors);
1483 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1487 itd->second->compute(matImg, keyPoints, desc);
1489 if (keyPoints.size() != keyPoints_tmp.size()) {
1493 std::map<size_t, size_t> mapOfKeypointHashes;
1495 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1497 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1500 std::vector<cv::Point3f> trainPoints_tmp;
1501 cv::Mat descriptors_tmp;
1502 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1503 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1504 if (trainPoints !=
nullptr && !trainPoints->empty()) {
1505 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1508 if (!descriptors.empty()) {
1509 descriptors_tmp.push_back(descriptors.row((
int)mapOfKeypointHashes[myKeypointHash(*it)]));
1514 if (trainPoints !=
nullptr) {
1516 *trainPoints = trainPoints_tmp;
1519 descriptors_tmp.copyTo(descriptors);
1523 if (descriptors.empty()) {
1524 desc.copyTo(descriptors);
1527 cv::hconcat(descriptors, desc, descriptors);
1532 if (keyPoints.size() != (
size_t)descriptors.rows) {
1533 std::cerr <<
"keyPoints.size() != (size_t) descriptors.rows" << std::endl;
// Post-filters the raw matcher output and stores the surviving matches in
// m_filteredMatches, the corresponding 3D training points in
// m_objectFilteredPoints and the query keypoints in m_queryFilteredKeyPoints.
// NOTE(review): this capture is missing interior lines (the embedded original
// line numbers jump, e.g. 1542 -> 1548), so closing braces, else-branches and
// the filter-type dispatch are not visible here. Comments describe only the
// fragments that are visible; confirm against the full source before editing.
1538 void vpKeyPoint::filterMatches()
// Accumulators for the matches that pass the filter.
1540 std::vector<cv::KeyPoint> queryKpts;
1541 std::vector<cv::Point3f> trainPts;
1542 std::vector<cv::DMatch> m;
// --- k-NN branch: statistics over the best (index 0) distance of each
// k-NN match list, presumably used for a stdDistanceThreshold-style filter.
1548 double min_dist = DBL_MAX;
1550 std::vector<double> distance_vec(m_knnMatches.size());
1553 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
1554 double dist = m_knnMatches[i][0].distance;
1556 distance_vec[i] = dist;
1558 if (dist < min_dist) {
// mean is divided by the number of query descriptors (its accumulation is in
// elided lines above).
1565 mean /= m_queryDescriptors.rows;
// Standard deviation computed as sqrt(E[x^2] - E[x]^2).
1568 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1569 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1570 double threshold = min_dist + stdev;
// Keep a k-NN match when the Lowe ratio test (best/second-best distance)
// and/or the distance threshold accept it (the exact condition combining
// `ratio`, `dist` and `threshold` is in elided lines).
1572 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
1573 if (m_knnMatches[i].size() >= 2) {
1576 float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
1581 double dist = m_knnMatches[i][0].distance;
// queryIdx of the kept match is re-indexed to the position in queryKpts.
1584 m.push_back(cv::DMatch((
int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
1586 if (!m_trainPoints.empty()) {
1587 trainPts.push_back(m_trainPoints[(
size_t)m_knnMatches[i][0].trainIdx]);
1589 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_knnMatches[i][0].queryIdx]);
// --- non-k-NN branch: same distance statistics over m_matches.
1598 double min_dist = DBL_MAX;
1600 std::vector<double> distance_vec(m_matches.size());
1601 for (
size_t i = 0; i < m_matches.size(); i++) {
1602 double dist = m_matches[i].distance;
1604 distance_vec[i] = dist;
1606 if (dist < min_dist) {
1613 mean /= m_queryDescriptors.rows;
1615 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1616 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
// Keep plain matches whose distance is below the computed threshold
// (threshold's definition for this branch is in elided lines).
1626 for (
size_t i = 0; i < m_matches.size(); i++) {
1627 if (m_matches[i].distance <= threshold) {
1628 m.push_back(cv::DMatch((
int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
1630 if (!m_trainPoints.empty()) {
1631 trainPts.push_back(m_trainPoints[(
size_t)m_matches[i].trainIdx]);
1633 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_matches[i].queryIdx]);
// --- optional single-match filter: drop any train keypoint matched by more
// than one query keypoint (count per trainIdx, keep only count == 1).
1638 if (m_useSingleMatchFilter) {
1641 std::vector<cv::DMatch> mTmp;
1642 std::vector<cv::Point3f> trainPtsTmp;
1643 std::vector<cv::KeyPoint> queryKptsTmp;
// First pass: histogram of trainIdx occurrences.
1645 std::map<int, int> mapOfTrainIdx;
1647 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1648 mapOfTrainIdx[it->trainIdx]++;
// Second pass: keep only uniquely-matched train points, re-indexing queryIdx.
1652 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1653 if (mapOfTrainIdx[it->trainIdx] == 1) {
1654 mTmp.push_back(cv::DMatch((
int)queryKptsTmp.size(), it->trainIdx, it->distance));
1656 if (!m_trainPoints.empty()) {
1657 trainPtsTmp.push_back(m_trainPoints[(
size_t)it->trainIdx]);
1659 queryKptsTmp.push_back(queryKpts[(
size_t)it->queryIdx]);
1663 m_filteredMatches = mTmp;
1664 m_objectFilteredPoints = trainPtsTmp;
1665 m_queryFilteredKeyPoints = queryKptsTmp;
// Else branch: publish the unfiltered (by uniqueness) results directly.
1668 m_filteredMatches = m;
1669 m_objectFilteredPoints = trainPts;
1670 m_queryFilteredKeyPoints = queryKpts;
1676 objectPoints = m_objectFilteredPoints;
1687 keyPoints = m_queryFilteredKeyPoints;
1690 keyPoints = m_queryKeyPoints;
// One-shot initialization of the keypoint pipeline: makes sure the legacy
// OpenCV "nonfree" module (SIFT/SURF, OpenCV 2.4.x only) is registered, then
// builds the configured detectors and extractors.
// NOTE(review): interior lines of this capture are elided (original line
// numbers jump); braces and any code between 1718 and 1728 are not visible.
1712 void vpKeyPoint::init()
// OpenCV 2.4.x requires an explicit runtime registration of the nonfree
// module before SIFT/SURF can be created by name.
1715 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
1717 if (!cv::initModule_nonfree()) {
// Non-fatal: only warn, since other detectors/extractors remain usable.
1718 std::cerr <<
"Cannot init module non free, SIFT or SURF cannot be used." << std::endl;
// Instantiate every configured detector and extractor by name.
1728 initDetectors(m_detectorNames);
1729 initExtractors(m_extractorNames);
// Creates the OpenCV feature detector matching \e detectorName and stores it
// in m_detectors. A "Pyramid" prefix (e.g. "PyramidFAST") selects a
// PyramidAdaptedFeatureDetector wrapper around the base detector.
// On OpenCV < 3 the string-based factory cv::FeatureDetector::create() is
// used; on OpenCV >= 3 each supported name is handled explicitly below.
// NOTE(review): many interior lines of this capture are elided (original line
// numbers jump), so the `else`/`throw vpException` branches and most closing
// braces are not visible here; comments describe only the visible fragments.
1733 void vpKeyPoint::initDetector(
const std::string &detectorName)
// --- OpenCV 2.x path: name-based factory.
1735 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
1736 m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
1738 if (m_detectors[detectorName] ==
nullptr) {
// Build a diagnostic message when the factory returned nothing (the throw
// using ss_msg is in elided lines).
1739 std::stringstream ss_msg;
1740 ss_msg <<
"Fail to initialize the detector: " << detectorName
1741 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// --- OpenCV 3.x+ path: strip an optional "Pyramid" prefix and remember it.
1745 std::string detectorNameTmp = detectorName;
1746 std::string pyramid =
"Pyramid";
1747 std::size_t pos = detectorName.find(pyramid);
1748 bool usePyramid =
false;
1749 if (pos != std::string::npos) {
1750 detectorNameTmp = detectorName.substr(pos + pyramid.size());
// --- SIFT ---
1754 if (detectorNameTmp ==
"SIFT") {
// OpenCV >= 4.5: SIFT is in the main modules.
1755 #if (VISP_HAVE_OPENCV_VERSION >= 0x040500)
1756 cv::Ptr<cv::FeatureDetector> siftDetector = cv::SiftFeatureDetector::create();
1758 m_detectors[detectorNameTmp] = siftDetector;
1761 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
// Older OpenCV: SIFT lives in xfeatures2d (or core since 3.4.11 / 4.4).
1764 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
1765 (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1767 cv::Ptr<cv::FeatureDetector> siftDetector;
// m_maxFeatures > 0 caps the number of retained SIFT features.
1768 if (m_maxFeatures > 0) {
1769 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1770 siftDetector = cv::SIFT::create(m_maxFeatures);
1772 siftDetector = cv::xfeatures2d::SIFT::create(m_maxFeatures);
1776 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1777 siftDetector = cv::SIFT::create();
1779 siftDetector = cv::xfeatures2d::SIFT::create();
1783 m_detectors[detectorNameTmp] = siftDetector;
// Pyramid+SIFT is accepted but discouraged (SIFT is already multi-scale).
1786 std::cerr <<
"You should not use SIFT with Pyramid feature detection!" << std::endl;
1787 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1790 std::stringstream ss_msg;
1791 ss_msg <<
"Fail to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1792 <<
" was not build with xFeatures2d module.";
// --- SURF (xfeatures2d only) ---
1797 else if (detectorNameTmp ==
"SURF") {
1798 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1799 cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
1801 m_detectors[detectorNameTmp] = surfDetector;
1804 std::cerr <<
"You should not use SURF with Pyramid feature detection!" << std::endl;
1805 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
1808 std::stringstream ss_msg;
1809 ss_msg <<
"Fail to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1810 <<
" was not build with xFeatures2d module.";
// --- FAST ---
1814 else if (detectorNameTmp ==
"FAST") {
1815 cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
1817 m_detectors[detectorNameTmp] = fastDetector;
1820 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
// --- MSER (variable reused under the name fastDetector) ---
1823 else if (detectorNameTmp ==
"MSER") {
1824 cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
1826 m_detectors[detectorNameTmp] = fastDetector;
1829 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
// --- ORB (m_maxFeatures caps the feature count) ---
1832 else if (detectorNameTmp ==
"ORB") {
1833 cv::Ptr<cv::FeatureDetector> orbDetector;
1834 if (m_maxFeatures > 0) {
1835 orbDetector = cv::ORB::create(m_maxFeatures);
1838 orbDetector = cv::ORB::create();
1841 m_detectors[detectorNameTmp] = orbDetector;
1844 std::cerr <<
"You should not use ORB with Pyramid feature detection!" << std::endl;
1845 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
// --- BRISK ---
1848 else if (detectorNameTmp ==
"BRISK") {
1849 cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
1851 m_detectors[detectorNameTmp] = briskDetector;
1854 std::cerr <<
"You should not use BRISK with Pyramid feature detection!" << std::endl;
1855 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
// --- KAZE ---
1858 else if (detectorNameTmp ==
"KAZE") {
1859 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
1861 m_detectors[detectorNameTmp] = kazeDetector;
1864 std::cerr <<
"You should not use KAZE with Pyramid feature detection!" << std::endl;
1865 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
// --- AKAZE ---
1868 else if (detectorNameTmp ==
"AKAZE") {
1869 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
1871 m_detectors[detectorNameTmp] = akazeDetector;
1874 std::cerr <<
"You should not use AKAZE with Pyramid feature detection!" << std::endl;
1875 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
// --- GFTT (goodFeaturesToTrack) ---
1878 else if (detectorNameTmp ==
"GFTT") {
1879 cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
1881 m_detectors[detectorNameTmp] = gfttDetector;
1884 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
// --- SimpleBlob ---
1887 else if (detectorNameTmp ==
"SimpleBlob") {
1888 cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
1890 m_detectors[detectorNameTmp] = simpleBlobDetector;
1893 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
// --- STAR (xfeatures2d only) ---
1896 else if (detectorNameTmp ==
"STAR") {
1897 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1898 cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
1900 m_detectors[detectorNameTmp] = starDetector;
1903 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
1906 std::stringstream ss_msg;
1907 ss_msg <<
"Fail to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1908 <<
" was not build with xFeatures2d module.";
// --- AGAST ---
1912 else if (detectorNameTmp ==
"AGAST") {
1913 cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
1915 m_detectors[detectorNameTmp] = agastDetector;
1918 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
// --- MSD (OpenCV >= 3.1, xfeatures2d only) ---
1921 else if (detectorNameTmp ==
"MSD") {
1922 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
1923 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
1924 cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
1926 m_detectors[detectorNameTmp] = msdDetector;
1929 std::cerr <<
"You should not use MSD with Pyramid feature detection!" << std::endl;
1930 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
1933 std::stringstream ss_msg;
1934 ss_msg <<
"Fail to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1935 <<
" was not build with xFeatures2d module.";
1939 std::stringstream ss_msg;
1940 ss_msg <<
"Feature " << detectorName <<
" is not available in OpenCV version: " << std::hex
1941 << VISP_HAVE_OPENCV_VERSION <<
" (require >= OpenCV 3.1).";
// Unknown name: warn, detectorInitialized stays false and triggers the
// failure path below.
1945 std::cerr <<
"The detector:" << detectorNameTmp <<
" is not available." << std::endl;
// Verify that a detector was actually stored under the expected key
// (plain name vs. full "Pyramid..." name, depending on usePyramid).
1948 bool detectorInitialized =
false;
1951 detectorInitialized = !m_detectors[detectorNameTmp].empty();
1955 detectorInitialized = !m_detectors[detectorName].empty();
1958 if (!detectorInitialized) {
1959 std::stringstream ss_msg;
1960 ss_msg <<
"Fail to initialize the detector: " << detectorNameTmp
1961 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
1967 void vpKeyPoint::initDetectors(
const std::vector<std::string> &detectorNames)
1969 for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
// Creates the OpenCV descriptor extractor matching \e extractorName and
// stores it in m_extractors. On OpenCV < 3 the string-based factory
// cv::DescriptorExtractor::create() is used; on OpenCV >= 3 each supported
// name is handled explicitly below, with xfeatures2d-only extractors guarded
// by VISP_HAVE_OPENCV_XFEATURES2D.
// NOTE(review): interior lines of this capture are elided (original line
// numbers jump), so the `throw vpException(ss_msg)` statements, `#else`
// directives and most closing braces are not visible; comments describe only
// the visible fragments.
1974 void vpKeyPoint::initExtractor(
const std::string &extractorName)
// --- OpenCV 2.x path: name-based factory.
1976 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
1977 m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
// --- OpenCV 3.x+ path: explicit per-name construction.
// SIFT: main modules since 4.5 (and 3.4.11/4.4), otherwise xfeatures2d.
1979 if (extractorName ==
"SIFT") {
1980 #if (VISP_HAVE_OPENCV_VERSION >= 0x040500)
1981 m_extractors[extractorName] = cv::SIFT::create();
1983 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
1984 (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1986 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1987 m_extractors[extractorName] = cv::SIFT::create();
1989 m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
1992 std::stringstream ss_msg;
1993 ss_msg <<
"Fail to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1994 <<
" was not build with xFeatures2d module.";
// SURF: explicit parameters (hessianThreshold=100, nOctaves=4,
// nOctaveLayers=3, extended=true -> 128-float descriptors).
1999 else if (extractorName ==
"SURF") {
2000 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2002 m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3,
true);
2004 std::stringstream ss_msg;
2005 ss_msg <<
"Fail to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2006 <<
" was not build with xFeatures2d module.";
// ORB / BRISK: always available in the main modules.
2010 else if (extractorName ==
"ORB") {
2011 m_extractors[extractorName] = cv::ORB::create();
2013 else if (extractorName ==
"BRISK") {
2014 m_extractors[extractorName] = cv::BRISK::create();
// FREAK / BRIEF: xfeatures2d only.
2016 else if (extractorName ==
"FREAK") {
2017 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2018 m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2020 std::stringstream ss_msg;
2021 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2022 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2026 else if (extractorName ==
"BRIEF") {
2027 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2028 m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2030 std::stringstream ss_msg;
2031 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2032 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
// KAZE / AKAZE: main modules.
2036 else if (extractorName ==
"KAZE") {
2037 m_extractors[extractorName] = cv::KAZE::create();
2039 else if (extractorName ==
"AKAZE") {
2040 m_extractors[extractorName] = cv::AKAZE::create();
// DAISY / LATCH: xfeatures2d only.
2042 else if (extractorName ==
"DAISY") {
2043 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2044 m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2046 std::stringstream ss_msg;
2047 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2048 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2052 else if (extractorName ==
"LATCH") {
2053 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2054 m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2056 std::stringstream ss_msg;
2057 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2058 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
// LUCID: xfeatures2d only (the creation call itself is in elided lines
// 2064-2067 — only the error path is visible here).
2062 else if (extractorName ==
"LUCID") {
2063 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2068 std::stringstream ss_msg;
2069 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2070 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
// VGG: requires OpenCV >= 3.2 AND xfeatures2d.
2074 else if (extractorName ==
"VGG") {
2075 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2076 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2077 m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2079 std::stringstream ss_msg;
2080 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2081 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2085 std::stringstream ss_msg;
2086 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2087 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
// BoostDesc: requires OpenCV >= 3.2 AND xfeatures2d.
2091 else if (extractorName ==
"BoostDesc") {
2092 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2093 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2094 m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2096 std::stringstream ss_msg;
2097 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2098 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2102 std::stringstream ss_msg;
2103 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2104 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
// Unknown name: warn only; the emptiness check below raises the error.
2109 std::cerr <<
"The extractor:" << extractorName <<
" is not available." << std::endl;
// Final sanity check: fail loudly if no extractor was stored for the name
// (the throw using ss_msg is in elided lines).
2113 if (!m_extractors[extractorName]) {
2114 std::stringstream ss_msg;
2115 ss_msg <<
"Fail to initialize the extractor: " << extractorName
2116 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV 2.4.x SURF: force extended (128-element) descriptors via the
// legacy string-parameter interface.
2120 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2121 if (extractorName ==
"SURF") {
2123 m_extractors[extractorName]->set(
"extended", 1);
// Initializes one descriptor extractor per requested name, then verifies
// that all configured extractors report the same OpenCV descriptor type
// (descriptorType()), since mixing binary (CV_8U) and floating-point
// (CV_32F) descriptors in a single matcher is not supported.
// NOTE(review): interior lines are elided here (original numbers jump
// 2130 -> 2134 and 2143 -> 2150); the loop body calling initExtractor(*it),
// the `else` keyword and the throw raised on a type mismatch are not visible.
2128 void vpKeyPoint::initExtractors(
const std::vector<std::string> &extractorNames)
// Create each extractor by name (body in elided lines).
2130 for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
// Take the first extractor's descriptor type as the reference, then compare
// every subsequent extractor against it.
2134 int descriptorType = CV_32F;
2135 bool firstIteration =
true;
2136 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2137 it != m_extractors.end(); ++it) {
2138 if (firstIteration) {
2139 firstIteration =
false;
2140 descriptorType = it->second->descriptorType();
// Mismatch handling (presumably a thrown exception) is in elided lines.
2143 if (descriptorType != it->second->descriptorType()) {
// Populates the enum -> OpenCV-name lookup tables m_mapOfDetectorNames and
// m_mapOfDescriptorNames, guarding each entry with the OpenCV version /
// module availability it requires.
// NOTE(review): most assignments of this function are elided in this capture
// (original line numbers jump, e.g. 2153 -> 2160); only the entries below are
// visible, the rest (FAST, ORB, SIFT, SURF, ...) sit in the elided lines
// under the corresponding #if guards.
2150 void vpKeyPoint::initFeatureNames()
// --- detector names ---
2153 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
// STAR detector: OpenCV 2.x main modules, or xfeatures2d in OpenCV 3+.
2160 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2161 m_mapOfDetectorNames[DETECTOR_STAR] =
"STAR";
// SIFT/SURF-class detectors: nonfree, xfeatures2d, or recent main modules.
2163 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2164 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2167 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2170 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
// MSD detector: OpenCV >= 3.1 with xfeatures2d.
2175 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2176 m_mapOfDetectorNames[DETECTOR_MSD] =
"MSD";
// --- descriptor names ---
2180 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2183 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2184 m_mapOfDescriptorNames[DESCRIPTOR_FREAK] =
"FREAK";
2185 m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] =
"BRIEF";
2187 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2188 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2191 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2194 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
// DAISY / LATCH: xfeatures2d only.
2197 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2198 m_mapOfDescriptorNames[DESCRIPTOR_DAISY] =
"DAISY";
2199 m_mapOfDescriptorNames[DESCRIPTOR_LATCH] =
"LATCH";
// VGG / BoostDesc: OpenCV >= 3.2 with xfeatures2d.
2202 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2203 m_mapOfDescriptorNames[DESCRIPTOR_VGG] =
"VGG";
2204 m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] =
"BoostDesc";
2211 int descriptorType = CV_32F;
2212 bool firstIteration =
true;
2213 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2214 it != m_extractors.end(); ++it) {
2215 if (firstIteration) {
2216 firstIteration =
false;
2217 descriptorType = it->second->descriptorType();
2220 if (descriptorType != it->second->descriptorType()) {
2226 if (matcherName ==
"FlannBased") {
2227 if (m_extractors.empty()) {
2228 std::cout <<
"Warning: No extractor initialized, by default use "
2229 "floating values (CV_32F) "
2230 "for descriptor type !"
2234 if (descriptorType == CV_8U) {
2235 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2236 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2238 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::LshIndexParams(12, 20, 2));
2242 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2243 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2245 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::KDTreeIndexParams());
2250 m_matcher = cv::DescriptorMatcher::create(matcherName);
2253 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2254 if (m_matcher !=
nullptr && !m_useKnn && matcherName ==
"BruteForce") {
2255 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
2260 std::stringstream ss_msg;
2261 ss_msg <<
"Fail to initialize the matcher: " << matcherName
2262 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
2271 IMatching.
insert(IRef, topLeftCorner);
2273 IMatching.
insert(ICurrent, topLeftCorner);
2280 IMatching.
insert(IRef, topLeftCorner);
2282 IMatching.
insert(ICurrent, topLeftCorner);
2289 int nbImg = (int)(m_mapOfImages.size() + 1);
2291 if (m_mapOfImages.empty()) {
2292 std::cerr <<
"There is no training image loaded !" << std::endl;
2303 int nbWidth = nbImgSqrt;
2304 int nbHeight = nbImgSqrt;
2306 if (nbImgSqrt * nbImgSqrt < nbImg) {
2311 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2313 if (maxW < it->second.getWidth()) {
2314 maxW = it->second.getWidth();
2317 if (maxH < it->second.getHeight()) {
2318 maxH = it->second.getHeight();
2324 int medianI = nbHeight / 2;
2325 int medianJ = nbWidth / 2;
2326 int medianIndex = medianI * nbWidth + medianJ;
2329 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2331 int local_cpt = cpt;
2332 if (cpt >= medianIndex) {
2337 int indexI = local_cpt / nbWidth;
2338 int indexJ = local_cpt - (indexI * nbWidth);
2339 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2341 IMatching.
insert(it->second, topLeftCorner);
2344 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2345 IMatching.
insert(ICurrent, topLeftCorner);
2353 int nbImg = (int)(m_mapOfImages.size() + 1);
2355 if (m_mapOfImages.empty()) {
2356 std::cerr <<
"There is no training image loaded !" << std::endl;
2369 int nbWidth = nbImgSqrt;
2370 int nbHeight = nbImgSqrt;
2372 if (nbImgSqrt * nbImgSqrt < nbImg) {
2377 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2379 if (maxW < it->second.getWidth()) {
2380 maxW = it->second.getWidth();
2383 if (maxH < it->second.getHeight()) {
2384 maxH = it->second.getHeight();
2390 int medianI = nbHeight / 2;
2391 int medianJ = nbWidth / 2;
2392 int medianIndex = medianI * nbWidth + medianJ;
2395 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2397 int local_cpt = cpt;
2398 if (cpt >= medianIndex) {
2403 int indexI = local_cpt / nbWidth;
2404 int indexJ = local_cpt - (indexI * nbWidth);
2405 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2409 IMatching.
insert(IRef, topLeftCorner);
2412 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2413 IMatching.
insert(ICurrent, topLeftCorner);
2419 #if defined(VISP_HAVE_PUGIXML)
2424 m_detectorNames.clear();
2425 m_extractorNames.clear();
2426 m_detectors.clear();
2427 m_extractors.clear();
2429 std::cout <<
" *********** Parsing XML for configuration for vpKeyPoint "
2432 xmlp.
parse(configFile);
2494 int startClassId = 0;
2495 int startImageId = 0;
2497 m_trainKeyPoints.clear();
2498 m_trainPoints.clear();
2499 m_mapOfImageId.clear();
2500 m_mapOfImages.clear();
2504 for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
2505 if (startClassId < it->first) {
2506 startClassId = it->first;
2511 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2513 if (startImageId < it->first) {
2514 startImageId = it->first;
2521 if (!parent.empty()) {
2526 std::ifstream file(filename.c_str(), std::ifstream::binary);
2527 if (!file.is_open()) {
2535 #if !defined(VISP_HAVE_MODULE_IO)
2537 std::cout <<
"Warning: The learning file contains image data that will "
2538 "not be loaded as visp_io module "
2539 "is not available !"
2544 for (
int i = 0; i < nbImgs; i++) {
2552 char *path =
new char[length + 1];
2554 for (
int cpt = 0; cpt < length; cpt++) {
2556 file.read((
char *)(&c),
sizeof(c));
2559 path[length] =
'\0';
2562 #ifdef VISP_HAVE_MODULE_IO
2571 m_mapOfImages[
id + startImageId] = I;
2579 int have3DInfoInt = 0;
2581 bool have3DInfo = have3DInfoInt != 0;
2592 int descriptorType = 5;
2595 cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2596 for (
int i = 0; i < nRows; i++) {
2598 float u, v, size, angle, response;
2599 int octave, class_id, image_id;
2608 cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
2609 m_trainKeyPoints.push_back(keyPoint);
2611 if (image_id != -1) {
2612 #ifdef VISP_HAVE_MODULE_IO
2614 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2624 m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
2627 for (
int j = 0; j < nCols; j++) {
2629 switch (descriptorType) {
2631 unsigned char value;
2632 file.read((
char *)(&value),
sizeof(value));
2633 trainDescriptorsTmp.at<
unsigned char>(i, j) = value;
2638 file.read((
char *)(&value),
sizeof(value));
2639 trainDescriptorsTmp.at<
char>(i, j) = value;
2643 unsigned short int value;
2645 trainDescriptorsTmp.at<
unsigned short int>(i, j) = value;
2651 trainDescriptorsTmp.at<
short int>(i, j) = value;
2657 trainDescriptorsTmp.at<
int>(i, j) = value;
2663 trainDescriptorsTmp.at<
float>(i, j) = value;
2669 trainDescriptorsTmp.at<
double>(i, j) = value;
2675 trainDescriptorsTmp.at<
float>(i, j) = value;
2681 if (!append || m_trainDescriptors.empty()) {
2682 trainDescriptorsTmp.copyTo(m_trainDescriptors);
2685 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2691 #if defined(VISP_HAVE_PUGIXML)
2692 pugi::xml_document doc;
2695 if (!doc.load_file(filename.c_str())) {
2699 pugi::xml_node root_element = doc.document_element();
2701 int descriptorType = CV_32F;
2702 int nRows = 0, nCols = 0;
2705 cv::Mat trainDescriptorsTmp;
2707 for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node;
2708 first_level_node = first_level_node.next_sibling()) {
2710 std::string name(first_level_node.name());
2711 if (first_level_node.type() == pugi::node_element && name ==
"TrainingImageInfo") {
2713 for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node;
2714 image_info_node = image_info_node.next_sibling()) {
2715 name = std::string(image_info_node.name());
2717 if (name ==
"trainImg") {
2719 int id = image_info_node.attribute(
"image_id").as_int();
2722 #ifdef VISP_HAVE_MODULE_IO
2723 std::string path(image_info_node.text().as_string());
2733 m_mapOfImages[
id + startImageId] = I;
2738 else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorsInfo") {
2739 for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
2740 descriptors_info_node = descriptors_info_node.next_sibling()) {
2741 if (descriptors_info_node.type() == pugi::node_element) {
2742 name = std::string(descriptors_info_node.name());
2744 if (name ==
"nrows") {
2745 nRows = descriptors_info_node.text().as_int();
2747 else if (name ==
"ncols") {
2748 nCols = descriptors_info_node.text().as_int();
2750 else if (name ==
"type") {
2751 descriptorType = descriptors_info_node.text().as_int();
2756 trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2758 else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorInfo") {
2759 double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
2760 int octave = 0, class_id = 0, image_id = 0;
2761 double oX = 0.0, oY = 0.0, oZ = 0.0;
2763 std::stringstream ss;
2765 for (pugi::xml_node point_node = first_level_node.first_child(); point_node;
2766 point_node = point_node.next_sibling()) {
2767 if (point_node.type() == pugi::node_element) {
2768 name = std::string(point_node.name());
2772 u = point_node.text().as_double();
2774 else if (name ==
"v") {
2775 v = point_node.text().as_double();
2777 else if (name ==
"size") {
2778 size = point_node.text().as_double();
2780 else if (name ==
"angle") {
2781 angle = point_node.text().as_double();
2783 else if (name ==
"response") {
2784 response = point_node.text().as_double();
2786 else if (name ==
"octave") {
2787 octave = point_node.text().as_int();
2789 else if (name ==
"class_id") {
2790 class_id = point_node.text().as_int();
2791 cv::KeyPoint keyPoint(cv::Point2f((
float)u, (
float)v), (
float)size, (
float)angle, (
float)response, octave,
2792 (class_id + startClassId));
2793 m_trainKeyPoints.push_back(keyPoint);
2795 else if (name ==
"image_id") {
2796 image_id = point_node.text().as_int();
2797 if (image_id != -1) {
2798 #ifdef VISP_HAVE_MODULE_IO
2800 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2804 else if (name ==
"oX") {
2805 oX = point_node.text().as_double();
2807 else if (name ==
"oY") {
2808 oY = point_node.text().as_double();
2810 else if (name ==
"oZ") {
2811 oZ = point_node.text().as_double();
2812 m_trainPoints.push_back(cv::Point3f((
float)oX, (
float)oY, (
float)oZ));
2814 else if (name ==
"desc") {
2817 for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
2818 descriptor_value_node = descriptor_value_node.next_sibling()) {
2820 if (descriptor_value_node.type() == pugi::node_element) {
2822 std::string parseStr(descriptor_value_node.text().as_string());
2827 switch (descriptorType) {
2832 trainDescriptorsTmp.at<
unsigned char>(i, j) = (
unsigned char)parseValue;
2839 trainDescriptorsTmp.at<
char>(i, j) = (
char)parseValue;
2843 ss >> trainDescriptorsTmp.at<
unsigned short int>(i, j);
2847 ss >> trainDescriptorsTmp.at<
short int>(i, j);
2851 ss >> trainDescriptorsTmp.at<
int>(i, j);
2855 ss >> trainDescriptorsTmp.at<
float>(i, j);
2859 ss >> trainDescriptorsTmp.at<
double>(i, j);
2863 ss >> trainDescriptorsTmp.at<
float>(i, j);
2868 std::cerr <<
"Error when converting:" << ss.str() << std::endl;
2881 if (!append || m_trainDescriptors.empty()) {
2882 trainDescriptorsTmp.copyTo(m_trainDescriptors);
2885 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2898 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
2904 m_currentImageId = (int)m_mapOfImages.size();
2908 std::vector<cv::DMatch> &matches,
double &elapsedTime)
2913 m_knnMatches.clear();
2915 if (m_useMatchTrainToQuery) {
2916 std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
2919 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
2920 matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
2922 for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
2923 it1 != knnMatchesTmp.end(); ++it1) {
2924 std::vector<cv::DMatch> tmp;
2925 for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
2926 tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
2928 m_knnMatches.push_back(tmp);
2931 matches.resize(m_knnMatches.size());
2932 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
2936 m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
2937 matches.resize(m_knnMatches.size());
2938 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
2944 if (m_useMatchTrainToQuery) {
2945 std::vector<cv::DMatch> matchesTmp;
2947 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
2948 matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
2950 for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
2951 matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
2956 m_matcher->match(queryDescriptors, matches);
2980 if (m_trainDescriptors.empty()) {
2981 std::cerr <<
"Reference is empty." << std::endl;
2983 std::cerr <<
"Reference is not computed." << std::endl;
2985 std::cerr <<
"Matching is not possible." << std::endl;
2990 if (m_useAffineDetection) {
2991 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
2992 std::vector<cv::Mat> listOfQueryDescriptors;
2998 m_queryKeyPoints.clear();
2999 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3000 it != listOfQueryKeyPoints.end(); ++it) {
3001 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3005 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3009 it->copyTo(m_queryDescriptors);
3012 m_queryDescriptors.push_back(*it);
3017 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3018 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3021 return matchPoint(m_queryKeyPoints, m_queryDescriptors);
3026 m_queryKeyPoints = queryKeyPoints;
3027 m_queryDescriptors = queryDescriptors;
3029 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3032 m_queryFilteredKeyPoints.clear();
3033 m_objectFilteredPoints.clear();
3034 m_filteredMatches.clear();
3039 if (m_useMatchTrainToQuery) {
3041 m_queryFilteredKeyPoints.clear();
3042 m_filteredMatches.clear();
3043 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3044 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3045 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3049 m_queryFilteredKeyPoints = m_queryKeyPoints;
3050 m_filteredMatches = m_matches;
3053 if (!m_trainPoints.empty()) {
3054 m_objectFilteredPoints.clear();
3058 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3060 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3069 return static_cast<unsigned int>(m_filteredMatches.size());
3081 double error, elapsedTime;
3082 return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3088 double error, elapsedTime;
3089 return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
3097 if (m_trainDescriptors.empty()) {
3098 std::cerr <<
"Reference is empty." << std::endl;
3100 std::cerr <<
"Reference is not computed." << std::endl;
3102 std::cerr <<
"Matching is not possible." << std::endl;
3107 if (m_useAffineDetection) {
3108 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3109 std::vector<cv::Mat> listOfQueryDescriptors;
3115 m_queryKeyPoints.clear();
3116 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3117 it != listOfQueryKeyPoints.end(); ++it) {
3118 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3122 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3126 it->copyTo(m_queryDescriptors);
3129 m_queryDescriptors.push_back(*it);
3134 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3135 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3138 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3140 elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3143 m_queryFilteredKeyPoints.clear();
3144 m_objectFilteredPoints.clear();
3145 m_filteredMatches.clear();
3150 if (m_useMatchTrainToQuery) {
3152 m_queryFilteredKeyPoints.clear();
3153 m_filteredMatches.clear();
3154 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3155 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3156 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3160 m_queryFilteredKeyPoints = m_queryKeyPoints;
3161 m_filteredMatches = m_matches;
3164 if (!m_trainPoints.empty()) {
3165 m_objectFilteredPoints.clear();
3169 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3171 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3183 m_ransacInliers.clear();
3184 m_ransacOutliers.clear();
3186 if (m_useRansacVVS) {
3187 std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3191 for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3192 it != m_objectFilteredPoints.end(); ++it, cpt++) {
3196 vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3198 double x = 0.0, y = 0.0;
3203 objectVpPoints[cpt] = pt;
3206 std::vector<vpPoint> inliers;
3207 std::vector<unsigned int> inlierIndex;
3209 bool res =
computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3211 std::map<unsigned int, bool> mapOfInlierIndex;
3212 m_matchRansacKeyPointsToPoints.clear();
3214 for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3215 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3216 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3217 mapOfInlierIndex[*it] =
true;
3220 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3221 if (mapOfInlierIndex.find((
unsigned int)i) == mapOfInlierIndex.end()) {
3222 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3226 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3228 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3229 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3230 m_ransacInliers.begin(), matchRansacToVpImage);
3232 elapsedTime += m_poseTime;
3237 std::vector<cv::Point2f> imageFilteredPoints;
3238 cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3239 std::vector<int> inlierIndex;
3240 bool res =
computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3242 std::map<int, bool> mapOfInlierIndex;
3243 m_matchRansacKeyPointsToPoints.clear();
3245 for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3246 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3247 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3248 mapOfInlierIndex[*it] =
true;
3251 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3252 if (mapOfInlierIndex.find((
int)i) == mapOfInlierIndex.end()) {
3253 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3257 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3259 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3260 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3261 m_ransacInliers.begin(), matchRansacToVpImage);
3263 elapsedTime += m_poseTime;
3274 return (
matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
3278 vpImagePoint ¢erOfGravity,
const bool isPlanarObject,
3279 std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3280 double *meanDescriptorDistance,
double *detectionScore,
const vpRect &rectangle)
3282 if (imPts1 !=
nullptr && imPts2 !=
nullptr) {
3289 double meanDescriptorDistanceTmp = 0.0;
3290 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3291 meanDescriptorDistanceTmp += (double)it->distance;
3294 meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3295 double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3297 if (meanDescriptorDistance !=
nullptr) {
3298 *meanDescriptorDistance = meanDescriptorDistanceTmp;
3304 if (m_filteredMatches.size() >= 4) {
3306 std::vector<cv::Point2f> points1(m_filteredMatches.size());
3308 std::vector<cv::Point2f> points2(m_filteredMatches.size());
3310 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3311 points1[i] = cv::Point2f(m_trainKeyPoints[(
size_t)m_filteredMatches[i].trainIdx].pt);
3312 points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(
size_t)m_filteredMatches[i].queryIdx].pt);
3315 std::vector<vpImagePoint> inliers;
3316 if (isPlanarObject) {
3317 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3318 cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3320 cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3323 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3325 cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3326 realPoint.at<
double>(0, 0) = points1[i].x;
3327 realPoint.at<
double>(1, 0) = points1[i].y;
3328 realPoint.at<
double>(2, 0) = 1.f;
3330 cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3331 double err_x = (reprojectedPoint.at<
double>(0, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].x;
3332 double err_y = (reprojectedPoint.at<
double>(1, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].y;
3333 double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3335 if (reprojectionError < 6.0) {
3336 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3337 if (imPts1 !=
nullptr) {
3338 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3341 if (imPts2 !=
nullptr) {
3342 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3347 else if (m_filteredMatches.size() >= 8) {
3348 cv::Mat fundamentalInliers;
3349 cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3351 for (
size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
3352 if (fundamentalInliers.at<uchar>((
int)i, 0)) {
3353 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3355 if (imPts1 !=
nullptr) {
3356 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3359 if (imPts2 !=
nullptr) {
3360 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3366 if (!inliers.empty()) {
3373 double meanU = 0.0, meanV = 0.0;
3374 for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
3375 meanU += it->get_u();
3376 meanV += it->get_v();
3379 meanU /= (double)inliers.size();
3380 meanV /= (double)inliers.size();
3382 centerOfGravity.
set_u(meanU);
3383 centerOfGravity.
set_v(meanV);
3392 return meanDescriptorDistanceTmp < m_detectionThreshold;
3395 return score > m_detectionScore;
3404 bool isMatchOk =
matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3409 std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
3411 for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
3415 modelImagePoints[cpt] = imPt;
3424 double meanU = 0.0, meanV = 0.0;
3425 for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
3426 meanU += it->get_u();
3427 meanV += it->get_v();
3430 meanU /= (double)m_ransacInliers.size();
3431 meanV /= (double)m_ransacInliers.size();
3433 centerOfGravity.
set_u(meanU);
3434 centerOfGravity.
set_v(meanV);
3441 std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
3442 std::vector<cv::Mat> &listOfDescriptors,
3448 listOfKeypoints.clear();
3449 listOfDescriptors.clear();
3451 for (
int tl = 1; tl < 6; tl++) {
3452 double t = pow(2, 0.5 * tl);
3453 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3454 std::vector<cv::KeyPoint> keypoints;
3455 cv::Mat descriptors;
3457 cv::Mat timg, mask, Ai;
3460 affineSkew(t, phi, timg, mask, Ai);
3463 if (listOfAffineI !=
nullptr) {
3465 bitwise_and(mask, timg, img_disp);
3468 listOfAffineI->push_back(tI);
3472 cv::bitwise_and(mask, timg, img_disp);
3473 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE);
3474 cv::imshow(
"Skew", img_disp);
3478 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3479 it != m_detectors.end(); ++it) {
3480 std::vector<cv::KeyPoint> kp;
3481 it->second->detect(timg, kp, mask);
3482 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3486 extract(timg, keypoints, descriptors, elapsedTime);
3488 for (
unsigned int i = 0; i < keypoints.size(); i++) {
3489 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3490 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3491 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3492 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
3495 listOfKeypoints.push_back(keypoints);
3496 listOfDescriptors.push_back(descriptors);
3505 std::vector<std::pair<double, int> > listOfAffineParams;
3506 for (
int tl = 1; tl < 6; tl++) {
3507 double t = pow(2, 0.5 * tl);
3508 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3509 listOfAffineParams.push_back(std::pair<double, int>(t, phi));
3513 listOfKeypoints.resize(listOfAffineParams.size());
3514 listOfDescriptors.resize(listOfAffineParams.size());
3516 if (listOfAffineI !=
nullptr) {
3517 listOfAffineI->resize(listOfAffineParams.size());
3520 #ifdef VISP_HAVE_OPENMP
3521 #pragma omp parallel for
3523 for (
int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
3524 std::vector<cv::KeyPoint> keypoints;
3525 cv::Mat descriptors;
3527 cv::Mat timg, mask, Ai;
3530 affineSkew(listOfAffineParams[(
size_t)cpt].first, listOfAffineParams[(
size_t)cpt].second, timg, mask, Ai);
3532 if (listOfAffineI !=
nullptr) {
3534 bitwise_and(mask, timg, img_disp);
3537 (*listOfAffineI)[(size_t)cpt] = tI;
3542 cv::bitwise_and(mask, timg, img_disp);
3543 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE);
3544 cv::imshow(
"Skew", img_disp);
3548 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3549 it != m_detectors.end(); ++it) {
3550 std::vector<cv::KeyPoint> kp;
3551 it->second->detect(timg, kp, mask);
3552 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3556 extract(timg, keypoints, descriptors, elapsedTime);
3558 for (
size_t i = 0; i < keypoints.size(); i++) {
3559 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3560 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3561 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3562 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
3565 listOfKeypoints[(size_t)cpt] = keypoints;
3566 listOfDescriptors[(size_t)cpt] = descriptors;
3579 m_computeCovariance =
false;
3581 m_currentImageId = 0;
3583 m_detectionScore = 0.15;
3584 m_detectionThreshold = 100.0;
3585 m_detectionTime = 0.0;
3586 m_detectorNames.clear();
3587 m_detectors.clear();
3588 m_extractionTime = 0.0;
3589 m_extractorNames.clear();
3590 m_extractors.clear();
3591 m_filteredMatches.clear();
3594 m_knnMatches.clear();
3595 m_mapOfImageId.clear();
3596 m_mapOfImages.clear();
3597 m_matcher = cv::Ptr<cv::DescriptorMatcher>();
3598 m_matcherName =
"BruteForce-Hamming";
3600 m_matchingFactorThreshold = 2.0;
3601 m_matchingRatioThreshold = 0.85;
3602 m_matchingTime = 0.0;
3603 m_matchRansacKeyPointsToPoints.clear();
3604 m_nbRansacIterations = 200;
3605 m_nbRansacMinInlierCount = 100;
3606 m_objectFilteredPoints.clear();
3608 m_queryDescriptors = cv::Mat();
3609 m_queryFilteredKeyPoints.clear();
3610 m_queryKeyPoints.clear();
3611 m_ransacConsensusPercentage = 20.0;
3613 m_ransacInliers.clear();
3614 m_ransacOutliers.clear();
3615 m_ransacParallel =
true;
3616 m_ransacParallelNbThreads = 0;
3617 m_ransacReprojectionError = 6.0;
3618 m_ransacThreshold = 0.01;
3619 m_trainDescriptors = cv::Mat();
3620 m_trainKeyPoints.clear();
3621 m_trainPoints.clear();
3622 m_trainVpPoints.clear();
3623 m_useAffineDetection =
false;
3624 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
3625 m_useBruteForceCrossCheck =
true;
3627 m_useConsensusPercentage =
false;
3629 m_useMatchTrainToQuery =
false;
3630 m_useRansacVVS =
true;
3631 m_useSingleMatchFilter =
true;
3633 m_detectorNames.push_back(
"ORB");
3634 m_extractorNames.push_back(
"ORB");
3642 if (!parent.empty()) {
3646 std::map<int, std::string> mapOfImgPath;
3647 if (saveTrainingImages) {
3648 #ifdef VISP_HAVE_MODULE_IO
3650 unsigned int cpt = 0;
3652 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3658 std::stringstream ss;
3659 ss <<
"train_image_" << std::setfill(
'0') << std::setw(3) << cpt;
3661 switch (m_imageFormat) {
3683 std::string imgFilename = ss.str();
3684 mapOfImgPath[it->first] = imgFilename;
3685 vpImageIo::write(it->second, parent + (!parent.empty() ?
"/" :
"") + imgFilename);
3688 std::cout <<
"Warning: in vpKeyPoint::saveLearningData() training images "
3689 "are not saved because "
3690 "visp_io module is not available !"
3695 bool have3DInfo = m_trainPoints.size() > 0;
3696 if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
3702 std::ofstream file(filename.c_str(), std::ofstream::binary);
3703 if (!file.is_open()) {
3708 int nbImgs = (int)mapOfImgPath.size();
3711 #ifdef VISP_HAVE_MODULE_IO
3712 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3718 std::string path = it->second;
3719 int length = (int)path.length();
3722 for (
int cpt = 0; cpt < length; cpt++) {
3723 file.write((
char *)(&path[(
size_t)cpt]),
sizeof(path[(
size_t)cpt]));
3729 int have3DInfoInt = have3DInfo ? 1 : 0;
3732 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3733 int descriptorType = m_trainDescriptors.type();
3744 for (
int i = 0; i < nRows; i++) {
3745 unsigned int i_ = (
unsigned int)i;
3747 float u = m_trainKeyPoints[i_].pt.x;
3751 float v = m_trainKeyPoints[i_].pt.y;
3755 float size = m_trainKeyPoints[i_].size;
3759 float angle = m_trainKeyPoints[i_].angle;
3763 float response = m_trainKeyPoints[i_].response;
3767 int octave = m_trainKeyPoints[i_].octave;
3771 int class_id = m_trainKeyPoints[i_].class_id;
3775 #ifdef VISP_HAVE_MODULE_IO
3776 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3777 int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
3786 float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
3797 for (
int j = 0; j < nCols; j++) {
3799 switch (descriptorType) {
3801 file.write((
char *)(&m_trainDescriptors.at<
unsigned char>(i, j)),
3802 sizeof(m_trainDescriptors.at<
unsigned char>(i, j)));
3806 file.write((
char *)(&m_trainDescriptors.at<
char>(i, j)),
sizeof(m_trainDescriptors.at<
char>(i, j)));
3839 #if defined(VISP_HAVE_PUGIXML)
3840 pugi::xml_document doc;
3841 pugi::xml_node node = doc.append_child(pugi::node_declaration);
3842 node.append_attribute(
"version") =
"1.0";
3843 node.append_attribute(
"encoding") =
"UTF-8";
3849 pugi::xml_node root_node = doc.append_child(
"LearningData");
3852 pugi::xml_node image_node = root_node.append_child(
"TrainingImageInfo");
3854 #ifdef VISP_HAVE_MODULE_IO
3855 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3856 pugi::xml_node image_info_node = image_node.append_child(
"trainImg");
3857 image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
3858 std::stringstream ss;
3860 image_info_node.append_attribute(
"image_id") = ss.str().c_str();
3865 pugi::xml_node descriptors_info_node = root_node.append_child(
"DescriptorsInfo");
3867 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3868 int descriptorType = m_trainDescriptors.type();
3871 descriptors_info_node.append_child(
"nrows").append_child(pugi::node_pcdata).text() = nRows;
3874 descriptors_info_node.append_child(
"ncols").append_child(pugi::node_pcdata).text() = nCols;
3877 descriptors_info_node.append_child(
"type").append_child(pugi::node_pcdata).text() = descriptorType;
3879 for (
int i = 0; i < nRows; i++) {
3880 unsigned int i_ = (
unsigned int)i;
3881 pugi::xml_node descriptor_node = root_node.append_child(
"DescriptorInfo");
3883 descriptor_node.append_child(
"u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
3884 descriptor_node.append_child(
"v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
3885 descriptor_node.append_child(
"size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
3886 descriptor_node.append_child(
"angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
3887 descriptor_node.append_child(
"response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
3888 descriptor_node.append_child(
"octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
3889 descriptor_node.append_child(
"class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
3891 #ifdef VISP_HAVE_MODULE_IO
3892 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3893 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() =
3894 ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
3896 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() = -1;
3900 descriptor_node.append_child(
"oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
3901 descriptor_node.append_child(
"oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
3902 descriptor_node.append_child(
"oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
3905 pugi::xml_node desc_node = descriptor_node.append_child(
"desc");
3907 for (
int j = 0; j < nCols; j++) {
3908 switch (descriptorType) {
3914 int val_tmp = m_trainDescriptors.at<
unsigned char>(i, j);
3915 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
3923 int val_tmp = m_trainDescriptors.at<
char>(i, j);
3924 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
3928 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
3929 m_trainDescriptors.at<
unsigned short int>(i, j);
3933 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
short int>(i, j);
3937 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
int>(i, j);
3941 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
float>(i, j);
3945 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
double>(i, j);
3955 doc.save_file(filename.c_str(), PUGIXML_TEXT(
" "), pugi::format_default, pugi::encoding_utf8);
3962 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
3963 #ifndef DOXYGEN_SHOULD_SKIP_THIS
3965 struct KeypointResponseGreaterThanThreshold
3967 KeypointResponseGreaterThanThreshold(
float _value) : value(_value) { }
3968 inline bool operator()(
const cv::KeyPoint &kpt)
const {
return kpt.response >= value; }
3972 struct KeypointResponseGreater
3974 inline bool operator()(
const cv::KeyPoint &kp1,
const cv::KeyPoint &kp2)
const {
return kp1.response > kp2.response; }
3978 void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints,
int n_points)
3982 if (n_points >= 0 && keypoints.size() > (
size_t)n_points) {
3983 if (n_points == 0) {
3989 std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
3991 float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
3994 std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
3995 keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
3998 keypoints.resize((
size_t)(new_end - keypoints.begin()));
4004 RoiPredicate(
const cv::Rect &_r) : r(_r) { }
4006 bool operator()(
const cv::KeyPoint &keyPt)
const {
return !r.contains(keyPt.pt); }
4011 void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4014 if (borderSize > 0) {
4015 if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4018 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4019 RoiPredicate(cv::Rect(
4020 cv::Point(borderSize, borderSize),
4021 cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4026 struct SizePredicate
4028 SizePredicate(
float _minSize,
float _maxSize) : minSize(_minSize), maxSize(_maxSize) { }
4030 bool operator()(
const cv::KeyPoint &keyPt)
const
4032 float size = keyPt.size;
4033 return (size < minSize) || (size > maxSize);
4036 float minSize, maxSize;
4039 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints,
float minSize,
float maxSize)
4041 CV_Assert(minSize >= 0);
4042 CV_Assert(maxSize >= 0);
4043 CV_Assert(minSize <= maxSize);
4045 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4051 MaskPredicate(
const cv::Mat &_mask) : mask(_mask) { }
4052 bool operator()(
const cv::KeyPoint &key_pt)
const
4054 return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
4061 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints,
const cv::Mat &mask)
4066 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4069 struct KeyPoint_LessThan
4071 KeyPoint_LessThan(
const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) { }
4072 bool operator()(
size_t i,
size_t j)
const
4074 const cv::KeyPoint &kp1 = (*kp)[ i];
4075 const cv::KeyPoint &kp2 = (*kp)[ j];
4077 std::numeric_limits<float>::epsilon())) {
4079 return kp1.pt.x < kp2.pt.x;
4083 std::numeric_limits<float>::epsilon())) {
4085 return kp1.pt.y < kp2.pt.y;
4089 std::numeric_limits<float>::epsilon())) {
4091 return kp1.size > kp2.size;
4095 std::numeric_limits<float>::epsilon())) {
4097 return kp1.angle < kp2.angle;
4101 std::numeric_limits<float>::epsilon())) {
4103 return kp1.response > kp2.response;
4106 if (kp1.octave != kp2.octave) {
4107 return kp1.octave > kp2.octave;
4110 if (kp1.class_id != kp2.class_id) {
4111 return kp1.class_id > kp2.class_id;
4116 const std::vector<cv::KeyPoint> *kp;
4119 void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4121 size_t i, j, n = keypoints.size();
4122 std::vector<size_t> kpidx(n);
4123 std::vector<uchar> mask(n, (uchar)1);
4125 for (i = 0; i < n; i++) {
4128 std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4129 for (i = 1, j = 0; i < n; i++) {
4130 cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4131 cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4134 if (!
vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4135 !
vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4136 !
vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4137 !
vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4145 for (i = j = 0; i < n; i++) {
4148 keypoints[j] = keypoints[i];
4153 keypoints.resize(j);
4159 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(
const cv::Ptr<cv::FeatureDetector> &detector,
4161 : m_detector(detector), m_maxLevel(maxLevel)
// Returns true when no usable underlying detector is set.
// NOTE(review): the C-style cast in the second operand looks garbled by the
// extraction (casting the bool result of empty() to a pointer would not
// compile) — verify this expression against the original source.
4164 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty()
const
4166 return m_detector.empty() || (cv::FeatureDetector *)m_detector->empty();
4169 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4170 CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4172 detectImpl(image.getMat(), keypoints, mask.getMat());
4175 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(
const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4176 const cv::Mat &mask)
const
4178 cv::Mat src = image;
4179 cv::Mat src_mask = mask;
4181 cv::Mat dilated_mask;
4182 if (!mask.empty()) {
4183 cv::dilate(mask, dilated_mask, cv::Mat());
4184 cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4185 mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4186 dilated_mask = mask255;
4189 for (
int l = 0, multiplier = 1; l <= m_maxLevel; ++l, multiplier *= 2) {
4191 std::vector<cv::KeyPoint> new_pts;
4192 m_detector->detect(src, new_pts, src_mask);
4193 std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4194 for (; it != end; ++it) {
4195 it->pt.x *= multiplier;
4196 it->pt.y *= multiplier;
4197 it->size *= multiplier;
4200 keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4203 if (l < m_maxLevel) {
4209 resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
4214 vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4219 #elif !defined(VISP_BUILD_SHARED_LIBS)
// Work around to avoid warning: libvisp_vision.a(vpKeyPoint.cpp.o) has no symbols
void dummy_vpKeyPoint() { }
std::vector< unsigned int > m_matchedReferencePoints
std::vector< vpImagePoint > m_currentImagePointsList
bool m_reference_computed
std::vector< vpImagePoint > m_referenceImagePointsList
Generic class defining intrinsic camera parameters.
Implementation of column vector and the associated operations.
Class to define RGB colors available for display functionalities.
static const vpColor none
static const vpColor green
static void convertFromOpenCV(const cv::KeyPoint &from, vpImagePoint &to)
static void displayCircle(const vpImage< unsigned char > &I, const vpImageCircle &circle, const vpColor &color, bool fill=false, unsigned int thickness=1)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
Error that can be emitted by ViSP classes.
@ badValue
Used to indicate that a value is not in the allowed range.
const char * what() const
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpHomogeneousMatrix inverse() const
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
static void write(const vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
void set_ij(double ii, double jj)
unsigned int getWidth() const
void insert(const vpImage< Type > &src, const vpImagePoint &topLeft)
unsigned int getHeight() const
unsigned int matchPoint(const vpImage< unsigned char > &I)
void getTrainKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpCylinder > &cylinders, const std::vector< std::vector< std::vector< vpImagePoint > > > &vectorOfCylinderRois, std::vector< cv::Point3f > &points, cv::Mat *descriptors=nullptr)
void detectExtractAffine(const vpImage< unsigned char > &I, std::vector< std::vector< cv::KeyPoint > > &listOfKeypoints, std::vector< cv::Mat > &listOfDescriptors, std::vector< vpImage< unsigned char > > *listOfAffineI=nullptr)
void initMatcher(const std::string &matcherName)
void display(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, unsigned int size=3)
void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector< cv::DMatch > &matches, double &elapsedTime)
static void compute3D(const cv::KeyPoint &candidate, const std::vector< vpPoint > &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
@ DETECTOR_KAZE
KAZE detector.
@ DETECTOR_BRISK
BRISK detector.
@ DETECTOR_AKAZE
AKAZE detector.
@ DETECTOR_MSER
MSER detector.
@ DETECTOR_SURF
SURF detector.
@ DETECTOR_AGAST
AGAST detector.
@ DETECTOR_SIFT
SIFT detector.
@ DETECTOR_FAST
FAST detector.
@ DETECTOR_GFTT
GFTT detector.
@ DETECTOR_ORB
ORB detector.
@ DETECTOR_SimpleBlob
SimpleBlob detector.
void createImageMatching(vpImage< unsigned char > &IRef, vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
void loadLearningData(const std::string &filename, bool binaryMode=false, bool append=false)
bool computePose(const std::vector< cv::Point2f > &imagePoints, const std::vector< cv::Point3f > &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector< int > &inlierIndex, double &elapsedTime, bool(*func)(const vpHomogeneousMatrix &)=nullptr)
void extract(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, cv::Mat &descriptors, std::vector< cv::Point3f > *trainPoints=nullptr)
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=nullptr)
bool matchPointAndDetect(const vpImage< unsigned char > &I, vpRect &boundingBox, vpImagePoint ¢erOfGravity, const bool isPlanarObject=true, std::vector< vpImagePoint > *imPts1=nullptr, std::vector< vpImagePoint > *imPts2=nullptr, double *meanDescriptorDistance=nullptr, double *detectionScore=nullptr, const vpRect &rectangle=vpRect())
@ DESCRIPTOR_AKAZE
AKAZE descriptor.
@ DESCRIPTOR_ORB
ORB descriptor.
@ DESCRIPTOR_KAZE
KAZE descriptor.
@ DESCRIPTOR_SURF
SURF descriptor.
@ DESCRIPTOR_BRISK
BRISK descriptor.
@ DESCRIPTOR_SIFT
SIFT descriptor.
void getQueryKeyPoints(std::vector< cv::KeyPoint > &keyPoints, bool matches=true) const
void saveLearningData(const std::string &filename, bool binaryMode=false, bool saveTrainingImages=true)
vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType, const std::string &matcherName, const vpFilterMatchingType &filterType=ratioDistanceThreshold)
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
void displayMatching(const vpImage< unsigned char > &IRef, vpImage< unsigned char > &IMatching, unsigned int crossSize, unsigned int lineThickness=1, const vpColor &color=vpColor::green)
unsigned int buildReference(const vpImage< unsigned char > &I)
void loadConfigFile(const std::string &configFile)
void getTrainPoints(std::vector< cv::Point3f > &points) const
void getObjectPoints(std::vector< cv::Point3f > &objectPoints) const
void insertImageMatching(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
static bool isNaN(double value)
static bool equal(double x, double y, double threshold=0.001)
static int round(double x)
Implementation of a matrix and operations on matrices.
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
This class defines the container for a plane geometrical structure.
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
double get_oX() const
Get the point oX coordinate in the object frame.
void set_x(double x)
Set the point x coordinate in the image plane.
double get_y() const
Get the point y coordinate in the image plane.
double get_oZ() const
Get the point oZ coordinate in the object frame.
void set_oY(double oY)
Set the point oY coordinate in the object frame.
double get_x() const
Get the point x coordinate in the image plane.
void set_oZ(double oZ)
Set the point oZ coordinate in the object frame.
void set_oX(double oX)
Set the point oX coordinate in the object frame.
double get_oY() const
Get the point oY coordinate in the object frame.
void setWorldCoordinates(double oX, double oY, double oZ)
void set_y(double y)
Set the point y coordinate in the image plane.
Defines a generic 2D polygon.
vpRect getBoundingBox() const
bool isInside(const vpImagePoint &iP, const PointInPolygonMethod &method=PnPolyRayCasting) const
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
void setRansacMaxTrials(const int &rM)
void addPoint(const vpPoint &P)
void setRansacNbInliersToReachConsensus(const unsigned int &nbC)
vpMatrix getCovarianceMatrix() const
void setCovarianceComputation(const bool &flag)
std::vector< unsigned int > getRansacInlierIndex() const
void setRansacFilterFlag(const RANSAC_FILTER_FLAGS &flag)
@ NO_FILTER
No filter is applied.
void setUseParallelRansac(bool use)
std::vector< vpPoint > getRansacInliers() const
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, bool(*func)(const vpHomogeneousMatrix &)=nullptr)
void setNbParallelRansacThreads(int nb)
void setRansacThreshold(const double &t)
Defines a rectangle in the plane.
Implementation of a rotation vector as axis-angle minimal representation.
Class that consider the case of a translation vector.
std::string getDetectorName() const
double getMatchingRatioThreshold() const
std::string getExtractorName() const
void parse(const std::string &filename)
double getRansacReprojectionError() const
bool getUseRansacVVSPoseEstimation() const
double getMatchingFactorThreshold() const
int getNbRansacMinInlierCount() const
bool getUseRansacConsensusPercentage() const
std::string getMatcherName() const
int getNbRansacIterations() const
double getRansacConsensusPercentage() const
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
vpMatchingMethodEnum getMatchingMethod() const
double getRansacThreshold() const
VISP_EXPORT double measureTimeMs()