37 #include <visp3/core/vpIoTools.h>
38 #include <visp3/vision/vpKeyPoint.h>
40 #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D)
42 #if defined(VISP_HAVE_PUGIXML)
43 #include <pugixml.hpp>
48 #ifndef DOXYGEN_SHOULD_SKIP_THIS
// File-local helper (inside DOXYGEN_SHOULD_SKIP_THIS): collapses a k-NN match
// list for one query descriptor into a single cv::DMatch.
// NOTE(review): the body is truncated in this excerpt (only the non-empty
// guard is visible); presumably the first/best neighbor is returned — confirm
// against the full source.
52 inline cv::DMatch knnToDMatch(
const std::vector<cv::DMatch> &knnMatches)
54 if (knnMatches.size() > 0) {
// File-local helper: converts a (cv::KeyPoint, cv::Point3f) RANSAC match pair
// into a vpImagePoint. Body not visible in this excerpt — presumably built
// from the keypoint's pixel coordinates (pt.y as i, pt.x as j); verify against
// the full source.
61 inline vpImagePoint matchRansacToVpImage(
const std::pair<cv::KeyPoint, cv::Point3f> &pair)
// vpKeyPoint constructor, enum-based variant (signature line missing from this
// excerpt). The member-initializer list sets every tuning parameter to its
// default: detection score threshold 0.15, detection distance threshold 100,
// matching factor 2.0, matching ratio 0.85, 200 RANSAC iterations, 100 minimum
// inliers, 20% consensus, 6.0 px reprojection error, 0.01 RANSAC threshold.
72 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
73 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
74 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
75 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
76 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
77 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
78 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
79 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
80 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
81 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
// m_useBruteForceCrossCheck only exists for OpenCV 2.4.x builds (the matching
// #endif line is not visible in this excerpt).
82 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
83 m_useBruteForceCrossCheck(true),
85 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
86 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
// Body fragment: maps the detector/descriptor enum values to their string
// names (surrounding brace/init lines missing from this excerpt).
90 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
91 m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);
// vpKeyPoint constructor, string-based variant (signature line missing from
// this excerpt). Identical default initialization to the enum variant above;
// the detector/extractor are registered directly by name.
98 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
99 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
100 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
101 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
102 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
103 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
104 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
105 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
106 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
107 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
// OpenCV 2.4.x-only member (matching #endif not visible in this excerpt).
108 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
109 m_useBruteForceCrossCheck(true),
111 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
112 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
// Body fragment: registers the requested detector and extractor names.
116 m_detectorNames.push_back(detectorName);
117 m_extractorNames.push_back(extractorName);
// vpKeyPoint constructor, vector-of-names variant (signature line missing from
// this excerpt). Same defaults as the other constructors; here the detector
// and extractor name lists are taken directly from the arguments in the
// initializer list, so no push_back is needed in the body.
124 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
125 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
126 m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
127 m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
128 m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
129 m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
130 m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
131 m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0),
132 m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(), m_ransacParallel(false),
133 m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01), m_trainDescriptors(),
134 m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
// OpenCV 2.4.x-only member (matching #endif not visible in this excerpt).
135 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
136 m_useBruteForceCrossCheck(true),
138 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
139 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
// Simulates an affine viewpoint change of `img` (ASIFT-style detection):
// rotates by `phi` then tilts by `tilt` along the x axis, producing the warped
// image in place, a validity `mask`, and in `Ai` the inverse affine transform
// mapping warped coordinates back to the original image.
// NOTE(review): several original lines (w/h setup, closing braces) are missing
// from this excerpt; the visible statements are kept verbatim.
145 void vpKeyPoint::affineSkew(
double tilt,
double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
150 mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));
152 cv::Mat A = cv::Mat::eye(2, 3, CV_32F);
// Rotation branch: rotate the image corners, take the bounding rect of the
// rotated quad and shift by (-rect.x, -rect.y) so the result fits tightly.
155 if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
160 A = (cv::Mat_<float>(2, 2) << c, -s, s, c);
162 cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
163 cv::Mat tcorners = corners * A.t();
164 cv::Mat tcorners_x, tcorners_y;
165 tcorners.col(0).copyTo(tcorners_x);
166 tcorners.col(1).copyTo(tcorners_y);
167 std::vector<cv::Mat> channels;
168 channels.push_back(tcorners_x);
169 channels.push_back(tcorners_y);
170 cv::merge(channels, tcorners);
172 cv::Rect rect = cv::boundingRect(tcorners);
173 A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
175 cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
// Tilt branch: anti-alias with a Gaussian (sigma = 0.8*sqrt(tilt^2-1))
// before subsampling the x axis by a factor 1/tilt, then fold the tilt into
// the first row of the affine matrix.
178 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
179 double s = 0.8 * sqrt(tilt * tilt - 1);
180 cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
181 cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
182 A.row(0) = A.row(0) / tilt;
// If any warp was applied, warp the mask the same way (nearest-neighbor so it
// stays binary) and return the inverse transform in Ai.
185 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
186 std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
189 cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
191 cv::invertAffineTransform(A, Ai);
// buildReference (image + optional rectangle variant; signature lines missing
// from this excerpt). Resets the training database, detects/extracts reference
// keypoints — either from multiple affine-simulated views when
// m_useAffineDetection is set, or from a single detect+extract pass — records
// which training image each keypoint came from, feeds the descriptors to the
// matcher, and returns the number of reference keypoints.
215 m_trainPoints.clear();
216 m_mapOfImageId.clear();
217 m_mapOfImages.clear();
218 m_currentImageId = 1;
220 if (m_useAffineDetection) {
221 std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
222 std::vector<cv::Mat> listOfTrainDescriptors;
// Concatenate the keypoints detected in every simulated affine view
// (the call producing the lists is not visible in this excerpt).
228 m_trainKeyPoints.clear();
229 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
230 it != listOfTrainKeyPoints.end(); ++it) {
231 m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
// Stack the per-view descriptor matrices into m_trainDescriptors: the first
// one is copied, the following ones appended (branch lines partly missing).
235 for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
239 it->copyTo(m_trainDescriptors);
242 m_trainDescriptors.push_back(*it);
// Single-view path: plain detection + extraction with timing.
247 detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
248 extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
// Remember, for each keypoint class_id, which training image it belongs to.
253 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
254 m_mapOfImageId[it->class_id] = m_currentImageId;
258 m_mapOfImages[m_currentImageId] = I;
// Hand the reference descriptors to the descriptor matcher.
267 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
269 return static_cast<unsigned int>(m_trainKeyPoints.size());
// buildReference (grayscale image + keypoints + associated 3D points;
// leading signature lines missing from this excerpt). Extracts descriptors
// for the given keypoints; if the extractor dropped some keypoints, re-aligns
// `points3f` with the surviving keypoints via a keypoint-hash lookup, then
// delegates to the descriptor-based overload.
279 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
281 cv::Mat trainDescriptors;
283 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
285 extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
287 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
// Some keypoints were removed during extraction: rebuild the 3D-point list so
// it stays index-aligned with the remaining keypoints. Keypoints are matched
// by a hash of their attributes (myKeypointHash, defined elsewhere).
291 std::map<size_t, size_t> mapOfKeypointHashes;
293 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
295 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
298 std::vector<cv::Point3f> trainPoints_tmp;
299 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
300 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
301 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
306 points3f = trainPoints_tmp;
309 return (
buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
// buildReference (color image + keypoints + associated 3D points; leading
// signature lines missing from this excerpt). Mirrors the grayscale overload:
// extract descriptors, re-align `points3f` if the extractor dropped keypoints,
// then delegate to the descriptor-based overload.
313 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
315 cv::Mat trainDescriptors;
317 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
319 extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
321 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
// Re-align the 3D points with the keypoints that survived extraction
// (same hash-based matching as the grayscale overload).
325 std::map<size_t, size_t> mapOfKeypointHashes;
327 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
329 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
332 std::vector<cv::Point3f> trainPoints_tmp;
333 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
334 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
335 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
340 points3f = trainPoints_tmp;
343 return (
buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id));
// Core buildReference: stores the supplied keypoints, descriptors and 3D
// points as the reference database. The visible clears (lines 353-357) reset
// the database — presumably guarded by `!append` (the condition line is not
// visible; confirm against the full source). Optionally overrides the
// keypoints' class_id, records the image-id bookkeeping, and feeds the
// descriptors to the matcher.
348 const std::vector<cv::KeyPoint> &trainKeyPoints,
349 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
350 bool append,
int class_id)
353 m_trainPoints.clear();
354 m_mapOfImageId.clear();
355 m_mapOfImages.clear();
356 m_currentImageId = 0;
357 m_trainKeyPoints.clear();
// Work on a copy so the caller's keypoints are not mutated by the class_id
// override below.
362 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
364 if (class_id != -1) {
365 for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
366 it->class_id = class_id;
// Map every keypoint class_id to the current training image id.
372 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
374 m_mapOfImageId[it->class_id] = m_currentImageId;
378 m_mapOfImages[m_currentImageId] = I;
381 m_trainKeyPoints.insert(m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
// Copy descriptors on a fresh build, append on subsequent builds (the branch
// condition lines are not visible in this excerpt).
383 trainDescriptors.copyTo(m_trainDescriptors);
386 m_trainDescriptors.push_back(trainDescriptors);
388 this->m_trainPoints.insert(m_trainPoints.end(), points3f.begin(), points3f.end());
396 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
400 return static_cast<unsigned int>(m_trainKeyPoints.size());
// buildReference (color wrapper; leading signature lines missing from this
// excerpt): delegates to the grayscale core overload using the internal
// grayscale buffer m_I (the color→gray conversion line is not visible here —
// confirm against the full source).
404 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
405 bool append,
int class_id)
408 return (
buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
// compute3D (cv::KeyPoint variant; fragment — signature and several lines
// missing from this excerpt). Builds a plane from three points of the ROI,
// back-projects the keypoint onto that plane to get its camera-frame 3D
// coordinates, then transforms them into the object frame with cMo.inverse().
415 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
422 vpPlane Po(pts[0], pts[1], pts[2]);
423 double xc = 0.0, yc = 0.0;
434 point_obj = cMo.
inverse() * point_cam;
435 point = cv::Point3f((
float)point_obj[0], (
float)point_obj[1], (
float)point_obj[2]);
// compute3D (vpImagePoint variant; fragment — signature and several lines
// missing from this excerpt). Same plane/back-projection scheme as the
// cv::KeyPoint overload: plane from three ROI points, camera-frame
// intersection, then object-frame transform via cMo.inverse().
442 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
449 vpPlane Po(pts[0], pts[1], pts[2]);
450 double xc = 0.0, yc = 0.0;
461 point_obj = cMo.
inverse() * point_cam;
// compute3DForPointsInPolygons (cv::KeyPoint variant; fragment). For each
// candidate keypoint lying inside one of the model polygons, computes its 3D
// object-frame coordinates and keeps the keypoint, its 3D point and (when
// requested) its descriptor row. A candidate is removed from the working list
// as soon as it is assigned to a polygon, so each keypoint is used at most
// once. Note: `candidates` is both input and output — matched keypoints are
// appended to it.
466 std::vector<cv::KeyPoint> &candidates,
467 const std::vector<vpPolygon> &polygons,
468 const std::vector<std::vector<vpPoint> > &roisPt,
469 std::vector<cv::Point3f> &points, cv::Mat *descriptors)
471 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
// Pair each candidate with its original index so the matching descriptor row
// can be fetched after candidates are erased.
478 std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
479 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
480 pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
484 std::vector<vpPolygon> polygons_tmp = polygons;
485 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
486 std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
488 while (it2 != pairOfCandidatesToCheck.end()) {
// vpImagePoint is (i, j) = (row, col), hence (pt.y, pt.x).
489 imPt.
set_ij(it2->first.pt.y, it2->first.pt.x);
490 if (it1->isInside(imPt)) {
491 candidates.push_back(it2->first);
493 points.push_back(pt);
495 if (descriptors !=
nullptr) {
496 desc.push_back(descriptors->row((
int)it2->second));
// Erase the matched candidate so it cannot be assigned to another polygon.
500 it2 = pairOfCandidatesToCheck.erase(it2);
508 if (descriptors !=
nullptr) {
509 desc.copyTo(*descriptors);
// compute3DForPointsInPolygons (vpImagePoint variant; fragment). Same scheme
// as the cv::KeyPoint overload: candidates inside a model polygon are kept
// (appended back to `candidates`), their 3D vpPoint and optional descriptor
// row collected, and the candidate erased so it is assigned only once.
514 std::vector<vpImagePoint> &candidates,
515 const std::vector<vpPolygon> &polygons,
516 const std::vector<std::vector<vpPoint> > &roisPt,
517 std::vector<vpPoint> &points, cv::Mat *descriptors)
519 std::vector<vpImagePoint> candidatesToCheck = candidates;
// Keep the original index of each candidate for descriptor-row lookup.
525 std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
526 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
527 pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
531 std::vector<vpPolygon> polygons_tmp = polygons;
532 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
533 std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
535 while (it2 != pairOfCandidatesToCheck.end()) {
536 if (it1->isInside(it2->first)) {
537 candidates.push_back(it2->first);
539 points.push_back(pt);
541 if (descriptors !=
nullptr) {
542 desc.push_back(descriptors->row((
int)it2->second));
// One polygon per candidate: remove it from the working list once matched.
546 it2 = pairOfCandidatesToCheck.erase(it2);
// compute3DForPointsOnCylinders (cv::KeyPoint variant; fragment — leading
// signature lines missing). For each candidate keypoint lying inside a
// cylinder ROI (the isInside test line is not visible here), computes the
// depth Z from the cylinder equation, builds the camera-frame 3D point,
// transforms it to the object frame with cMo.inverse(), and collects the 3D
// point plus (optionally) the keypoint's descriptor row.
558 const std::vector<vpCylinder> &cylinders,
559 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
560 cv::Mat *descriptors)
562 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
568 size_t cpt_keypoint = 0;
569 for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
570 ++it1, cpt_keypoint++) {
571 size_t cpt_cylinder = 0;
// Each cylinder may be described by several ROIs (inner vector levels).
574 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
575 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
578 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
580 candidates.push_back(*it1);
584 double xm = 0.0, ym = 0.0;
586 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
// Reject degenerate depths (NaN or non-positive Z).
588 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
590 point_cam[0] = xm * Z;
591 point_cam[1] = ym * Z;
595 point_obj = cMo.
inverse() * point_cam;
598 points.push_back(cv::Point3f((
float)pt.
get_oX(), (
float)pt.
get_oY(), (
float)pt.
get_oZ()));
600 if (descriptors !=
nullptr) {
601 desc.push_back(descriptors->row((
int)cpt_keypoint));
611 if (descriptors !=
nullptr) {
612 desc.copyTo(*descriptors);
// compute3DForPointsOnCylinders (vpImagePoint variant; fragment — leading
// signature lines missing). Same scheme as the cv::KeyPoint overload, but the
// output 3D points are vpPoint objects instead of cv::Point3f.
618 const std::vector<vpCylinder> &cylinders,
619 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
620 cv::Mat *descriptors)
622 std::vector<vpImagePoint> candidatesToCheck = candidates;
628 size_t cpt_keypoint = 0;
629 for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
630 ++it1, cpt_keypoint++) {
631 size_t cpt_cylinder = 0;
634 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
635 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
638 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
640 candidates.push_back(*it1);
644 double xm = 0.0, ym = 0.0;
646 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
// Reject degenerate depths (NaN or non-positive Z).
648 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
650 point_cam[0] = xm * Z;
651 point_cam[1] = ym * Z;
655 point_obj = cMo.
inverse() * point_cam;
658 points.push_back(pt);
660 if (descriptors !=
nullptr) {
661 desc.push_back(descriptors->row((
int)cpt_keypoint));
671 if (descriptors !=
nullptr) {
672 desc.copyTo(*descriptors);
// computePose using OpenCV's solvePnPRansac (fragment — signature, camera
// matrix construction and several lines missing from this excerpt). Requires
// at least 4 aligned 2D/3D correspondences; distortion is assumed already
// corrected (zero distCoeffs). On OpenCV >= 3 the new solvePnPRansac overload
// is used; on 2.x the minimum inlier count (possibly derived from the
// consensus percentage) is passed explicitly. The resulting rvec/tvec are
// converted to a ViSP pose (vpTranslationVector + vpThetaUVector).
682 if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
684 std::cerr <<
"Not enough points to compute the pose (at least 4 points "
691 cv::Mat cameraMatrix =
// No distortion: the input points are expected to be undistorted upstream —
// TODO confirm against the full source.
701 cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);
704 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
706 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
707 (
float)m_ransacReprojectionError,
710 inlierIndex, cv::SOLVEPNP_ITERATIVE);
// OpenCV 2.x path: compute the inlier target from the consensus percentage
// when requested.
730 int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
731 if (m_useConsensusPercentage) {
732 nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
735 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
736 (
float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
739 catch (cv::Exception &e) {
740 std::cerr << e.what() << std::endl;
// Convert the OpenCV Rodrigues/translation output into a ViSP pose.
744 vpTranslationVector translationVec(tvec.at<
double>(0), tvec.at<
double>(1), tvec.at<
double>(2));
745 vpThetaUVector thetaUVector(rvec.at<
double>(0), rvec.at<
double>(1), rvec.at<
double>(2));
// Optional user validation callback on the estimated pose.
748 if (func !=
nullptr) {
// computePose convenience overload (fragment — leading signature lines
// missing): forwards to the full overload with a local, discarded inlier
// index vector.
762 std::vector<vpPoint> &inliers,
double &elapsedTime,
765 std::vector<unsigned int> inlierIndex;
766 return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
// computePose using ViSP's RANSAC pose estimation (fragment — many lines,
// including the vpPose setup and the actual RANSAC call, are missing from
// this excerpt). Requires at least 4 object points; the inlier target can be
// derived from the consensus percentage; optionally computes the pose
// covariance; exceptions are logged and the success flag returned.
770 std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
double &elapsedTime,
775 if (objectVpPoints.size() < 4) {
// Feed all candidate points to the pose estimator (the addPoint call line is
// not visible in this excerpt).
785 for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
789 unsigned int nbInlierToReachConsensus = (
unsigned int)m_nbRansacMinInlierCount;
790 if (m_useConsensusPercentage) {
791 nbInlierToReachConsensus =
792 (
unsigned int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
802 bool isRansacPoseEstimationOk =
false;
809 if (m_computeCovariance) {
// Failure path: log the exception message (the catch line is not visible).
814 std::cerr <<
"e=" << e.
what() << std::endl;
832 return isRansacPoseEstimationOk;
// Mean reprojection error of the matched keypoints for a given pose: each 3D
// point is projected with cMo (projection lines not visible in this excerpt)
// to pixel coordinates (u, v), and the Euclidean distance to the detected
// keypoint is averaged over all matches. The empty-input case is guarded at
// line 838 (its return value is not visible here).
835 double vpKeyPoint::computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
838 if (matchKeyPoints.size() == 0) {
844 std::vector<double> errors(matchKeyPoints.size());
847 for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
848 it != matchKeyPoints.end(); ++it, cpt++) {
853 double u = 0.0, v = 0.0;
855 errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
// Arithmetic mean of the per-match pixel errors.
858 return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
// createImageMatching (grayscale variant; fragment — signature and the final
// allocation lines are missing from this excerpt). Computes the mosaic layout
// for side-by-side match display: a near-square grid sized by
// sqrt(nbTrainingImages + 1) (the +1 cell holds the current image), expanded
// by one row/column when the square grid is too small, with cell size set to
// the maximum training/current image dimensions.
885 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
887 if (m_mapOfImages.empty()) {
888 std::cerr <<
"There is no training image loaded !" << std::endl;
899 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
902 unsigned int nbWidth = nbImgSqrt;
904 unsigned int nbHeight = nbImgSqrt;
// A perfect square may not fit all images; grow the grid if needed.
907 if (nbImgSqrt * nbImgSqrt < nbImg) {
911 unsigned int maxW = ICurrent.
getWidth();
912 unsigned int maxH = ICurrent.
getHeight();
913 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
915 if (maxW < it->second.getWidth()) {
916 maxW = it->second.getWidth();
919 if (maxH < it->second.getHeight()) {
920 maxH = it->second.getHeight();
// createImageMatching (color variant; fragment — signature and final
// allocation lines missing). Identical mosaic-layout computation to the
// grayscale overload; note the training images map is still
// vpImage<unsigned char> (only the output image differs).
932 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
934 if (m_mapOfImages.empty()) {
935 std::cerr <<
"There is no training image loaded !" << std::endl;
946 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
949 unsigned int nbWidth = nbImgSqrt;
951 unsigned int nbHeight = nbImgSqrt;
// Grow the near-square grid when it cannot hold every image.
954 if (nbImgSqrt * nbImgSqrt < nbImg) {
958 unsigned int maxW = ICurrent.
getWidth();
959 unsigned int maxH = ICurrent.
getHeight();
960 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
962 if (maxW < it->second.getWidth()) {
963 maxW = it->second.getWidth();
966 if (maxH < it->second.getHeight()) {
967 maxH = it->second.getHeight();
// Thin detect() wrappers (fragments — the surrounding signatures are mostly
// missing): each forwards to the overload that also reports the elapsed time
// (discarded by the caller-facing variant).
978 detect(I, keyPoints, elapsedTime, rectangle);
984 detect(I_color, keyPoints, elapsedTime, rectangle);
// cv::Mat wrapper: forwards with a mask instead of a rectangle.
987 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
const cv::Mat &mask)
990 detect(matImg, keyPoints, elapsedTime, mask);
// detect (grayscale + vpRect variant; fragment — signature and image
// conversion lines missing). Builds a binary mask from the rectangle when one
// is provided (255 inside, 0 outside), otherwise an all-255 mask, then
// forwards to the mask-based core detect(). CV_FILLED fills the rectangle.
998 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1001 cv::Point leftTop((
int)rectangle.
getLeft(), (
int)rectangle.
getTop()),
1003 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
// No rectangle: detect over the whole image.
1006 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1009 detect(matImg, keyPoints, elapsedTime, mask);
// detect (color + vpRect variant; fragment — signature and conversion lines
// missing). Same rectangle→mask construction as the grayscale overload, then
// forwards to the mask-based core detect().
1017 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1020 cv::Point leftTop((
int)rectangle.
getLeft(), (
int)rectangle.
getTop()),
1022 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
// No rectangle: detect over the whole image.
1025 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1028 detect(matImg, keyPoints, elapsedTime, mask);
// Core detect(): runs every configured feature detector on matImg (restricted
// by the mask) and concatenates all detected keypoints into `keyPoints`.
// Timing and keyPoints.clear() lines are not visible in this excerpt.
1031 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
1032 const cv::Mat &mask)
1037 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
1038 it != m_detectors.end(); ++it) {
1039 std::vector<cv::KeyPoint> kp;
1041 it->second->detect(matImg, kp, mask);
// Accumulate results across detectors.
1042 keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
// Fragments of four display() overloads (signatures and the drawing calls are
// missing from this excerpt). Each converts the filtered matches to ViSP
// image points (query — and, for the two-image variants, train) and
// presumably draws them; code kept verbatim.
1050 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1052 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1055 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1063 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1065 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1068 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1076 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1079 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1086 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1089 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
// displayMatching (side-by-side variant #1; fragment — signature and drawing
// lines missing). For each filtered match, draws a line from the train
// keypoint (left/reference image) to the query keypoint shifted right by the
// reference image width. With the default color a random color per match is
// generated (rand() seeded with time — display-only, determinism not needed).
1095 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
1098 srand((
unsigned int)time(
nullptr));
1101 std::vector<vpImagePoint> queryImageKeyPoints;
1103 std::vector<vpImagePoint> trainImageKeyPoints;
1107 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1109 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1112 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
// Query point is offset horizontally by the reference image width so both
// images can be shown side by side.
1113 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1114 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
// displayMatching (side-by-side variant #2; fragment — signature and drawing
// lines missing). Identical logic to variant #1: random per-match colors,
// train keypoint on the left, query keypoint offset by IRef width.
1122 unsigned int lineThickness,
const vpColor &color)
1125 srand((
unsigned int)time(
nullptr));
1128 std::vector<vpImagePoint> queryImageKeyPoints;
1130 std::vector<vpImagePoint> trainImageKeyPoints;
1134 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1136 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1139 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1140 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1141 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
// displayMatching (side-by-side variant #3; fragment — signature and drawing
// lines missing). Same logic again for another image-type combination.
1149 unsigned int lineThickness,
const vpColor &color)
1152 srand((
unsigned int)time(
nullptr));
1155 std::vector<vpImagePoint> queryImageKeyPoints;
1157 std::vector<vpImagePoint> trainImageKeyPoints;
1161 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1163 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1166 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1167 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1168 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
// displayMatching (multi-training-image mosaic variant; fragment — the final
// drawing statements are missing from this excerpt). Lays out all training
// images on a near-square grid with the current image in the median cell,
// then draws: train keypoints in their cell, query keypoints / RANSAC inliers
// / RANSAC outliers in the median cell, and a line per filtered match between
// the query keypoint and its train keypoint's cell.
1177 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1178 unsigned int lineThickness)
1180 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1182 std::cerr <<
"There is no training image loaded !" << std::endl;
// Grid layout: nbImg training images + 1 cell for the current image.
1188 int nbImg = (int)(m_mapOfImages.size() + 1);
1197 int nbWidth = nbImgSqrt;
1198 int nbHeight = nbImgSqrt;
1200 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Map each training image id to its grid index and find the max image size
// (the common cell size).
1204 std::map<int, int> mapOfImageIdIndex;
1207 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1209 mapOfImageIdIndex[it->first] = cpt;
1211 if (maxW < it->second.getWidth()) {
1212 maxW = it->second.getWidth();
1215 if (maxH < it->second.getHeight()) {
1216 maxH = it->second.getHeight();
// The median grid cell is reserved for the current image; training images at
// or past it are shifted one cell to the right.
1222 int medianI = nbHeight / 2;
1223 int medianJ = nbWidth / 2;
1224 int medianIndex = medianI * nbWidth + medianJ;
1225 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1227 int current_class_id_index = 0;
1228 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1229 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1234 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1237 int indexI = current_class_id_index / nbWidth;
1238 int indexJ = current_class_id_index - (indexI * nbWidth);
1239 topLeftCorner.
set_ij((
int)maxH * indexI, (int)maxW * indexJ);
// Current-image overlays in the median cell: query keypoints, then RANSAC
// inliers and outliers.
1246 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1247 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1252 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1257 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
// Match lines: resolve the train keypoint's grid cell (same median shift as
// above) and connect it to the query keypoint in the median cell.
1263 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1264 int current_class_id = 0;
1265 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1266 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1271 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1274 int indexI = current_class_id / nbWidth;
1275 int indexJ = current_class_id - (indexI * nbWidth);
1277 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1278 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1279 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1280 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
1290 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1291 unsigned int lineThickness)
1293 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1295 std::cerr <<
"There is no training image loaded !" << std::endl;
1301 int nbImg = (int)(m_mapOfImages.size() + 1);
1310 int nbWidth = nbImgSqrt;
1311 int nbHeight = nbImgSqrt;
1313 if (nbImgSqrt * nbImgSqrt < nbImg) {
1317 std::map<int, int> mapOfImageIdIndex;
1320 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1322 mapOfImageIdIndex[it->first] = cpt;
1324 if (maxW < it->second.getWidth()) {
1325 maxW = it->second.getWidth();
1328 if (maxH < it->second.getHeight()) {
1329 maxH = it->second.getHeight();
1335 int medianI = nbHeight / 2;
1336 int medianJ = nbWidth / 2;
1337 int medianIndex = medianI * nbWidth + medianJ;
1338 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1340 int current_class_id_index = 0;
1341 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1342 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1347 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1350 int indexI = current_class_id_index / nbWidth;
1351 int indexJ = current_class_id_index - (indexI * nbWidth);
1352 topLeftCorner.
set_ij((
int)maxH * indexI, (int)maxW * indexJ);
1359 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1360 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1365 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1370 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1376 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1377 int current_class_id = 0;
1378 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1379 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1384 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1387 int indexI = current_class_id / nbWidth;
1388 int indexJ = current_class_id - (indexI * nbWidth);
1390 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1391 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1392 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1393 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
1403 std::vector<cv::Point3f> *trainPoints)
1406 extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1410 std::vector<cv::Point3f> *trainPoints)
1413 extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
1417 std::vector<cv::Point3f> *trainPoints)
1420 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1424 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1428 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1432 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1436 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1440 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1445 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1446 itd != m_extractors.end(); ++itd) {
1450 if (trainPoints !=
nullptr && !trainPoints->empty()) {
1453 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1456 itd->second->compute(matImg, keyPoints, descriptors);
1458 if (keyPoints.size() != keyPoints_tmp.size()) {
1462 std::map<size_t, size_t> mapOfKeypointHashes;
1464 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1466 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1469 std::vector<cv::Point3f> trainPoints_tmp;
1470 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1471 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1472 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1477 *trainPoints = trainPoints_tmp;
1482 itd->second->compute(matImg, keyPoints, descriptors);
1488 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1492 itd->second->compute(matImg, keyPoints, desc);
1494 if (keyPoints.size() != keyPoints_tmp.size()) {
1498 std::map<size_t, size_t> mapOfKeypointHashes;
1500 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1502 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1505 std::vector<cv::Point3f> trainPoints_tmp;
1506 cv::Mat descriptors_tmp;
1507 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1508 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1509 if (trainPoints !=
nullptr && !trainPoints->empty()) {
1510 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1513 if (!descriptors.empty()) {
1514 descriptors_tmp.push_back(descriptors.row((
int)mapOfKeypointHashes[myKeypointHash(*it)]));
1519 if (trainPoints !=
nullptr) {
1521 *trainPoints = trainPoints_tmp;
1524 descriptors_tmp.copyTo(descriptors);
1528 if (descriptors.empty()) {
1529 desc.copyTo(descriptors);
1532 cv::hconcat(descriptors, desc, descriptors);
1537 if (keyPoints.size() != (
size_t)descriptors.rows) {
1538 std::cerr <<
"keyPoints.size() != (size_t) descriptors.rows" << std::endl;
// Filter the raw matches according to the configured m_filterType and store
// the surviving data in m_filteredMatches, m_queryFilteredKeyPoints and
// m_objectFilteredPoints.
// NOTE(review): some original lines (branch headers, loop bodies, braces) are
// elided in this view; comments annotate only the statements shown.
1543 void vpKeyPoint::filterMatches()
// Matches kept after filtering, plus the corresponding query keypoints and,
// when available, the corresponding 3D training points.
1545 std::vector<cv::KeyPoint> queryKpts;
1546 std::vector<cv::Point3f> trainPts;
1547 std::vector<cv::DMatch> m;
// --- k-NN branch: gather distance statistics over the best (index 0) match
// of each query keypoint. ---
1553 double min_dist = DBL_MAX;
1555 std::vector<double> distance_vec(m_knnMatches.size());
1558 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
1559 double dist = m_knnMatches[i][0].distance;
1561 distance_vec[i] = dist;
// Track the smallest best-match distance seen so far.
1563 if (dist < min_dist) {
// Mean of the accumulated best-match distances, averaged over the number of
// query descriptors (accumulation happens in elided lines above).
1570 mean /= m_queryDescriptors.rows;
// Standard deviation via E[x^2] - (E[x])^2.
1573 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1574 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
// Distance threshold: minimum distance plus one standard deviation.
1575 double threshold = min_dist + stdev;
// Keep matches passing the active test (ratio test and/or distance threshold;
// the selection logic between these lines is elided here).
1577 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
1578 if (m_knnMatches[i].size() >= 2) {
// Ratio between best and second-best match distances (Lowe's ratio test).
1581 float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
1586 double dist = m_knnMatches[i][0].distance;
// Re-index the kept match: its queryIdx becomes the position in the filtered
// keypoint list being built.
1589 m.push_back(cv::DMatch((
int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
1591 if (!m_trainPoints.empty()) {
1592 trainPts.push_back(m_trainPoints[(
size_t)m_knnMatches[i][0].trainIdx]);
1594 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_knnMatches[i][0].queryIdx]);
// --- non k-NN branch: same statistics computed over the flat m_matches
// list. ---
1603 double min_dist = DBL_MAX;
1605 std::vector<double> distance_vec(m_matches.size());
1606 for (
size_t i = 0; i < m_matches.size(); i++) {
1607 double dist = m_matches[i].distance;
1609 distance_vec[i] = dist;
1611 if (dist < min_dist) {
1618 mean /= m_queryDescriptors.rows;
1620 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1621 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
// Keep matches whose distance is below the computed threshold (threshold
// computation for this branch is elided here).
1631 for (
size_t i = 0; i < m_matches.size(); i++) {
1632 if (m_matches[i].distance <= threshold) {
1633 m.push_back(cv::DMatch((
int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
1635 if (!m_trainPoints.empty()) {
1636 trainPts.push_back(m_trainPoints[(
size_t)m_matches[i].trainIdx]);
1638 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_matches[i].queryIdx]);
// Optional extra pass: discard ambiguous matches, i.e. train keypoints that
// were matched by more than one query keypoint.
1643 if (m_useSingleMatchFilter) {
1646 std::vector<cv::DMatch> mTmp;
1647 std::vector<cv::Point3f> trainPtsTmp;
1648 std::vector<cv::KeyPoint> queryKptsTmp;
// Count how many times each train index appears among the kept matches.
1650 std::map<int, int> mapOfTrainIdx;
1652 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1653 mapOfTrainIdx[it->trainIdx]++;
// Keep only the train keypoints matched exactly once, re-indexing queryIdx
// against the new (reduced) keypoint list.
1657 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1658 if (mapOfTrainIdx[it->trainIdx] == 1) {
1659 mTmp.push_back(cv::DMatch((
int)queryKptsTmp.size(), it->trainIdx, it->distance));
1661 if (!m_trainPoints.empty()) {
1662 trainPtsTmp.push_back(m_trainPoints[(
size_t)it->trainIdx]);
1664 queryKptsTmp.push_back(queryKpts[(
size_t)it->queryIdx]);
// Publish the single-match-filtered results.
1668 m_filteredMatches = mTmp;
1669 m_objectFilteredPoints = trainPtsTmp;
1670 m_queryFilteredKeyPoints = queryKptsTmp;
// No single-match filtering requested: publish the filtered lists as-is.
1673 m_filteredMatches = m;
1674 m_objectFilteredPoints = trainPts;
1675 m_queryFilteredKeyPoints = queryKpts;
1681 objectPoints = m_objectFilteredPoints;
1692 keyPoints = m_queryFilteredKeyPoints;
1695 keyPoints = m_queryKeyPoints;
// Initialize the keypoint machinery: load the OpenCV non-free module when
// required (OpenCV 2.4.x builds only), then instantiate the configured
// detectors and extractors from their name lists.
1717 void vpKeyPoint::init()
// On OpenCV 2.4.x, SIFT/SURF live in the non-free module, which must be
// explicitly initialized before they can be created.
1720 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
1722 if (!cv::initModule_nonfree()) {
1723 std::cerr <<
"Cannot init module non free, SIFT or SURF cannot be used." << std::endl;
// Create the detector/extractor objects listed by name.
1733 initDetectors(m_detectorNames);
1734 initExtractors(m_extractorNames);
// Instantiate the feature detector named detectorName and store it in
// m_detectors. On OpenCV >= 3 a "Pyramid" prefix in the name requests
// wrapping the detector in a PyramidAdaptedFeatureDetector. The ss_msg
// branches build error messages when the detector cannot be created
// (presumably thrown afterwards in elided lines — confirm against full file).
1738 void vpKeyPoint::initDetector(
const std::string &detectorName)
// OpenCV 2.x: detectors are created through the string-based factory.
1740 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
1741 m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
1743 if (m_detectors[detectorName] ==
nullptr) {
1744 std::stringstream ss_msg;
1745 ss_msg <<
"Fail to initialize the detector: " << detectorName
1746 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV >= 3: strip an optional "Pyramid" prefix and remember whether the
// pyramid wrapper was requested.
1750 std::string detectorNameTmp = detectorName;
1751 std::string pyramid =
"Pyramid";
1752 std::size_t pos = detectorName.find(pyramid);
1753 bool usePyramid =
false;
1754 if (pos != std::string::npos) {
1755 detectorNameTmp = detectorName.substr(pos + pyramid.size());
// --- SIFT ---
1759 if (detectorNameTmp ==
"SIFT") {
// OpenCV >= 4.5.0 ships SIFT in the main features2d module.
1760 #if (VISP_HAVE_OPENCV_VERSION >= 0x040500)
1761 cv::Ptr<cv::FeatureDetector> siftDetector = cv::SiftFeatureDetector::create();
1763 m_detectors[detectorNameTmp] = siftDetector;
1766 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
// Older builds: SIFT comes from xfeatures2d, or from the main module on
// 3.4.11+ (major 3) and 4.4.0+.
1769 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
1770 (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1772 cv::Ptr<cv::FeatureDetector> siftDetector;
// Honor the requested maximum number of features when set (> 0).
1773 if (m_maxFeatures > 0) {
1774 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1775 siftDetector = cv::SIFT::create(m_maxFeatures);
1777 siftDetector = cv::xfeatures2d::SIFT::create(m_maxFeatures);
1781 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1782 siftDetector = cv::SIFT::create();
1784 siftDetector = cv::xfeatures2d::SIFT::create();
1788 m_detectors[detectorNameTmp] = siftDetector;
// Pyramid wrapping is discouraged for SIFT (it is already scale-invariant)
// but still honored.
1791 std::cerr <<
"You should not use SIFT with Pyramid feature detection!" << std::endl;
1792 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1795 std::stringstream ss_msg;
1796 ss_msg <<
"Fail to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1797 <<
" was not build with xFeatures2d module.";
// --- SURF (xfeatures2d only) ---
1802 else if (detectorNameTmp ==
"SURF") {
1803 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1804 cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
1806 m_detectors[detectorNameTmp] = surfDetector;
1809 std::cerr <<
"You should not use SURF with Pyramid feature detection!" << std::endl;
1810 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
1813 std::stringstream ss_msg;
1814 ss_msg <<
"Fail to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1815 <<
" was not build with xFeatures2d module.";
// --- FAST ---
1819 else if (detectorNameTmp ==
"FAST") {
1820 cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
1822 m_detectors[detectorNameTmp] = fastDetector;
1825 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
// --- MSER (note: local variable reuses the name "fastDetector") ---
1828 else if (detectorNameTmp ==
"MSER") {
1829 cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
1831 m_detectors[detectorNameTmp] = fastDetector;
1834 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
// --- ORB (honors m_maxFeatures) ---
1837 else if (detectorNameTmp ==
"ORB") {
1838 cv::Ptr<cv::FeatureDetector> orbDetector;
1839 if (m_maxFeatures > 0) {
1840 orbDetector = cv::ORB::create(m_maxFeatures);
1843 orbDetector = cv::ORB::create();
1846 m_detectors[detectorNameTmp] = orbDetector;
1849 std::cerr <<
"You should not use ORB with Pyramid feature detection!" << std::endl;
1850 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
// --- BRISK ---
1853 else if (detectorNameTmp ==
"BRISK") {
1854 cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
1856 m_detectors[detectorNameTmp] = briskDetector;
1859 std::cerr <<
"You should not use BRISK with Pyramid feature detection!" << std::endl;
1860 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
// --- KAZE ---
1863 else if (detectorNameTmp ==
"KAZE") {
1864 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
1866 m_detectors[detectorNameTmp] = kazeDetector;
1869 std::cerr <<
"You should not use KAZE with Pyramid feature detection!" << std::endl;
1870 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
// --- AKAZE ---
1873 else if (detectorNameTmp ==
"AKAZE") {
1874 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
1876 m_detectors[detectorNameTmp] = akazeDetector;
1879 std::cerr <<
"You should not use AKAZE with Pyramid feature detection!" << std::endl;
1880 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
// --- GFTT (good features to track) ---
1883 else if (detectorNameTmp ==
"GFTT") {
1884 cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
1886 m_detectors[detectorNameTmp] = gfttDetector;
1889 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
// --- SimpleBlob ---
1892 else if (detectorNameTmp ==
"SimpleBlob") {
1893 cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
1895 m_detectors[detectorNameTmp] = simpleBlobDetector;
1898 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
// --- STAR (xfeatures2d only) ---
1901 else if (detectorNameTmp ==
"STAR") {
1902 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1903 cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
1905 m_detectors[detectorNameTmp] = starDetector;
1908 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
1911 std::stringstream ss_msg;
1912 ss_msg <<
"Fail to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1913 <<
" was not build with xFeatures2d module.";
// --- AGAST ---
1917 else if (detectorNameTmp ==
"AGAST") {
1918 cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
1920 m_detectors[detectorNameTmp] = agastDetector;
1923 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
// --- MSD (requires OpenCV >= 3.1 with xfeatures2d) ---
1926 else if (detectorNameTmp ==
"MSD") {
1927 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
1928 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
1929 cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
1931 m_detectors[detectorNameTmp] = msdDetector;
1934 std::cerr <<
"You should not use MSD with Pyramid feature detection!" << std::endl;
1935 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
1938 std::stringstream ss_msg;
1939 ss_msg <<
"Fail to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1940 <<
" was not build with xFeatures2d module.";
1944 std::stringstream ss_msg;
1945 ss_msg <<
"Feature " << detectorName <<
" is not available in OpenCV version: " << std::hex
1946 << VISP_HAVE_OPENCV_VERSION <<
" (require >= OpenCV 3.1).";
// Unknown detector name: warn only.
1950 std::cerr <<
"The detector:" << detectorNameTmp <<
" is not available." << std::endl;
// Final sanity check: the entry stored above must be non-empty, looked up
// under the key actually used (plain name vs. pyramid-prefixed name).
1953 bool detectorInitialized =
false;
1956 detectorInitialized = !m_detectors[detectorNameTmp].empty();
1960 detectorInitialized = !m_detectors[detectorName].empty();
1963 if (!detectorInitialized) {
1964 std::stringstream ss_msg;
1965 ss_msg <<
"Fail to initialize the detector: " << detectorNameTmp
1966 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Initialize one detector per entry in detectorNames (delegates to
// initDetector for each name).
1972 void vpKeyPoint::initDetectors(
const std::vector<std::string> &detectorNames)
1974 for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
// Instantiate the descriptor extractor named extractorName and store it in
// m_extractors. The ss_msg branches build error messages when the extractor
// is not available in the current OpenCV build (presumably thrown afterwards
// in elided lines — confirm against full file).
1979 void vpKeyPoint::initExtractor(
const std::string &extractorName)
// OpenCV 2.x: extractors are created through the string-based factory.
1981 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
1982 m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
// --- SIFT (main module on 4.5+, else xfeatures2d / main module on
// 3.4.11+ and 4.4.0+) ---
1984 if (extractorName ==
"SIFT") {
1985 #if (VISP_HAVE_OPENCV_VERSION >= 0x040500)
1986 m_extractors[extractorName] = cv::SIFT::create();
1988 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
1989 (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1991 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1992 m_extractors[extractorName] = cv::SIFT::create();
1994 m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
1997 std::stringstream ss_msg;
1998 ss_msg <<
"Fail to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1999 <<
" was not build with xFeatures2d module.";
// --- SURF: create(hessianThreshold=100, nOctaves=4, nOctaveLayers=3,
// extended=true) — extended=true selects 128-element descriptors. ---
2004 else if (extractorName ==
"SURF") {
2005 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2007 m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3,
true);
2009 std::stringstream ss_msg;
2010 ss_msg <<
"Fail to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2011 <<
" was not build with xFeatures2d module.";
// --- ORB / BRISK (main features2d module) ---
2015 else if (extractorName ==
"ORB") {
2016 m_extractors[extractorName] = cv::ORB::create();
2018 else if (extractorName ==
"BRISK") {
2019 m_extractors[extractorName] = cv::BRISK::create();
// --- FREAK (xfeatures2d only) ---
2021 else if (extractorName ==
"FREAK") {
2022 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2023 m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2025 std::stringstream ss_msg;
2026 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2027 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
// --- BRIEF (xfeatures2d only) ---
2031 else if (extractorName ==
"BRIEF") {
2032 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2033 m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2035 std::stringstream ss_msg;
2036 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2037 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
// --- KAZE / AKAZE (main features2d module) ---
2041 else if (extractorName ==
"KAZE") {
2042 m_extractors[extractorName] = cv::KAZE::create();
2044 else if (extractorName ==
"AKAZE") {
2045 m_extractors[extractorName] = cv::AKAZE::create();
// --- DAISY (xfeatures2d only) ---
2047 else if (extractorName ==
"DAISY") {
2048 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2049 m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2051 std::stringstream ss_msg;
2052 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2053 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
// --- LATCH (xfeatures2d only) ---
2057 else if (extractorName ==
"LATCH") {
2058 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2059 m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2061 std::stringstream ss_msg;
2062 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2063 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
// --- LUCID: the creation statement is elided in this view; only the
// xfeatures2d guard and the error path are visible. ---
2067 else if (extractorName ==
"LUCID") {
2068 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2073 std::stringstream ss_msg;
2074 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2075 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
// --- VGG (requires OpenCV >= 3.2 with xfeatures2d) ---
2079 else if (extractorName ==
"VGG") {
2080 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2081 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2082 m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2084 std::stringstream ss_msg;
2085 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2086 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2090 std::stringstream ss_msg;
2091 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2092 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
// --- BoostDesc (requires OpenCV >= 3.2 with xfeatures2d) ---
2096 else if (extractorName ==
"BoostDesc") {
2097 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2098 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2099 m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2101 std::stringstream ss_msg;
2102 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2103 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2107 std::stringstream ss_msg;
2108 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2109 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
// Unknown extractor name: warn only.
2114 std::cerr <<
"The extractor:" << extractorName <<
" is not available." << std::endl;
// Sanity check: the stored cv::Ptr must be non-null.
2118 if (!m_extractors[extractorName]) {
2119 std::stringstream ss_msg;
2120 ss_msg <<
"Fail to initialize the extractor: " << extractorName
2121 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV 2.4.x: switch SURF to extended descriptors via the generic
// parameter interface.
2125 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2126 if (extractorName ==
"SURF") {
2128 m_extractors[extractorName]->set(
"extended", 1);
// Initialize one extractor per entry in extractorNames, then verify that all
// configured extractors produce descriptors of the same OpenCV matrix type
// (extract() later hconcat's descriptor matrices, which requires a single
// common type).
2133 void vpKeyPoint::initExtractors(
const std::vector<std::string> &extractorNames)
2135 for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
// Record the descriptor type of the first extractor and compare every other
// extractor against it.
2139 int descriptorType = CV_32F;
2140 bool firstIteration =
true;
2141 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2142 it != m_extractors.end(); ++it) {
2143 if (firstIteration) {
2144 firstIteration =
false;
2145 descriptorType = it->second->descriptorType();
// Mismatch detected: the (elided) body of this branch handles the error.
2148 if (descriptorType != it->second->descriptorType()) {
// Populate the enum-to-OpenCV-name maps (m_mapOfDetectorNames and
// m_mapOfDescriptorNames) according to which features are available in the
// current OpenCV build; several unconditional entries are elided in this view.
2155 void vpKeyPoint::initFeatureNames()
// --- Detector names ---
2158 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
// STAR is in the main module before OpenCV 3, in xfeatures2d afterwards.
2165 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2166 m_mapOfDetectorNames[DETECTOR_STAR] =
"STAR";
2168 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2169 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2172 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2175 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
// MSD requires OpenCV >= 3.1 with xfeatures2d.
2180 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2181 m_mapOfDetectorNames[DETECTOR_MSD] =
"MSD";
// --- Descriptor (extractor) names ---
2185 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2188 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2189 m_mapOfDescriptorNames[DESCRIPTOR_FREAK] =
"FREAK";
2190 m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] =
"BRIEF";
2192 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2193 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2196 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2199 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2202 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2203 m_mapOfDescriptorNames[DESCRIPTOR_DAISY] =
"DAISY";
2204 m_mapOfDescriptorNames[DESCRIPTOR_LATCH] =
"LATCH";
// VGG and BoostDesc require OpenCV >= 3.2 with xfeatures2d.
2207 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2208 m_mapOfDescriptorNames[DESCRIPTOR_VGG] =
"VGG";
2209 m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] =
"BoostDesc";
2216 int descriptorType = CV_32F;
2217 bool firstIteration =
true;
2218 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2219 it != m_extractors.end(); ++it) {
2220 if (firstIteration) {
2221 firstIteration =
false;
2222 descriptorType = it->second->descriptorType();
2225 if (descriptorType != it->second->descriptorType()) {
2231 if (matcherName ==
"FlannBased") {
2232 if (m_extractors.empty()) {
2233 std::cout <<
"Warning: No extractor initialized, by default use "
2234 "floating values (CV_32F) "
2235 "for descriptor type !"
2239 if (descriptorType == CV_8U) {
2240 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2241 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2243 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::LshIndexParams(12, 20, 2));
2247 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2248 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2250 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::KDTreeIndexParams());
2255 m_matcher = cv::DescriptorMatcher::create(matcherName);
2258 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2259 if (m_matcher !=
nullptr && !m_useKnn && matcherName ==
"BruteForce") {
2260 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
2265 std::stringstream ss_msg;
2266 ss_msg <<
"Fail to initialize the matcher: " << matcherName
2267 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
2276 IMatching.
insert(IRef, topLeftCorner);
2278 IMatching.
insert(ICurrent, topLeftCorner);
2285 IMatching.
insert(IRef, topLeftCorner);
2287 IMatching.
insert(ICurrent, topLeftCorner);
2294 int nbImg = (int)(m_mapOfImages.size() + 1);
2296 if (m_mapOfImages.empty()) {
2297 std::cerr <<
"There is no training image loaded !" << std::endl;
2308 int nbWidth = nbImgSqrt;
2309 int nbHeight = nbImgSqrt;
2311 if (nbImgSqrt * nbImgSqrt < nbImg) {
2316 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2318 if (maxW < it->second.getWidth()) {
2319 maxW = it->second.getWidth();
2322 if (maxH < it->second.getHeight()) {
2323 maxH = it->second.getHeight();
2329 int medianI = nbHeight / 2;
2330 int medianJ = nbWidth / 2;
2331 int medianIndex = medianI * nbWidth + medianJ;
2334 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2336 int local_cpt = cpt;
2337 if (cpt >= medianIndex) {
2342 int indexI = local_cpt / nbWidth;
2343 int indexJ = local_cpt - (indexI * nbWidth);
2344 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2346 IMatching.
insert(it->second, topLeftCorner);
2349 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2350 IMatching.
insert(ICurrent, topLeftCorner);
2358 int nbImg = (int)(m_mapOfImages.size() + 1);
2360 if (m_mapOfImages.empty()) {
2361 std::cerr <<
"There is no training image loaded !" << std::endl;
2374 int nbWidth = nbImgSqrt;
2375 int nbHeight = nbImgSqrt;
2377 if (nbImgSqrt * nbImgSqrt < nbImg) {
2382 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2384 if (maxW < it->second.getWidth()) {
2385 maxW = it->second.getWidth();
2388 if (maxH < it->second.getHeight()) {
2389 maxH = it->second.getHeight();
2395 int medianI = nbHeight / 2;
2396 int medianJ = nbWidth / 2;
2397 int medianIndex = medianI * nbWidth + medianJ;
2400 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2402 int local_cpt = cpt;
2403 if (cpt >= medianIndex) {
2408 int indexI = local_cpt / nbWidth;
2409 int indexJ = local_cpt - (indexI * nbWidth);
2410 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2414 IMatching.
insert(IRef, topLeftCorner);
2417 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2418 IMatching.
insert(ICurrent, topLeftCorner);
2424 #if defined(VISP_HAVE_PUGIXML)
2429 m_detectorNames.clear();
2430 m_extractorNames.clear();
2431 m_detectors.clear();
2432 m_extractors.clear();
2434 std::cout <<
" *********** Parsing XML for configuration for vpKeyPoint "
2437 xmlp.
parse(configFile);
2499 int startClassId = 0;
2500 int startImageId = 0;
2502 m_trainKeyPoints.clear();
2503 m_trainPoints.clear();
2504 m_mapOfImageId.clear();
2505 m_mapOfImages.clear();
2509 for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
2510 if (startClassId < it->first) {
2511 startClassId = it->first;
2516 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2518 if (startImageId < it->first) {
2519 startImageId = it->first;
2526 if (!parent.empty()) {
2531 std::ifstream file(filename.c_str(), std::ifstream::binary);
2532 if (!file.is_open()) {
2540 #if !defined(VISP_HAVE_MODULE_IO)
2542 std::cout <<
"Warning: The learning file contains image data that will "
2543 "not be loaded as visp_io module "
2544 "is not available !"
2549 for (
int i = 0; i < nbImgs; i++) {
2557 char *path =
new char[length + 1];
2559 for (
int cpt = 0; cpt < length; cpt++) {
2561 file.read((
char *)(&c),
sizeof(c));
2564 path[length] =
'\0';
2567 #ifdef VISP_HAVE_MODULE_IO
2576 m_mapOfImages[
id + startImageId] = I;
2584 int have3DInfoInt = 0;
2586 bool have3DInfo = have3DInfoInt != 0;
2597 int descriptorType = 5;
2600 cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2601 for (
int i = 0; i < nRows; i++) {
2603 float u, v, size, angle, response;
2604 int octave, class_id, image_id;
2613 cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
2614 m_trainKeyPoints.push_back(keyPoint);
2616 if (image_id != -1) {
2617 #ifdef VISP_HAVE_MODULE_IO
2619 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2629 m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
2632 for (
int j = 0; j < nCols; j++) {
2634 switch (descriptorType) {
2636 unsigned char value;
2637 file.read((
char *)(&value),
sizeof(value));
2638 trainDescriptorsTmp.at<
unsigned char>(i, j) = value;
2643 file.read((
char *)(&value),
sizeof(value));
2644 trainDescriptorsTmp.at<
char>(i, j) = value;
2648 unsigned short int value;
2650 trainDescriptorsTmp.at<
unsigned short int>(i, j) = value;
2656 trainDescriptorsTmp.at<
short int>(i, j) = value;
2662 trainDescriptorsTmp.at<
int>(i, j) = value;
2668 trainDescriptorsTmp.at<
float>(i, j) = value;
2674 trainDescriptorsTmp.at<
double>(i, j) = value;
2680 trainDescriptorsTmp.at<
float>(i, j) = value;
2686 if (!append || m_trainDescriptors.empty()) {
2687 trainDescriptorsTmp.copyTo(m_trainDescriptors);
2690 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2696 #if defined(VISP_HAVE_PUGIXML)
2697 pugi::xml_document doc;
2700 if (!doc.load_file(filename.c_str())) {
2704 pugi::xml_node root_element = doc.document_element();
2706 int descriptorType = CV_32F;
2707 int nRows = 0, nCols = 0;
2710 cv::Mat trainDescriptorsTmp;
2712 for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node;
2713 first_level_node = first_level_node.next_sibling()) {
2715 std::string name(first_level_node.name());
2716 if (first_level_node.type() == pugi::node_element && name ==
"TrainingImageInfo") {
2718 for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node;
2719 image_info_node = image_info_node.next_sibling()) {
2720 name = std::string(image_info_node.name());
2722 if (name ==
"trainImg") {
2724 int id = image_info_node.attribute(
"image_id").as_int();
2727 #ifdef VISP_HAVE_MODULE_IO
2728 std::string path(image_info_node.text().as_string());
2738 m_mapOfImages[
id + startImageId] = I;
2743 else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorsInfo") {
2744 for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
2745 descriptors_info_node = descriptors_info_node.next_sibling()) {
2746 if (descriptors_info_node.type() == pugi::node_element) {
2747 name = std::string(descriptors_info_node.name());
2749 if (name ==
"nrows") {
2750 nRows = descriptors_info_node.text().as_int();
2752 else if (name ==
"ncols") {
2753 nCols = descriptors_info_node.text().as_int();
2755 else if (name ==
"type") {
2756 descriptorType = descriptors_info_node.text().as_int();
2761 trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2763 else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorInfo") {
2764 double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
2765 int octave = 0, class_id = 0, image_id = 0;
2766 double oX = 0.0, oY = 0.0, oZ = 0.0;
2768 std::stringstream ss;
2770 for (pugi::xml_node point_node = first_level_node.first_child(); point_node;
2771 point_node = point_node.next_sibling()) {
2772 if (point_node.type() == pugi::node_element) {
2773 name = std::string(point_node.name());
2777 u = point_node.text().as_double();
2779 else if (name ==
"v") {
2780 v = point_node.text().as_double();
2782 else if (name ==
"size") {
2783 size = point_node.text().as_double();
2785 else if (name ==
"angle") {
2786 angle = point_node.text().as_double();
2788 else if (name ==
"response") {
2789 response = point_node.text().as_double();
2791 else if (name ==
"octave") {
2792 octave = point_node.text().as_int();
2794 else if (name ==
"class_id") {
2795 class_id = point_node.text().as_int();
2796 cv::KeyPoint keyPoint(cv::Point2f((
float)u, (
float)v), (
float)size, (
float)angle, (
float)response, octave,
2797 (class_id + startClassId));
2798 m_trainKeyPoints.push_back(keyPoint);
2800 else if (name ==
"image_id") {
2801 image_id = point_node.text().as_int();
2802 if (image_id != -1) {
2803 #ifdef VISP_HAVE_MODULE_IO
2805 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2809 else if (name ==
"oX") {
2810 oX = point_node.text().as_double();
2812 else if (name ==
"oY") {
2813 oY = point_node.text().as_double();
2815 else if (name ==
"oZ") {
2816 oZ = point_node.text().as_double();
2817 m_trainPoints.push_back(cv::Point3f((
float)oX, (
float)oY, (
float)oZ));
2819 else if (name ==
"desc") {
2822 for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
2823 descriptor_value_node = descriptor_value_node.next_sibling()) {
2825 if (descriptor_value_node.type() == pugi::node_element) {
2827 std::string parseStr(descriptor_value_node.text().as_string());
2832 switch (descriptorType) {
2837 trainDescriptorsTmp.at<
unsigned char>(i, j) = (
unsigned char)parseValue;
2844 trainDescriptorsTmp.at<
char>(i, j) = (
char)parseValue;
2848 ss >> trainDescriptorsTmp.at<
unsigned short int>(i, j);
2852 ss >> trainDescriptorsTmp.at<
short int>(i, j);
2856 ss >> trainDescriptorsTmp.at<
int>(i, j);
2860 ss >> trainDescriptorsTmp.at<
float>(i, j);
2864 ss >> trainDescriptorsTmp.at<
double>(i, j);
2868 ss >> trainDescriptorsTmp.at<
float>(i, j);
2873 std::cerr <<
"Error when converting:" << ss.str() << std::endl;
2886 if (!append || m_trainDescriptors.empty()) {
2887 trainDescriptorsTmp.copyTo(m_trainDescriptors);
2890 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2903 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
2909 m_currentImageId = (int)m_mapOfImages.size();
// -- vpKeyPoint::match() fragment ----------------------------------------------------------
// NOTE(review): this chunk is a garbled listing (original line numbers fused into the text,
// leading tokens of the signature elided). Code kept byte-identical; comments only.
2913 std::vector<cv::DMatch> &matches,
double &elapsedTime)
// Drop k-NN results from any previous call before matching again.
2918 m_knnMatches.clear();
2920 if (m_useMatchTrainToQuery) {
2921 std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
// Match train -> query on a clone of the matcher, then swap query/train indices so the
// stored matches keep the usual query -> train orientation.
2924 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
2925 matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
2927 for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
2928 it1 != knnMatchesTmp.end(); ++it1) {
2929 std::vector<cv::DMatch> tmp;
2930 for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
2931 tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
2933 m_knnMatches.push_back(tmp);
// Reduce each k-NN candidate list to a single DMatch via knnToDMatch (defined at file top).
2936 matches.resize(m_knnMatches.size());
2937 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
// Direct query -> train k-NN matching with k = 2.
2941 m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
2942 matches.resize(m_knnMatches.size());
2943 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
// Non-kNN path: plain 1-NN matching, optionally train -> query with indices swapped back.
2949 if (m_useMatchTrainToQuery) {
2950 std::vector<cv::DMatch> matchesTmp;
2952 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
2953 matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
2955 for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
2956 matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
2961 m_matcher->match(queryDescriptors, matches);
// -- vpKeyPoint::matchPoint(I, rectangle) fragment -----------------------------------------
// NOTE(review): garbled listing fragment (signature and several interior lines elided);
// code kept byte-identical, comments only.
// Guard: matching requires a previously built reference (train descriptors).
2985 if (m_trainDescriptors.empty()) {
2986 std::cerr <<
"Reference is empty." << std::endl;
2988 std::cerr <<
"Reference is not computed." << std::endl;
2990 std::cerr <<
"Matching is not possible." << std::endl;
// ASIFT-style path: detect/extract over simulated affine views, then merge the results.
2995 if (m_useAffineDetection) {
2996 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
2997 std::vector<cv::Mat> listOfQueryDescriptors;
// Concatenate keypoints from every affine view into a single query set.
3003 m_queryKeyPoints.clear();
3004 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3005 it != listOfQueryKeyPoints.end(); ++it) {
3006 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
// Stack per-view descriptor matrices into one query descriptor matrix.
3010 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3014 it->copyTo(m_queryDescriptors);
3017 m_queryDescriptors.push_back(*it);
// Standard path: single detection + extraction on the input image.
3022 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3023 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
// Delegate the actual matching/filtering to the keypoint/descriptor overload.
3026 return matchPoint(m_queryKeyPoints, m_queryDescriptors);
// -- vpKeyPoint::matchPoint(queryKeyPoints, queryDescriptors) fragment ---------------------
// NOTE(review): garbled listing fragment (signature elided); code kept byte-identical.
3031 m_queryKeyPoints = queryKeyPoints;
3032 m_queryDescriptors = queryDescriptors;
3034 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
// Reset the filtered results before (re)building them from m_matches.
3037 m_queryFilteredKeyPoints.clear();
3038 m_objectFilteredPoints.clear();
3039 m_filteredMatches.clear();
3044 if (m_useMatchTrainToQuery) {
3046 m_queryFilteredKeyPoints.clear();
3047 m_filteredMatches.clear();
// Re-index matches so queryIdx runs over the compacted filtered-keypoint vector.
3048 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3049 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3050 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3054 m_queryFilteredKeyPoints = m_queryKeyPoints;
3055 m_filteredMatches = m_matches;
// When 3D training points exist, keep the 3D point of each retained match.
3058 if (!m_trainPoints.empty()) {
3059 m_objectFilteredPoints.clear();
3063 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3065 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
// Return the number of matches kept after filtering.
3074 return static_cast<unsigned int>(m_filteredMatches.size());
// Convenience overload: forwards to the full pose-estimation overload, discarding the
// residual error and elapsed time outputs. (Fragment: signature elided by extraction.)
3086 double error, elapsedTime;
3087 return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
// Convenience overload for color images: forwards to the full pose-estimation overload,
// discarding error and elapsed time. (Fragment: signature elided by extraction.)
3093 double error, elapsedTime;
3094 return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
// -- vpKeyPoint::matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle) fragment -----
// Matches query keypoints against the reference and estimates the object pose by RANSAC.
// NOTE(review): garbled listing fragment (signature and interior lines elided);
// code kept byte-identical, comments only.
3102 if (m_trainDescriptors.empty()) {
3103 std::cerr <<
"Reference is empty." << std::endl;
3105 std::cerr <<
"Reference is not computed." << std::endl;
3107 std::cerr <<
"Matching is not possible." << std::endl;
// ASIFT-style detection over simulated affine views.
3112 if (m_useAffineDetection) {
3113 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3114 std::vector<cv::Mat> listOfQueryDescriptors;
3120 m_queryKeyPoints.clear();
3121 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3122 it != listOfQueryKeyPoints.end(); ++it) {
3123 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3127 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3131 it->copyTo(m_queryDescriptors);
3134 m_queryDescriptors.push_back(*it);
3139 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3140 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3143 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3145 elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
// Rebuild the filtered keypoint/match/3D-point sets from m_matches.
3148 m_queryFilteredKeyPoints.clear();
3149 m_objectFilteredPoints.clear();
3150 m_filteredMatches.clear();
3155 if (m_useMatchTrainToQuery) {
3157 m_queryFilteredKeyPoints.clear();
3158 m_filteredMatches.clear();
3159 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3160 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3161 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3165 m_queryFilteredKeyPoints = m_queryKeyPoints;
3166 m_filteredMatches = m_matches;
3169 if (!m_trainPoints.empty()) {
3170 m_objectFilteredPoints.clear();
3174 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3176 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
// Pose estimation: reset inlier/outlier sets from any previous call.
3188 m_ransacInliers.clear();
3189 m_ransacOutliers.clear();
// Branch 1: ViSP RANSAC + virtual visual servoing on vpPoint correspondences.
3191 if (m_useRansacVVS) {
3192 std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3196 for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3197 it != m_objectFilteredPoints.end(); ++it, cpt++) {
// vpImagePoint takes (row=v=pt.y, col=u=pt.x).
3201 vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3203 double x = 0.0, y = 0.0;
3208 objectVpPoints[cpt] = pt;
3211 std::vector<vpPoint> inliers;
3212 std::vector<unsigned int> inlierIndex;
3214 bool res =
computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3216 std::map<unsigned int, bool> mapOfInlierIndex;
3217 m_matchRansacKeyPointsToPoints.clear();
// Record each RANSAC inlier as a (2D keypoint, 3D point) pair.
3219 for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3220 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3221 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3222 mapOfInlierIndex[*it] =
true;
// Everything not flagged as inlier becomes an outlier image point.
3225 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3226 if (mapOfInlierIndex.find((
unsigned int)i) == mapOfInlierIndex.end()) {
3227 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3231 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3233 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3234 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3235 m_ransacInliers.begin(), matchRansacToVpImage);
3237 elapsedTime += m_poseTime;
// Branch 2: OpenCV-style RANSAC PnP on 2D/3D point vectors.
3242 std::vector<cv::Point2f> imageFilteredPoints;
3243 cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3244 std::vector<int> inlierIndex;
3245 bool res =
computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3247 std::map<int, bool> mapOfInlierIndex;
3248 m_matchRansacKeyPointsToPoints.clear();
3250 for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3251 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3252 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3253 mapOfInlierIndex[*it] =
true;
3256 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3257 if (mapOfInlierIndex.find((
int)i) == mapOfInlierIndex.end()) {
3258 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3262 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3264 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3265 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3266 m_ransacInliers.begin(), matchRansacToVpImage);
3268 elapsedTime += m_poseTime;
// Color-image overload tail: delegates to the grayscale pose-estimation overload using the
// internal converted image m_I. (Fragment: signature and conversion call elided.)
3279 return (
matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
// -- vpKeyPoint::matchPointAndDetect(...) fragment -----------------------------------------
// Matches keypoints and decides whether the object is detected, via homography (planar) or
// fundamental-matrix (non-planar) geometric verification.
// NOTE(review): garbled listing fragment (leading signature tokens elided); code kept
// byte-identical, comments only.
3283 vpImagePoint &centerOfGravity,
const bool isPlanarObject,
3284 std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3285 double *meanDescriptorDistance,
double *detectionScore,
const vpRect &rectangle)
3287 if (imPts1 !=
nullptr && imPts2 !=
nullptr) {
// Mean descriptor distance over the filtered matches; score = #matches / mean distance.
3294 double meanDescriptorDistanceTmp = 0.0;
3295 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3296 meanDescriptorDistanceTmp += (double)it->distance;
3299 meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3300 double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3302 if (meanDescriptorDistance !=
nullptr) {
3303 *meanDescriptorDistance = meanDescriptorDistanceTmp;
// A homography needs at least 4 correspondences.
3309 if (m_filteredMatches.size() >= 4) {
3311 std::vector<cv::Point2f> points1(m_filteredMatches.size());
3313 std::vector<cv::Point2f> points2(m_filteredMatches.size());
// points1 = train image coordinates, points2 = query image coordinates.
3315 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3316 points1[i] = cv::Point2f(m_trainKeyPoints[(
size_t)m_filteredMatches[i].trainIdx].pt);
3317 points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(
size_t)m_filteredMatches[i].queryIdx].pt);
3320 std::vector<vpImagePoint> inliers;
3321 if (isPlanarObject) {
3322 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3323 cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3325 cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
// Keep matches whose homography reprojection error is below 6 pixels.
3328 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3330 cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3331 realPoint.at<
double>(0, 0) = points1[i].x;
3332 realPoint.at<
double>(1, 0) = points1[i].y;
3333 realPoint.at<
double>(2, 0) = 1.f;
3335 cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3336 double err_x = (reprojectedPoint.at<
double>(0, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].x;
3337 double err_y = (reprojectedPoint.at<
double>(1, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].y;
3338 double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3340 if (reprojectionError < 6.0) {
3341 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3342 if (imPts1 !=
nullptr) {
3343 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3346 if (imPts2 !=
nullptr) {
3347 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
// Non-planar object: fundamental matrix needs at least 8 correspondences.
3352 else if (m_filteredMatches.size() >= 8) {
3353 cv::Mat fundamentalInliers;
3354 cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3356 for (
size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
3357 if (fundamentalInliers.at<uchar>((
int)i, 0)) {
3358 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3360 if (imPts1 !=
nullptr) {
3361 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3364 if (imPts2 !=
nullptr) {
3365 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
// Center of gravity of the geometric inliers (mean of u/v coordinates).
3371 if (!inliers.empty()) {
3378 double meanU = 0.0, meanV = 0.0;
3379 for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
3380 meanU += it->get_u();
3381 meanV += it->get_v();
3384 meanU /= (double)inliers.size();
3385 meanV /= (double)inliers.size();
3387 centerOfGravity.
set_u(meanU);
3388 centerOfGravity.
set_v(meanV);
// Detection decision depends on m_detectionMethod (threshold vs score — see ctor defaults).
3397 return meanDescriptorDistanceTmp < m_detectionThreshold;
3400 return score > m_detectionScore;
// -- vpKeyPoint::matchPointAndDetect(...) with pose, fragment ------------------------------
// Runs full matching + pose estimation, then derives the detection center of gravity from
// the RANSAC inliers. NOTE(review): garbled listing fragment; code kept byte-identical.
3409 bool isMatchOk =
matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
// Project the 3D model points to build the model's image footprint.
3414 std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
3416 for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
3420 modelImagePoints[cpt] = imPt;
// Center of gravity = mean (u, v) of the RANSAC inlier image points.
3429 double meanU = 0.0, meanV = 0.0;
3430 for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
3431 meanU += it->get_u();
3432 meanV += it->get_v();
3435 meanU /= (double)m_ransacInliers.size();
3436 meanV /= (double)m_ransacInliers.size();
3438 centerOfGravity.
set_u(meanU);
3439 centerOfGravity.
set_v(meanV);
// -- vpKeyPoint::detectExtractAffine(...) fragment -----------------------------------------
// ASIFT-style detection/extraction: simulates a set of affine views (tilt t, rotation phi),
// detects/extracts on each warped image, then maps keypoints back via the inverse affine Ai.
// Two variants appear below (serial, then an OpenMP-parallel one over precomputed params);
// presumably selected by elided preprocessor conditionals — TODO confirm against upstream.
// NOTE(review): garbled listing fragment; code kept byte-identical, comments only.
3446 std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
3447 std::vector<cv::Mat> &listOfDescriptors,
3453 listOfKeypoints.clear();
3454 listOfDescriptors.clear();
// Tilt levels t = 2^(0.5*tl), tl = 1..5; rotation step shrinks as tilt grows.
3456 for (
int tl = 1; tl < 6; tl++) {
3457 double t = pow(2, 0.5 * tl);
3458 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3459 std::vector<cv::KeyPoint> keypoints;
3460 cv::Mat descriptors;
3462 cv::Mat timg, mask, Ai;
3465 affineSkew(t, phi, timg, mask, Ai);
// Optionally export the masked warped image for visualization/debug.
3468 if (listOfAffineI !=
nullptr) {
3470 bitwise_and(mask, timg, img_disp);
3473 listOfAffineI->push_back(tI);
3477 cv::bitwise_and(mask, timg, img_disp);
3478 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE);
3479 cv::imshow(
"Skew", img_disp);
// Run every configured detector on the warped image, restricted by the warp mask.
3483 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3484 it != m_detectors.end(); ++it) {
3485 std::vector<cv::KeyPoint> kp;
3486 it->second->detect(timg, kp, mask);
3487 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3491 extract(timg, keypoints, descriptors, elapsedTime);
// Map keypoints back to the original image frame with the inverse affine transform Ai.
3493 for (
unsigned int i = 0; i < keypoints.size(); i++) {
3494 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3495 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3496 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3497 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
3500 listOfKeypoints.push_back(keypoints);
3501 listOfDescriptors.push_back(descriptors);
// Parallel variant: precompute the (t, phi) grid so iterations are independent.
3510 std::vector<std::pair<double, int> > listOfAffineParams;
3511 for (
int tl = 1; tl < 6; tl++) {
3512 double t = pow(2, 0.5 * tl);
3513 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3514 listOfAffineParams.push_back(std::pair<double, int>(t, phi));
3518 listOfKeypoints.resize(listOfAffineParams.size());
3519 listOfDescriptors.resize(listOfAffineParams.size());
3521 if (listOfAffineI !=
nullptr) {
3522 listOfAffineI->resize(listOfAffineParams.size());
3525 #ifdef VISP_HAVE_OPENMP
3526 #pragma omp parallel for
3528 for (
int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
3529 std::vector<cv::KeyPoint> keypoints;
3530 cv::Mat descriptors;
3532 cv::Mat timg, mask, Ai;
3535 affineSkew(listOfAffineParams[(
size_t)cpt].first, listOfAffineParams[(
size_t)cpt].second, timg, mask, Ai);
3537 if (listOfAffineI !=
nullptr) {
3539 bitwise_and(mask, timg, img_disp);
3542 (*listOfAffineI)[(size_t)cpt] = tI;
3547 cv::bitwise_and(mask, timg, img_disp);
3548 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE);
3549 cv::imshow(
"Skew", img_disp);
3553 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3554 it != m_detectors.end(); ++it) {
3555 std::vector<cv::KeyPoint> kp;
3556 it->second->detect(timg, kp, mask);
3557 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3561 extract(timg, keypoints, descriptors, elapsedTime);
3563 for (
size_t i = 0; i < keypoints.size(); i++) {
3564 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3565 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3566 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3567 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
// Indexed (not push_back) stores keep results ordered under parallel execution.
3570 listOfKeypoints[(size_t)cpt] = keypoints;
3571 listOfDescriptors[(size_t)cpt] = descriptors;
// -- vpKeyPoint::reset() fragment ----------------------------------------------------------
// Restores members to default values and clears all learned/query data.
// NOTE(review): garbled listing fragment; code kept byte-identical, comments only.
3584 m_computeCovariance =
false;
3586 m_currentImageId = 0;
3588 m_detectionScore = 0.15;
3589 m_detectionThreshold = 100.0;
3590 m_detectionTime = 0.0;
3591 m_detectorNames.clear();
3592 m_detectors.clear();
3593 m_extractionTime = 0.0;
3594 m_extractorNames.clear();
3595 m_extractors.clear();
3596 m_filteredMatches.clear();
3599 m_knnMatches.clear();
3600 m_mapOfImageId.clear();
3601 m_mapOfImages.clear();
3602 m_matcher = cv::Ptr<cv::DescriptorMatcher>();
3603 m_matcherName =
"BruteForce-Hamming";
3605 m_matchingFactorThreshold = 2.0;
3606 m_matchingRatioThreshold = 0.85;
3607 m_matchingTime = 0.0;
3608 m_matchRansacKeyPointsToPoints.clear();
3609 m_nbRansacIterations = 200;
3610 m_nbRansacMinInlierCount = 100;
3611 m_objectFilteredPoints.clear();
3613 m_queryDescriptors = cv::Mat();
3614 m_queryFilteredKeyPoints.clear();
3615 m_queryKeyPoints.clear();
3616 m_ransacConsensusPercentage = 20.0;
3618 m_ransacInliers.clear();
3619 m_ransacOutliers.clear();
// NOTE(review): reset() sets m_ransacParallel to true while the constructor initializes it
// to false — confirm which default is intended.
3620 m_ransacParallel =
true;
3621 m_ransacParallelNbThreads = 0;
3622 m_ransacReprojectionError = 6.0;
3623 m_ransacThreshold = 0.01;
3624 m_trainDescriptors = cv::Mat();
3625 m_trainKeyPoints.clear();
3626 m_trainPoints.clear();
3627 m_trainVpPoints.clear();
3628 m_useAffineDetection =
false;
3629 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
3630 m_useBruteForceCrossCheck =
true;
3632 m_useConsensusPercentage =
false;
3634 m_useMatchTrainToQuery =
false;
3635 m_useRansacVVS =
true;
3636 m_useSingleMatchFilter =
true;
// Default detector/extractor pair: ORB.
3638 m_detectorNames.push_back(
"ORB");
3639 m_extractorNames.push_back(
"ORB");
// -- vpKeyPoint::saveLearningData(...) fragment --------------------------------------------
// Serializes the learned model (training images, keypoints, optional 3D points, and
// descriptors) either as a custom binary file or as pugixml-based XML.
// NOTE(review): garbled listing fragment (original line numbers fused into text, interior
// lines elided); code kept byte-identical, comments only.
3647 if (!parent.empty()) {
// Optionally dump each training image to disk and remember its relative path per image id.
3651 std::map<int, std::string> mapOfImgPath;
3652 if (saveTrainingImages) {
3653 #ifdef VISP_HAVE_MODULE_IO
3655 unsigned int cpt = 0;
3657 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3663 std::stringstream ss;
3664 ss <<
"train_image_" << std::setfill(
'0') << std::setw(3) << cpt;
// File extension depends on the configured m_imageFormat (cases elided by extraction).
3666 switch (m_imageFormat) {
3688 std::string imgFilename = ss.str();
3689 mapOfImgPath[it->first] = imgFilename;
3690 vpImageIo::write(it->second, parent + (!parent.empty() ?
"/" :
"") + imgFilename);
3693 std::cout <<
"Warning: in vpKeyPoint::saveLearningData() training images "
3694 "are not saved because "
3695 "visp_io module is not available !"
// 3D info is saved only when every keypoint has a matching 3D point.
3700 bool have3DInfo = m_trainPoints.size() > 0;
3701 if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
// ---------- Binary format ----------
3707 std::ofstream file(filename.c_str(), std::ofstream::binary);
3708 if (!file.is_open()) {
3713 int nbImgs = (int)mapOfImgPath.size();
3716 #ifdef VISP_HAVE_MODULE_IO
3717 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
// Image path written as <length><chars>.
3723 std::string path = it->second;
3724 int length = (int)path.length();
3727 for (
int cpt = 0; cpt < length; cpt++) {
3728 file.write((
char *)(&path[(
size_t)cpt]),
sizeof(path[(
size_t)cpt]));
3734 int have3DInfoInt = have3DInfo ? 1 : 0;
3737 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3738 int descriptorType = m_trainDescriptors.type();
// One record per keypoint: u, v, size, angle, response, octave, class_id, image_id,
// optional (oX, oY, oZ), then the descriptor row.
3749 for (
int i = 0; i < nRows; i++) {
3750 unsigned int i_ = (
unsigned int)i;
3752 float u = m_trainKeyPoints[i_].pt.x;
3756 float v = m_trainKeyPoints[i_].pt.y;
3760 float size = m_trainKeyPoints[i_].size;
3764 float angle = m_trainKeyPoints[i_].angle;
3768 float response = m_trainKeyPoints[i_].response;
3772 int octave = m_trainKeyPoints[i_].octave;
3776 int class_id = m_trainKeyPoints[i_].class_id;
3780 #ifdef VISP_HAVE_MODULE_IO
3781 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3782 int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
3791 float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
// Descriptor values written element-wise according to the OpenCV matrix depth.
3802 for (
int j = 0; j < nCols; j++) {
3804 switch (descriptorType) {
3806 file.write((
char *)(&m_trainDescriptors.at<
unsigned char>(i, j)),
3807 sizeof(m_trainDescriptors.at<
unsigned char>(i, j)));
3811 file.write((
char *)(&m_trainDescriptors.at<
char>(i, j)),
sizeof(m_trainDescriptors.at<
char>(i, j)));
// ---------- XML format (pugixml) ----------
3844 #if defined(VISP_HAVE_PUGIXML)
3845 pugi::xml_document doc;
3846 pugi::xml_node node = doc.append_child(pugi::node_declaration);
3847 node.append_attribute(
"version") =
"1.0";
3848 node.append_attribute(
"encoding") =
"UTF-8";
3854 pugi::xml_node root_node = doc.append_child(
"LearningData");
3857 pugi::xml_node image_node = root_node.append_child(
"TrainingImageInfo");
3859 #ifdef VISP_HAVE_MODULE_IO
3860 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3861 pugi::xml_node image_info_node = image_node.append_child(
"trainImg");
3862 image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
3863 std::stringstream ss;
3865 image_info_node.append_attribute(
"image_id") = ss.str().c_str();
// Global descriptor matrix metadata (rows, cols, OpenCV type id).
3870 pugi::xml_node descriptors_info_node = root_node.append_child(
"DescriptorsInfo");
3872 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3873 int descriptorType = m_trainDescriptors.type();
3876 descriptors_info_node.append_child(
"nrows").append_child(pugi::node_pcdata).text() = nRows;
3879 descriptors_info_node.append_child(
"ncols").append_child(pugi::node_pcdata).text() = nCols;
3882 descriptors_info_node.append_child(
"type").append_child(pugi::node_pcdata).text() = descriptorType;
// One <DescriptorInfo> element per keypoint.
3884 for (
int i = 0; i < nRows; i++) {
3885 unsigned int i_ = (
unsigned int)i;
3886 pugi::xml_node descriptor_node = root_node.append_child(
"DescriptorInfo");
3888 descriptor_node.append_child(
"u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
3889 descriptor_node.append_child(
"v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
3890 descriptor_node.append_child(
"size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
3891 descriptor_node.append_child(
"angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
3892 descriptor_node.append_child(
"response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
3893 descriptor_node.append_child(
"octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
3894 descriptor_node.append_child(
"class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
3896 #ifdef VISP_HAVE_MODULE_IO
3897 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3898 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() =
3899 ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
3901 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() = -1;
3905 descriptor_node.append_child(
"oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
3906 descriptor_node.append_child(
"oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
3907 descriptor_node.append_child(
"oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
3910 pugi::xml_node desc_node = descriptor_node.append_child(
"desc");
// Descriptor values as <val> children, typed by the OpenCV matrix depth.
3912 for (
int j = 0; j < nCols; j++) {
3913 switch (descriptorType) {
3919 int val_tmp = m_trainDescriptors.at<
unsigned char>(i, j);
3920 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
3928 int val_tmp = m_trainDescriptors.at<
char>(i, j);
3929 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
3933 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
3934 m_trainDescriptors.at<
unsigned short int>(i, j);
3938 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
short int>(i, j);
3942 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
int>(i, j);
3946 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
float>(i, j);
3950 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
double>(i, j);
// Pretty-print with a single-space indent, UTF-8.
3960 doc.save_file(filename.c_str(), PUGIXML_TEXT(
" "), pugi::format_default, pugi::encoding_utf8);
3967 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
3968 #ifndef DOXYGEN_SHOULD_SKIP_THIS
3970 struct KeypointResponseGreaterThanThreshold
3972 KeypointResponseGreaterThanThreshold(
float _value) : value(_value) { }
3973 inline bool operator()(
const cv::KeyPoint &kpt)
const {
return kpt.response >= value; }
3977 struct KeypointResponseGreater
3979 inline bool operator()(
const cv::KeyPoint &kp1,
const cv::KeyPoint &kp2)
const {
return kp1.response > kp2.response; }
3983 void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints,
int n_points)
3987 if (n_points >= 0 && keypoints.size() > (
size_t)n_points) {
3988 if (n_points == 0) {
3994 std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
3996 float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
3999 std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4000 keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
4003 keypoints.resize((
size_t)(new_end - keypoints.begin()));
4009 RoiPredicate(
const cv::Rect &_r) : r(_r) { }
4011 bool operator()(
const cv::KeyPoint &keyPt)
const {
return !r.contains(keyPt.pt); }
4016 void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4019 if (borderSize > 0) {
4020 if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4023 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4024 RoiPredicate(cv::Rect(
4025 cv::Point(borderSize, borderSize),
4026 cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4031 struct SizePredicate
4033 SizePredicate(
float _minSize,
float _maxSize) : minSize(_minSize), maxSize(_maxSize) { }
4035 bool operator()(
const cv::KeyPoint &keyPt)
const
4037 float size = keyPt.size;
4038 return (size < minSize) || (size > maxSize);
4041 float minSize, maxSize;
4044 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints,
float minSize,
float maxSize)
4046 CV_Assert(minSize >= 0);
4047 CV_Assert(maxSize >= 0);
4048 CV_Assert(minSize <= maxSize);
4050 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4056 MaskPredicate(
const cv::Mat &_mask) : mask(_mask) { }
4057 bool operator()(
const cv::KeyPoint &key_pt)
const
4059 return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
4066 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints,
const cv::Mat &mask)
4071 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
// Strict-weak-ordering comparator over keypoint INDICES, used by
// removeDuplicated() to sort indices so that duplicate keypoints become
// adjacent. Comparison cascades through position, size, angle, response,
// octave and class id.
// NOTE(review): lossy code dump — braces and the leading
// `if (!vpMath::equal(...,` half of each float comparison were dropped by the
// extraction (only the `std::numeric_limits<float>::epsilon())) {` tails
// remain), as was the final tie-break return. Tokens below are kept verbatim.
4074 struct KeyPoint_LessThan
// Stores a pointer to the keypoint vector; indices passed to operator()
// refer into it.
4076 KeyPoint_LessThan(
const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) { }
4077 bool operator()(
size_t i,
size_t j)
const
4079 const cv::KeyPoint &kp1 = (*kp)[ i];
4080 const cv::KeyPoint &kp2 = (*kp)[ j];
// 1) x position, ascending (epsilon-tolerant float inequality).
4082 std::numeric_limits<float>::epsilon())) {
4084 return kp1.pt.x < kp2.pt.x;
// 2) y position, ascending.
4088 std::numeric_limits<float>::epsilon())) {
4090 return kp1.pt.y < kp2.pt.y;
// 3) size, DESCENDING — bigger keypoint wins among co-located ones.
4094 std::numeric_limits<float>::epsilon())) {
4096 return kp1.size > kp2.size;
// 4) angle, ascending.
4100 std::numeric_limits<float>::epsilon())) {
4102 return kp1.angle < kp2.angle;
// 5) response, DESCENDING — stronger keypoint wins.
4106 std::numeric_limits<float>::epsilon())) {
4108 return kp1.response > kp2.response;
// 6) octave, descending.
4111 if (kp1.octave != kp2.octave) {
4112 return kp1.octave > kp2.octave;
// 7) class id, descending.
4115 if (kp1.class_id != kp2.class_id) {
4116 return kp1.class_id > kp2.class_id;
// Non-owning pointer to the vector being sorted by index.
4121 const std::vector<cv::KeyPoint> *kp;
// Remove duplicated keypoints (same position, size and angle up to float
// epsilon), compacting the vector in place.
// NOTE(review): lossy code dump — braces and several interior statements
// (index initialization in the first loop, the duplicate-marking else-branch,
// and the mask test guarding the compaction copy) were dropped by the
// extraction. Tokens below are kept verbatim.
4124 void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4126 size_t i, j, n = keypoints.size();
// kpidx: indices 0..n-1 to be sorted; mask: 1 = keep, 0 = duplicate.
4127 std::vector<size_t> kpidx(n);
4128 std::vector<uchar> mask(n, (uchar)1);
4130 for (i = 0; i < n; i++) {
// Sort indices so duplicates become adjacent (see KeyPoint_LessThan).
4133 std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
// Sweep adjacent pairs: j tracks the last unique keypoint.
4134 for (i = 1, j = 0; i < n; i++) {
4135 cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4136 cv::KeyPoint &kp2 = keypoints[kpidx[j]];
// A keypoint is "new" if it differs in x, y, size or angle beyond epsilon.
4139 if (!
vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4140 !
vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4141 !
vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4142 !
vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
// Compaction pass: copy surviving keypoints to the front, then shrink.
4150 for (i = j = 0; i < n; i++) {
4153 keypoints[j] = keypoints[i];
4158 keypoints.resize(j);
// Constructor: wraps an existing OpenCV feature detector so that detection is
// run over an image pyramid (see detectImpl()).
// NOTE(review): lossy code dump — the second parameter line (presumably
// `int maxLevel)` given the m_maxLevel(maxLevel) initializer — TODO confirm)
// and the empty body braces were dropped by the extraction.
4164 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(
const cv::Ptr<cv::FeatureDetector> &detector,
4166 : m_detector(detector), m_maxLevel(maxLevel)
4169 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty()
const
4171 return m_detector.empty() || (cv::FeatureDetector *)m_detector->empty();
4174 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4175 CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4177 detectImpl(image.getMat(), keypoints, mask.getMat());
// Run the wrapped detector on every level of an image pyramid (levels
// 0..m_maxLevel, each half the resolution of the previous) and rescale the
// detected keypoints back to level-0 coordinates.
// NOTE(review): lossy code dump — braces and the per-level downsampling
// statements inside `if (l < m_maxLevel)` (presumably a pyrDown/resize of
// `src` — TODO confirm against upstream) were dropped by the extraction.
// Tokens below are kept verbatim.
4180 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(
const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4181 const cv::Mat &mask)
const
// Working copies for the current pyramid level.
4183 cv::Mat src = image;
4184 cv::Mat src_mask = mask;
// Dilate the user mask so keypoints near the mask border survive the
// coarse-level detection, then binarize it to {0, 255}.
4186 cv::Mat dilated_mask;
4187 if (!mask.empty()) {
4188 cv::dilate(mask, dilated_mask, cv::Mat());
4189 cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4190 mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4191 dilated_mask = mask255;
// `multiplier` is 2^l: the scale factor from level l back to level 0.
4194 for (
int l = 0, multiplier = 1; l <= m_maxLevel; ++l, multiplier *= 2) {
4196 std::vector<cv::KeyPoint> new_pts;
4197 m_detector->detect(src, new_pts, src_mask);
// Rescale position and size of every keypoint to level-0 coordinates.
4198 std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4199 for (; it != end; ++it) {
4200 it->pt.x *= multiplier;
4201 it->pt.y *= multiplier;
4202 it->size *= multiplier;
4205 keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
// Prepare the next (coarser) level while there is one.
4208 if (l < m_maxLevel) {
// NOTE(review): CV_INTER_AREA is the legacy C-API constant
// (cv::INTER_AREA in modern OpenCV).
4214 resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
// Final pass with the ORIGINAL (undilated) mask: discard keypoints that the
// dilated per-level masks let through.
4219 vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4226 #elif !defined(VISP_BUILD_SHARED_LIBS)
4228 void dummy_vpKeyPoint() { };
std::vector< unsigned int > m_matchedReferencePoints
std::vector< vpImagePoint > m_currentImagePointsList
bool m_reference_computed
std::vector< vpImagePoint > m_referenceImagePointsList
Generic class defining intrinsic camera parameters.
Implementation of column vector and the associated operations.
Class to define RGB colors available for display functionalities.
static const vpColor none
static const vpColor green
static void convertFromOpenCV(const cv::KeyPoint &from, vpImagePoint &to)
static void displayCircle(const vpImage< unsigned char > &I, const vpImageCircle &circle, const vpColor &color, bool fill=false, unsigned int thickness=1)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
error that can be emitted by ViSP classes.
@ badValue
Used to indicate that a value is not in the allowed range.
const char * what() const
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpHomogeneousMatrix inverse() const
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
static void write(const vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
void set_ij(double ii, double jj)
unsigned int getWidth() const
void insert(const vpImage< Type > &src, const vpImagePoint &topLeft)
unsigned int getHeight() const
unsigned int matchPoint(const vpImage< unsigned char > &I)
void getTrainKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpCylinder > &cylinders, const std::vector< std::vector< std::vector< vpImagePoint > > > &vectorOfCylinderRois, std::vector< cv::Point3f > &points, cv::Mat *descriptors=nullptr)
void detectExtractAffine(const vpImage< unsigned char > &I, std::vector< std::vector< cv::KeyPoint > > &listOfKeypoints, std::vector< cv::Mat > &listOfDescriptors, std::vector< vpImage< unsigned char > > *listOfAffineI=nullptr)
void initMatcher(const std::string &matcherName)
vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType, const std::string &matcherName, const vpFilterMatchingType &filterType=ratioDistanceThreshold)
void display(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, unsigned int size=3)
void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector< cv::DMatch > &matches, double &elapsedTime)
static void compute3D(const cv::KeyPoint &candidate, const std::vector< vpPoint > &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
@ DETECTOR_KAZE
KAZE detector.
@ DETECTOR_BRISK
BRISK detector.
@ DETECTOR_AKAZE
AKAZE detector.
@ DETECTOR_MSER
MSER detector.
@ DETECTOR_SURF
SURF detector.
@ DETECTOR_AGAST
AGAST detector.
@ DETECTOR_SIFT
SIFT detector.
@ DETECTOR_FAST
FAST detector.
@ DETECTOR_GFTT
GFTT detector.
@ DETECTOR_ORB
ORB detector.
@ DETECTOR_SimpleBlob
SimpleBlob detector.
void createImageMatching(vpImage< unsigned char > &IRef, vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
void loadLearningData(const std::string &filename, bool binaryMode=false, bool append=false)
bool computePose(const std::vector< cv::Point2f > &imagePoints, const std::vector< cv::Point3f > &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector< int > &inlierIndex, double &elapsedTime, bool(*func)(const vpHomogeneousMatrix &)=nullptr)
void extract(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, cv::Mat &descriptors, std::vector< cv::Point3f > *trainPoints=nullptr)
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=nullptr)
bool matchPointAndDetect(const vpImage< unsigned char > &I, vpRect &boundingBox, vpImagePoint ¢erOfGravity, const bool isPlanarObject=true, std::vector< vpImagePoint > *imPts1=nullptr, std::vector< vpImagePoint > *imPts2=nullptr, double *meanDescriptorDistance=nullptr, double *detectionScore=nullptr, const vpRect &rectangle=vpRect())
@ DESCRIPTOR_AKAZE
AKAZE descriptor.
@ DESCRIPTOR_ORB
ORB descriptor.
@ DESCRIPTOR_KAZE
KAZE descriptor.
@ DESCRIPTOR_SURF
SURF descriptor.
@ DESCRIPTOR_BRISK
BRISK descriptor.
@ DESCRIPTOR_SIFT
SIFT descriptor.
void getQueryKeyPoints(std::vector< cv::KeyPoint > &keyPoints, bool matches=true) const
void saveLearningData(const std::string &filename, bool binaryMode=false, bool saveTrainingImages=true)
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
void displayMatching(const vpImage< unsigned char > &IRef, vpImage< unsigned char > &IMatching, unsigned int crossSize, unsigned int lineThickness=1, const vpColor &color=vpColor::green)
unsigned int buildReference(const vpImage< unsigned char > &I)
void loadConfigFile(const std::string &configFile)
void getTrainPoints(std::vector< cv::Point3f > &points) const
void getObjectPoints(std::vector< cv::Point3f > &objectPoints) const
void insertImageMatching(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
static bool isNaN(double value)
static bool equal(double x, double y, double threshold=0.001)
static int round(double x)
Implementation of a matrix and operations on matrices.
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
This class defines the container for a plane geometrical structure.
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
double get_oX() const
Get the point oX coordinate in the object frame.
void set_x(double x)
Set the point x coordinate in the image plane.
double get_y() const
Get the point y coordinate in the image plane.
double get_oZ() const
Get the point oZ coordinate in the object frame.
void set_oY(double oY)
Set the point oY coordinate in the object frame.
double get_x() const
Get the point x coordinate in the image plane.
void set_oZ(double oZ)
Set the point oZ coordinate in the object frame.
void set_oX(double oX)
Set the point oX coordinate in the object frame.
double get_oY() const
Get the point oY coordinate in the object frame.
void setWorldCoordinates(double oX, double oY, double oZ)
void set_y(double y)
Set the point y coordinate in the image plane.
Defines a generic 2D polygon.
vpRect getBoundingBox() const
bool isInside(const vpImagePoint &iP, const PointInPolygonMethod &method=PnPolyRayCasting) const
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
void setRansacMaxTrials(const int &rM)
void addPoint(const vpPoint &P)
void setRansacNbInliersToReachConsensus(const unsigned int &nbC)
vpMatrix getCovarianceMatrix() const
void setCovarianceComputation(const bool &flag)
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, FuncCheckValidityPose func=nullptr)
std::vector< unsigned int > getRansacInlierIndex() const
void setRansacFilterFlag(const RANSAC_FILTER_FLAGS &flag)
@ NO_FILTER
No filter is applied.
void setUseParallelRansac(bool use)
std::vector< vpPoint > getRansacInliers() const
void setNbParallelRansacThreads(int nb)
void setRansacThreshold(const double &t)
Defines a rectangle in the plane.
Implementation of a rotation vector as axis-angle minimal representation.
Class that consider the case of a translation vector.
std::string getDetectorName() const
double getMatchingRatioThreshold() const
std::string getExtractorName() const
void parse(const std::string &filename)
double getRansacReprojectionError() const
bool getUseRansacVVSPoseEstimation() const
double getMatchingFactorThreshold() const
int getNbRansacMinInlierCount() const
bool getUseRansacConsensusPercentage() const
std::string getMatcherName() const
int getNbRansacIterations() const
double getRansacConsensusPercentage() const
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
vpMatchingMethodEnum getMatchingMethod() const
double getRansacThreshold() const
VISP_EXPORT double measureTimeMs()