42 #include <visp3/core/vpIoTools.h>
43 #include <visp3/vision/vpKeyPoint.h>
45 #if (VISP_HAVE_OPENCV_VERSION >= 0x020101)
47 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
48 #include <opencv2/calib3d/calib3d.hpp>
51 #include <pugixml.hpp>
// Helper used to reduce a k-NN match list to its single best (first) match.
// NOTE(review): elided extraction — the return statements are not visible
// in this chunk; presumably returns knnMatches[0] when non-empty.
56 inline cv::DMatch knnToDMatch(
const std::vector<cv::DMatch> &knnMatches)
// Guard against an empty k-NN list before taking the front element.
58 if (knnMatches.size() > 0) {
// Helper converting a (cv::KeyPoint, cv::Point3f) RANSAC match pair into a
// vpImagePoint. NOTE(review): body elided in this extraction — presumably
// builds the point from pair.first.pt; confirm against full source.
65 inline vpImagePoint matchRansacToVpImage(
const std::pair<cv::KeyPoint, cv::Point3f> &pair)
83 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
84 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
85 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
86 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
87 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
88 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
89 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
90 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
91 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
92 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
93 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
94 m_useBruteForceCrossCheck(true),
96 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
97 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
101 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
102 m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);
118 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
119 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
120 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
121 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
122 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
123 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
124 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
125 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
126 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
127 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
128 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
129 m_useBruteForceCrossCheck(true),
131 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
132 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
136 m_detectorNames.push_back(detectorName);
137 m_extractorNames.push_back(extractorName);
153 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
154 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
155 m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
156 m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
157 m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
158 m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
159 m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
160 m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0),
161 m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(), m_ransacParallel(false),
162 m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01), m_trainDescriptors(),
163 m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
164 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
165 m_useBruteForceCrossCheck(true),
167 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
168 m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
// Applies an affine "skew" simulation (ASIFT-style) to img: an in-plane
// rotation by phi followed by a tilt (anisotropic scaling) along x. Outputs
// the warped image in place, a validity mask, and the inverse affine
// transform Ai so detected keypoints can be mapped back to the original
// image. NOTE(review): elided extraction — w/h initialization and several
// lines are missing; comments describe only the visible statements.
182 void vpKeyPoint::affineSkew(
double tilt,
double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
// Start with a fully-valid mask and an identity 2x3 affine matrix.
187 mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));
189 cv::Mat A = cv::Mat::eye(2, 3, CV_32F);
// Rotation step: only applied when phi is non-zero (epsilon comparison
// instead of == for floating point).
192 if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
197 A = (cv::Mat_<float>(2, 2) << c, -s, s, c);
// Rotate the four image corners to compute the bounding box of the
// rotated image.
199 cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
200 cv::Mat tcorners = corners * A.t();
201 cv::Mat tcorners_x, tcorners_y;
202 tcorners.col(0).copyTo(tcorners_x);
203 tcorners.col(1).copyTo(tcorners_y);
204 std::vector<cv::Mat> channels;
205 channels.push_back(tcorners_x);
206 channels.push_back(tcorners_y);
207 cv::merge(channels, tcorners);
209 cv::Rect rect = cv::boundingRect(tcorners);
// Add a translation so the rotated image starts at (0,0) of the output.
210 A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
212 cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
// Tilt step: anti-alias blur along x, then shrink the x axis by 1/tilt.
215 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
// 0.8*sqrt(tilt^2-1) is the blur sigma used before subsampling
// (value taken from the ASIFT method — presumably; confirm in paper).
216 double s = 0.8 * sqrt(tilt * tilt - 1);
217 cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
218 cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
// Keep A consistent with the x-axis resize applied to the image.
219 A.row(0) = A.row(0) / tilt;
// If any warp was applied, warp the mask the same way so out-of-image areas
// are marked invalid, then store the inverse transform in Ai.
222 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
223 std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
226 cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
228 cv::invertAffineTransform(A, Ai);
288 m_trainPoints.clear();
289 m_mapOfImageId.clear();
290 m_mapOfImages.clear();
291 m_currentImageId = 1;
293 if (m_useAffineDetection) {
294 std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
295 std::vector<cv::Mat> listOfTrainDescriptors;
301 m_trainKeyPoints.clear();
302 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
303 it != listOfTrainKeyPoints.end(); ++it) {
304 m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
308 for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
312 it->copyTo(m_trainDescriptors);
314 m_trainDescriptors.push_back(*it);
318 detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
319 extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
324 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
325 m_mapOfImageId[it->class_id] = m_currentImageId;
329 m_mapOfImages[m_currentImageId] = I;
338 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
340 return static_cast<unsigned int>(m_trainKeyPoints.size());
368 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
370 cv::Mat trainDescriptors;
372 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
374 extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
376 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
380 std::map<size_t, size_t> mapOfKeypointHashes;
382 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
384 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
387 std::vector<cv::Point3f> trainPoints_tmp;
388 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
389 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
390 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
395 points3f = trainPoints_tmp;
398 return (
buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
413 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
415 cv::Mat trainDescriptors;
417 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
419 extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
421 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
425 std::map<size_t, size_t> mapOfKeypointHashes;
427 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
429 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
432 std::vector<cv::Point3f> trainPoints_tmp;
433 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
434 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
435 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
440 points3f = trainPoints_tmp;
443 return (
buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id));
// Builds the reference (train) database from already-detected keypoints,
// their descriptors, and the associated 3D object points. Returns the total
// number of reference keypoints. NOTE(review): elided extraction — the
// append/non-append branch structure is only partially visible here.
460 const std::vector<cv::KeyPoint> &trainKeyPoints,
461 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
462 bool append,
int class_id)
// Non-append path (presumably guarded by !append on an elided line):
// reset all training state before inserting the new reference data.
465 m_trainPoints.clear();
466 m_mapOfImageId.clear();
467 m_mapOfImages.clear();
468 m_currentImageId = 0;
469 m_trainKeyPoints.clear();
// Work on a copy so the caller's keypoint list is not modified.
474 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
// Optionally force a common class id on every keypoint.
476 if (class_id != -1) {
477 for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
478 it->class_id = class_id;
// Map each keypoint class id to the current training image id so matches
// can later be traced back to their source image.
484 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
486 m_mapOfImageId[it->class_id] = m_currentImageId;
490 m_mapOfImages[m_currentImageId] = I;
493 m_trainKeyPoints.insert(m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
// First call copies the descriptors; append calls stack new rows onto the
// existing descriptor matrix (branch guard elided).
495 trainDescriptors.copyTo(m_trainDescriptors);
497 m_trainDescriptors.push_back(trainDescriptors);
499 this->m_trainPoints.insert(m_trainPoints.end(), points3f.begin(), points3f.end());
// Register the accumulated descriptors with the matcher.
507 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
511 return static_cast<unsigned int>(m_trainKeyPoints.size());
527 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
528 bool append,
int class_id)
531 return (
buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
553 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
560 vpPlane Po(pts[0], pts[1], pts[2]);
561 double xc = 0.0, yc = 0.0;
572 point_obj = cMo.
inverse() * point_cam;
573 point = cv::Point3f((
float)point_obj[0], (
float)point_obj[1], (
float)point_obj[2]);
595 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
602 vpPlane Po(pts[0], pts[1], pts[2]);
603 double xc = 0.0, yc = 0.0;
614 point_obj = cMo.
inverse() * point_cam;
635 std::vector<cv::KeyPoint> &candidates,
636 const std::vector<vpPolygon> &polygons,
637 const std::vector<std::vector<vpPoint> > &roisPt,
638 std::vector<cv::Point3f> &points, cv::Mat *descriptors)
640 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
647 std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
648 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
649 pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
653 std::vector<vpPolygon> polygons_tmp = polygons;
654 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
655 std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
657 while (it2 != pairOfCandidatesToCheck.end()) {
658 imPt.
set_ij(it2->first.pt.y, it2->first.pt.x);
659 if (it1->isInside(imPt)) {
660 candidates.push_back(it2->first);
662 points.push_back(pt);
664 if (descriptors != NULL) {
665 desc.push_back(descriptors->row((
int)it2->second));
669 it2 = pairOfCandidatesToCheck.erase(it2);
676 if (descriptors != NULL) {
677 desc.copyTo(*descriptors);
698 std::vector<vpImagePoint> &candidates,
699 const std::vector<vpPolygon> &polygons,
700 const std::vector<std::vector<vpPoint> > &roisPt,
701 std::vector<vpPoint> &points, cv::Mat *descriptors)
703 std::vector<vpImagePoint> candidatesToCheck = candidates;
709 std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
710 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
711 pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
715 std::vector<vpPolygon> polygons_tmp = polygons;
716 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
717 std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
719 while (it2 != pairOfCandidatesToCheck.end()) {
720 if (it1->isInside(it2->first)) {
721 candidates.push_back(it2->first);
723 points.push_back(pt);
725 if (descriptors != NULL) {
726 desc.push_back(descriptors->row((
int)it2->second));
730 it2 = pairOfCandidatesToCheck.erase(it2);
755 const std::vector<vpCylinder> &cylinders,
756 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
757 cv::Mat *descriptors)
759 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
765 size_t cpt_keypoint = 0;
766 for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
767 ++it1, cpt_keypoint++) {
768 size_t cpt_cylinder = 0;
771 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
772 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
775 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
777 candidates.push_back(*it1);
781 double xm = 0.0, ym = 0.0;
783 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
785 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
787 point_cam[0] = xm * Z;
788 point_cam[1] = ym * Z;
792 point_obj = cMo.
inverse() * point_cam;
795 points.push_back(cv::Point3f((
float)pt.
get_oX(), (
float)pt.
get_oY(), (
float)pt.
get_oZ()));
797 if (descriptors != NULL) {
798 desc.push_back(descriptors->row((
int)cpt_keypoint));
808 if (descriptors != NULL) {
809 desc.copyTo(*descriptors);
830 const std::vector<vpCylinder> &cylinders,
831 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
832 cv::Mat *descriptors)
834 std::vector<vpImagePoint> candidatesToCheck = candidates;
840 size_t cpt_keypoint = 0;
841 for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
842 ++it1, cpt_keypoint++) {
843 size_t cpt_cylinder = 0;
846 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
847 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
850 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
852 candidates.push_back(*it1);
856 double xm = 0.0, ym = 0.0;
858 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
860 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
862 point_cam[0] = xm * Z;
863 point_cam[1] = ym * Z;
867 point_obj = cMo.
inverse() * point_cam;
870 points.push_back(pt);
872 if (descriptors != NULL) {
873 desc.push_back(descriptors->row((
int)cpt_keypoint));
883 if (descriptors != NULL) {
884 desc.copyTo(*descriptors);
// Pose estimation from 2D-3D correspondences via OpenCV solvePnPRansac.
// NOTE(review): elided extraction — the enclosing try block, the return
// statements and the cameraMatrix construction are only partially visible.
// At least 4 point pairs are required by solvePnP.
907 if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
909 std::cerr <<
"Not enough points to compute the pose (at least 4 points "
// Camera matrix built from the ViSP camera parameters; distortion is
// passed as all-zero coefficients (presumably the image points are already
// undistorted — confirm against caller).
916 cv::Mat cameraMatrix =
926 cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);
// OpenCV >= 3.0 signature: takes confidence and fills inlierIndex, with an
// explicit iterative PnP method.
929 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
931 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
932 (
float)m_ransacReprojectionError,
935 inlierIndex, cv::SOLVEPNP_ITERATIVE);
// Pre-3.0 signature: expects the raw inlier count needed to reach
// consensus, optionally derived from a percentage of the query keypoints.
955 int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
956 if (m_useConsensusPercentage) {
957 nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
960 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
961 (
float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
963 }
catch (cv::Exception &e) {
964 std::cerr << e.what() << std::endl;
// Convert OpenCV's rvec (theta-u rotation) / tvec into ViSP types to build
// the homogeneous pose matrix (assembly line elided).
968 vpTranslationVector translationVec(tvec.at<
double>(0), tvec.at<
double>(1), tvec.at<
double>(2));
969 vpThetaUVector thetaUVector(rvec.at<
double>(0), rvec.at<
double>(1), rvec.at<
double>(2));
// Convenience overload: forwards to the full RANSAC pose computation with a
// local inlier-index vector that is discarded (caller only wants inliers).
998 std::vector<vpPoint> &inliers,
double &elapsedTime,
1001 std::vector<unsigned int> inlierIndex;
1002 return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
// Pose estimation using ViSP's vpPose RANSAC implementation. Returns true on
// success. NOTE(review): elided extraction — the vpPose setup, the RANSAC
// call, and exception handling are only fragmentarily visible.
1019 std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
double &elapsedTime,
// At least 4 points are required for pose computation.
1024 if (objectVpPoints.size() < 4) {
1034 for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
// Consensus threshold: absolute count, or a percentage of the filtered
// query keypoints when m_useConsensusPercentage is set.
1038 unsigned int nbInlierToReachConsensus = (
unsigned int)m_nbRansacMinInlierCount;
1039 if (m_useConsensusPercentage) {
1040 nbInlierToReachConsensus =
1041 (
unsigned int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
1051 bool isRansacPoseEstimationOk =
false;
// Optionally compute the pose covariance matrix after estimation.
1058 if (m_computeCovariance) {
1062 std::cerr <<
"e=" << e.
what() << std::endl;
1080 return isRansacPoseEstimationOk;
// Returns the mean reprojection error (in pixels) of the matched keypoints
// under the estimated pose. NOTE(review): elided extraction — the projection
// of each 3D point into (u, v) happens on lines not visible here.
1097 double vpKeyPoint::computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
// Guard: no matches means the mean error is undefined (early return elided).
1100 if (matchKeyPoints.size() == 0) {
1106 std::vector<double> errors(matchKeyPoints.size());
1109 for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
1110 it != matchKeyPoints.end(); ++it, cpt++) {
1115 double u = 0.0, v = 0.0;
// Euclidean distance between the projected point (u, v) and the detected
// keypoint location.
1117 errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
// Mean of the per-point reprojection errors.
1120 return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
1172 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
1174 if (m_mapOfImages.empty()) {
1175 std::cerr <<
"There is no training image loaded !" << std::endl;
1185 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
1188 unsigned int nbWidth = nbImgSqrt;
1190 unsigned int nbHeight = nbImgSqrt;
1193 if (nbImgSqrt * nbImgSqrt < nbImg) {
1197 unsigned int maxW = ICurrent.
getWidth();
1198 unsigned int maxH = ICurrent.
getHeight();
1199 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1201 if (maxW < it->second.getWidth()) {
1202 maxW = it->second.getWidth();
1205 if (maxH < it->second.getHeight()) {
1206 maxH = it->second.getHeight();
1227 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
1229 if (m_mapOfImages.empty()) {
1230 std::cerr <<
"There is no training image loaded !" << std::endl;
1240 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
1243 unsigned int nbWidth = nbImgSqrt;
1245 unsigned int nbHeight = nbImgSqrt;
1248 if (nbImgSqrt * nbImgSqrt < nbImg) {
1252 unsigned int maxW = ICurrent.
getWidth();
1253 unsigned int maxH = ICurrent.
getHeight();
1254 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1256 if (maxW < it->second.getWidth()) {
1257 maxW = it->second.getWidth();
1260 if (maxH < it->second.getHeight()) {
1261 maxH = it->second.getHeight();
1279 detect(I, keyPoints, elapsedTime, rectangle);
1292 detect(I_color, keyPoints, elapsedTime, rectangle);
1303 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
const cv::Mat &mask)
1306 detect(matImg, keyPoints, elapsedTime, mask);
// Detection restricted to a rectangular ROI: builds a binary mask (255 inside
// the rectangle, 0 outside) and delegates to the mask-based detect overload.
// NOTE(review): the guard choosing between the ROI branch and the full-image
// branch is elided in this extraction.
1322 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1325 cv::Point leftTop((
int)rectangle.
getLeft(), (
int)rectangle.
getTop()),
// Fill the ROI with 255 so only keypoints inside are kept.
1327 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
// No usable ROI: accept keypoints over the whole image.
1329 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1332 detect(matImg, keyPoints, elapsedTime, mask);
1348 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1351 cv::Point leftTop((
int)rectangle.
getLeft(), (
int)rectangle.
getTop()),
1353 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1355 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1358 detect(matImg, keyPoints, elapsedTime, mask);
// Core detection: runs every configured OpenCV feature detector on matImg
// (restricted by mask) and concatenates all resulting keypoints into
// keyPoints. NOTE(review): the elapsedTime measurement lines are elided in
// this extraction.
1370 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
1371 const cv::Mat &mask)
1376 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
1377 it != m_detectors.end(); ++it) {
1378 std::vector<cv::KeyPoint> kp;
1380 it->second->detect(matImg, kp, mask);
// Accumulate keypoints from all detectors into the output vector.
1381 keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
1396 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1398 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1401 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1416 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1418 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1421 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1436 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1439 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1453 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1456 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1473 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
1476 srand((
unsigned int)time(NULL));
1479 std::vector<vpImagePoint> queryImageKeyPoints;
1481 std::vector<vpImagePoint> trainImageKeyPoints;
1485 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1487 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1490 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1491 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1492 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
1511 unsigned int lineThickness,
const vpColor &color)
1514 srand((
unsigned int)time(NULL));
1517 std::vector<vpImagePoint> queryImageKeyPoints;
1519 std::vector<vpImagePoint> trainImageKeyPoints;
1523 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1525 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1528 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1529 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1530 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
1549 unsigned int lineThickness,
const vpColor &color)
1552 srand((
unsigned int)time(NULL));
1555 std::vector<vpImagePoint> queryImageKeyPoints;
1557 std::vector<vpImagePoint> trainImageKeyPoints;
1561 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1563 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1566 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1567 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1568 queryImageKeyPoints[(
size_t)it->queryIdx].get_j() + IRef.
getWidth());
1587 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1588 unsigned int lineThickness)
1590 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1592 std::cerr <<
"There is no training image loaded !" << std::endl;
1598 int nbImg = (int)(m_mapOfImages.size() + 1);
1606 int nbWidth = nbImgSqrt;
1607 int nbHeight = nbImgSqrt;
1609 if (nbImgSqrt * nbImgSqrt < nbImg) {
1613 std::map<int, int> mapOfImageIdIndex;
1616 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1618 mapOfImageIdIndex[it->first] = cpt;
1620 if (maxW < it->second.getWidth()) {
1621 maxW = it->second.getWidth();
1624 if (maxH < it->second.getHeight()) {
1625 maxH = it->second.getHeight();
1631 int medianI = nbHeight / 2;
1632 int medianJ = nbWidth / 2;
1633 int medianIndex = medianI * nbWidth + medianJ;
1634 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1636 int current_class_id_index = 0;
1637 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1638 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1642 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1645 int indexI = current_class_id_index / nbWidth;
1646 int indexJ = current_class_id_index - (indexI * nbWidth);
1647 topLeftCorner.
set_ij((
int)maxH * indexI, (int)maxW * indexJ);
1654 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1655 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1660 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1665 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1671 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1672 int current_class_id = 0;
1673 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1674 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1678 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1681 int indexI = current_class_id / nbWidth;
1682 int indexJ = current_class_id - (indexI * nbWidth);
1684 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1685 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1686 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1687 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
// NOTE(review): garbled extraction -- statements are hard-wrapped and many original lines
// (function header, braces, else branches, the actual display calls) are missing. Only the
// visible statements are annotated. This appears to be the tail of a displayMatching()-style
// overload that also draws the RANSAC inliers/outliers -- TODO confirm against the full file.
1708 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1709 unsigned int lineThickness)
// Guard: nothing can be displayed when no training images were loaded.
1711 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1713 std::cerr <<
"There is no training image loaded !" << std::endl;
// One mosaic cell per training image plus one cell for the current image.
1719 int nbImg = (int)(m_mapOfImages.size() + 1);
1727 int nbWidth = nbImgSqrt;
1728 int nbHeight = nbImgSqrt;
1730 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Map training-image id -> mosaic cell index; maxW/maxH accumulate the cell size.
1734 std::map<int, int> mapOfImageIdIndex;
1737 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1739 mapOfImageIdIndex[it->first] = cpt;
1741 if (maxW < it->second.getWidth()) {
1742 maxW = it->second.getWidth();
1745 if (maxH < it->second.getHeight()) {
1746 maxH = it->second.getHeight();
// The current image occupies the median (center) cell of the grid.
1752 int medianI = nbHeight / 2;
1753 int medianJ = nbWidth / 2;
1754 int medianIndex = medianI * nbWidth + medianJ;
// Train keypoints: cell indices at or past the median are bumped by one so the
// center cell stays reserved for the current image.
1755 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1757 int current_class_id_index = 0;
1758 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1759 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1763 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
// Convert the linear cell index to (row, col) and offset into that cell.
1766 int indexI = current_class_id_index / nbWidth;
1767 int indexJ = current_class_id_index - (indexI * nbWidth);
1768 topLeftCorner.
set_ij((
int)maxH * indexI, (int)maxW * indexJ);
// Query keypoints, RANSAC inliers and outliers are drawn relative to the median cell.
1775 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1776 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1781 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1786 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
// One segment per filtered match: from the query keypoint (median cell) to the matched
// train keypoint in its own cell. Note the (pt.y, pt.x) -> (i, j) ordering of vpImagePoint.
1792 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1793 int current_class_id = 0;
1794 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1795 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1799 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1802 int indexI = current_class_id / nbWidth;
1803 int indexJ = current_class_id - (indexI * nbWidth);
1805 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1806 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1807 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1808 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
// NOTE(review): fragments of the convenience extract() overloads. Each visible pair is a
// signature tail followed by a forward to the cv::Mat-based implementation; the overload
// headers (return type, image parameter, conversion code) were lost in extraction.
1828 std::vector<cv::Point3f> *trainPoints)
1831 extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1845 std::vector<cv::Point3f> *trainPoints)
1848 extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
1862 std::vector<cv::Point3f> *trainPoints)
1865 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1880 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1884 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1899 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1903 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
// NOTE(review): core extract() implementation (fragment). Runs every configured descriptor
// extractor over the detected keypoints. Extractors may silently drop keypoints; when 3D
// training points accompany the keypoints, the code re-aligns them via a keypoint hash.
1918 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1923 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1924 itd != m_extractors.end(); ++itd) {
// Branch with 3D train points: snapshot keypoints before compute() so dropped ones
// can be detected and the trainPoints vector kept in sync.
1928 if (trainPoints != NULL && !trainPoints->empty()) {
1931 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1934 itd->second->compute(matImg, keyPoints, descriptors);
1936 if (keyPoints.size() != keyPoints_tmp.size()) {
// Hash each original keypoint to its index, then keep only the 3D points whose
// keypoint survived compute().
1940 std::map<size_t, size_t> mapOfKeypointHashes;
1942 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1944 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1947 std::vector<cv::Point3f> trainPoints_tmp;
1948 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1949 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1950 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1955 *trainPoints = trainPoints_tmp;
1959 itd->second->compute(matImg, keyPoints, descriptors);
// Presumably the path for subsequent extractors (TODO confirm -- branch condition is
// missing here): compute into a temporary `desc`, re-align both trainPoints and the
// already-accumulated descriptor rows, then concatenate.
1964 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1968 itd->second->compute(matImg, keyPoints, desc);
1970 if (keyPoints.size() != keyPoints_tmp.size()) {
1974 std::map<size_t, size_t> mapOfKeypointHashes;
1976 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1978 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1981 std::vector<cv::Point3f> trainPoints_tmp;
1982 cv::Mat descriptors_tmp;
1983 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1984 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1985 if (trainPoints != NULL && !trainPoints->empty()) {
1986 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1989 if (!descriptors.empty()) {
1990 descriptors_tmp.push_back(descriptors.row((
int)mapOfKeypointHashes[myKeypointHash(*it)]));
1995 if (trainPoints != NULL) {
1997 *trainPoints = trainPoints_tmp;
2000 descriptors_tmp.copyTo(descriptors);
// Append this extractor's descriptors column-wise to those already computed.
2004 if (descriptors.empty()) {
2005 desc.copyTo(descriptors);
2007 cv::hconcat(descriptors, desc, descriptors);
// Sanity check: there must be exactly one descriptor row per keypoint.
2012 if (keyPoints.size() != (
size_t)descriptors.rows) {
2013 std::cerr <<
"keyPoints.size() != (size_t) descriptors.rows" << std::endl;
// Filter the raw matches into m_filteredMatches / m_objectFilteredPoints /
// m_queryFilteredKeyPoints.
// NOTE(review): garbled extraction -- the dispatch on m_filterType and several else/brace
// lines are missing; the two visible sections handle the knn (m_knnMatches) and plain
// (m_matches) cases respectively.
2021 void vpKeyPoint::filterMatches()
2023 std::vector<cv::KeyPoint> queryKpts;
2024 std::vector<cv::Point3f> trainPts;
2025 std::vector<cv::DMatch> m;
// knn case: gather the best-neighbor distances to derive a statistical threshold.
2031 double min_dist = DBL_MAX;
2033 std::vector<double> distance_vec(m_knnMatches.size());
2036 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
2037 double dist = m_knnMatches[i][0].distance;
2039 distance_vec[i] = dist;
2041 if (dist < min_dist) {
2048 mean /= m_queryDescriptors.rows;
// threshold = min_dist + stdev of the best-match distances.
2051 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2052 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
2053 double threshold = min_dist + stdev;
// Keep a knn match only if it has two neighbors (ratio test material) and passes the
// filter; invisible branches presumably apply the ratio or distance criterion.
2055 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
2056 if (m_knnMatches[i].size() >= 2) {
2059 float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
2064 double dist = m_knnMatches[i][0].distance;
2067 m.push_back(cv::DMatch((
int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
2069 if (!m_trainPoints.empty()) {
2070 trainPts.push_back(m_trainPoints[(
size_t)m_knnMatches[i][0].trainIdx]);
2072 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_knnMatches[i][0].queryIdx]);
// Non-knn case: same distance statistics computed over m_matches.
2080 double min_dist = DBL_MAX;
2082 std::vector<double> distance_vec(m_matches.size());
2083 for (
size_t i = 0; i < m_matches.size(); i++) {
2084 double dist = m_matches[i].distance;
2086 distance_vec[i] = dist;
2088 if (dist < min_dist) {
2095 mean /= m_queryDescriptors.rows;
2097 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2098 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
// Keep matches whose distance is within the threshold.
2108 for (
size_t i = 0; i < m_matches.size(); i++) {
2109 if (m_matches[i].distance <= threshold) {
2110 m.push_back(cv::DMatch((
int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
2112 if (!m_trainPoints.empty()) {
2113 trainPts.push_back(m_trainPoints[(
size_t)m_matches[i].trainIdx]);
2115 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_matches[i].queryIdx]);
// Optional single-match filter: count how often each train index is matched and keep
// only train keypoints matched exactly once (rejects many-to-one matches).
2120 if (m_useSingleMatchFilter) {
2123 std::vector<cv::DMatch> mTmp;
2124 std::vector<cv::Point3f> trainPtsTmp;
2125 std::vector<cv::KeyPoint> queryKptsTmp;
2127 std::map<int, int> mapOfTrainIdx;
2129 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2130 mapOfTrainIdx[it->trainIdx]++;
2134 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2135 if (mapOfTrainIdx[it->trainIdx] == 1) {
2136 mTmp.push_back(cv::DMatch((
int)queryKptsTmp.size(), it->trainIdx, it->distance));
2138 if (!m_trainPoints.empty()) {
2139 trainPtsTmp.push_back(m_trainPoints[(
size_t)it->trainIdx]);
2141 queryKptsTmp.push_back(queryKpts[(
size_t)it->queryIdx]);
2145 m_filteredMatches = mTmp;
2146 m_objectFilteredPoints = trainPtsTmp;
2147 m_queryFilteredKeyPoints = queryKptsTmp;
// Otherwise publish the unfiltered (threshold-only) results.
2149 m_filteredMatches = m;
2150 m_objectFilteredPoints = trainPts;
2151 m_queryFilteredKeyPoints = queryKpts;
// NOTE(review): isolated fragments of two accessors (presumably getObjectPoints() and
// getQueryKeyPoints() -- TODO confirm); their signatures and branch conditions were lost
// in extraction.
2164 objectPoints = m_objectFilteredPoints;
// Returns either the filtered or the raw query keypoints, selected by a flag not visible here.
2190 keyPoints = m_queryFilteredKeyPoints;
2192 keyPoints = m_queryKeyPoints;
// Initialize the keypoint pipeline: on OpenCV 2.4.x builds with the nonfree module,
// SIFT/SURF must be registered via cv::initModule_nonfree() first; then the configured
// detector and extractor name lists are instantiated.
2247 void vpKeyPoint::init()
2250 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
2252 if (!cv::initModule_nonfree()) {
2253 std::cerr <<
"Cannot init module non free, SIFT or SURF cannot be used." << std::endl;
2263 initDetectors(m_detectorNames);
2264 initExtractors(m_extractorNames);
// Create and register the cv::FeatureDetector for detectorName in m_detectors.
// OpenCV < 3.0 uses the string-based FeatureDetector::create() factory; OpenCV >= 3.0
// dispatches on the name. A "Pyramid" prefix in the name wraps the detector in a
// PyramidAdaptedFeatureDetector.
// NOTE(review): garbled extraction -- else branches, #else/#endif and brace lines are
// missing throughout; comments describe only the visible statements.
2273 void vpKeyPoint::initDetector(
const std::string &detectorName)
2275 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2276 m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
2278 if (m_detectors[detectorName] == NULL) {
2279 std::stringstream ss_msg;
2280 ss_msg <<
"Fail to initialize the detector: " << detectorName
2281 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Strip an optional "Pyramid" prefix; when present, the created detector is wrapped.
2285 std::string detectorNameTmp = detectorName;
2286 std::string pyramid =
"Pyramid";
2287 std::size_t pos = detectorName.find(pyramid);
2288 bool usePyramid =
false;
2289 if (pos != std::string::npos) {
2290 detectorNameTmp = detectorName.substr(pos + pyramid.size());
// Per-name dispatch; each branch stores the detector under the stripped name or, when
// pyramid mode is requested, under the full name wrapped in PyramidAdaptedFeatureDetector.
2294 if (detectorNameTmp ==
"SIFT") {
2295 #if (VISP_HAVE_OPENCV_VERSION >= 0x040500)
2296 cv::Ptr<cv::FeatureDetector> siftDetector = cv::SiftFeatureDetector::create();
2298 m_detectors[detectorNameTmp] = siftDetector;
2300 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2303 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
2304 (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2306 cv::Ptr<cv::FeatureDetector> siftDetector;
// m_maxFeatures > 0 caps the number of keypoints returned by SIFT.
2307 if (m_maxFeatures > 0) {
2308 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2309 siftDetector = cv::SIFT::create(m_maxFeatures);
2311 siftDetector = cv::xfeatures2d::SIFT::create(m_maxFeatures);
2314 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2315 siftDetector = cv::SIFT::create();
2317 siftDetector = cv::xfeatures2d::SIFT::create();
2321 m_detectors[detectorNameTmp] = siftDetector;
2323 std::cerr <<
"You should not use SIFT with Pyramid feature detection!" << std::endl;
2324 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2327 std::stringstream ss_msg;
2328 ss_msg <<
"Fail to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2329 <<
" was not build with xFeatures2d module.";
2333 }
else if (detectorNameTmp ==
"SURF") {
2334 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2335 cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
2337 m_detectors[detectorNameTmp] = surfDetector;
2339 std::cerr <<
"You should not use SURF with Pyramid feature detection!" << std::endl;
2340 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
2343 std::stringstream ss_msg;
2344 ss_msg <<
"Fail to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2345 <<
" was not build with xFeatures2d module.";
2348 }
else if (detectorNameTmp ==
"FAST") {
2349 cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
2351 m_detectors[detectorNameTmp] = fastDetector;
2353 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2355 }
else if (detectorNameTmp ==
"MSER") {
2356 cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
2358 m_detectors[detectorNameTmp] = fastDetector;
2360 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2362 }
else if (detectorNameTmp ==
"ORB") {
2363 cv::Ptr<cv::FeatureDetector> orbDetector;
2364 if (m_maxFeatures > 0) {
2365 orbDetector = cv::ORB::create(m_maxFeatures);
2367 orbDetector = cv::ORB::create();
2370 m_detectors[detectorNameTmp] = orbDetector;
2372 std::cerr <<
"You should not use ORB with Pyramid feature detection!" << std::endl;
2373 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
2375 }
else if (detectorNameTmp ==
"BRISK") {
2376 cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
2378 m_detectors[detectorNameTmp] = briskDetector;
2380 std::cerr <<
"You should not use BRISK with Pyramid feature detection!" << std::endl;
2381 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
2383 }
else if (detectorNameTmp ==
"KAZE") {
2384 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
2386 m_detectors[detectorNameTmp] = kazeDetector;
2388 std::cerr <<
"You should not use KAZE with Pyramid feature detection!" << std::endl;
2389 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
2391 }
else if (detectorNameTmp ==
"AKAZE") {
2392 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
2394 m_detectors[detectorNameTmp] = akazeDetector;
2396 std::cerr <<
"You should not use AKAZE with Pyramid feature detection!" << std::endl;
2397 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
2399 }
else if (detectorNameTmp ==
"GFTT") {
2400 cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
2402 m_detectors[detectorNameTmp] = gfttDetector;
2404 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
2406 }
else if (detectorNameTmp ==
"SimpleBlob") {
2407 cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
2409 m_detectors[detectorNameTmp] = simpleBlobDetector;
2411 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
2413 }
else if (detectorNameTmp ==
"STAR") {
2414 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2415 cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
2417 m_detectors[detectorNameTmp] = starDetector;
2419 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
2422 std::stringstream ss_msg;
2423 ss_msg <<
"Fail to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2424 <<
" was not build with xFeatures2d module.";
2427 }
else if (detectorNameTmp ==
"AGAST") {
2428 cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
2430 m_detectors[detectorNameTmp] = agastDetector;
2432 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
2434 }
else if (detectorNameTmp ==
"MSD") {
2435 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
2436 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2437 cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
2439 m_detectors[detectorNameTmp] = msdDetector;
2441 std::cerr <<
"You should not use MSD with Pyramid feature detection!" << std::endl;
2442 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
2445 std::stringstream ss_msg;
2446 ss_msg <<
"Fail to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2447 <<
" was not build with xFeatures2d module.";
2451 std::stringstream ss_msg;
2452 ss_msg <<
"Feature " << detectorName <<
" is not available in OpenCV version: " << std::hex
2453 << VISP_HAVE_OPENCV_VERSION <<
" (require >= OpenCV 3.1).";
2456 std::cerr <<
"The detector:" << detectorNameTmp <<
" is not available." << std::endl;
// Verify that the chosen map entry (stripped name or pyramid-wrapped full name) was
// actually populated; otherwise raise with the OpenCV version in the message.
2459 bool detectorInitialized =
false;
2462 detectorInitialized = !m_detectors[detectorNameTmp].empty();
2465 detectorInitialized = !m_detectors[detectorName].empty();
2468 if (!detectorInitialized) {
2469 std::stringstream ss_msg;
2470 ss_msg <<
"Fail to initialize the detector: " << detectorNameTmp
2471 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Instantiate every detector named in detectorNames (loop body -- presumably a call to
// initDetector(*it) -- is missing from this extraction).
2483 void vpKeyPoint::initDetectors(
const std::vector<std::string> &detectorNames)
2485 for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
// Create and register the cv::DescriptorExtractor for extractorName in m_extractors.
// OpenCV < 3.0 uses the string-based DescriptorExtractor::create() factory; OpenCV >= 3.0
// dispatches on the name, throwing (via ss_msg -- throw lines missing here) when the
// required xfeatures2d module or OpenCV version is absent.
// NOTE(review): garbled extraction -- #else/#endif, else branches and brace lines are
// missing throughout.
2495 void vpKeyPoint::initExtractor(
const std::string &extractorName)
2497 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2498 m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
2500 if (extractorName ==
"SIFT") {
2501 #if (VISP_HAVE_OPENCV_VERSION >= 0x040500)
2502 m_extractors[extractorName] = cv::SIFT::create();
2504 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
2505 (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2507 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2508 m_extractors[extractorName] = cv::SIFT::create();
2510 m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
2513 std::stringstream ss_msg;
2514 ss_msg <<
"Fail to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2515 <<
" was not build with xFeatures2d module.";
2519 }
else if (extractorName ==
"SURF") {
2520 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
// Extended SURF: the trailing 'true' requests 128-element descriptors.
2522 m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3,
true);
2524 std::stringstream ss_msg;
2525 ss_msg <<
"Fail to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2526 <<
" was not build with xFeatures2d module.";
2529 }
else if (extractorName ==
"ORB") {
2530 m_extractors[extractorName] = cv::ORB::create();
2531 }
else if (extractorName ==
"BRISK") {
2532 m_extractors[extractorName] = cv::BRISK::create();
2533 }
else if (extractorName ==
"FREAK") {
2534 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2535 m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2537 std::stringstream ss_msg;
2538 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2539 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2542 }
else if (extractorName ==
"BRIEF") {
2543 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2544 m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2546 std::stringstream ss_msg;
2547 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2548 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2551 }
else if (extractorName ==
"KAZE") {
2552 m_extractors[extractorName] = cv::KAZE::create();
2553 }
else if (extractorName ==
"AKAZE") {
2554 m_extractors[extractorName] = cv::AKAZE::create();
2555 }
else if (extractorName ==
"DAISY") {
2556 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2557 m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2559 std::stringstream ss_msg;
2560 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2561 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2564 }
else if (extractorName ==
"LATCH") {
2565 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2566 m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2568 std::stringstream ss_msg;
2569 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2570 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2573 }
else if (extractorName ==
"LUCID") {
2574 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
// LUCID creation lines (original 2575-2578) are missing from this extraction.
2579 std::stringstream ss_msg;
2580 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2581 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2584 }
else if (extractorName ==
"VGG") {
2585 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2586 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2587 m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2589 std::stringstream ss_msg;
2590 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2591 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2595 std::stringstream ss_msg;
2596 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2597 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
2600 }
else if (extractorName ==
"BoostDesc") {
2601 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2602 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2603 m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2605 std::stringstream ss_msg;
2606 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2607 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2611 std::stringstream ss_msg;
2612 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2613 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
2617 std::cerr <<
"The extractor:" << extractorName <<
" is not available." << std::endl;
// Final check that the extractor was actually created.
2621 if (!m_extractors[extractorName]) {
2622 std::stringstream ss_msg;
2623 ss_msg <<
"Fail to initialize the extractor: " << extractorName
2624 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Legacy OpenCV 2.4.x: force extended (128-float) SURF descriptors via the set() API.
2628 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2629 if (extractorName ==
"SURF") {
2631 m_extractors[extractorName]->set(
"extended", 1);
// Instantiate every extractor named in extractorNames, then verify that all configured
// extractors produce descriptors of the same OpenCV type (the loop body and the
// mismatch-handling branch are missing from this extraction).
2642 void vpKeyPoint::initExtractors(
const std::vector<std::string> &extractorNames)
2644 for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
// Take the first extractor's descriptorType() as the reference and compare the rest.
2648 int descriptorType = CV_32F;
2649 bool firstIteration =
true;
2650 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2651 it != m_extractors.end(); ++it) {
2652 if (firstIteration) {
2653 firstIteration =
false;
2654 descriptorType = it->second->descriptorType();
2656 if (descriptorType != it->second->descriptorType()) {
// Populate the enum -> OpenCV-name lookup tables (m_mapOfDetectorNames,
// m_mapOfDescriptorNames), guarded by OpenCV version / module availability.
// NOTE(review): most plain (unconditional) map-filling lines were lost in extraction;
// only the conditionally compiled entries remain visible.
2663 void vpKeyPoint::initFeatureNames()
2666 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2673 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2674 m_mapOfDetectorNames[DETECTOR_STAR] =
"STAR";
2676 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2677 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2680 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2683 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2688 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2689 m_mapOfDetectorNames[DETECTOR_MSD] =
"MSD";
// Descriptor-name table, with the same availability guards.
2693 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2696 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2697 m_mapOfDescriptorNames[DESCRIPTOR_FREAK] =
"FREAK";
2698 m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] =
"BRIEF";
2700 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2701 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2704 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2707 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2710 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2711 m_mapOfDescriptorNames[DESCRIPTOR_DAISY] =
"DAISY";
2712 m_mapOfDescriptorNames[DESCRIPTOR_LATCH] =
"LATCH";
2715 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2716 m_mapOfDescriptorNames[DESCRIPTOR_VGG] =
"VGG";
2717 m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] =
"BoostDesc";
// NOTE(review): body of the matcher initialization (presumably vpKeyPoint::initMatcher --
// the function header is missing from this extraction). Chooses a FLANN index suited to
// the descriptor type for "FlannBased", otherwise defers to the OpenCV factory.
// Determine the common descriptor type of the configured extractors (mismatch branch is
// not visible here).
2729 int descriptorType = CV_32F;
2730 bool firstIteration =
true;
2731 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2732 it != m_extractors.end(); ++it) {
2733 if (firstIteration) {
2734 firstIteration =
false;
2735 descriptorType = it->second->descriptorType();
2737 if (descriptorType != it->second->descriptorType()) {
// FlannBased: binary descriptors (CV_8U) need an LSH index; float descriptors a KD-tree.
2743 if (matcherName ==
"FlannBased") {
2744 if (m_extractors.empty()) {
2745 std::cout <<
"Warning: No extractor initialized, by default use "
2746 "floating values (CV_32F) "
2747 "for descriptor type !"
2751 if (descriptorType == CV_8U) {
2752 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2753 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2755 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::LshIndexParams(12, 20, 2));
2758 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2759 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2761 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::KDTreeIndexParams());
// Any other matcher name: use the OpenCV string factory.
2765 m_matcher = cv::DescriptorMatcher::create(matcherName);
// Legacy OpenCV 2.4.x: cross-check is only valid for plain (non-knn) BruteForce matching.
2768 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2769 if (m_matcher != NULL && !m_useKnn && matcherName ==
"BruteForce") {
2770 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
2775 std::stringstream ss_msg;
2776 ss_msg <<
"Fail to initialize the matcher: " << matcherName
2777 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// NOTE(review): fragments of two insertImageMatching(IRef, ICurrent, IMatching) overloads
// (presumably gray and color -- TODO confirm); the signatures and the topLeftCorner setup
// were lost in extraction. Each pastes the reference then the current image side by side.
2794 IMatching.
insert(IRef, topLeftCorner);
2796 IMatching.
insert(ICurrent, topLeftCorner);
2811 IMatching.
insert(IRef, topLeftCorner);
2813 IMatching.
insert(ICurrent, topLeftCorner);
// NOTE(review): body fragment of insertImageMatching building a mosaic of all training
// images around the current image (header and several braces missing from extraction).
// One grid cell per training image plus one for the current image.
2827 int nbImg = (int)(m_mapOfImages.size() + 1);
2829 if (m_mapOfImages.empty()) {
2830 std::cerr <<
"There is no training image loaded !" << std::endl;
2840 int nbWidth = nbImgSqrt;
2841 int nbHeight = nbImgSqrt;
2843 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Cell size = maximum training-image width/height.
2848 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2850 if (maxW < it->second.getWidth()) {
2851 maxW = it->second.getWidth();
2854 if (maxH < it->second.getHeight()) {
2855 maxH = it->second.getHeight();
// Reserve the median (center) cell for the current image.
2861 int medianI = nbHeight / 2;
2862 int medianJ = nbWidth / 2;
2863 int medianIndex = medianI * nbWidth + medianJ;
// Paste each training image into its cell, skipping over the median cell.
2866 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2868 int local_cpt = cpt;
2869 if (cpt >= medianIndex) {
2874 int indexI = local_cpt / nbWidth;
2875 int indexJ = local_cpt - (indexI * nbWidth);
2876 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2878 IMatching.
insert(it->second, topLeftCorner);
// Finally paste the current image into the median cell.
2881 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2882 IMatching.
insert(ICurrent, topLeftCorner);
// NOTE(review): body fragment of the second multi-train insertImageMatching overload
// (same mosaic layout as the gray version above; here the visible calls paste IRef then
// ICurrent -- presumably the color variant; TODO confirm against the full file).
2897 int nbImg = (int)(m_mapOfImages.size() + 1);
2899 if (m_mapOfImages.empty()) {
2900 std::cerr <<
"There is no training image loaded !" << std::endl;
2912 int nbWidth = nbImgSqrt;
2913 int nbHeight = nbImgSqrt;
2915 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Cell size = maximum training-image width/height.
2920 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2922 if (maxW < it->second.getWidth()) {
2923 maxW = it->second.getWidth();
2926 if (maxH < it->second.getHeight()) {
2927 maxH = it->second.getHeight();
// Reserve the median (center) cell for the current image.
2933 int medianI = nbHeight / 2;
2934 int medianJ = nbWidth / 2;
2935 int medianIndex = medianI * nbWidth + medianJ;
2938 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2940 int local_cpt = cpt;
2941 if (cpt >= medianIndex) {
2946 int indexI = local_cpt / nbWidth;
2947 int indexJ = local_cpt - (indexI * nbWidth);
2948 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2952 IMatching.
insert(IRef, topLeftCorner);
// Finally paste the current image into the median cell.
2955 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2956 IMatching.
insert(ICurrent, topLeftCorner);
// NOTE(review): fragment of loadConfigFile (signature missing). Resets the configured
// detectors/extractors before parsing the XML configuration file (presumably via a
// vpXmlConfigParserKeyPoint `xmlp` declared on a missing line -- TODO confirm).
2971 m_detectorNames.clear();
2972 m_extractorNames.clear();
2973 m_detectors.clear();
2974 m_extractors.clear();
2976 std::cout <<
" *********** Parsing XML for configuration for vpKeyPoint "
2979 xmlp.
parse(configFile);
3043 int startClassId = 0;
3044 int startImageId = 0;
3046 m_trainKeyPoints.clear();
3047 m_trainPoints.clear();
3048 m_mapOfImageId.clear();
3049 m_mapOfImages.clear();
3052 for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
3053 if (startClassId < it->first) {
3054 startClassId = it->first;
3059 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3061 if (startImageId < it->first) {
3062 startImageId = it->first;
3069 if (!parent.empty()) {
3074 std::ifstream file(filename.c_str(), std::ifstream::binary);
3075 if (!file.is_open()) {
3083 #if !defined(VISP_HAVE_MODULE_IO)
3085 std::cout <<
"Warning: The learning file contains image data that will "
3086 "not be loaded as visp_io module "
3087 "is not available !"
3092 for (
int i = 0; i < nbImgs; i++) {
3100 char *path =
new char[length + 1];
3102 for (
int cpt = 0; cpt < length; cpt++) {
3104 file.read((
char *)(&c),
sizeof(c));
3107 path[length] =
'\0';
3110 #ifdef VISP_HAVE_MODULE_IO
3118 m_mapOfImages[
id + startImageId] = I;
3126 int have3DInfoInt = 0;
3128 bool have3DInfo = have3DInfoInt != 0;
3139 int descriptorType = 5;
3142 cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3143 for (
int i = 0; i < nRows; i++) {
3145 float u, v, size, angle, response;
3146 int octave, class_id, image_id;
3155 cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
3156 m_trainKeyPoints.push_back(keyPoint);
3158 if (image_id != -1) {
3159 #ifdef VISP_HAVE_MODULE_IO
3161 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3171 m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
3174 for (
int j = 0; j < nCols; j++) {
3176 switch (descriptorType) {
3178 unsigned char value;
3179 file.read((
char *)(&value),
sizeof(value));
3180 trainDescriptorsTmp.at<
unsigned char>(i, j) = value;
3185 file.read((
char *)(&value),
sizeof(value));
3186 trainDescriptorsTmp.at<
char>(i, j) = value;
3190 unsigned short int value;
3192 trainDescriptorsTmp.at<
unsigned short int>(i, j) = value;
3198 trainDescriptorsTmp.at<
short int>(i, j) = value;
3204 trainDescriptorsTmp.at<
int>(i, j) = value;
3210 trainDescriptorsTmp.at<
float>(i, j) = value;
3216 trainDescriptorsTmp.at<
double>(i, j) = value;
3222 trainDescriptorsTmp.at<
float>(i, j) = value;
3228 if (!append || m_trainDescriptors.empty()) {
3229 trainDescriptorsTmp.copyTo(m_trainDescriptors);
3231 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3236 pugi::xml_document doc;
3239 if (!doc.load_file(filename.c_str())) {
3243 pugi::xml_node root_element = doc.document_element();
3245 int descriptorType = CV_32F;
3246 int nRows = 0, nCols = 0;
3249 cv::Mat trainDescriptorsTmp;
3251 for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node;
3252 first_level_node = first_level_node.next_sibling()) {
3254 std::string name(first_level_node.name());
3255 if (first_level_node.type() == pugi::node_element && name ==
"TrainingImageInfo") {
3257 for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node;
3258 image_info_node = image_info_node.next_sibling()) {
3259 name = std::string(image_info_node.name());
3261 if (name ==
"trainImg") {
3263 int id = image_info_node.attribute(
"image_id").as_int();
3266 #ifdef VISP_HAVE_MODULE_IO
3267 std::string path(image_info_node.text().as_string());
3276 m_mapOfImages[
id + startImageId] = I;
3280 }
else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorsInfo") {
3281 for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
3282 descriptors_info_node = descriptors_info_node.next_sibling()) {
3283 if (descriptors_info_node.type() == pugi::node_element) {
3284 name = std::string(descriptors_info_node.name());
3286 if (name ==
"nrows") {
3287 nRows = descriptors_info_node.text().as_int();
3288 }
else if (name ==
"ncols") {
3289 nCols = descriptors_info_node.text().as_int();
3290 }
else if (name ==
"type") {
3291 descriptorType = descriptors_info_node.text().as_int();
3296 trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3297 }
else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorInfo") {
3298 double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
3299 int octave = 0, class_id = 0, image_id = 0;
3300 double oX = 0.0, oY = 0.0, oZ = 0.0;
3302 std::stringstream ss;
3304 for (pugi::xml_node point_node = first_level_node.first_child(); point_node;
3305 point_node = point_node.next_sibling()) {
3306 if (point_node.type() == pugi::node_element) {
3307 name = std::string(point_node.name());
3311 u = point_node.text().as_double();
3312 }
else if (name ==
"v") {
3313 v = point_node.text().as_double();
3314 }
else if (name ==
"size") {
3315 size = point_node.text().as_double();
3316 }
else if (name ==
"angle") {
3317 angle = point_node.text().as_double();
3318 }
else if (name ==
"response") {
3319 response = point_node.text().as_double();
3320 }
else if (name ==
"octave") {
3321 octave = point_node.text().as_int();
3322 }
else if (name ==
"class_id") {
3323 class_id = point_node.text().as_int();
3324 cv::KeyPoint keyPoint(cv::Point2f((
float)u, (
float)v), (
float)size, (
float)angle, (
float)response, octave,
3325 (class_id + startClassId));
3326 m_trainKeyPoints.push_back(keyPoint);
3327 }
else if (name ==
"image_id") {
3328 image_id = point_node.text().as_int();
3329 if (image_id != -1) {
3330 #ifdef VISP_HAVE_MODULE_IO
3332 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3335 }
else if (name ==
"oX") {
3336 oX = point_node.text().as_double();
3337 }
else if (name ==
"oY") {
3338 oY = point_node.text().as_double();
3339 }
else if (name ==
"oZ") {
3340 oZ = point_node.text().as_double();
3341 m_trainPoints.push_back(cv::Point3f((
float)oX, (
float)oY, (
float)oZ));
3342 }
else if (name ==
"desc") {
3345 for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
3346 descriptor_value_node = descriptor_value_node.next_sibling()) {
3348 if (descriptor_value_node.type() == pugi::node_element) {
3350 std::string parseStr(descriptor_value_node.text().as_string());
3355 switch (descriptorType) {
3360 trainDescriptorsTmp.at<
unsigned char>(i, j) = (
unsigned char)parseValue;
3367 trainDescriptorsTmp.at<
char>(i, j) = (
char)parseValue;
3371 ss >> trainDescriptorsTmp.at<
unsigned short int>(i, j);
3375 ss >> trainDescriptorsTmp.at<
short int>(i, j);
3379 ss >> trainDescriptorsTmp.at<
int>(i, j);
3383 ss >> trainDescriptorsTmp.at<
float>(i, j);
3387 ss >> trainDescriptorsTmp.at<
double>(i, j);
3391 ss >> trainDescriptorsTmp.at<
float>(i, j);
3395 std::cerr <<
"Error when converting:" << ss.str() << std::endl;
3408 if (!append || m_trainDescriptors.empty()) {
3409 trainDescriptorsTmp.copyTo(m_trainDescriptors);
3411 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3421 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
3427 m_currentImageId = (int)m_mapOfImages.size();
3439 std::vector<cv::DMatch> &matches,
double &elapsedTime)
3444 m_knnMatches.clear();
3446 if (m_useMatchTrainToQuery) {
3447 std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
3450 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3451 matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
3453 for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
3454 it1 != knnMatchesTmp.end(); ++it1) {
3455 std::vector<cv::DMatch> tmp;
3456 for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
3457 tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
3459 m_knnMatches.push_back(tmp);
3462 matches.resize(m_knnMatches.size());
3463 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3466 m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
3467 matches.resize(m_knnMatches.size());
3468 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3473 if (m_useMatchTrainToQuery) {
3474 std::vector<cv::DMatch> matchesTmp;
3476 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3477 matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
3479 for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
3480 matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
3484 m_matcher->match(queryDescriptors, matches);
3550 if (m_trainDescriptors.empty()) {
3551 std::cerr <<
"Reference is empty." << std::endl;
3553 std::cerr <<
"Reference is not computed." << std::endl;
3555 std::cerr <<
"Matching is not possible." << std::endl;
3560 if (m_useAffineDetection) {
3561 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3562 std::vector<cv::Mat> listOfQueryDescriptors;
3568 m_queryKeyPoints.clear();
3569 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3570 it != listOfQueryKeyPoints.end(); ++it) {
3571 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3575 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3579 it->copyTo(m_queryDescriptors);
3581 m_queryDescriptors.push_back(*it);
3585 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3586 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3589 return matchPoint(m_queryKeyPoints, m_queryDescriptors);
3602 m_queryKeyPoints = queryKeyPoints;
3603 m_queryDescriptors = queryDescriptors;
3605 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3608 m_queryFilteredKeyPoints.clear();
3609 m_objectFilteredPoints.clear();
3610 m_filteredMatches.clear();
3614 if (m_useMatchTrainToQuery) {
3616 m_queryFilteredKeyPoints.clear();
3617 m_filteredMatches.clear();
3618 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3619 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3620 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3623 m_queryFilteredKeyPoints = m_queryKeyPoints;
3624 m_filteredMatches = m_matches;
3627 if (!m_trainPoints.empty()) {
3628 m_objectFilteredPoints.clear();
3632 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3634 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3643 return static_cast<unsigned int>(m_filteredMatches.size());
3675 double error, elapsedTime;
3676 return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3694 double error, elapsedTime;
3695 return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
3718 if (m_trainDescriptors.empty()) {
3719 std::cerr <<
"Reference is empty." << std::endl;
3721 std::cerr <<
"Reference is not computed." << std::endl;
3723 std::cerr <<
"Matching is not possible." << std::endl;
3728 if (m_useAffineDetection) {
3729 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3730 std::vector<cv::Mat> listOfQueryDescriptors;
3736 m_queryKeyPoints.clear();
3737 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3738 it != listOfQueryKeyPoints.end(); ++it) {
3739 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3743 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3747 it->copyTo(m_queryDescriptors);
3749 m_queryDescriptors.push_back(*it);
3753 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3754 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3757 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3759 elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3762 m_queryFilteredKeyPoints.clear();
3763 m_objectFilteredPoints.clear();
3764 m_filteredMatches.clear();
3768 if (m_useMatchTrainToQuery) {
3770 m_queryFilteredKeyPoints.clear();
3771 m_filteredMatches.clear();
3772 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3773 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3774 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3777 m_queryFilteredKeyPoints = m_queryKeyPoints;
3778 m_filteredMatches = m_matches;
3781 if (!m_trainPoints.empty()) {
3782 m_objectFilteredPoints.clear();
3786 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3788 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3800 m_ransacInliers.clear();
3801 m_ransacOutliers.clear();
3803 if (m_useRansacVVS) {
3804 std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3808 for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3809 it != m_objectFilteredPoints.end(); ++it, cpt++) {
3813 vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3815 double x = 0.0, y = 0.0;
3820 objectVpPoints[cpt] = pt;
3823 std::vector<vpPoint> inliers;
3824 std::vector<unsigned int> inlierIndex;
3826 bool res =
computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3828 std::map<unsigned int, bool> mapOfInlierIndex;
3829 m_matchRansacKeyPointsToPoints.clear();
3831 for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3832 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3833 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3834 mapOfInlierIndex[*it] =
true;
3837 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3838 if (mapOfInlierIndex.find((
unsigned int)i) == mapOfInlierIndex.end()) {
3839 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3843 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3845 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3846 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3847 m_ransacInliers.begin(), matchRansacToVpImage);
3849 elapsedTime += m_poseTime;
3853 std::vector<cv::Point2f> imageFilteredPoints;
3854 cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3855 std::vector<int> inlierIndex;
3856 bool res =
computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3858 std::map<int, bool> mapOfInlierIndex;
3859 m_matchRansacKeyPointsToPoints.clear();
3861 for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3862 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3863 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3864 mapOfInlierIndex[*it] =
true;
3867 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3868 if (mapOfInlierIndex.find((
int)i) == mapOfInlierIndex.end()) {
3869 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3873 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3875 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3876 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3877 m_ransacInliers.begin(), matchRansacToVpImage);
3879 elapsedTime += m_poseTime;
3905 return (
matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
3929 vpImagePoint ¢erOfGravity,
const bool isPlanarObject,
3930 std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3931 double *meanDescriptorDistance,
double *detection_score,
const vpRect &rectangle)
3933 if (imPts1 != NULL && imPts2 != NULL) {
3940 double meanDescriptorDistanceTmp = 0.0;
3941 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3942 meanDescriptorDistanceTmp += (double)it->distance;
3945 meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3946 double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3948 if (meanDescriptorDistance != NULL) {
3949 *meanDescriptorDistance = meanDescriptorDistanceTmp;
3951 if (detection_score != NULL) {
3952 *detection_score = score;
3955 if (m_filteredMatches.size() >= 4) {
3957 std::vector<cv::Point2f> points1(m_filteredMatches.size());
3959 std::vector<cv::Point2f> points2(m_filteredMatches.size());
3961 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3962 points1[i] = cv::Point2f(m_trainKeyPoints[(
size_t)m_filteredMatches[i].trainIdx].pt);
3963 points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(
size_t)m_filteredMatches[i].queryIdx].pt);
3966 std::vector<vpImagePoint> inliers;
3967 if (isPlanarObject) {
3968 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3969 cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3971 cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3974 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3976 cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3977 realPoint.at<
double>(0, 0) = points1[i].x;
3978 realPoint.at<
double>(1, 0) = points1[i].y;
3979 realPoint.at<
double>(2, 0) = 1.f;
3981 cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3982 double err_x = (reprojectedPoint.at<
double>(0, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].x;
3983 double err_y = (reprojectedPoint.at<
double>(1, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].y;
3984 double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3986 if (reprojectionError < 6.0) {
3987 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3988 if (imPts1 != NULL) {
3989 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3992 if (imPts2 != NULL) {
3993 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3997 }
else if (m_filteredMatches.size() >= 8) {
3998 cv::Mat fundamentalInliers;
3999 cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
4001 for (
size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
4002 if (fundamentalInliers.at<uchar>((
int)i, 0)) {
4003 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
4005 if (imPts1 != NULL) {
4006 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
4009 if (imPts2 != NULL) {
4010 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
4016 if (!inliers.empty()) {
4023 double meanU = 0.0, meanV = 0.0;
4024 for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
4025 meanU += it->get_u();
4026 meanV += it->get_v();
4029 meanU /= (double)inliers.size();
4030 meanV /= (double)inliers.size();
4032 centerOfGravity.
set_u(meanU);
4033 centerOfGravity.
set_v(meanV);
4041 return meanDescriptorDistanceTmp < m_detectionThreshold;
4043 return score > m_detectionScore;
4071 bool isMatchOk =
matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
4076 std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
4078 for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
4082 modelImagePoints[cpt] = imPt;
4091 double meanU = 0.0, meanV = 0.0;
4092 for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
4093 meanU += it->get_u();
4094 meanV += it->get_v();
4097 meanU /= (double)m_ransacInliers.size();
4098 meanV /= (double)m_ransacInliers.size();
4100 centerOfGravity.
set_u(meanU);
4101 centerOfGravity.
set_v(meanV);
4122 std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
4123 std::vector<cv::Mat> &listOfDescriptors,
4129 listOfKeypoints.clear();
4130 listOfDescriptors.clear();
4132 for (
int tl = 1; tl < 6; tl++) {
4133 double t = pow(2, 0.5 * tl);
4134 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4135 std::vector<cv::KeyPoint> keypoints;
4136 cv::Mat descriptors;
4138 cv::Mat timg, mask, Ai;
4141 affineSkew(t, phi, timg, mask, Ai);
4144 if(listOfAffineI != NULL) {
4146 bitwise_and(mask, timg, img_disp);
4149 listOfAffineI->push_back(tI);
4153 cv::bitwise_and(mask, timg, img_disp);
4154 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
4155 cv::imshow(
"Skew", img_disp );
4159 for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4160 it != m_detectors.end(); ++it) {
4161 std::vector<cv::KeyPoint> kp;
4162 it->second->detect(timg, kp, mask);
4163 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4167 extract(timg, keypoints, descriptors, elapsedTime);
4169 for(
unsigned int i = 0; i < keypoints.size(); i++) {
4170 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4171 cv::Mat kpt_t = Ai * cv::Mat(kpt);
4172 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
4173 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
4176 listOfKeypoints.push_back(keypoints);
4177 listOfDescriptors.push_back(descriptors);
4186 std::vector<std::pair<double, int> > listOfAffineParams;
4187 for (
int tl = 1; tl < 6; tl++) {
4188 double t = pow(2, 0.5 * tl);
4189 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4190 listOfAffineParams.push_back(std::pair<double, int>(t, phi));
4194 listOfKeypoints.resize(listOfAffineParams.size());
4195 listOfDescriptors.resize(listOfAffineParams.size());
4197 if (listOfAffineI != NULL) {
4198 listOfAffineI->resize(listOfAffineParams.size());
4201 #ifdef VISP_HAVE_OPENMP
4202 #pragma omp parallel for
4204 for (
int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
4205 std::vector<cv::KeyPoint> keypoints;
4206 cv::Mat descriptors;
4208 cv::Mat timg, mask, Ai;
4211 affineSkew(listOfAffineParams[(
size_t)cpt].first, listOfAffineParams[(
size_t)cpt].second, timg, mask, Ai);
4213 if (listOfAffineI != NULL) {
4215 bitwise_and(mask, timg, img_disp);
4218 (*listOfAffineI)[(size_t)cpt] = tI;
4223 cv::bitwise_and(mask, timg, img_disp);
4224 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
4225 cv::imshow(
"Skew", img_disp );
4229 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4230 it != m_detectors.end(); ++it) {
4231 std::vector<cv::KeyPoint> kp;
4232 it->second->detect(timg, kp, mask);
4233 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4237 extract(timg, keypoints, descriptors, elapsedTime);
4239 for (
size_t i = 0; i < keypoints.size(); i++) {
4240 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4241 cv::Mat kpt_t = Ai * cv::Mat(kpt);
4242 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
4243 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
4246 listOfKeypoints[(size_t)cpt] = keypoints;
4247 listOfDescriptors[(size_t)cpt] = descriptors;
4263 m_computeCovariance =
false;
4265 m_currentImageId = 0;
4267 m_detectionScore = 0.15;
4268 m_detectionThreshold = 100.0;
4269 m_detectionTime = 0.0;
4270 m_detectorNames.clear();
4271 m_detectors.clear();
4272 m_extractionTime = 0.0;
4273 m_extractorNames.clear();
4274 m_extractors.clear();
4275 m_filteredMatches.clear();
4278 m_knnMatches.clear();
4279 m_mapOfImageId.clear();
4280 m_mapOfImages.clear();
4281 m_matcher = cv::Ptr<cv::DescriptorMatcher>();
4282 m_matcherName =
"BruteForce-Hamming";
4284 m_matchingFactorThreshold = 2.0;
4285 m_matchingRatioThreshold = 0.85;
4286 m_matchingTime = 0.0;
4287 m_matchRansacKeyPointsToPoints.clear();
4288 m_nbRansacIterations = 200;
4289 m_nbRansacMinInlierCount = 100;
4290 m_objectFilteredPoints.clear();
4292 m_queryDescriptors = cv::Mat();
4293 m_queryFilteredKeyPoints.clear();
4294 m_queryKeyPoints.clear();
4295 m_ransacConsensusPercentage = 20.0;
4297 m_ransacInliers.clear();
4298 m_ransacOutliers.clear();
4299 m_ransacParallel =
true;
4300 m_ransacParallelNbThreads = 0;
4301 m_ransacReprojectionError = 6.0;
4302 m_ransacThreshold = 0.01;
4303 m_trainDescriptors = cv::Mat();
4304 m_trainKeyPoints.clear();
4305 m_trainPoints.clear();
4306 m_trainVpPoints.clear();
4307 m_useAffineDetection =
false;
4308 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
4309 m_useBruteForceCrossCheck =
true;
4311 m_useConsensusPercentage =
false;
4313 m_useMatchTrainToQuery =
false;
4314 m_useRansacVVS =
true;
4315 m_useSingleMatchFilter =
true;
4317 m_detectorNames.push_back(
"ORB");
4318 m_extractorNames.push_back(
"ORB");
4334 if (!parent.empty()) {
4338 std::map<int, std::string> mapOfImgPath;
4339 if (saveTrainingImages) {
4340 #ifdef VISP_HAVE_MODULE_IO
4342 unsigned int cpt = 0;
4344 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
4350 std::stringstream ss;
4351 ss <<
"train_image_" << std::setfill(
'0') << std::setw(3) << cpt;
4353 switch (m_imageFormat) {
4375 std::string imgFilename = ss.str();
4376 mapOfImgPath[it->first] = imgFilename;
4377 vpImageIo::write(it->second, parent + (!parent.empty() ?
"/" :
"") + imgFilename);
4380 std::cout <<
"Warning: in vpKeyPoint::saveLearningData() training images "
4381 "are not saved because "
4382 "visp_io module is not available !"
4387 bool have3DInfo = m_trainPoints.size() > 0;
4388 if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
4394 std::ofstream file(filename.c_str(), std::ofstream::binary);
4395 if (!file.is_open()) {
4400 int nbImgs = (int)mapOfImgPath.size();
4403 #ifdef VISP_HAVE_MODULE_IO
4404 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4410 std::string path = it->second;
4411 int length = (int)path.length();
4414 for (
int cpt = 0; cpt < length; cpt++) {
4415 file.write((
char *)(&path[(
size_t)cpt]),
sizeof(path[(
size_t)cpt]));
4421 int have3DInfoInt = have3DInfo ? 1 : 0;
4424 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4425 int descriptorType = m_trainDescriptors.type();
4436 for (
int i = 0; i < nRows; i++) {
4437 unsigned int i_ = (
unsigned int)i;
4439 float u = m_trainKeyPoints[i_].pt.x;
4443 float v = m_trainKeyPoints[i_].pt.y;
4447 float size = m_trainKeyPoints[i_].size;
4451 float angle = m_trainKeyPoints[i_].angle;
4455 float response = m_trainKeyPoints[i_].response;
4459 int octave = m_trainKeyPoints[i_].octave;
4463 int class_id = m_trainKeyPoints[i_].class_id;
4467 #ifdef VISP_HAVE_MODULE_IO
4468 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4469 int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
4478 float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
4489 for (
int j = 0; j < nCols; j++) {
4491 switch (descriptorType) {
4493 file.write((
char *)(&m_trainDescriptors.at<
unsigned char>(i, j)),
4494 sizeof(m_trainDescriptors.at<
unsigned char>(i, j)));
4498 file.write((
char *)(&m_trainDescriptors.at<
char>(i, j)),
sizeof(m_trainDescriptors.at<
char>(i, j)));
4530 pugi::xml_document doc;
4531 pugi::xml_node node = doc.append_child(pugi::node_declaration);
4532 node.append_attribute(
"version") =
"1.0";
4533 node.append_attribute(
"encoding") =
"UTF-8";
4539 pugi::xml_node root_node = doc.append_child(
"LearningData");
4542 pugi::xml_node image_node = root_node.append_child(
"TrainingImageInfo");
4544 #ifdef VISP_HAVE_MODULE_IO
4545 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4546 pugi::xml_node image_info_node = image_node.append_child(
"trainImg");
4547 image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
4548 std::stringstream ss;
4550 image_info_node.append_attribute(
"image_id") = ss.str().c_str();
4555 pugi::xml_node descriptors_info_node = root_node.append_child(
"DescriptorsInfo");
4557 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4558 int descriptorType = m_trainDescriptors.type();
4561 descriptors_info_node.append_child(
"nrows").append_child(pugi::node_pcdata).text() = nRows;
4564 descriptors_info_node.append_child(
"ncols").append_child(pugi::node_pcdata).text() = nCols;
4567 descriptors_info_node.append_child(
"type").append_child(pugi::node_pcdata).text() = descriptorType;
4569 for (
int i = 0; i < nRows; i++) {
4570 unsigned int i_ = (
unsigned int)i;
4571 pugi::xml_node descriptor_node = root_node.append_child(
"DescriptorInfo");
4573 descriptor_node.append_child(
"u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
4574 descriptor_node.append_child(
"v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
4575 descriptor_node.append_child(
"size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
4576 descriptor_node.append_child(
"angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
4577 descriptor_node.append_child(
"response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
4578 descriptor_node.append_child(
"octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
4579 descriptor_node.append_child(
"class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
4581 #ifdef VISP_HAVE_MODULE_IO
4582 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4583 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() =
4584 ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
4586 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() = -1;
4590 descriptor_node.append_child(
"oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
4591 descriptor_node.append_child(
"oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
4592 descriptor_node.append_child(
"oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
4595 pugi::xml_node desc_node = descriptor_node.append_child(
"desc");
4597 for (
int j = 0; j < nCols; j++) {
4598 switch (descriptorType) {
4604 int val_tmp = m_trainDescriptors.at<
unsigned char>(i, j);
4605 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
4613 int val_tmp = m_trainDescriptors.at<
char>(i, j);
4614 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
4618 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4619 m_trainDescriptors.at<
unsigned short int>(i, j);
4623 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
short int>(i, j);
4627 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
int>(i, j);
4631 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
float>(i, j);
4635 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<
double>(i, j);
4645 doc.save_file(filename.c_str(), PUGIXML_TEXT(
" "), pugi::format_default, pugi::encoding_utf8);
4649 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
4650 #ifndef DOXYGEN_SHOULD_SKIP_THIS
// Unary functor: true when a keypoint's detector response meets or exceeds
// the threshold captured at construction. Used by retainBest() below with
// std::partition to keep keypoints that tie the n-th strongest response.
// NOTE(review): excerpt is truncated — the `float value;` member and the
// closing `};` are not visible here (original lines ~4655-4656); confirm
// against the full file.
4652 struct KeypointResponseGreaterThanThreshold {
4653 KeypointResponseGreaterThanThreshold(
float _value) : value(_value) {}
4654 inline bool operator()(
const cv::KeyPoint &kpt)
const {
return kpt.response >= value; }
// Binary comparator: orders keypoints by descending detector response.
// Passed to std::nth_element in retainBest() so the strongest keypoints
// end up at the front of the vector.
// NOTE(review): closing `};` of this struct is outside the visible excerpt.
4658 struct KeypointResponseGreater {
4659 inline bool operator()(
const cv::KeyPoint &kp1,
const cv::KeyPoint &kp2)
const {
return kp1.response > kp2.response; }
// Keep only the n_points keypoints with the strongest detector response.
// std::nth_element places the n strongest (per KeypointResponseGreater) at
// the front in average O(n); std::partition then additionally retains any
// trailing keypoint whose response equals the n-th ("ambiguous") response,
// so slightly more than n_points elements may survive.
// NOTE(review): excerpt is missing interior lines (e.g. the body of the
// `n_points == 0` branch at original line ~4668, and the final `}`s) —
// verify against the full source.
4663 void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints,
int n_points)
4667 if (n_points >= 0 && keypoints.size() > (
size_t)n_points) {
4668 if (n_points == 0) {
// Partial-sort: strongest n_points responses first, rest unordered.
4674 std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
// Response of the weakest keypoint we intend to keep.
4676 float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
// Also keep keypoints past position n_points that tie that response.
4679 std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4680 keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
4683 keypoints.resize((
size_t)(new_end - keypoints.begin()));
// Predicate for std::remove_if: true when the keypoint lies OUTSIDE the
// stored rectangle (note the negation of r.contains), i.e. such keypoints
// are the ones to be erased by runByImageBorder() below.
// NOTE(review): the `cv::Rect r;` member and closing `};` are outside the
// visible excerpt.
4687 struct RoiPredicate {
4688 RoiPredicate(
const cv::Rect &_r) : r(_r) {}
4690 bool operator()(
const cv::KeyPoint &keyPt)
const {
return !r.contains(keyPt.pt); }
// Erase keypoints lying within borderSize pixels of the image border,
// using RoiPredicate over the inner rectangle
// [borderSize, width-borderSize) x [borderSize, height-borderSize).
// NOTE(review): excerpt truncated — the borderSize parameter declaration,
// the body of the "image smaller than twice the border" guard (~line 4700,
// presumably clears the vector and returns — confirm), and the trailing
// `keypoints.end());` of the erase call are not visible here.
4695 void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4698 if (borderSize > 0) {
4699 if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4702 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4703 RoiPredicate(cv::Rect(
4704 cv::Point(borderSize, borderSize),
4705 cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
// Predicate for std::remove_if: true when a keypoint's size falls outside
// the closed interval [minSize, maxSize]; such keypoints are erased by
// runByKeypointSize() below.
// NOTE(review): the closing `};` of this struct is outside the visible
// excerpt.
4710 struct SizePredicate {
4711 SizePredicate(
float _minSize,
float _maxSize) : minSize(_minSize), maxSize(_maxSize) {}
4713 bool operator()(
const cv::KeyPoint &keyPt)
const
4715 float size = keyPt.size;
4716 return (size < minSize) || (size > maxSize);
4719 float minSize, maxSize;
// Erase keypoints whose size is outside [minSize, maxSize].
// Preconditions (checked via CV_Assert): both bounds non-negative and
// minSize <= maxSize.
// NOTE(review): the closing `}` of this function is outside the visible
// excerpt.
4722 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints,
float minSize,
float maxSize)
4724 CV_Assert(minSize >= 0);
4725 CV_Assert(maxSize >= 0);
4726 CV_Assert(minSize <= maxSize);
4728 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4734 MaskPredicate(
const cv::Mat &_mask) : mask(_mask) {}
4735 bool operator()(
const cv::KeyPoint &key_pt)
const
4737 return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
// Erase keypoints whose (rounded) pixel location is zero in the given mask
// (see MaskPredicate above: mask.at<uchar>(y+0.5, x+0.5) == 0 selects the
// keypoints to remove).
// NOTE(review): the function's opening/closing braces (original lines
// ~4745-4750) are outside the visible excerpt.
4744 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints,
const cv::Mat &mask)
4749 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4752 struct KeyPoint_LessThan {
4753 KeyPoint_LessThan(
const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) {}
4754 bool operator()(
size_t i,
size_t j)
const
4756 const cv::KeyPoint &kp1 = (*kp)[ i];
4757 const cv::KeyPoint &kp2 = (*kp)[ j];
4759 std::numeric_limits<float>::epsilon())) {
4761 return kp1.pt.x < kp2.pt.x;
4765 std::numeric_limits<float>::epsilon())) {
4767 return kp1.pt.y < kp2.pt.y;
4771 std::numeric_limits<float>::epsilon())) {
4773 return kp1.size > kp2.size;
4777 std::numeric_limits<float>::epsilon())) {
4779 return kp1.angle < kp2.angle;
4783 std::numeric_limits<float>::epsilon())) {
4785 return kp1.response > kp2.response;
4788 if (kp1.octave != kp2.octave) {
4789 return kp1.octave > kp2.octave;
4792 if (kp1.class_id != kp2.class_id) {
4793 return kp1.class_id > kp2.class_id;
4798 const std::vector<cv::KeyPoint> *kp;
4801 void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4803 size_t i, j, n = keypoints.size();
4804 std::vector<size_t> kpidx(n);
4805 std::vector<uchar> mask(n, (uchar)1);
4807 for (i = 0; i < n; i++) {
4810 std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4811 for (i = 1, j = 0; i < n; i++) {
4812 cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4813 cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4816 if (!
vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4817 !
vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4818 !
vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4819 !
vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4826 for (i = j = 0; i < n; i++) {
4829 keypoints[j] = keypoints[i];
4834 keypoints.resize(j);
4840 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(
const cv::Ptr<cv::FeatureDetector> &_detector,
4842 : detector(_detector), maxLevel(_maxLevel)
4846 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty()
const
4848 return detector.empty() || (cv::FeatureDetector *)detector->empty();
4851 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4852 CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4854 detectImpl(image.getMat(), keypoints, mask.getMat());
4857 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(
const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4858 const cv::Mat &mask)
const
4860 cv::Mat src = image;
4861 cv::Mat src_mask = mask;
4863 cv::Mat dilated_mask;
4864 if (!mask.empty()) {
4865 cv::dilate(mask, dilated_mask, cv::Mat());
4866 cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4867 mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4868 dilated_mask = mask255;
4871 for (
int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2) {
4873 std::vector<cv::KeyPoint> new_pts;
4874 detector->detect(src, new_pts, src_mask);
4875 std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4876 for (; it != end; ++it) {
4877 it->pt.x *= multiplier;
4878 it->pt.y *= multiplier;
4879 it->size *= multiplier;
4882 keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4891 resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
4896 vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4901 #elif !defined(VISP_BUILD_SHARED_LIBS)
// Work around "translation unit has no symbols" archiver warnings when
// OpenCV is not available and ViSP is built as a static library.
// (The stray ';' after the body was an extra empty declaration and is removed.)
void dummy_vpKeyPoint() { }
bool _reference_computed
Flag indicating whether the reference has been built.
std::vector< vpImagePoint > currentImagePointsList
std::vector< unsigned int > matchedReferencePoints
std::vector< vpImagePoint > referenceImagePointsList
Generic class defining intrinsic camera parameters.
Implementation of column vector and the associated operations.
Class to define RGB colors available for display functionalities.
static const vpColor none
static const vpColor green
static void convertFromOpenCV(const cv::KeyPoint &from, vpImagePoint &to)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
static void displayCircle(const vpImage< unsigned char > &I, const vpImagePoint ¢er, unsigned int radius, const vpColor &color, bool fill=false, unsigned int thickness=1)
Error that can be emitted by ViSP classes.
@ badValue
Used to indicate that a value is not in the allowed range.
const char * what() const
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpHomogeneousMatrix inverse() const
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
static void write(const vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
void set_ij(double ii, double jj)
unsigned int getWidth() const
void insert(const vpImage< Type > &src, const vpImagePoint &topLeft)
unsigned int getHeight() const
unsigned int matchPoint(const vpImage< unsigned char > &I)
void getTrainKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
void initMatcher(const std::string &matcherName)
bool computePose(const std::vector< cv::Point2f > &imagePoints, const std::vector< cv::Point3f > &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector< int > &inlierIndex, double &elapsedTime, bool(*func)(const vpHomogeneousMatrix &)=NULL)
void display(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, unsigned int size=3)
void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector< cv::DMatch > &matches, double &elapsedTime)
void detectExtractAffine(const vpImage< unsigned char > &I, std::vector< std::vector< cv::KeyPoint > > &listOfKeypoints, std::vector< cv::Mat > &listOfDescriptors, std::vector< vpImage< unsigned char > > *listOfAffineI=NULL)
static void compute3D(const cv::KeyPoint &candidate, const std::vector< vpPoint > &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
void createImageMatching(vpImage< unsigned char > &IRef, vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
void loadLearningData(const std::string &filename, bool binaryMode=false, bool append=false)
void extract(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, cv::Mat &descriptors, std::vector< cv::Point3f > *trainPoints=NULL)
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpCylinder > &cylinders, const std::vector< std::vector< std::vector< vpImagePoint > > > &vectorOfCylinderRois, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
void getQueryKeyPoints(std::vector< cv::KeyPoint > &keyPoints, bool matches=true) const
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
bool matchPointAndDetect(const vpImage< unsigned char > &I, vpRect &boundingBox, vpImagePoint ¢erOfGravity, const bool isPlanarObject=true, std::vector< vpImagePoint > *imPts1=NULL, std::vector< vpImagePoint > *imPts2=NULL, double *meanDescriptorDistance=NULL, double *detectionScore=NULL, const vpRect &rectangle=vpRect())
void saveLearningData(const std::string &filename, bool binaryMode=false, bool saveTrainingImages=true)
vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType, const std::string &matcherName, const vpFilterMatchingType &filterType=ratioDistanceThreshold)
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
void displayMatching(const vpImage< unsigned char > &IRef, vpImage< unsigned char > &IMatching, unsigned int crossSize, unsigned int lineThickness=1, const vpColor &color=vpColor::green)
unsigned int buildReference(const vpImage< unsigned char > &I)
void loadConfigFile(const std::string &configFile)
void getTrainPoints(std::vector< cv::Point3f > &points) const
void getObjectPoints(std::vector< cv::Point3f > &objectPoints) const
void insertImageMatching(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
static bool isNaN(double value)
static bool equal(double x, double y, double s=0.001)
static int round(double x)
Implementation of a matrix and operations on matrices.
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
This class defines the container for a plane geometrical structure.
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
double get_oX() const
Get the point oX coordinate in the object frame.
void set_x(double x)
Set the point x coordinate in the image plane.
double get_y() const
Get the point y coordinate in the image plane.
double get_oZ() const
Get the point oZ coordinate in the object frame.
void set_oY(double oY)
Set the point oY coordinate in the object frame.
double get_x() const
Get the point x coordinate in the image plane.
void set_oZ(double oZ)
Set the point oZ coordinate in the object frame.
void set_oX(double oX)
Set the point oX coordinate in the object frame.
double get_oY() const
Get the point oY coordinate in the object frame.
void setWorldCoordinates(double oX, double oY, double oZ)
void set_y(double y)
Set the point y coordinate in the image plane.
Defines a generic 2D polygon.
vpRect getBoundingBox() const
bool isInside(const vpImagePoint &iP, const PointInPolygonMethod &method=PnPolyRayCasting) const
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
void setRansacMaxTrials(const int &rM)
void addPoint(const vpPoint &P)
void setRansacNbInliersToReachConsensus(const unsigned int &nbC)
vpMatrix getCovarianceMatrix() const
void setCovarianceComputation(const bool &flag)
std::vector< unsigned int > getRansacInlierIndex() const
void setRansacFilterFlag(const RANSAC_FILTER_FLAGS &flag)
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, bool(*func)(const vpHomogeneousMatrix &)=NULL)
void setUseParallelRansac(bool use)
std::vector< vpPoint > getRansacInliers() const
void setNbParallelRansacThreads(int nb)
void setRansacThreshold(const double &t)
Defines a rectangle in the plane.
Implementation of a rotation vector as axis-angle minimal representation.
Class that consider the case of a translation vector.
std::string getDetectorName() const
double getMatchingRatioThreshold() const
std::string getExtractorName() const
void parse(const std::string &filename)
double getRansacReprojectionError() const
bool getUseRansacVVSPoseEstimation() const
double getMatchingFactorThreshold() const
int getNbRansacMinInlierCount() const
bool getUseRansacConsensusPercentage() const
std::string getMatcherName() const
int getNbRansacIterations() const
double getRansacConsensusPercentage() const
@ stdAndRatioDistanceThreshold
@ constantFactorDistanceThreshold
vpMatchingMethodEnum getMatchingMethod() const
double getRansacThreshold() const
VISP_EXPORT double measureTimeMs()