// NOTE(review): this chunk is an extraction of vpKeyPoint.cpp in which the
// original line numbers were fused into the text and many source lines were
// dropped. Code is left byte-identical below; comments only annotate it.
42 #include <visp3/core/vpIoTools.h> 43 #include <visp3/vision/vpKeyPoint.h> 45 #if (VISP_HAVE_OPENCV_VERSION >= 0x020101) 47 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 48 #include <opencv2/calib3d/calib3d.hpp> 54 inline cv::DMatch knnToDMatch(
// Helper: presumably collapses a k-NN match list to a single cv::DMatch
// (its body is truncated here — only the non-empty guard survives; confirm
// against the full file).
const std::vector<cv::DMatch> &knnMatches)
56 if (knnMatches.size() > 0) {
// Helper: presumably converts a RANSAC (keypoint, 3D point) pair into a
// vpImagePoint — body missing in this extraction.
63 inline vpImagePoint matchRansacToVpImage(
const std::pair<cv::KeyPoint, cv::Point3f> &pair)
// Member-initializer list of a vpKeyPoint constructor (the signature line is
// missing from this extraction). Judging by the trailing pushes of
// m_mapOfDetectorNames[detectorType] / m_mapOfDescriptorNames[descriptorType],
// this is the overload taking detector/descriptor enum types.
// Defaults established here: detection score 0.15, detection threshold 100.0,
// matching factor 2.0, matching ratio 0.85, 200 RANSAC iterations, 100 min
// inliers, 20% consensus, reprojection error 6.0, RANSAC threshold 0.01.
81 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
82 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
83 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
84 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
85 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
86 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
87 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
88 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
89 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
90 m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
91 m_useAffineDetection(false),
// Brute-force cross-check member only exists for OpenCV [2.4, 3.0).
92 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
93 m_useBruteForceCrossCheck(true),
95 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
96 m_useSingleMatchFilter(true)
// Body: registers the single detector/extractor name resolved from the enums.
100 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
101 m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);
// Member-initializer list of a second vpKeyPoint constructor (signature line
// missing). Judging by the trailing pushes of detectorName / extractorName,
// this is the overload taking a single detector name and extractor name as
// strings. The default member values mirror the enum-based constructor above.
117 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(
detectionScore),
118 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
119 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
120 m_imageFormat(
jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
121 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
122 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
123 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
124 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
125 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
126 m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
127 m_useAffineDetection(false),
// Brute-force cross-check member only exists for OpenCV [2.4, 3.0).
128 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
129 m_useBruteForceCrossCheck(true),
131 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
132 m_useSingleMatchFilter(true)
// Body: registers the caller-supplied detector and extractor names.
136 m_detectorNames.push_back(detectorName);
137 m_extractorNames.push_back(extractorName);
// Member-initializer list of a third vpKeyPoint constructor (signature line
// missing). This overload initializes m_detectorNames/m_extractorNames
// directly from caller-supplied vectors of names, so no push_back body is
// needed. Default member values mirror the other two constructors.
153 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(
detectionScore),
154 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
155 m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
156 m_filterType(filterType), m_imageFormat(
jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
157 m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
158 m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
159 m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
160 m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(),
161 m_ransacOutliers(), m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
162 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
// Brute-force cross-check member only exists for OpenCV [2.4, 3.0).
163 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
164 m_useBruteForceCrossCheck(true),
166 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
167 m_useSingleMatchFilter(true)
// Simulates an affine viewpoint change of the image (ASIFT-style): applies an
// in-plane rotation `phi` (degrees or radians — units not visible here,
// confirm against header docs) and an anisotropic tilt `tilt` to `img`,
// producing the matching validity `mask` and the inverse affine transform `Ai`
// so detected keypoints can be mapped back to the original image frame.
// Several lines (h/w setup, closing braces) are missing from this extraction.
181 void vpKeyPoint::affineSkew(
double tilt,
double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
// Start with a fully-valid mask and an identity 2x3 affine matrix.
186 mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));
188 cv::Mat A = cv::Mat::eye(2, 3, CV_32F);
// Rotation step: only performed when phi is non-negligible.
191 if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
196 A = (cv::Mat_<float>(2, 2) << c, -s, s, c);
// Rotate the four image corners to find the bounding box of the rotated
// image, then translate the transform so the result starts at (0, 0).
198 cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
199 cv::Mat tcorners = corners * A.t();
200 cv::Mat tcorners_x, tcorners_y;
201 tcorners.col(0).copyTo(tcorners_x);
202 tcorners.col(1).copyTo(tcorners_y);
203 std::vector<cv::Mat> channels;
204 channels.push_back(tcorners_x);
205 channels.push_back(tcorners_y);
206 cv::merge(channels, tcorners);
208 cv::Rect rect = cv::boundingRect(tcorners);
209 A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
211 cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
// Tilt step: anti-alias along x with a Gaussian blur whose sigma depends on
// the tilt, then shrink the x axis by 1/tilt (nearest-neighbor resize).
214 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
215 double s = 0.8 * sqrt(tilt * tilt - 1);
216 cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
217 cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
218 A.row(0) = A.row(0) / tilt;
// If any warp was applied, warp the mask the same way and compute the
// inverse transform for mapping keypoints back.
221 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
222 std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
225 cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
227 cv::invertAffineTransform(A, Ai);
// buildReference overload (signature partially missing): builds the reference
// (training) database from image I. Resets previous training state, detects
// and extracts keypoints/descriptors (optionally over multiple affine-skewed
// views when m_useAffineDetection is set), records the image-id bookkeeping,
// feeds the matcher, and returns the number of reference keypoints.
const unsigned int height,
const unsigned int width)
// Reset all previously-learned reference data; image ids restart at 1.
265 m_trainPoints.clear();
266 m_mapOfImageId.clear();
267 m_mapOfImages.clear();
268 m_currentImageId = 1;
270 if (m_useAffineDetection) {
271 std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
272 std::vector<cv::Mat> listOfTrainDescriptors;
// Flatten the per-view keypoint lists into m_trainKeyPoints.
278 m_trainKeyPoints.clear();
279 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
280 it != listOfTrainKeyPoints.end(); ++it) {
281 m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
// Concatenate per-view descriptor matrices: first matrix is copied, the
// rest are appended row-wise (branch structure truncated here).
285 for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
289 it->copyTo(m_trainDescriptors);
291 m_trainDescriptors.push_back(*it);
// Non-affine path: a single detect + extract pass on I.
295 detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
296 extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
// Map every training keypoint's class_id to the current image id and store
// the image itself (used later for the matching-display mosaic).
301 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
302 m_mapOfImageId[it->class_id] = m_currentImageId;
306 m_mapOfImages[m_currentImageId] = I;
// Hand the training descriptors to the descriptor matcher.
315 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
317 return static_cast<unsigned int>(m_trainKeyPoints.size());
// buildReference overload taking pre-detected keypoints and their 3D object
// coordinates (signature partially missing). Extracts descriptors, and when
// the extractor drops keypoints, prunes points3f to keep it aligned with the
// surviving keypoints before delegating to the descriptor-based overload.
332 std::vector<cv::Point3f> &points3f,
const bool append,
const int class_id)
334 cv::Mat trainDescriptors;
// Keep a copy: extract() may remove keypoints it cannot describe.
336 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
338 extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
340 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
// Rebuild the keypoint -> original-index association via a hash of each
// keypoint (myKeypointHash), then keep only the 3D points whose keypoint
// survived extraction, preserving order.
344 std::map<size_t, size_t> mapOfKeypointHashes;
346 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
348 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
351 std::vector<cv::Point3f> trainPoints_tmp;
352 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
353 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
354 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
359 points3f = trainPoints_tmp;
// Delegate to the overload that takes the computed descriptors.
362 buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id);
// buildReference overload taking keypoints, descriptors and 3D points
// (signature partially missing). When `append` is false (branch structure
// truncated here) all training state is reset; otherwise new data is appended.
// An explicit class_id, if given, is stamped onto every keypoint.
378 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
379 const bool append,
const int class_id)
// Reset path: clear ids, images, keypoints and 3D points.
382 m_currentImageId = 0;
383 m_mapOfImageId.clear();
384 m_mapOfImages.clear();
385 this->m_trainKeyPoints.clear();
386 this->m_trainPoints.clear();
// Work on a copy so the caller's keypoints keep their original class_id.
391 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
393 if (class_id != -1) {
394 for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
395 it->class_id = class_id;
// Record class_id -> image id mapping and keep the training image.
401 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
403 m_mapOfImageId[it->class_id] = m_currentImageId;
407 m_mapOfImages[m_currentImageId] = I;
410 this->m_trainKeyPoints.insert(this->m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
// copyTo replaces the stored descriptors; push_back appends rows (the
// append/replace branch around these two lines is truncated here).
412 trainDescriptors.copyTo(this->m_trainDescriptors);
414 this->m_trainDescriptors.push_back(trainDescriptors);
416 this->m_trainPoints.insert(this->m_trainPoints.end(), points3f.begin(), points3f.end());
// Refresh the matcher with the (new) full training descriptor set.
424 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
// Fragments of two compute3D overloads (signatures and most bodies missing).
// Both appear to back-project an image point onto the plane defined by the
// first three polygon vertices (vpPlane Po) and transform the camera-frame
// point into the object frame via cMo.inverse(). The first variant outputs a
// cv::Point3f, the second presumably a vpPoint — confirm in the full file.
448 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
// Plane through the first three ROI points; (xc, yc) are normalized image
// coordinates of the keypoint (computation truncated here).
455 vpPlane Po(pts[0], pts[1], pts[2]);
456 double xc = 0.0, yc = 0.0;
467 point_obj = cMo.
inverse() * point_cam;
468 point = cv::Point3f((
float)point_obj[0], (
float)point_obj[1], (
float)point_obj[2]);
// Second overload: same plane/back-projection scheme.
490 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
497 vpPlane Po(pts[0], pts[1], pts[2]);
498 double xc = 0.0, yc = 0.0;
509 point_obj = cMo.
inverse() * point_cam;
// compute3DForPointsInPolygons (cv::KeyPoint variant, signature head missing):
// keeps only the candidate keypoints that fall inside one of the object-face
// polygons, computes their 3D object coordinates, and prunes `descriptors`
// (when provided) to match the surviving candidates.
529 std::vector<cv::KeyPoint> &candidates,
530 const std::vector<vpPolygon> &polygons,
531 const std::vector<std::vector<vpPoint> > &roisPt,
532 std::vector<cv::Point3f> &points, cv::Mat *descriptors)
// Snapshot candidates; `candidates` is rebuilt with only the kept ones.
535 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
// Pair each candidate with its original index so the matching descriptor
// row can be recovered after filtering.
542 std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
543 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
544 pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
548 std::vector<vpPolygon> polygons_tmp = polygons;
549 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
550 std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
552 while (it2 != pairOfCandidatesToCheck.end()) {
// Note the (i, j) = (y, x) coordinate swap between OpenCV and ViSP.
553 imPt.
set_ij(it2->first.pt.y, it2->first.pt.x);
554 if (it1->isInside(imPt)) {
555 candidates.push_back(it2->first);
557 points.push_back(pt);
559 if (descriptors != NULL) {
560 desc.push_back(descriptors->row((
int)it2->second));
// Erase accepted candidates so each keypoint is assigned to at most
// one polygon.
564 it2 = pairOfCandidatesToCheck.erase(it2);
// Replace the caller's descriptors with the filtered set.
571 if (descriptors != NULL) {
572 desc.copyTo(*descriptors);
// compute3DForPointsInPolygons (vpImagePoint variant, signature head missing):
// same polygon-filtering scheme as the cv::KeyPoint overload, but candidates
// are vpImagePoint and the 3D output is a vector of vpPoint.
593 std::vector<vpImagePoint> &candidates,
594 const std::vector<vpPolygon> &polygons,
595 const std::vector<std::vector<vpPoint> > &roisPt,
596 std::vector<vpPoint> &points, cv::Mat *descriptors)
599 std::vector<vpImagePoint> candidatesToCheck = candidates;
// Pair each candidate with its original index to track descriptor rows.
605 std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
606 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
607 pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
611 std::vector<vpPolygon> polygons_tmp = polygons;
612 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
613 std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
615 while (it2 != pairOfCandidatesToCheck.end()) {
616 if (it1->isInside(it2->first)) {
617 candidates.push_back(it2->first);
619 points.push_back(pt);
621 if (descriptors != NULL) {
622 desc.push_back(descriptors->row((
int)it2->second));
// Remove accepted candidates: one polygon assignment per point.
626 it2 = pairOfCandidatesToCheck.erase(it2);
// compute3DForPointsOnCylinders (cv::KeyPoint variant, signature head
// missing): for every candidate keypoint lying on one of the cylinder ROIs,
// computes the cylinder depth Z at that pixel, back-projects the point into
// the camera frame and transforms it into the object frame; keeps the
// matching descriptor rows when `descriptors` is provided.
651 const std::vector<vpCylinder> &cylinders,
652 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
653 cv::Mat *descriptors)
655 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
// Iterate candidates x cylinders x each cylinder's ROI polygons.
661 size_t cpt_keypoint = 0;
662 for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
663 ++it1, cpt_keypoint++) {
664 size_t cpt_cylinder = 0;
667 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
668 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
671 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
673 candidates.push_back(*it1);
// (xm, ym): normalized image coordinates of the keypoint
// (pixel-to-meter conversion truncated in this extraction).
677 double xm = 0.0, ym = 0.0;
679 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
// Guard against invalid depth (NaN or non-positive Z).
681 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
683 point_cam[0] = xm * Z;
684 point_cam[1] = ym * Z;
688 point_obj = cMo.
inverse() * point_cam;
691 points.push_back(cv::Point3f((
float)pt.
get_oX(), (float)pt.
get_oY(), (float)pt.
get_oZ()));
693 if (descriptors != NULL) {
694 desc.push_back(descriptors->row((
int)cpt_keypoint));
// Replace the caller's descriptors with the filtered set.
704 if (descriptors != NULL) {
705 desc.copyTo(*descriptors);
// compute3DForPointsOnCylinders (vpImagePoint variant, signature head
// missing): identical depth/back-projection scheme to the cv::KeyPoint
// overload, but candidates are vpImagePoint and output points are vpPoint.
726 const std::vector<vpCylinder> &cylinders,
727 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
728 cv::Mat *descriptors)
730 std::vector<vpImagePoint> candidatesToCheck = candidates;
736 size_t cpt_keypoint = 0;
737 for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
738 ++it1, cpt_keypoint++) {
739 size_t cpt_cylinder = 0;
742 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
743 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
746 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
748 candidates.push_back(*it1);
752 double xm = 0.0, ym = 0.0;
754 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
// Skip invalid depths (NaN or non-positive).
756 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
758 point_cam[0] = xm * Z;
759 point_cam[1] = ym * Z;
763 point_obj = cMo.
inverse() * point_cam;
766 points.push_back(pt);
768 if (descriptors != NULL) {
769 desc.push_back(descriptors->row((
int)cpt_keypoint));
779 if (descriptors != NULL) {
780 desc.copyTo(*descriptors);
// computePose via cv::solvePnPRansac (signature head missing): validates the
// 2D/3D correspondences, builds the OpenCV camera matrix and (zero)
// distortion, runs RANSAC PnP, and converts rvec/tvec into a ViSP pose.
// Requires at least 4 correspondences with matching list lengths.
803 if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
805 std::cerr <<
"Not enough points to compute the pose (at least 4 points " 812 cv::Mat cameraMatrix =
// Distortion is assumed already corrected: zero coefficients.
822 cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);
// OpenCV >= 3.0 API: confidence-based RANSAC with SOLVEPNP_ITERATIVE.
825 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 827 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
828 (
float)m_ransacReprojectionError,
831 inlierIndex, cv::SOLVEPNP_ITERATIVE);
// Pre-3.0 API: min-inlier-count based; optionally derived from the
// configured consensus percentage of the filtered query keypoints.
851 int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
852 if (m_useConsensusPercentage) {
853 nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
856 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
857 (
float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
// OpenCV failures are reported on stderr (return path truncated here).
859 }
catch (cv::Exception &e) {
860 std::cerr << e.what() << std::endl;
// Convert OpenCV rvec (theta-u) / tvec into ViSP pose representation.
864 vpTranslationVector translationVec(tvec.at<
double>(0), tvec.at<
double>(1), tvec.at<
double>(2));
865 vpThetaUVector thetaUVector(rvec.at<
double>(0), rvec.at<
double>(1), rvec.at<
double>(2));
// Two computePose fragments using the ViSP RANSAC pose estimator.
// First: thin convenience overload that forwards to the full overload with a
// local inlier-index vector.
894 std::vector<vpPoint> &inliers,
double &elapsedTime,
bool (*func)(
const vpHomogeneousMatrix &))
896 std::vector<unsigned int> inlierIndex;
897 return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
// Full overload (signature head missing): needs at least 4 vpPoint
// correspondences; configures vpPose RANSAC (inlier count possibly derived
// from the consensus percentage), optionally computes the covariance matrix,
// and returns whether the RANSAC estimation succeeded.
914 std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
double &elapsedTime,
919 if (objectVpPoints.size() < 4) {
929 for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
933 unsigned int nbInlierToReachConsensus = (
unsigned int)m_nbRansacMinInlierCount;
934 if (m_useConsensusPercentage) {
935 nbInlierToReachConsensus =
936 (
unsigned int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
946 bool isRansacPoseEstimationOk =
false;
953 if (m_computeCovariance) {
// Exception path: report and fall through (details truncated here).
957 std::cerr <<
"e=" << e.
what() << std::endl;
975 return isRansacPoseEstimationOk;
// Returns the mean 2D reprojection error (in pixels) of the matched keypoints
// for a given pose: each 3D match point is projected into the image
// (projection lines truncated here) and compared to its detected keypoint.
// Empty input is handled by the truncated early-return at line 994.
991 double vpKeyPoint::computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
994 if (matchKeyPoints.size() == 0) {
1000 std::vector<double> errors(matchKeyPoints.size());
1003 for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
1004 it != matchKeyPoints.end(); ++it, cpt++) {
// (u, v): projected pixel coordinates of the 3D point under the pose.
1009 double u = 0.0, v = 0.0;
1011 errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
// Mean of the per-match Euclidean pixel errors.
1014 return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
// createImageMatching fragment (signature missing): sizes the side-by-side
// matching canvas as a near-square grid holding all training images plus the
// current image, using the maximum training-image dimensions as cell size.
1048 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
// Nothing to lay out without training images.
1050 if (m_mapOfImages.empty()) {
1051 std::cerr <<
"There is no training image loaded !" << std::endl;
// Grid is sqrt(nbImg) x sqrt(nbImg), grown by one column if that square
// cannot hold all images.
1061 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
1064 unsigned int nbWidth = nbImgSqrt;
1066 unsigned int nbHeight = nbImgSqrt;
1069 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Cell size = max dimensions over current + all training images.
1073 unsigned int maxW = ICurrent.
getWidth();
1074 unsigned int maxH = ICurrent.
getHeight();
1075 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1077 if (maxW < it->second.getWidth()) {
1078 maxW = it->second.getWidth();
1081 if (maxH < it->second.getHeight()) {
1082 maxH = it->second.getHeight();
// detect() overload chain fragments. The convenience overloads forward to the
// full overload; the full overload runs every configured detector and
// concatenates the results.
1100 detect(I, keyPoints, elapsedTime, rectangle);
// cv::Mat + mask convenience overload: discards the elapsed time.
1111 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
const cv::Mat &mask)
1114 detect(matImg, keyPoints, elapsedTime, mask);
// Rectangle overload fragment: builds a binary mask that is 255 inside the
// given rectangle (or everywhere when no rectangle is used — branch
// structure truncated here), then forwards.
1130 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1133 cv::Point leftTop((
int)rectangle.
getLeft(), (int)rectangle.
getTop()),
1135 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1137 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1140 detect(matImg, keyPoints, elapsedTime, mask);
// Full overload: run each registered detector and append its keypoints.
1152 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
1153 const cv::Mat &mask)
1158 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
1159 it != m_detectors.end(); ++it) {
1160 std::vector<cv::KeyPoint> kp;
1161 it->second->detect(matImg, kp, mask);
1162 keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
// display/displayMatching fragments (signatures/bodies largely missing).
// First two fragments iterate m_filteredMatches to draw matched keypoints.
1177 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1179 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1182 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1197 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1200 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
// displayMatching(IRef, ...) fragment: draws a line per filtered match from
// the train keypoint (left, IRef) to the query keypoint shifted right by
// IRef's width; when no fixed color is given, each line gets a random color
// (rand() seeded with the current time).
1217 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
1220 srand((
unsigned int)time(NULL));
1223 std::vector<vpImagePoint> queryImageKeyPoints;
1225 std::vector<vpImagePoint> trainImageKeyPoints;
1229 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1231 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1234 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1235 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1236 queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.
getWidth());
// displayMatching (training-mosaic variant, signature head missing): lays out
// all training images in a near-square grid with the current image in the
// median cell, draws training keypoints, query keypoints, RANSAC
// inliers/outliers, and a line per filtered match from the current image to
// the training image that owns the matched train keypoint.
1255 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1256 unsigned int lineThickness)
// Cannot draw anything without the training bookkeeping.
1258 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1260 std::cerr <<
"There is no training image loaded !" << std::endl;
// Grid sizing: sqrt-based square, grown if it cannot hold all images.
1266 int nbImg = (int)(m_mapOfImages.size() + 1);
1274 int nbWidth = nbImgSqrt;
1275 int nbHeight = nbImgSqrt;
1277 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Map image id -> grid index, and compute the max cell dimensions.
1281 std::map<int, int> mapOfImageIdIndex;
1284 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1286 mapOfImageIdIndex[it->first] = cpt;
1288 if (maxW < it->second.getWidth()) {
1289 maxW = it->second.getWidth();
1292 if (maxH < it->second.getHeight()) {
1293 maxH = it->second.getHeight();
// The current image occupies the median cell; training images before it
// keep their index, those after shift by one.
1299 int medianI = nbHeight / 2;
1300 int medianJ = nbWidth / 2;
1301 int medianIndex = medianI * nbWidth + medianJ;
1302 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1304 int current_class_id_index = 0;
1305 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1306 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1310 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
// Top-left corner of the cell holding this training keypoint.
1313 int indexI = current_class_id_index / nbWidth;
1314 int indexJ = current_class_id_index - (indexI * nbWidth);
1315 topLeftCorner.
set_ij((
int)maxH * indexI, (
int)maxW * indexJ);
// Draw query keypoints / inliers / outliers offset into the median cell.
1322 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1323 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1328 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1333 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
// For each filtered match, resolve the grid cell of its training image
// (same shift-by-one rule) and draw a line from the query keypoint in the
// median cell to the train keypoint in its own cell.
1339 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1340 int current_class_id = 0;
1341 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1342 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1346 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1349 int indexI = current_class_id / nbWidth;
1350 int indexJ = current_class_id - (indexI * nbWidth);
1352 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1353 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1354 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1355 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
// extract() overload chain fragments. The convenience overloads forward to
// the full overload, which runs every configured extractor. Because an
// extractor may drop keypoints, the full overload re-synchronizes the
// optional trainPoints (and already-computed descriptor rows) with the
// surviving keypoints using the myKeypointHash index map.
1375 std::vector<cv::Point3f> *trainPoints)
1378 extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1392 std::vector<cv::Point3f> *trainPoints)
1395 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1409 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1413 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
// Full overload: iterate all registered descriptor extractors.
1427 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1432 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1433 itd != m_extractors.end(); ++itd) {
// First-extractor path (branch structure truncated): when 3D train points
// accompany the keypoints, they must be pruned if compute() drops some.
1437 if (trainPoints != NULL && !trainPoints->empty()) {
1440 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1443 itd->second->compute(matImg, keyPoints, descriptors);
1445 if (keyPoints.size() != keyPoints_tmp.size()) {
// Hash each original keypoint to its index, then keep only the 3D
// points whose keypoint survived, preserving order.
1449 std::map<size_t, size_t> mapOfKeypointHashes;
1451 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1453 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1456 std::vector<cv::Point3f> trainPoints_tmp;
1457 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1458 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1459 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1464 *trainPoints = trainPoints_tmp;
// No 3D points to maintain: plain compute().
1468 itd->second->compute(matImg, keyPoints, descriptors);
// Subsequent-extractor path: compute into `desc`, prune both trainPoints
// and previously-accumulated descriptor rows if keypoints were dropped.
1473 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1477 itd->second->compute(matImg, keyPoints, desc);
1479 if (keyPoints.size() != keyPoints_tmp.size()) {
1483 std::map<size_t, size_t> mapOfKeypointHashes;
1485 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1487 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1490 std::vector<cv::Point3f> trainPoints_tmp;
1491 cv::Mat descriptors_tmp;
1492 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1493 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1494 if (trainPoints != NULL && !trainPoints->empty()) {
1495 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1498 if (!descriptors.empty()) {
1499 descriptors_tmp.push_back(descriptors.row((
int)mapOfKeypointHashes[myKeypointHash(*it)]));
1504 if (trainPoints != NULL) {
1506 *trainPoints = trainPoints_tmp;
1509 descriptors_tmp.copyTo(descriptors);
// Accumulate multi-extractor descriptors side by side (hconcat).
1513 if (descriptors.empty()) {
1514 desc.copyTo(descriptors);
1516 cv::hconcat(descriptors, desc, descriptors);
// Sanity check: one descriptor row per keypoint.
1521 if (keyPoints.size() != (size_t)descriptors.rows) {
1522 std::cerr <<
"keyPoints.size() != (size_t) descriptors.rows" << std::endl;
// Filters the raw (knn or plain) matches into m_filteredMatches /
// m_objectFilteredPoints / m_queryFilteredKeyPoints according to
// m_filterType: ratio test and/or stdDistanceThreshold for knn matches,
// distance threshold for plain matches, plus an optional "single match per
// train keypoint" pass (m_useSingleMatchFilter). Branch structure between
// the knn and plain paths is truncated in this extraction.
1530 void vpKeyPoint::filterMatches()
1532 std::vector<cv::KeyPoint> queryKpts;
1533 std::vector<cv::Point3f> trainPts;
1534 std::vector<cv::DMatch> m;
// --- knn path: compute min/mean/stddev of best-match distances ---
1540 double min_dist = DBL_MAX;
1542 std::vector<double> distance_vec(m_knnMatches.size());
1545 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
1546 double dist = m_knnMatches[i][0].distance;
1548 distance_vec[i] = dist;
1550 if (dist < min_dist) {
1557 mean /= m_queryDescriptors.rows;
1560 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1561 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
// Distance threshold = best distance + one standard deviation.
1562 double threshold = min_dist + stdev;
1564 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
1565 if (m_knnMatches[i].size() >= 2) {
// Lowe-style ratio of best to second-best distance.
1568 float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
1573 double dist = m_knnMatches[i][0].distance;
// Accepted match: re-index queryIdx to the filtered list position.
1576 m.push_back(cv::DMatch((
int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
1578 if (!m_trainPoints.empty()) {
1579 trainPts.push_back(m_trainPoints[(
size_t)m_knnMatches[i][0].trainIdx]);
1581 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_knnMatches[i][0].queryIdx]);
// --- plain-match path: same min/mean/stddev statistics ---
1589 double min_dist = DBL_MAX;
1591 std::vector<double> distance_vec(m_matches.size());
1592 for (
size_t i = 0; i < m_matches.size(); i++) {
1593 double dist = m_matches[i].distance;
1595 distance_vec[i] = dist;
1597 if (dist < min_dist) {
1604 mean /= m_queryDescriptors.rows;
1606 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1607 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1617 for (
size_t i = 0; i < m_matches.size(); i++) {
1618 if (m_matches[i].distance <= threshold) {
1619 m.push_back(cv::DMatch((
int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
1621 if (!m_trainPoints.empty()) {
1622 trainPts.push_back(m_trainPoints[(
size_t)m_matches[i].trainIdx]);
1624 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_matches[i].queryIdx]);
// --- optional single-match filter: drop any train keypoint matched by
// more than one query keypoint (count occurrences, keep count == 1) ---
1629 if (m_useSingleMatchFilter) {
1632 std::vector<cv::DMatch> mTmp;
1633 std::vector<cv::Point3f> trainPtsTmp;
1634 std::vector<cv::KeyPoint> queryKptsTmp;
1636 std::map<int, int> mapOfTrainIdx;
1638 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1639 mapOfTrainIdx[it->trainIdx]++;
1643 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1644 if (mapOfTrainIdx[it->trainIdx] == 1) {
1645 mTmp.push_back(cv::DMatch((
int)queryKptsTmp.size(), it->trainIdx, it->distance));
1647 if (!m_trainPoints.empty()) {
1648 trainPtsTmp.push_back(m_trainPoints[(
size_t)it->trainIdx]);
1650 queryKptsTmp.push_back(queryKpts[(
size_t)it->queryIdx]);
1654 m_filteredMatches = mTmp;
1655 m_objectFilteredPoints = trainPtsTmp;
1656 m_queryFilteredKeyPoints = queryKptsTmp;
// No single-match filter: publish the threshold-filtered sets directly.
1658 m_filteredMatches = m;
1659 m_objectFilteredPoints = trainPts;
1660 m_queryFilteredKeyPoints = queryKpts;
1738 void vpKeyPoint::init()
1741 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000) 1743 if (!cv::initModule_nonfree()) {
1744 std::cerr <<
"Cannot init module non free, SIFT or SURF cannot be used." << std::endl;
1754 initDetectors(m_detectorNames);
1755 initExtractors(m_extractorNames);
// Create one feature detector from its textual name and store it in m_detectors.
// Names may carry a "Pyramid" prefix, which wraps the base detector in a
// PyramidAdaptedFeatureDetector. On failure an error message is assembled in
// ss_msg (the corresponding throw lines are missing from this extraction).
// NOTE(review): lossy extraction — many original lines (closing braces,
// #else/#endif, throw statements) are absent; do not infer control flow gaps.
1764 void vpKeyPoint::initDetector(
const std::string &detectorName)
// OpenCV < 3.0: the factory accepts the name directly, including the Pyramid prefix.
1766 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) 1767 m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
1769 if (m_detectors[detectorName] == NULL) {
1770 std::stringstream ss_msg;
1771 ss_msg <<
"Fail to initialize the detector: " << detectorName
1772 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV >= 3.0 path: strip an optional "Pyramid" prefix and dispatch on the base name.
1776 std::string detectorNameTmp = detectorName;
1777 std::string pyramid =
"Pyramid";
1778 std::size_t pos = detectorName.find(pyramid);
1779 bool usePyramid =
false;
1780 if (pos != std::string::npos) {
1781 detectorNameTmp = detectorName.substr(pos + pyramid.size());
1785 if (detectorNameTmp ==
"SIFT") {
1786 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 1787 cv::Ptr<cv::FeatureDetector> siftDetector = cv::xfeatures2d::SIFT::create();
1789 m_detectors[detectorNameTmp] = siftDetector;
// Pyramid wrapping is discouraged for scale-invariant detectors but still honored.
1791 std::cerr <<
"You should not use SIFT with Pyramid feature detection!" << std::endl;
1792 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1795 std::stringstream ss_msg;
1796 ss_msg <<
"Fail to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1797 <<
" was not build with xFeatures2d module.";
1800 }
else if (detectorNameTmp ==
"SURF") {
1801 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 1802 cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
1804 m_detectors[detectorNameTmp] = surfDetector;
1806 std::cerr <<
"You should not use SURF with Pyramid feature detection!" << std::endl;
1807 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
1810 std::stringstream ss_msg;
1811 ss_msg <<
"Fail to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1812 <<
" was not build with xFeatures2d module.";
1815 }
else if (detectorNameTmp ==
"FAST") {
1816 cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
1818 m_detectors[detectorNameTmp] = fastDetector;
1820 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1822 }
else if (detectorNameTmp ==
"MSER") {
// NOTE(review): the local is named fastDetector here despite holding an MSER
// instance — naming inconsistency in the original, behavior unaffected.
1823 cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
1825 m_detectors[detectorNameTmp] = fastDetector;
1827 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1829 }
else if (detectorNameTmp ==
"ORB") {
1830 cv::Ptr<cv::FeatureDetector> orbDetector = cv::ORB::create();
1832 m_detectors[detectorNameTmp] = orbDetector;
1834 std::cerr <<
"You should not use ORB with Pyramid feature detection!" << std::endl;
1835 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
1837 }
else if (detectorNameTmp ==
"BRISK") {
1838 cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
1840 m_detectors[detectorNameTmp] = briskDetector;
1842 std::cerr <<
"You should not use BRISK with Pyramid feature detection!" << std::endl;
1843 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
1845 }
else if (detectorNameTmp ==
"KAZE") {
1846 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
1848 m_detectors[detectorNameTmp] = kazeDetector;
1850 std::cerr <<
"You should not use KAZE with Pyramid feature detection!" << std::endl;
1851 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
1853 }
else if (detectorNameTmp ==
"AKAZE") {
1854 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
1856 m_detectors[detectorNameTmp] = akazeDetector;
1858 std::cerr <<
"You should not use AKAZE with Pyramid feature detection!" << std::endl;
1859 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
1861 }
else if (detectorNameTmp ==
"GFTT") {
1862 cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
1864 m_detectors[detectorNameTmp] = gfttDetector;
1866 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
1868 }
else if (detectorNameTmp ==
"SimpleBlob") {
1869 cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
1871 m_detectors[detectorNameTmp] = simpleBlobDetector;
1873 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
1875 }
else if (detectorNameTmp ==
"STAR") {
1876 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 1877 cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
1879 m_detectors[detectorNameTmp] = starDetector;
1881 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
1884 std::stringstream ss_msg;
1885 ss_msg <<
"Fail to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1886 <<
" was not build with xFeatures2d module.";
1889 }
else if (detectorNameTmp ==
"AGAST") {
1890 cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
1892 m_detectors[detectorNameTmp] = agastDetector;
1894 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
1896 }
else if (detectorNameTmp ==
"MSD") {
// MSD requires OpenCV >= 3.1 in addition to the xfeatures2d module.
1897 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) 1898 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) 1899 cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
1901 m_detectors[detectorNameTmp] = msdDetector;
1903 std::cerr <<
"You should not use MSD with Pyramid feature detection!" << std::endl;
1904 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
1907 std::stringstream ss_msg;
1908 ss_msg <<
"Fail to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1909 <<
" was not build with xFeatures2d module.";
1913 std::stringstream ss_msg;
1914 ss_msg <<
"Feature " << detectorName <<
" is not available in OpenCV version: " << std::hex
1915 << VISP_HAVE_OPENCV_VERSION <<
" (require >= OpenCV 3.1).";
// Unknown name: report and fall through to the initialization check below.
1918 std::cerr <<
"The detector:" << detectorNameTmp <<
" is not available." << std::endl;
// Verify the entry actually stored (plain vs. Pyramid-wrapped key) and build an
// error message otherwise; the throw line is missing from this extraction.
1921 bool detectorInitialized =
false;
1924 detectorInitialized = !m_detectors[detectorNameTmp].empty();
1927 detectorInitialized = !m_detectors[detectorName].empty();
1930 if (!detectorInitialized) {
1931 std::stringstream ss_msg;
1932 ss_msg <<
"Fail to initialize the detector: " << detectorNameTmp
1933 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Initialize every detector listed in detectorNames (the per-name call inside
// the loop body is missing from this extraction — presumably initDetector(*it)).
1946 void vpKeyPoint::initDetectors(
const std::vector<std::string> &detectorNames)
1948 for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
// Create one descriptor extractor from its textual name and store it in
// m_extractors. Several descriptors depend on the xfeatures2d contrib module
// (and VGG/BoostDesc additionally on OpenCV >= 3.2); when unavailable an error
// message is assembled in ss_msg (throw lines missing from this extraction).
// NOTE(review): lossy extraction — #else/#endif lines and closing braces are
// absent from the residue; the visible error branches pair with the #ifdefs.
1958 void vpKeyPoint::initExtractor(
const std::string &extractorName)
// OpenCV < 3.0: direct factory lookup by name.
1960 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) 1961 m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
1963 if (extractorName ==
"SIFT") {
1964 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 1965 m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
1967 std::stringstream ss_msg;
1968 ss_msg <<
"Fail to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1969 <<
" was not build with xFeatures2d module.";
1972 }
else if (extractorName ==
"SURF") {
// SURF with extended=true produces 128-element descriptors.
1973 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 1975 m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3,
true);
1977 std::stringstream ss_msg;
1978 ss_msg <<
"Fail to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1979 <<
" was not build with xFeatures2d module.";
1982 }
else if (extractorName ==
"ORB") {
1983 m_extractors[extractorName] = cv::ORB::create();
1984 }
else if (extractorName ==
"BRISK") {
1985 m_extractors[extractorName] = cv::BRISK::create();
1986 }
else if (extractorName ==
"FREAK") {
1987 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 1988 m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
1990 std::stringstream ss_msg;
1991 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
1992 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
1995 }
else if (extractorName ==
"BRIEF") {
1996 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 1997 m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
1999 std::stringstream ss_msg;
2000 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2001 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2004 }
else if (extractorName ==
"KAZE") {
2005 m_extractors[extractorName] = cv::KAZE::create();
2006 }
else if (extractorName ==
"AKAZE") {
2007 m_extractors[extractorName] = cv::AKAZE::create();
2008 }
else if (extractorName ==
"DAISY") {
2009 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2010 m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2012 std::stringstream ss_msg;
2013 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2014 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2017 }
else if (extractorName ==
"LATCH") {
2018 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2019 m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2021 std::stringstream ss_msg;
2022 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2023 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2026 }
else if (extractorName ==
"LUCID") {
// NOTE(review): the LUCID creation lines (original 2028-2031) are missing here.
2027 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2032 std::stringstream ss_msg;
2033 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2034 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2037 }
else if (extractorName ==
"VGG") {
2038 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) 2039 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) 2040 m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2042 std::stringstream ss_msg;
2043 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2044 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2048 std::stringstream ss_msg;
2049 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2050 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
2053 }
else if (extractorName ==
"BoostDesc") {
2054 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) 2055 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) 2056 m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2058 std::stringstream ss_msg;
2059 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2060 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2064 std::stringstream ss_msg;
2065 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2066 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
// Unknown name: report and fall through to the final null check.
2070 std::cerr <<
"The extractor:" << extractorName <<
" is not available." << std::endl;
2074 if (!m_extractors[extractorName]) {
2075 std::stringstream ss_msg;
2076 ss_msg <<
"Fail to initialize the extractor: " << extractorName
2077 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV 2.4.x only: switch SURF to extended (128-float) descriptors via the
// legacy algorithm-parameter API.
2081 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) 2082 if (extractorName ==
"SURF") {
2084 m_extractors[extractorName]->set(
"extended", 1);
// Initialize every extractor listed in extractorNames (the per-name call inside
// the loop is missing from this extraction — presumably initExtractor(*it)),
// then verify that all configured extractors agree on one descriptor type
// (the mismatch-handling line is also missing from the residue).
2095 void vpKeyPoint::initExtractors(
const std::vector<std::string> &extractorNames)
2097 for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
// Default to floating-point descriptors until the first extractor reports its type.
2101 int descriptorType = CV_32F;
2102 bool firstIteration =
true;
2103 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2104 it != m_extractors.end(); ++it) {
2105 if (firstIteration) {
2106 firstIteration =
false;
2107 descriptorType = it->second->descriptorType();
2109 if (descriptorType != it->second->descriptorType()) {
// Populate the enum -> name maps for detectors and descriptors, gated on the
// OpenCV version and on availability of the nonfree / xfeatures2d modules.
// NOTE(review): most map-assignment lines are missing from this extraction;
// only a few representative entries survive below.
2116 void vpKeyPoint::initFeatureNames()
2119 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403) 2126 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D)) 2127 m_mapOfDetectorNames[DETECTOR_STAR] =
"STAR";
2129 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) 2133 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 2138 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D) 2139 m_mapOfDetectorNames[DETECTOR_MSD] =
"MSD";
2143 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403) 2146 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D)) 2147 m_mapOfDescriptorNames[DESCRIPTOR_FREAK] =
"FREAK";
2148 m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] =
"BRIEF";
2150 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) 2154 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 2157 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) 2158 m_mapOfDescriptorNames[DESCRIPTOR_DAISY] =
"DAISY";
2159 m_mapOfDescriptorNames[DESCRIPTOR_LATCH] =
"LATCH";
2162 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D) 2163 m_mapOfDescriptorNames[DESCRIPTOR_VGG] =
"VGG";
2164 m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] =
"BoostDesc";
// Interior of the matcher-initialization routine (its signature line, taking
// matcherName, is missing from this extraction — presumably
// vpKeyPoint::initMatcher). It infers the descriptor type from the configured
// extractors, builds a FlannBasedMatcher with LSH (binary/CV_8U descriptors)
// or KD-tree (float descriptors) index parameters, or falls back to the
// generic DescriptorMatcher factory; failure builds an error message in ss_msg.
2176 int descriptorType = CV_32F;
2177 bool firstIteration =
true;
2178 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2179 it != m_extractors.end(); ++it) {
2180 if (firstIteration) {
2181 firstIteration =
false;
2182 descriptorType = it->second->descriptorType();
2184 if (descriptorType != it->second->descriptorType()) {
2190 if (matcherName ==
"FlannBased") {
2191 if (m_extractors.empty()) {
2192 std::cout <<
"Warning: No extractor initialized, by default use " 2193 "floating values (CV_32F) " 2194 "for descriptor type !" 2198 if (descriptorType == CV_8U) {
// LSH index for binary descriptors (12 tables, key size 20, probe level 2).
2199 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 2200 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2202 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::LshIndexParams(12, 20, 2));
// KD-tree index for floating-point descriptors.
2205 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 2206 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2208 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::KDTreeIndexParams());
2212 m_matcher = cv::DescriptorMatcher::create(matcherName);
// OpenCV 2.4.x: cross-check is only set on BruteForce when knn matching is off.
2215 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) 2216 if (m_matcher != NULL && !m_useKnn && matcherName ==
"BruteForce") {
2217 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
2222 std::stringstream ss_msg;
2223 ss_msg <<
"Fail to initialize the matcher: " << matcherName
2224 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Interior fragments of the insertImageMatching overloads (their signatures
// are missing from this extraction). First fragment: side-by-side insertion of
// a reference and a current image into the composite matching image.
IMatching.
insert(IRef, topLeftCorner);
IMatching.
insert(ICurrent, topLeftCorner);
// Second fragment: mosaic layout for multiple training images plus the current
// image, arranged on a near-square nbWidth x nbHeight grid with the current
// image placed in the median cell.
2257 int nbImg = (int)(m_mapOfImages.size() + 1);
2259 if (m_mapOfImages.empty()) {
2260 std::cerr <<
"There is no training image loaded !" << std::endl;
2270 int nbWidth = nbImgSqrt;
2271 int nbHeight = nbImgSqrt;
// Grow the grid by one column when the square root under-counts.
2273 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Find the maximum training-image dimensions to size each grid cell uniformly.
2278 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2280 if (maxW < it->second.getWidth()) {
2281 maxW = it->second.getWidth();
2284 if (maxH < it->second.getHeight()) {
2285 maxH = it->second.getHeight();
// Reserve the median cell of the grid for the current image.
2291 int medianI = nbHeight / 2;
2292 int medianJ = nbWidth / 2;
2293 int medianIndex = medianI * nbWidth + medianJ;
2296 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
// Skip past the median slot so training images flow around it.
2298 int local_cpt = cpt;
2299 if (cpt >= medianIndex) {
2304 int indexI = local_cpt / nbWidth;
2305 int indexJ = local_cpt - (indexI * nbWidth);
2306 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2308 IMatching.
insert(it->second, topLeftCorner);
2311 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2312 IMatching.
insert(ICurrent, topLeftCorner);
// Fragment of the XML configuration loader (signature missing from this
// extraction): resets the current detector/extractor configuration, then
// parses the file with the libxml2-backed parser (xmlp); the parser setup
// lines between 2330 and 2336 are absent from the residue.
#ifdef VISP_HAVE_XML2 2328 m_detectorNames.clear();
2329 m_extractorNames.clear();
2330 m_detectors.clear();
2331 m_extractors.clear();
2333 std::cout <<
" *********** Parsing XML for configuration for vpKeyPoint " 2336 xmlp.
parse(configFile);
// Fragments of the learning-data loader (signature missing from this
// extraction; apparently takes a filename, a binaryMode flag and an append
// flag). Two parallel code paths survive below: a binary reader and a
// libxml2 XML reader. Both rebuild m_trainKeyPoints / m_trainPoints /
// m_mapOfImageId / m_mapOfImages and the train descriptor matrix, offsetting
// class and image ids when appending to previously loaded data.
// NOTE(review): many lines (file reads, error handling, #else branches,
// closing braces) are missing from this residue.
2401 int startClassId = 0;
2402 int startImageId = 0;
// Non-append mode starts from a clean slate...
2404 m_trainKeyPoints.clear();
2405 m_trainPoints.clear();
2406 m_mapOfImageId.clear();
2407 m_mapOfImages.clear();
// ...append mode instead continues numbering after the highest existing ids.
2410 for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
2411 if (startClassId < it->first) {
2412 startClassId = it->first;
2417 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2419 if (startImageId < it->first) {
2420 startImageId = it->first;
2427 if (!parent.empty()) {
// ---- Binary-format path ----
2432 std::ifstream file(filename.c_str(), std::ifstream::binary);
2433 if (!file.is_open()) {
// Without the visp_io module the embedded image payloads cannot be decoded.
2441 #if !defined(VISP_HAVE_MODULE_IO) 2443 std::cout <<
"Warning: The learning file contains image data that will " 2444 "not be loaded as visp_io module " 2445 "is not available !" 2450 for (
int i = 0; i < nbImgs; i++) {
// Read the NUL-terminated image path, one byte at a time.
2458 char *path =
new char[length + 1];
2460 for (
int cpt = 0; cpt < length; cpt++) {
2462 file.read((
char *)(&c),
sizeof(c));
2465 path[length] =
'\0';
2468 #ifdef VISP_HAVE_MODULE_IO 2476 m_mapOfImages[
id + startImageId] = I;
// Whether 3D object coordinates accompany each keypoint.
2484 int have3DInfoInt = 0;
2486 bool have3DInfo = have3DInfoInt != 0;
// Descriptor type defaults to 5 (CV_32F) per the OpenCV type-code convention.
2497 int descriptorType = 5;
2500 cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2501 for (
int i = 0; i < nRows; i++) {
// Per-row keypoint attributes; the reads between declarations are missing here.
2503 float u, v, size, angle, response;
2504 int octave, class_id, image_id;
2513 cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
2514 m_trainKeyPoints.push_back(keyPoint);
2516 if (image_id != -1) {
2517 #ifdef VISP_HAVE_MODULE_IO 2519 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2529 m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
// Read one descriptor row, dispatching on the stored element type.
2532 for (
int j = 0; j < nCols; j++) {
2534 switch (descriptorType) {
2536 unsigned char value;
2537 file.read((
char *)(&value),
sizeof(value));
2538 trainDescriptorsTmp.at<
unsigned char>(i, j) = value;
2543 file.read((
char *)(&value),
sizeof(value));
2544 trainDescriptorsTmp.at<
char>(i, j) = value;
2548 unsigned short int value;
2550 trainDescriptorsTmp.at<
unsigned short int>(i, j) = value;
2556 trainDescriptorsTmp.at<
short int>(i, j) = value;
2562 trainDescriptorsTmp.at<
int>(i, j) = value;
2568 trainDescriptorsTmp.at<
float>(i, j) = value;
2574 trainDescriptorsTmp.at<
double>(i, j) = value;
// Default case apparently falls back to float storage.
2580 trainDescriptorsTmp.at<
float>(i, j) = value;
// Append or replace the accumulated train descriptors.
2586 if (!append || m_trainDescriptors.empty()) {
2587 trainDescriptorsTmp.copyTo(m_trainDescriptors);
2589 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
// ---- XML-format path (libxml2) ----
2594 #ifdef VISP_HAVE_XML2 2595 xmlDocPtr doc = NULL;
2596 xmlNodePtr root_element = NULL;
2606 doc = xmlReadFile(filename.c_str(), NULL, 0);
2612 root_element = xmlDocGetRootElement(doc);
2614 xmlNodePtr first_level_node = NULL;
2617 int descriptorType = CV_32F;
2618 int nRows = 0, nCols = 0;
2621 cv::Mat trainDescriptorsTmp;
2623 for (first_level_node = root_element->children; first_level_node; first_level_node = first_level_node->next) {
2625 std::string name((
char *)first_level_node->name);
// <TrainingImageInfo>: reload each referenced training image by path.
2626 if (first_level_node->type == XML_ELEMENT_NODE && name ==
"TrainingImageInfo") {
2627 xmlNodePtr image_info_node = NULL;
2629 for (image_info_node = first_level_node->children; image_info_node; image_info_node = image_info_node->next) {
2630 name = std::string((
char *)image_info_node->name);
2632 if (name ==
"trainImg") {
2634 xmlChar *image_id_property = xmlGetProp(image_info_node, BAD_CAST
"image_id");
2636 if (image_id_property) {
2637 id = std::atoi((
char *)image_id_property);
2639 xmlFree(image_id_property);
2642 #ifdef VISP_HAVE_MODULE_IO 2643 std::string path((
char *)image_info_node->children->content);
2652 m_mapOfImages[
id + startImageId] = I;
// <DescriptorsInfo>: descriptor matrix dimensions and element type.
2656 }
else if (first_level_node->type == XML_ELEMENT_NODE && name ==
"DescriptorsInfo") {
2657 xmlNodePtr descriptors_info_node = NULL;
2658 for (descriptors_info_node = first_level_node->children; descriptors_info_node;
2659 descriptors_info_node = descriptors_info_node->next) {
2660 if (descriptors_info_node->type == XML_ELEMENT_NODE) {
2661 name = std::string((
char *)descriptors_info_node->name);
2663 if (name ==
"nrows") {
2664 nRows = std::atoi((
char *)descriptors_info_node->children->content);
2665 }
else if (name ==
"ncols") {
2666 nCols = std::atoi((
char *)descriptors_info_node->children->content);
2667 }
else if (name ==
"type") {
2668 descriptorType = std::atoi((
char *)descriptors_info_node->children->content);
2673 trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
// <DescriptorInfo>: one keypoint with optional 3D point and descriptor row.
2674 }
else if (first_level_node->type == XML_ELEMENT_NODE && name ==
"DescriptorInfo") {
2675 xmlNodePtr point_node = NULL;
2676 double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
2677 int octave = 0, class_id = 0, image_id = 0;
2678 double oX = 0.0, oY = 0.0, oZ = 0.0;
2680 std::stringstream ss;
2682 for (point_node = first_level_node->children; point_node; point_node = point_node->next) {
2683 if (point_node->type == XML_ELEMENT_NODE) {
2684 name = std::string((
char *)point_node->name);
2688 u = std::strtod((
char *)point_node->children->content, &pEnd);
2689 }
else if (name ==
"v") {
2690 v = std::strtod((
char *)point_node->children->content, &pEnd);
2691 }
else if (name ==
"size") {
2692 size = std::strtod((
char *)point_node->children->content, &pEnd);
2693 }
else if (name ==
"angle") {
2694 angle = std::strtod((
char *)point_node->children->content, &pEnd);
2695 }
else if (name ==
"response") {
2696 response = std::strtod((
char *)point_node->children->content, &pEnd);
2697 }
else if (name ==
"octave") {
2698 octave = std::atoi((
char *)point_node->children->content);
2699 }
else if (name ==
"class_id") {
2700 class_id = std::atoi((
char *)point_node->children->content);
// class_id arrives last of the geometric fields; the keypoint is built here.
2701 cv::KeyPoint keyPoint(cv::Point2f((
float)u, (
float)v), (
float)size, (
float)angle, (
float)response, octave,
2702 (class_id + startClassId));
2703 m_trainKeyPoints.push_back(keyPoint);
2704 }
else if (name ==
"image_id") {
2705 image_id = std::atoi((
char *)point_node->children->content);
2706 if (image_id != -1) {
2707 #ifdef VISP_HAVE_MODULE_IO 2709 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2712 }
else if (name ==
"oX") {
2713 oX = std::atof((
char *)point_node->children->content);
2714 }
else if (name ==
"oY") {
2715 oY = std::atof((
char *)point_node->children->content);
2716 }
else if (name ==
"oZ") {
// oZ is the last 3D coordinate, so the Point3f is pushed here.
2717 oZ = std::atof((
char *)point_node->children->content);
2718 m_trainPoints.push_back(cv::Point3f((
float)oX, (
float)oY, (
float)oZ));
2719 }
else if (name ==
"desc") {
2720 xmlNodePtr descriptor_value_node = NULL;
2723 for (descriptor_value_node = point_node->children; descriptor_value_node;
2724 descriptor_value_node = descriptor_value_node->next) {
2726 if (descriptor_value_node->type == XML_ELEMENT_NODE) {
// Parse each descriptor element from text, dispatching on descriptorType.
2728 std::string parseStr((
char *)descriptor_value_node->children->content);
2733 switch (descriptorType) {
2738 trainDescriptorsTmp.at<
unsigned char>(i, j) = (
unsigned char)parseValue;
2745 trainDescriptorsTmp.at<
char>(i, j) = (
char)parseValue;
2749 ss >> trainDescriptorsTmp.at<
unsigned short int>(i, j);
2753 ss >> trainDescriptorsTmp.at<
short int>(i, j);
2757 ss >> trainDescriptorsTmp.at<
int>(i, j);
2761 ss >> trainDescriptorsTmp.at<
float>(i, j);
2765 ss >> trainDescriptorsTmp.at<
double>(i, j);
2769 ss >> trainDescriptorsTmp.at<
float>(i, j);
2773 std::cerr <<
"Error when converting:" << ss.str() << std::endl;
2786 if (!append || m_trainDescriptors.empty()) {
2787 trainDescriptorsTmp.copyTo(m_trainDescriptors);
2789 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
// Without libxml2 the XML format cannot be read at all.
2801 std::cout <<
"Error: libxml2 is required !" << std::endl;
// Feed the (possibly appended) train descriptors to the matcher and remember
// how many training images are now loaded.
2811 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
2817 m_currentImageId = (int)m_mapOfImages.size();
// Descriptor matching routine (the first part of the signature — the
// query/train descriptor parameters — is missing from this extraction).
// Supports knn (ratio-test style, k=2) and plain matching, optionally
// matching train->query and swapping the match indices back afterwards.
2829 std::vector<cv::DMatch> &matches,
double &elapsedTime)
2834 m_knnMatches.clear();
2836 if (m_useMatchTrainToQuery) {
2837 std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
// A cloned matcher is used so the stored train-descriptor index is untouched.
2840 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
2841 matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
// Swap queryIdx/trainIdx so results read as query->train matches.
2843 for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
2844 it1 != knnMatchesTmp.end(); ++it1) {
2845 std::vector<cv::DMatch> tmp;
2846 for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
2847 tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
2849 m_knnMatches.push_back(tmp);
2852 matches.resize(m_knnMatches.size());
2853 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
2856 m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
2857 matches.resize(m_knnMatches.size());
2858 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
// Non-knn branch: same train->query trick with plain match().
2863 if (m_useMatchTrainToQuery) {
2864 std::vector<cv::DMatch> matchesTmp;
2866 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
2867 matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
2869 for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
2870 matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
2874 m_matcher->match(queryDescriptors, matches);
// matchPoint overload (signature truncated in this extraction; ends with a
// width parameter). Detects/extracts keypoints on the current image (or
// gathers them via affine-simulated detection), matches them against the
// train descriptors, applies the configured match filter, and returns the
// number of filtered matches.
2900 const unsigned int width)
2915 if (m_trainDescriptors.empty()) {
2916 std::cerr <<
"Reference is empty." << std::endl;
2918 std::cerr <<
"Reference is not computed." << std::endl;
2920 std::cerr <<
"Matching is not possible." << std::endl;
// ASIFT-style path: aggregate keypoints/descriptors from simulated views.
2925 if (m_useAffineDetection) {
2926 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
2927 std::vector<cv::Mat> listOfQueryDescriptors;
2933 m_queryKeyPoints.clear();
2934 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
2935 it != listOfQueryKeyPoints.end(); ++it) {
2936 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
2940 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
2944 it->copyTo(m_queryDescriptors);
2946 m_queryDescriptors.push_back(*it);
2950 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
2951 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
2954 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
2957 m_queryFilteredKeyPoints.clear();
2958 m_objectFilteredPoints.clear();
2959 m_filteredMatches.clear();
// No-filter branch: when matching was train->query, rebuild query-side indices.
2963 if (m_useMatchTrainToQuery) {
2965 m_queryFilteredKeyPoints.clear();
2966 m_filteredMatches.clear();
2967 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
2968 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
2969 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
2972 m_queryFilteredKeyPoints = m_queryKeyPoints;
2973 m_filteredMatches = m_matches;
// Collect matched 3D object points when training data carried 3D coordinates.
2976 if (!m_trainPoints.empty()) {
2977 m_objectFilteredPoints.clear();
2981 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
2983 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
2992 return static_cast<unsigned int>(m_filteredMatches.size());
// Convenience matchPoint overload fragment (signature missing from this
// extraction): delegates to the full overload, discarding error/elapsedTime.
3010 double error, elapsedTime;
3011 return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
// Pose-estimating matchPoint overload (signature missing from this
// extraction; apparently takes I, cam, cMo, error, elapsedTime, func,
// rectangle). Detects, extracts and matches as in the simpler overload, then
// estimates the object pose with RANSAC — either the ViSP VVS pose solver on
// vpPoint correspondences or the OpenCV 2D/3D solver — recording inliers,
// outliers, reprojection error and timing.
3034 if (m_trainDescriptors.empty()) {
3035 std::cerr <<
"Reference is empty." << std::endl;
3037 std::cerr <<
"Reference is not computed." << std::endl;
3039 std::cerr <<
"Matching is not possible." << std::endl;
3044 if (m_useAffineDetection) {
3045 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3046 std::vector<cv::Mat> listOfQueryDescriptors;
3052 m_queryKeyPoints.clear();
3053 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3054 it != listOfQueryKeyPoints.end(); ++it) {
3055 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3059 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3063 it->copyTo(m_queryDescriptors);
3065 m_queryDescriptors.push_back(*it);
3069 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3070 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3073 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3075 elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3078 m_queryFilteredKeyPoints.clear();
3079 m_objectFilteredPoints.clear();
3080 m_filteredMatches.clear();
3084 if (m_useMatchTrainToQuery) {
3086 m_queryFilteredKeyPoints.clear();
3087 m_filteredMatches.clear();
3088 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3089 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3090 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3093 m_queryFilteredKeyPoints = m_queryKeyPoints;
3094 m_filteredMatches = m_matches;
3097 if (!m_trainPoints.empty()) {
3098 m_objectFilteredPoints.clear();
3102 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3104 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3116 m_ransacInliers.clear();
3117 m_ransacOutliers.clear();
// ViSP virtual-visual-servoing RANSAC pose path.
3119 if (m_useRansacVVS) {
3120 std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
// Build vpPoint correspondences (3D object point + normalized image point).
3124 for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3125 it != m_objectFilteredPoints.end(); ++it, cpt++) {
3129 vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3131 double x = 0.0, y = 0.0;
3136 objectVpPoints[cpt] = pt;
3139 std::vector<vpPoint> inliers;
3140 std::vector<unsigned int> inlierIndex;
3142 bool res =
computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
// Record inlier correspondences; everything else becomes a RANSAC outlier.
3144 std::map<unsigned int, bool> mapOfInlierIndex;
3145 m_matchRansacKeyPointsToPoints.clear();
3147 for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3148 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3149 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3150 mapOfInlierIndex[*it] =
true;
3153 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3154 if (mapOfInlierIndex.find((
unsigned int)i) == mapOfInlierIndex.end()) {
3155 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3159 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3161 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3162 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3163 m_ransacInliers.begin(), matchRansacToVpImage);
3165 elapsedTime += m_poseTime;
// OpenCV 2D/3D RANSAC pose path (same bookkeeping with int indices).
3169 std::vector<cv::Point2f> imageFilteredPoints;
3170 cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3171 std::vector<int> inlierIndex;
3172 bool res =
computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3174 std::map<int, bool> mapOfInlierIndex;
3175 m_matchRansacKeyPointsToPoints.clear();
3177 for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3178 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3179 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3180 mapOfInlierIndex[*it] =
true;
3183 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3184 if (mapOfInlierIndex.find((
int)i) == mapOfInlierIndex.end()) {
3185 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3189 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3191 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3192 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3193 m_ransacInliers.begin(), matchRansacToVpImage);
3195 elapsedTime += m_poseTime;
// Fragment: vpKeyPoint::matchPointAndDetect(). The opening signature line was
// dropped by the scrape; remaining parameter lines follow. Detects whether the
// trained object is present via homography (planar) or fundamental-matrix
// (non-planar) RANSAC, and outputs the inlier center of gravity.
3220 vpImagePoint &centerOfGravity,
const bool isPlanarObject,
3221 std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3222 double *meanDescriptorDistance,
double *detection_score,
const vpRect &rectangle)
// Optional output lists are (presumably) cleared when both are supplied —
// the statements inside this guard were dropped by the scrape; verify.
3224 if (imPts1 != NULL && imPts2 != NULL) {
// Detection score = nb matches / mean descriptor distance: many close
// matches => high score.
3231 double meanDescriptorDistanceTmp = 0.0;
3232 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3233 meanDescriptorDistanceTmp += (double)it->distance;
3236 meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3237 double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3239 if (meanDescriptorDistance != NULL) {
3240 *meanDescriptorDistance = meanDescriptorDistanceTmp;
3242 if (detection_score != NULL) {
3243 *detection_score = score;
// Geometric verification needs at least 4 correspondences (homography DOF).
3246 if (m_filteredMatches.size() >= 4) {
3248 std::vector<cv::Point2f> points1(m_filteredMatches.size());
3250 std::vector<cv::Point2f> points2(m_filteredMatches.size());
3252 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3253 points1[i] = cv::Point2f(m_trainKeyPoints[(
size_t)m_filteredMatches[i].trainIdx].pt);
3254 points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(
size_t)m_filteredMatches[i].queryIdx].pt);
3257 std::vector<vpImagePoint> inliers;
// Planar object: robustly fit a homography train -> query.
3258 if (isPlanarObject) {
3259 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) 3260 cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3262 cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
// Keep matches whose homography reprojection error is under 6 px.
3265 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3267 cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3268 realPoint.at<
double>(0, 0) = points1[i].x;
3269 realPoint.at<
double>(1, 0) = points1[i].y;
3270 realPoint.at<
double>(2, 0) = 1.f;
3272 cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3273 double err_x = (reprojectedPoint.at<
double>(0, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].x;
3274 double err_y = (reprojectedPoint.at<
double>(1, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].y;
3275 double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
// NOTE(review): hard-coded 6.0 px threshold; m_ransacReprojectionError
// defaults to the same value but is not used here — confirm intent.
3277 if (reprojectionError < 6.0) {
3278 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3279 if (imPts1 != NULL) {
3280 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3283 if (imPts2 != NULL) {
3284 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3288 }
// Non-planar object: fundamental matrix needs at least 8 correspondences.
else if (m_filteredMatches.size() >= 8) {
3289 cv::Mat fundamentalInliers;
3290 cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3292 for (
size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
3293 if (fundamentalInliers.at<uchar>((
int)i, 0)) {
3294 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3296 if (imPts1 != NULL) {
3297 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3300 if (imPts2 != NULL) {
3301 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
// Center of gravity of the geometric inliers (mean of u/v coordinates);
// the bounding-box computation lines were dropped by the scrape.
3307 if (!inliers.empty()) {
3314 double meanU = 0.0, meanV = 0.0;
3315 for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
3316 meanU += it->get_u();
3317 meanV += it->get_v();
3320 meanU /= (double)inliers.size();
3321 meanV /= (double)inliers.size();
3323 centerOfGravity.
set_u(meanU);
3324 centerOfGravity.
set_v(meanV);
// Two detection criteria: mean-distance threshold or score threshold,
// depending on m_detectionMethod (the switch lines were dropped).
3332 return meanDescriptorDistanceTmp < m_detectionThreshold;
3334 return score > m_detectionScore;
// Fragment: pose-estimating matchPointAndDetect() overload (signature dropped
// by the scrape). Runs matchPoint() with pose estimation, projects the model
// points, and computes the inliers' center of gravity.
3362 bool isMatchOk =
matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
// Project the trained 3D model points into the image with the estimated pose
// (the projection statements inside the loop were dropped by the scrape).
3367 std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
3369 for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
3373 modelImagePoints[cpt] = imPt;
// Mean of the RANSAC inlier image coordinates gives the center of gravity.
3382 double meanU = 0.0, meanV = 0.0;
3383 for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
3384 meanU += it->get_u();
3385 meanV += it->get_v();
3388 meanU /= (double)m_ransacInliers.size();
3389 meanV /= (double)m_ransacInliers.size();
3391 centerOfGravity.
set_u(meanU);
3392 centerOfGravity.
set_v(meanV);
// Fragment: vpKeyPoint::detectExtractAffine() — ASIFT-style detection.
// Simulates affine viewpoint changes (tilt t, rotation phi), detects and
// extracts features on each warped image, then maps keypoints back to the
// original image frame with the inverse affine transform Ai.
// First variant: sequential loop over the affine parameter grid.
3411 std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
3412 std::vector<cv::Mat> &listOfDescriptors,
3418 listOfKeypoints.clear();
3419 listOfDescriptors.clear();
// Tilt levels t = 2^(tl/2), tl in [1,5]; rotation step shrinks as tilt grows.
3421 for (
int tl = 1; tl < 6; tl++) {
3422 double t = pow(2, 0.5 * tl);
3423 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3424 std::vector<cv::KeyPoint> keypoints;
3425 cv::Mat descriptors;
3427 cv::Mat timg, mask, Ai;
// Warp the image for this (t, phi); Ai is the back-projection transform.
3430 affineSkew(t, phi, timg, mask, Ai);
// Optionally record the masked warped image for visualization/debug.
3433 if(listOfAffineI != NULL) {
3435 bitwise_and(mask, timg, img_disp);
3438 listOfAffineI->push_back(tI);
// (debug-display lines, presumably inside an #if 0 block in the original)
3442 cv::bitwise_and(mask, timg, img_disp);
3443 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
3444 cv::imshow(
"Skew", img_disp );
// Run every configured detector on the warped image, restricted by the mask.
3448 for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3449 it != m_detectors.end(); ++it) {
3450 std::vector<cv::KeyPoint> kp;
3451 it->second->detect(timg, kp, mask);
3452 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3456 extract(timg, keypoints, descriptors, elapsedTime);
// Map keypoints back to original image coordinates via Ai (homogeneous mult).
3458 for(
unsigned int i = 0; i < keypoints.size(); i++) {
3459 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3460 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3461 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3462 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
3465 listOfKeypoints.push_back(keypoints);
3466 listOfDescriptors.push_back(descriptors);
// Second variant: precompute the (t, phi) grid so the work can be
// parallelized with OpenMP (deterministic output slot per index).
3475 std::vector<std::pair<double, int> > listOfAffineParams;
3476 for (
int tl = 1; tl < 6; tl++) {
3477 double t = pow(2, 0.5 * tl);
3478 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3479 listOfAffineParams.push_back(std::pair<double, int>(t, phi));
3483 listOfKeypoints.resize(listOfAffineParams.size());
3484 listOfDescriptors.resize(listOfAffineParams.size());
3486 if (listOfAffineI != NULL) {
3487 listOfAffineI->resize(listOfAffineParams.size());
// Parallel loop (int index required by OpenMP 2.0-era MSVC).
3490 #ifdef VISP_HAVE_OPENMP 3491 #pragma omp parallel for 3493 for (
int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
3494 std::vector<cv::KeyPoint> keypoints;
3495 cv::Mat descriptors;
3497 cv::Mat timg, mask, Ai;
3500 affineSkew(listOfAffineParams[(
size_t)cpt].first, listOfAffineParams[(
size_t)cpt].second, timg, mask, Ai);
3502 if (listOfAffineI != NULL) {
3504 bitwise_and(mask, timg, img_disp);
// Indexed write keeps the output ordering stable across threads.
3507 (*listOfAffineI)[(size_t)cpt] = tI;
3512 cv::bitwise_and(mask, timg, img_disp);
3513 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
3514 cv::imshow(
"Skew", img_disp );
3518 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3519 it != m_detectors.end(); ++it) {
3520 std::vector<cv::KeyPoint> kp;
3521 it->second->detect(timg, kp, mask);
3522 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3526 extract(timg, keypoints, descriptors, elapsedTime);
3528 for (
size_t i = 0; i < keypoints.size(); i++) {
3529 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3530 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3531 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3532 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
3535 listOfKeypoints[(size_t)cpt] = keypoints;
3536 listOfDescriptors[(size_t)cpt] = descriptors;
// Fragment: vpKeyPoint::reset() — restores every member to its default value
// and re-installs the default ORB detector/extractor names.
3552 m_computeCovariance =
false;
3554 m_currentImageId = 0;
3556 m_detectionScore = 0.15;
3557 m_detectionThreshold = 100.0;
3558 m_detectionTime = 0.0;
3559 m_detectorNames.clear();
3560 m_detectors.clear();
3561 m_extractionTime = 0.0;
3562 m_extractorNames.clear();
3563 m_extractors.clear();
3564 m_filteredMatches.clear();
3567 m_knnMatches.clear();
3568 m_mapOfImageId.clear();
3569 m_mapOfImages.clear();
// Drop the matcher; it is re-created lazily by initMatcher().
3570 m_matcher = cv::Ptr<cv::DescriptorMatcher>();
3571 m_matcherName =
"BruteForce-Hamming";
3573 m_matchingFactorThreshold = 2.0;
3574 m_matchingRatioThreshold = 0.85;
3575 m_matchingTime = 0.0;
3576 m_matchRansacKeyPointsToPoints.clear();
3577 m_nbRansacIterations = 200;
3578 m_nbRansacMinInlierCount = 100;
3579 m_objectFilteredPoints.clear();
3581 m_queryDescriptors = cv::Mat();
3582 m_queryFilteredKeyPoints.clear();
3583 m_queryKeyPoints.clear();
3584 m_ransacConsensusPercentage = 20.0;
3586 m_ransacInliers.clear();
3587 m_ransacOutliers.clear();
// NOTE(review): reset() sets m_ransacParallel to true whereas the constructor
// (see file head) initializes it to false — confirm which default is intended.
3588 m_ransacParallel =
true;
3589 m_ransacParallelNbThreads = 0;
3590 m_ransacReprojectionError = 6.0;
3591 m_ransacThreshold = 0.01;
3592 m_trainDescriptors = cv::Mat();
3593 m_trainKeyPoints.clear();
3594 m_trainPoints.clear();
3595 m_trainVpPoints.clear();
3596 m_useAffineDetection =
false;
// Cross-check option only exists for OpenCV 2.4.x brute-force matchers.
3597 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) 3598 m_useBruteForceCrossCheck =
true;
3600 m_useConsensusPercentage =
false;
3602 m_useMatchTrainToQuery =
false;
3603 m_useRansacVVS =
true;
3604 m_useSingleMatchFilter =
true;
// Default feature pipeline after a reset: ORB detect + ORB describe.
3606 m_detectorNames.push_back(
"ORB");
3607 m_extractorNames.push_back(
"ORB");
// Fragment: vpKeyPoint::saveLearningData() — serializes the trained keypoints,
// descriptors, optional 3D points and training images either to a binary file
// or to an XML file (libxml2). Many original lines were dropped by the scrape.
// Create the parent directory when a path component is present.
3623 if (!parent.empty()) {
// Optionally write each training image to disk and remember its path,
// keyed by image id.
3627 std::map<int, std::string> mapOfImgPath;
3628 if (saveTrainingImages) {
3629 #ifdef VISP_HAVE_MODULE_IO 3631 unsigned int cpt = 0;
3633 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
// Zero-padded sequential file name: train_image_000, train_image_001, ...
3639 std::stringstream ss;
3640 ss <<
"train_image_" << std::setfill(
'0') << std::setw(3) << cpt;
// Extension chosen from m_imageFormat (switch cases dropped by the scrape).
3642 switch (m_imageFormat) {
3664 std::string imgFilename = ss.str();
3665 mapOfImgPath[it->first] = imgFilename;
3666 vpImageIo::write(it->second, parent + (!parent.empty() ?
"/" :
"") + imgFilename);
// Without the visp_io module, training images cannot be saved — warn only.
3669 std::cout <<
"Warning: in vpKeyPoint::saveLearningData() training images " 3670 "are not saved because " 3671 "visp_io module is not available !" 3676 bool have3DInfo = m_trainPoints.size() > 0;
// Sanity check: 3D info, when present, must match keypoints one-to-one.
3677 if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
// ---- Binary serialization path ----
3683 std::ofstream file(filename.c_str(), std::ofstream::binary);
3684 if (!file.is_open()) {
3689 int nbImgs = (int)mapOfImgPath.size();
// Write each (image id, path) pair; path stored as length-prefixed chars.
3692 #ifdef VISP_HAVE_MODULE_IO 3693 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3699 std::string path = it->second;
3700 int length = (int)path.length();
3703 for (
int cpt = 0; cpt < length; cpt++) {
3704 file.write((
char *)(&path[(
size_t)cpt]),
sizeof(path[(
size_t)cpt]));
3710 int have3DInfoInt = have3DInfo ? 1 : 0;
3713 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3714 int descriptorType = m_trainDescriptors.type();
// Per-keypoint record: u, v, size, angle, response, octave, class_id,
// matching image id, optional 3D coords, then the descriptor row.
3725 for (
int i = 0; i < nRows; i++) {
3726 unsigned int i_ = (
unsigned int)i;
3728 float u = m_trainKeyPoints[i_].pt.x;
3732 float v = m_trainKeyPoints[i_].pt.y;
3736 float size = m_trainKeyPoints[i_].size;
3740 float angle = m_trainKeyPoints[i_].angle;
3744 float response = m_trainKeyPoints[i_].response;
3748 int octave = m_trainKeyPoints[i_].octave;
3752 int class_id = m_trainKeyPoints[i_].class_id;
3756 #ifdef VISP_HAVE_MODULE_IO 3757 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3758 int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
3767 float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
// Descriptor values written element-by-element according to the cv type.
3778 for (
int j = 0; j < nCols; j++) {
3780 switch (descriptorType) {
3782 file.write((
char *)(&m_trainDescriptors.at<
unsigned char>(i, j)),
3783 sizeof(m_trainDescriptors.at<
unsigned char>(i, j)));
3787 file.write((
char *)(&m_trainDescriptors.at<
char>(i, j)),
sizeof(m_trainDescriptors.at<
char>(i, j)));
// ---- XML serialization path (libxml2) ----
3819 #ifdef VISP_HAVE_XML2 3820 xmlDocPtr doc = NULL;
3821 xmlNodePtr root_node = NULL, image_node = NULL, image_info_node = NULL, descriptors_info_node = NULL,
3822 descriptor_node = NULL, desc_node = NULL;
3831 doc = xmlNewDoc(BAD_CAST
"1.0");
3836 root_node = xmlNewNode(NULL, BAD_CAST
"LearningData");
3837 xmlDocSetRootElement(doc, root_node);
3839 std::stringstream ss;
// <TrainingImageInfo>: one <trainImg> per saved image, id as attribute.
3842 image_node = xmlNewChild(root_node, NULL, BAD_CAST
"TrainingImageInfo", NULL);
3844 #ifdef VISP_HAVE_MODULE_IO 3845 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3846 image_info_node = xmlNewChild(image_node, NULL, BAD_CAST
"trainImg", BAD_CAST it->second.c_str());
3849 xmlNewProp(image_info_node, BAD_CAST
"image_id", BAD_CAST ss.str().c_str());
// <DescriptorsInfo>: matrix geometry and cv descriptor type.
3854 descriptors_info_node = xmlNewChild(root_node, NULL, BAD_CAST
"DescriptorsInfo", NULL);
3856 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3857 int descriptorType = m_trainDescriptors.type();
3862 xmlNewChild(descriptors_info_node, NULL, BAD_CAST
"nrows", BAD_CAST ss.str().c_str());
3867 xmlNewChild(descriptors_info_node, NULL, BAD_CAST
"ncols", BAD_CAST ss.str().c_str());
3871 ss << descriptorType;
3872 xmlNewChild(descriptors_info_node, NULL, BAD_CAST
"type", BAD_CAST ss.str().c_str());
// One <DescriptorInfo> element per training keypoint.
3874 for (
int i = 0; i < nRows; i++) {
3875 unsigned int i_ = (
unsigned int)i;
3876 descriptor_node = xmlNewChild(root_node, NULL, BAD_CAST
"DescriptorInfo", NULL);
3880 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].pt.x;
3881 xmlNewChild(descriptor_node, NULL, BAD_CAST
"u", BAD_CAST ss.str().c_str());
3885 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].pt.y;
3886 xmlNewChild(descriptor_node, NULL, BAD_CAST
"v", BAD_CAST ss.str().c_str());
3890 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].size;
3891 xmlNewChild(descriptor_node, NULL, BAD_CAST
"size", BAD_CAST ss.str().c_str());
3895 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].angle;
3896 xmlNewChild(descriptor_node, NULL, BAD_CAST
"angle", BAD_CAST ss.str().c_str());
3900 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].response;
3901 xmlNewChild(descriptor_node, NULL, BAD_CAST
"response", BAD_CAST ss.str().c_str());
3904 ss << m_trainKeyPoints[i_].octave;
3905 xmlNewChild(descriptor_node, NULL, BAD_CAST
"octave", BAD_CAST ss.str().c_str());
3908 ss << m_trainKeyPoints[i_].class_id;
3909 xmlNewChild(descriptor_node, NULL, BAD_CAST
"class_id", BAD_CAST ss.str().c_str());
// image_id is -1 when the io module is absent or images were not saved.
3912 #ifdef VISP_HAVE_MODULE_IO 3913 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3914 ss << ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
3915 xmlNewChild(descriptor_node, NULL, BAD_CAST
"image_id", BAD_CAST ss.str().c_str());
3918 xmlNewChild(descriptor_node, NULL, BAD_CAST
"image_id", BAD_CAST ss.str().c_str());
// Optional 3D object coordinates of the keypoint.
3924 ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].x;
3925 xmlNewChild(descriptor_node, NULL, BAD_CAST
"oX", BAD_CAST ss.str().c_str());
3929 ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].y;
3930 xmlNewChild(descriptor_node, NULL, BAD_CAST
"oY", BAD_CAST ss.str().c_str());
3934 ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].z;
3935 xmlNewChild(descriptor_node, NULL, BAD_CAST
"oZ", BAD_CAST ss.str().c_str());
// <desc>: one <val> child per descriptor element, formatted per cv type.
3938 desc_node = xmlNewChild(descriptor_node, NULL, BAD_CAST
"desc", NULL);
3940 for (
int j = 0; j < nCols; j++) {
3943 switch (descriptorType) {
// 8-bit types go through an int temporary so they print as numbers,
// not characters.
3949 int val_tmp = m_trainDescriptors.at<
unsigned char>(i, j);
3958 int val_tmp = m_trainDescriptors.at<
char>(i, j);
3963 ss << m_trainDescriptors.at<
unsigned short int>(i, j);
3967 ss << m_trainDescriptors.at<
short int>(i, j);
3971 ss << m_trainDescriptors.at<
int>(i, j);
3976 ss << std::fixed << std::setprecision(9) << m_trainDescriptors.at<
float>(i, j);
// double gets 17 significant digits for lossless round-trip.
3981 ss << std::fixed << std::setprecision(17) << m_trainDescriptors.at<
double>(i, j);
3988 xmlNewChild(desc_node, NULL, BAD_CAST
"val", BAD_CAST ss.str().c_str());
3992 xmlSaveFormatFileEnc(filename.c_str(), doc,
"UTF-8", 1);
// Without libxml2 the XML path cannot work — report and bail out.
4003 std::cerr <<
"Error: libxml2 is required !" << std::endl;
4008 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000) 4012 inline bool operator()(
const cv::KeyPoint &kpt)
const {
return kpt.response >= value; }
4017 inline bool operator()(
const cv::KeyPoint &kp1,
const cv::KeyPoint &kp2)
const {
return kp1.response > kp2.response; }
// Fragment: keep only the n_points keypoints with the strongest response.
// The std::partition argument lines (4038-4040) were dropped by the scrape;
// presumably they pass KeypointResponseGreaterThanThreshold(ambiguous_response).
4021 void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints,
int n_points)
// Nothing to do when the requested count already covers all keypoints.
4025 if (n_points >= 0 && keypoints.size() > (size_t)n_points) {
4026 if (n_points == 0) {
// Partial sort: the n_points strongest responses come first (unordered).
4032 std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(),
KeypointResponseGreater());
// Response of the weakest retained keypoint; ties at this value are kept.
4034 float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
4037 std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4041 keypoints.resize((
size_t)(new_end - keypoints.begin()));
4048 bool operator()(
const cv::KeyPoint &keyPt)
const {
return !r.contains(keyPt.pt); }
// Fragment: remove keypoints closer than borderSize to the image border.
// Several lines (the borderSize parameter declaration, the keypoints.clear()
// for too-small images, the RoiPredicate construction) were dropped.
4053 void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4056 if (borderSize > 0) {
// If the image cannot contain any interior region, presumably all keypoints
// are discarded (dropped line) — verify against original source.
4057 if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
// Erase-remove idiom with a RoiPredicate over the interior rectangle.
4060 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4062 cv::Point(borderSize, borderSize),
4063 cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4069 SizePredicate(
float _minSize,
float _maxSize) : minSize(_minSize), maxSize(_maxSize) {}
// Fragment: body of SizePredicate::operator() (signature line dropped).
// True when the keypoint size lies outside [minSize, maxSize] => remove it.
4073 float size = keyPt.size;
4074 return (size < minSize) || (size > maxSize);
4080 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints,
float minSize,
float maxSize)
4082 CV_Assert(minSize >= 0);
4083 CV_Assert(maxSize >= 0);
4084 CV_Assert(minSize <= maxSize);
4086 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
SizePredicate(minSize, maxSize)), keypoints.end());
4095 return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
// Fragment: drop keypoints that fall on zero pixels of the mask.
// Lines 4104-4107 were dropped by the scrape — presumably an early return
// when the mask is empty; verify against the original source.
4103 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints,
const cv::Mat &mask)
// Erase-remove idiom with the MaskPredicate defined above.
4108 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
MaskPredicate(mask)), keypoints.end());
// Fragment: KeyPoint_LessThan::operator()(size_t i, size_t j) — lexicographic
// comparison of two keypoints (x, y, size, angle, response, octave, class_id)
// used to sort indices in removeDuplicated(). The vpMath::equal(...) guard
// lines preceding each `return` were dropped by the scrape.
4115 const cv::KeyPoint &kp1 = (*kp)[ i];
4116 const cv::KeyPoint &kp2 = (*kp)[ j];
// Compare x coordinates first (epsilon-based inequality test).
4118 std::numeric_limits<float>::epsilon())) {
4120 return kp1.pt.x < kp2.pt.x;
// Then y coordinates.
4124 std::numeric_limits<float>::epsilon())) {
4126 return kp1.pt.y < kp2.pt.y;
// Then size, larger first.
4130 std::numeric_limits<float>::epsilon())) {
4132 return kp1.size > kp2.size;
// Then angle.
4136 std::numeric_limits<float>::epsilon())) {
4138 return kp1.angle < kp2.angle;
// Then response, stronger first.
4142 std::numeric_limits<float>::epsilon())) {
4144 return kp1.response > kp2.response;
// Then octave and class id as integer tie-breakers.
4147 if (kp1.octave != kp2.octave) {
4148 return kp1.octave > kp2.octave;
4151 if (kp1.class_id != kp2.class_id) {
4152 return kp1.class_id > kp2.class_id;
// Non-owning pointer to the keypoint vector being indexed.
4157 const std::vector<cv::KeyPoint> *
kp;
// Fragment: remove duplicated keypoints (same x, y, size and angle).
// Sorts an index array with KeyPoint_LessThan so duplicates become adjacent,
// masks all but the first of each run, then compacts the vector in place.
// (The sort call and the mask-clearing `else` branch were dropped.)
4160 void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4162 size_t i, j, n = keypoints.size();
4163 std::vector<size_t> kpidx(n);
// mask[i] == 1 means keypoint i survives.
4164 std::vector<uchar> mask(n, (uchar)1);
// Fill kpidx with 0..n-1 (loop body line dropped by the scrape).
4166 for (i = 0; i < n; i++) {
// Walk sorted indices; j tracks the first element of the current run.
4170 for (i = 1, j = 0; i < n; i++) {
4171 cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4172 cv::KeyPoint &kp2 = keypoints[kpidx[j]];
// A new run starts when any of x, y, size or angle differs beyond epsilon.
4175 if (!
vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4176 !
vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4177 !
vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4178 !
vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
// Compact: keep only masked-in keypoints, preserving order.
4185 for (i = j = 0; i < n; i++) {
4188 keypoints[j] = keypoints[i];
4193 keypoints.resize(j);
// Fragment: constructor wrapping a detector so it runs on a Gaussian pyramid.
// The second parameter line (presumably `int _maxLevel`) and the empty body
// were dropped by the scrape.
4199 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(
const cv::Ptr<cv::FeatureDetector> &_detector,
4201 : detector(_detector), maxLevel(_maxLevel)
4205 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty()
const 4207 return detector.empty() || (cv::FeatureDetector *)detector->empty();
4210 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4211 CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4213 detectImpl(image.getMat(), keypoints, mask.getMat());
// Fragment: run the wrapped detector on each pyramid level and rescale the
// detected keypoints back to level-0 coordinates. The downsampling step at
// the end of the loop (pyrDown on src) was dropped by the scrape.
4216 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(
const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4217 const cv::Mat &mask)
const 4219 cv::Mat src = image;
4220 cv::Mat src_mask = mask;
// Dilate the mask so keypoints near the mask edge survive the downscaling;
// then binarize it to {0, 255}.
4222 cv::Mat dilated_mask;
4223 if (!mask.empty()) {
4224 cv::dilate(mask, dilated_mask, cv::Mat());
4225 cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4226 mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4227 dilated_mask = mask255;
// multiplier = 2^l converts level-l coordinates back to the original image.
4230 for (
int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2) {
4232 std::vector<cv::KeyPoint> new_pts;
4233 detector->
detect(src, new_pts, src_mask);
4234 std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4235 for (; it != end; ++it) {
4236 it->pt.x *= multiplier;
4237 it->pt.y *= multiplier;
4238 it->size *= multiplier;
4241 keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
// Downscale the mask to follow the (dropped) image pyramid step.
4250 resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
// Final pass: enforce the ORIGINAL (non-dilated) mask on all keypoints.
4255 vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4259 #elif !defined(VISP_BUILD_SHARED_LIBS) 4262 void dummy_vpKeyPoint(){};
void getTrainKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
Used to indicate that a value is not in the allowed range.
Implementation of a matrix and operations on matrices.
void displayMatching(const vpImage< unsigned char > &IRef, vpImage< unsigned char > &IMatching, unsigned int crossSize, unsigned int lineThickness=1, const vpColor &color=vpColor::green)
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, bool(*func)(const vpHomogeneousMatrix &)=NULL)
static void compute3D(const cv::KeyPoint &candidate, const std::vector< vpPoint > &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
RoiPredicate(const cv::Rect &_r)
void set_oZ(const double oZ)
Set the point Z coordinate in the object frame.
void detectExtractAffine(const vpImage< unsigned char > &I, std::vector< std::vector< cv::KeyPoint > > &listOfKeypoints, std::vector< cv::Mat > &listOfDescriptors, std::vector< vpImage< unsigned char > > *listOfAffineI=NULL)
void getQueryKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
std::string getMatcherName() const
unsigned int getWidth() const
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType, const std::string &matcherName, const vpFilterMatchingType &filterType=ratioDistanceThreshold)
void createImageMatching(vpImage< unsigned char > &IRef, vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
static bool isNaN(const double value)
Class to define colors available for display functionalities.
static bool equal(double x, double y, double s=0.001)
double getMatchingFactorThreshold() const
bool getUseRansacVVSPoseEstimation() const
static const vpColor none
double get_oY() const
Get the point Y coordinate in the object frame.
Error that can be emitted by ViSP classes.
double getRansacConsensusPercentage() const
void setRansacThreshold(const double &t)
void set_x(const double x)
Set the point x coordinate in the image plane.
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
std::vector< unsigned int > getRansacInlierIndex() const
int getNbRansacMinInlierCount() const
double get_y() const
Get the point y coordinate in the image plane.
bool operator()(const cv::KeyPoint &keyPt) const
static const vpColor green
void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector< cv::DMatch > &matches, double &elapsedTime)
static int round(const double x)
bool getUseRansacConsensusPercentage() const
void insertImageMatching(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
VISP_EXPORT double measureTimeMs()
Class that defines what is a point.
bool isInside(const vpImagePoint &iP, const PointInPolygonMethod &method=PnPolyRayCasting) const
static void write(const vpImage< unsigned char > &I, const std::string &filename)
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
Defines a generic 2D polygon.
const char * what() const
vpRect getBoundingBox() const
double getRansacReprojectionError() const
void set_u(const double u)
static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpCylinder > &cylinders, const std::vector< std::vector< std::vector< vpImagePoint > > > &vectorOfCylinderRois, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
bool _reference_computed
flag to indicate if the reference has been built.
void set_v(const double v)
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
void setNbParallelRansacThreads(const int nb)
Generic class defining intrinsic camera parameters.
int getNbRansacIterations() const
KeyPoint_LessThan(const std::vector< cv::KeyPoint > &_kp)
double get_oZ() const
Get the point Z coordinate in the object frame.
double getMatchingRatioThreshold() const
void set_y(const double y)
Set the point y coordinate in the image plane.
std::string getDetectorName() const
void set_oX(const double oX)
Set the point X coordinate in the object frame.
bool operator()(const cv::KeyPoint &kp1, const cv::KeyPoint &kp2) const
void display(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, unsigned int size=3)
unsigned int buildReference(const vpImage< unsigned char > &I)
double get_x() const
Get the point x coordinate in the image plane.
bool operator()(const cv::KeyPoint &keyPt) const
vpMatrix getCovarianceMatrix() const
void insert(const vpImage< Type > &src, const vpImagePoint &topLeft)
std::vector< vpImagePoint > referenceImagePointsList
void getObjectPoints(std::vector< cv::Point3f > &objectPoints) const
unsigned int matchPoint(const vpImage< unsigned char > &I)
void setRansacMaxTrials(const int &rM)
void setRansacNbInliersToReachConsensus(const unsigned int &nbC)
KeypointResponseGreaterThanThreshold(float _value)
static void displayCircle(const vpImage< unsigned char > &I, const vpImagePoint ¢er, unsigned int radius, const vpColor &color, bool fill=false, unsigned int thickness=1)
double get_oX() const
Get the point X coordinate in the object frame.
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
void setUseParallelRansac(const bool use)
void loadLearningData(const std::string &filename, const bool binaryMode=false, const bool append=false)
void setWorldCoordinates(const double oX, const double oY, const double oZ)
static void read(vpImage< unsigned char > &I, const std::string &filename)
double getRansacThreshold() const
Implementation of column vector and the associated operations.
const std::vector< cv::KeyPoint > * kp
void set_oY(const double oY)
Set the point Y coordinate in the object frame.
void saveLearningData(const std::string &filename, const bool binaryMode=false, const bool saveTrainingImages=true)
void getTrainPoints(std::vector< cv::Point3f > &points) const
vpHomogeneousMatrix inverse() const
void setRansacFilterFlag(const RANSAC_FILTER_FLAGS &flag)
bool operator()(size_t i, size_t j) const
bool matchPointAndDetect(const vpImage< unsigned char > &I, vpRect &boundingBox, vpImagePoint ¢erOfGravity, const bool isPlanarObject=true, std::vector< vpImagePoint > *imPts1=NULL, std::vector< vpImagePoint > *imPts2=NULL, double *meanDescriptorDistance=NULL, double *detectionScore=NULL, const vpRect &rectangle=vpRect())
std::vector< unsigned int > matchedReferencePoints
unsigned int getHeight() const
Defines a rectangle in the plane.
std::vector< vpImagePoint > currentImagePointsList
bool computePose(const std::vector< cv::Point2f > &imagePoints, const std::vector< cv::Point3f > &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector< int > &inlierIndex, double &elapsedTime, bool(*func)(const vpHomogeneousMatrix &)=NULL)
MaskPredicate(const cv::Mat &_mask)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1)
This class defines the container for a plane geometrical structure.
void loadConfigFile(const std::string &configFile)
SizePredicate(float _minSize, float _maxSize)
void addPoint(const vpPoint &P)
static void convertFromOpenCV(const cv::KeyPoint &from, vpImagePoint &to)
Class that consider the case of a translation vector.
Implementation of a rotation vector as axis-angle minimal representation.
bool operator()(const cv::KeyPoint &key_pt) const
std::vector< vpPoint > getRansacInliers() const
bool operator()(const cv::KeyPoint &kpt) const
void setCovarianceComputation(const bool &flag)
std::string getExtractorName() const
void set_ij(const double ii, const double jj)
vpMatchingMethodEnum getMatchingMethod() const
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
void initMatcher(const std::string &matcherName)
void extract(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, cv::Mat &descriptors, std::vector< cv::Point3f > *trainPoints=NULL)
void parse(const std::string &filename)
bool detect(const vpImage< unsigned char > &I)