42 #include <visp3/vision/vpKeyPoint.h>
43 #include <visp3/core/vpIoTools.h>
45 #if (VISP_HAVE_OPENCV_VERSION >= 0x020101)
47 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
48 # include <opencv2/calib3d/calib3d.hpp>
// Collapses a k-nearest-neighbour match list to a single cv::DMatch.
// NOTE(review): only the non-empty guard is visible in this fragment; the
// branch bodies (presumably returning the first/best element) are elided —
// TODO confirm against the full file.
60 inline cv::DMatch knnToDMatch(
const std::vector<cv::DMatch> &knnMatches) {
61 if(knnMatches.size() > 0) {
// Converts a (cv::KeyPoint, cv::Point3f) RANSAC correspondence into a
// vpImagePoint. NOTE(review): the conversion body is elided in this
// fragment — presumably built from pair.first.pt; confirm against the
// full file.
74 inline vpImagePoint matchRansacToVpImage(
const std::pair<cv::KeyPoint, cv::Point3f> &pair) {
// Tail of a host-endianness probe: a union (declaration elided in this
// fragment) is initialized with the multi-byte constant 0x01020304 and then
// inspected through its first storage byte.
82 } bint = { 0x01020304 };
// Big-endian hosts store the most significant byte (0x01) first.
84 return bint.c[0] == 1;
/*!
  Reverse the byte order of a 16-bit value as it is stored in memory.

  \param n : Value whose two memory bytes are swapped.
  \return The value built by emitting the first memory byte of \e n as the
  high byte and the second memory byte as the low byte.
*/
uint16_t reverse16bits(const uint16_t n) {
  const unsigned char *bytes = (const unsigned char *) &n;
  const uint16_t hi = (uint16_t) bytes[0];
  const uint16_t lo = (uint16_t) bytes[1];

  return (uint16_t) ((hi << 8) | lo);
}
/*!
  Reverse the byte order of a 32-bit value as it is stored in memory.

  \param n : Value whose four memory bytes are mirrored.
  \return The value built by emitting the memory bytes of \e n in order,
  most significant position first.
*/
uint32_t reverse32bits(const uint32_t n) {
  const unsigned char *bytes = (const unsigned char *) &n;
  uint32_t reversed = 0;

  // Fold the four storage bytes in, first byte ending up most significant.
  for (int i = 0; i < 4; i++) {
    reversed = (reversed << 8) | (uint32_t) bytes[i];
  }

  return reversed;
}
// Byte-order reversal of a 32-bit float, performed through two float /
// byte-array unions. NOTE(review): the union declarations, the assignment
// of f into dat1, and the final return are elided in this fragment — TODO
// confirm against the full file.
100 float reverseFloat(
const float f) {
// Mirror the four bytes of dat1 into dat2.
107 dat2.b[0] = dat1.b[3];
108 dat2.b[1] = dat1.b[2];
109 dat2.b[2] = dat1.b[1];
110 dat2.b[3] = dat1.b[0];
// Byte-order reversal of a 64-bit double, performed through two double /
// byte-array unions. NOTE(review): the union declarations, the assignment
// of d into dat1, and the final return are elided in this fragment — TODO
// confirm against the full file.
114 double reverseDouble(
const double d) {
// Mirror the eight bytes of dat1 into dat2.
121 dat2.b[0] = dat1.b[7];
122 dat2.b[1] = dat1.b[6];
123 dat2.b[2] = dat1.b[5];
124 dat2.b[3] = dat1.b[4];
125 dat2.b[4] = dat1.b[3];
126 dat2.b[5] = dat1.b[2];
127 dat2.b[6] = dat1.b[1];
128 dat2.b[7] = dat1.b[0];
// Writes an unsigned short to the stream in little-endian byte order.
// NOTE(review): the two write paths below are mutually exclusive in intent
// (byte-swapped write for big-endian hosts vs. direct write on little-endian
// hosts); the preprocessor guards selecting between them are elided in this
// fragment — TODO confirm against the full file.
132 void writeBinaryUShortLE(std::ofstream &file,
const unsigned short ushort_value) {
// Big-endian host path: reverse bytes first so the file stays little-endian.
135 uint16_t reverse_ushort = reverse16bits(ushort_value);
136 file.write((
char *)(&reverse_ushort),
sizeof(reverse_ushort));
// Little-endian host path: the in-memory representation is already LE.
138 file.write((
char *)(&ushort_value),
sizeof(ushort_value));
// Writes a (signed) short to the stream in little-endian byte order.
// NOTE(review): as with writeBinaryUShortLE, the preprocessor guards that
// select between the swapped and direct write paths are elided in this
// fragment — TODO confirm against the full file.
142 void writeBinaryShortLE(std::ofstream &file,
const short short_value) {
// Big-endian host path: reinterpret as uint16_t and swap the bytes.
145 uint16_t reverse_short = reverse16bits((uint16_t) short_value);
146 file.write((
char *)(&reverse_short),
sizeof(reverse_short));
// Little-endian host path: write the value as stored.
148 file.write((
char *)(&short_value),
sizeof(short_value));
// Writes an unsigned int to the stream in little-endian byte order.
// NOTE(review): the else branch markers and the preprocessor guards that
// separate the big-endian (swap) path from the direct little-endian write
// are elided in this fragment — TODO confirm against the full file.
152 void writeBinaryUIntLE(std::ofstream &file,
const unsigned int uint_value) {
// unsigned int is 4 bytes on most platforms; pick the matching swapper.
155 if(
sizeof(uint_value) == 4) {
156 uint32_t reverse_uint = reverse32bits(uint_value);
157 file.write((
char *)(&reverse_uint),
sizeof(reverse_uint));
// Fallback for platforms where unsigned int is 2 bytes.
159 uint16_t reverse_uint = reverse16bits(uint_value);
160 file.write((
char *)(&reverse_uint),
sizeof(reverse_uint));
// Little-endian host path: write the value as stored.
163 file.write((
char *)(&uint_value),
sizeof(uint_value));
// Writes a (signed) int to the stream in little-endian byte order.
// NOTE(review): the else branch markers and the preprocessor guards that
// separate the big-endian (swap) path from the direct little-endian write
// are elided in this fragment — TODO confirm against the full file.
167 void writeBinaryIntLE(std::ofstream &file,
const int int_value) {
// int is 4 bytes on most platforms; pick the matching swapper.
170 if(
sizeof(int_value) == 4) {
171 uint32_t reverse_int = reverse32bits((uint32_t) int_value);
172 file.write((
char *)(&reverse_int),
sizeof(reverse_int));
// Fallback for platforms where int is 2 bytes.
174 uint16_t reverse_int = reverse16bits((uint16_t) int_value);
175 file.write((
char *)(&reverse_int),
sizeof(reverse_int));
// Little-endian host path: write the value as stored.
178 file.write((
char *)(&int_value),
sizeof(int_value));
// Writes a 32-bit float to the stream in little-endian byte order.
// NOTE(review): the preprocessor guards selecting between the byte-swapped
// (big-endian host) and direct (little-endian host) write paths are elided
// in this fragment — TODO confirm against the full file.
182 void writeBinaryFloatLE(std::ofstream &file,
const float float_value) {
// Big-endian host path: reverse the float's bytes before writing.
185 float reverse_float = reverseFloat(float_value);
186 file.write((
char *)(&reverse_float),
sizeof(reverse_float));
// Little-endian host path: write the value as stored.
188 file.write((
char *)(&float_value),
sizeof(float_value));
// Writes a 64-bit double to the stream in little-endian byte order.
// NOTE(review): the preprocessor guards selecting between the byte-swapped
// (big-endian host) and direct (little-endian host) write paths are elided
// in this fragment — TODO confirm against the full file.
192 void writeBinaryDoubleLE(std::ofstream &file,
const double double_value) {
// Big-endian host path: reverse the double's bytes before writing.
195 double reverse_double = reverseDouble(double_value);
196 file.write((
char *)(&reverse_double),
sizeof(reverse_double));
// Little-endian host path: write the value as stored.
198 file.write((
char *)(&double_value),
sizeof(double_value));
212 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
213 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(),
214 m_detectors(), m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
215 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
216 m_matcher(), m_matcherName(matcherName),
217 m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85), m_matchingTime(0.),
218 m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100), m_objectFilteredPoints(),
219 m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
220 m_ransacConsensusPercentage(20.0), m_ransacInliers(), m_ransacOutliers(), m_ransacReprojectionError(6.0),
221 m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(),
222 m_trainVpPoints(), m_useAffineDetection(false),
223 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
224 m_useBruteForceCrossCheck(true),
226 m_useConsensusPercentage(false),
227 m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true), m_useSingleMatchFilter(true)
235 m_detectorNames.push_back(detectorName);
236 m_extractorNames.push_back(extractorName);
251 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
252 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
253 m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
254 m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
256 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85), m_matchingTime(0.),
257 m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100), m_objectFilteredPoints(),
258 m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
259 m_ransacConsensusPercentage(20.0), m_ransacInliers(), m_ransacOutliers(), m_ransacReprojectionError(6.0),
260 m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(),
261 m_trainVpPoints(), m_useAffineDetection(false),
262 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
263 m_useBruteForceCrossCheck(true),
265 m_useConsensusPercentage(false),
266 m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true), m_useSingleMatchFilter(true)
285 void vpKeyPoint::affineSkew(
double tilt,
double phi, cv::Mat& img,
286 cv::Mat& mask, cv::Mat& Ai) {
290 mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));
292 cv::Mat A = cv::Mat::eye(2, 3, CV_32F);
295 if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
300 A = (cv::Mat_<float>(2, 2) << c, -s, s, c);
302 cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
303 cv::Mat tcorners = corners * A.t();
304 cv::Mat tcorners_x, tcorners_y;
305 tcorners.col(0).copyTo(tcorners_x);
306 tcorners.col(1).copyTo(tcorners_y);
307 std::vector<cv::Mat> channels;
308 channels.push_back(tcorners_x);
309 channels.push_back(tcorners_y);
310 cv::merge(channels, tcorners);
312 cv::Rect rect = cv::boundingRect(tcorners);
313 A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
315 cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height),
316 cv::INTER_LINEAR, cv::BORDER_REPLICATE);
319 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
320 double s = 0.8 * sqrt(tilt * tilt - 1);
321 cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
322 cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
323 A.row(0) = A.row(0) / tilt;
326 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() || std::fabs(phi) > std::numeric_limits<double>::epsilon() ) {
329 cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
331 cv::invertAffineTransform(A, Ai);
355 const unsigned int height,
const unsigned int width) {
368 const vpRect &rectangle) {
371 m_trainPoints.clear();
372 m_mapOfImageId.clear();
373 m_mapOfImages.clear();
374 m_currentImageId = 1;
376 if(m_useAffineDetection) {
377 std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
378 std::vector<cv::Mat> listOfTrainDescriptors;
384 m_trainKeyPoints.clear();
385 for(std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
386 it != listOfTrainKeyPoints.end(); ++it) {
387 m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
391 for(std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end(); ++it) {
394 it->copyTo(m_trainDescriptors);
396 m_trainDescriptors.push_back(*it);
400 detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
401 extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
406 for(std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
407 m_mapOfImageId[it->class_id] = m_currentImageId;
411 m_mapOfImages[m_currentImageId] = I;
420 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
422 return static_cast<unsigned int>(m_trainKeyPoints.size());
435 std::vector<cv::Point3f> &points3f,
const bool append,
const int class_id) {
436 cv::Mat trainDescriptors;
438 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
440 extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
442 if(trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
445 std::map<size_t, size_t> mapOfKeypointHashes;
447 for(std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it, cpt++) {
448 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
451 std::vector<cv::Point3f> trainPoints_tmp;
452 for(std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
453 if(mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
454 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
459 points3f = trainPoints_tmp;
462 buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id);
476 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
477 const bool append,
const int class_id) {
479 m_currentImageId = 0;
480 m_mapOfImageId.clear();
481 m_mapOfImages.clear();
482 this->m_trainKeyPoints.clear();
483 this->m_trainPoints.clear();
488 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
491 for(std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
492 it->class_id = class_id;
498 for(std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
499 m_mapOfImageId[it->class_id] = m_currentImageId;
503 m_mapOfImages[m_currentImageId] = I;
506 this->m_trainKeyPoints.insert(this->m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
508 trainDescriptors.copyTo(this->m_trainDescriptors);
510 this->m_trainDescriptors.push_back(trainDescriptors);
512 this->m_trainPoints.insert(this->m_trainPoints.end(), points3f.begin(), points3f.end());
521 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
541 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
548 vpPlane Po(pts[0], pts[1], pts[2]);
549 double xc = 0.0, yc = 0.0;
560 point_obj = cMo.
inverse() * point_cam;
561 point = cv::Point3f((
float) point_obj[0], (
float) point_obj[1], (
float) point_obj[2]);
579 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
586 vpPlane Po(pts[0], pts[1], pts[2]);
587 double xc = 0.0, yc = 0.0;
598 point_obj = cMo.
inverse() * point_cam;
616 std::vector<cv::KeyPoint> &candidates,
const std::vector<vpPolygon> &polygons,
617 const std::vector<std::vector<vpPoint> > &roisPt, std::vector<cv::Point3f> &points, cv::Mat *descriptors) {
619 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
626 std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
627 for(
size_t i = 0; i < candidatesToCheck.size(); i++) {
628 pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
632 std::vector<vpPolygon> polygons_tmp = polygons;
633 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
634 std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
636 while(it2 != pairOfCandidatesToCheck.end()) {
637 imPt.
set_ij(it2->first.pt.y, it2->first.pt.x);
638 if (it1->isInside(imPt)) {
639 candidates.push_back(it2->first);
641 points.push_back(pt);
643 if(descriptors != NULL) {
644 desc.push_back(descriptors->row((
int) it2->second));
648 it2 = pairOfCandidatesToCheck.erase(it2);
655 if(descriptors != NULL) {
656 desc.copyTo(*descriptors);
674 std::vector<vpImagePoint> &candidates,
const std::vector<vpPolygon> &polygons,
675 const std::vector<std::vector<vpPoint> > &roisPt, std::vector<vpPoint> &points, cv::Mat *descriptors) {
677 std::vector<vpImagePoint> candidatesToCheck = candidates;
683 std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
684 for(
size_t i = 0; i < candidatesToCheck.size(); i++) {
685 pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
689 std::vector<vpPolygon> polygons_tmp = polygons;
690 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
691 std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
693 while(it2 != pairOfCandidatesToCheck.end()) {
694 if (it1->isInside(it2->first)) {
695 candidates.push_back(it2->first);
697 points.push_back(pt);
699 if(descriptors != NULL) {
700 desc.push_back(descriptors->row((
int) it2->second));
704 it2 = pairOfCandidatesToCheck.erase(it2);
729 if(imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
731 std::cerr <<
"Not enough points to compute the pose (at least 4 points are needed)." << std::endl;
736 cv::Mat cameraMatrix =
746 cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);
749 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
751 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
752 rvec, tvec,
false, m_nbRansacIterations, (
float) m_ransacReprojectionError,
755 cv::SOLVEPNP_ITERATIVE);
770 int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
771 if(m_useConsensusPercentage) {
772 nbInlierToReachConsensus = (int) (m_ransacConsensusPercentage / 100.0 * (
double) m_queryFilteredKeyPoints.size());
775 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
776 rvec, tvec,
false, m_nbRansacIterations,
777 (
float) m_ransacReprojectionError, nbInlierToReachConsensus,
780 }
catch (cv::Exception &e) {
781 std::cerr << e.what() << std::endl;
786 tvec.at<
double>(1), tvec.at<
double>(2));
787 vpThetaUVector thetaUVector(rvec.at<
double>(0), rvec.at<
double>(1),
817 std::vector<unsigned int> inlierIndex;
818 return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
834 std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
838 if(objectVpPoints.size() < 4) {
847 for(std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
851 unsigned int nbInlierToReachConsensus = (
unsigned int) m_nbRansacMinInlierCount;
852 if(m_useConsensusPercentage) {
853 nbInlierToReachConsensus = (
unsigned int) (m_ransacConsensusPercentage / 100.0 *
854 (
double) m_queryFilteredKeyPoints.size());
861 bool isRansacPoseEstimationOk =
false;
868 if(m_computeCovariance) {
872 std::cerr <<
"e=" << e.
what() << std::endl;
888 return isRansacPoseEstimationOk;
902 double vpKeyPoint::computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
904 if(matchKeyPoints.size() == 0) {
909 std::vector<double> errors(matchKeyPoints.size());
912 for(std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
913 it != matchKeyPoints.end(); ++it, cpt++) {
918 double u = 0.0, v = 0.0;
920 errors[cpt] = std::sqrt((u-it->first.pt.x)*(u-it->first.pt.x) + (v-it->first.pt.y)*(v-it->first.pt.y));
923 return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
952 unsigned int nbImg = (
unsigned int) (m_mapOfImages.size() + 1);
954 if(m_mapOfImages.empty()) {
955 std::cerr <<
"There is no training image loaded !" << std::endl;
965 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double) nbImg));
968 unsigned int nbWidth = nbImgSqrt;
970 unsigned int nbHeight = nbImgSqrt;
973 if(nbImgSqrt * nbImgSqrt < nbImg) {
977 unsigned int maxW = ICurrent.
getWidth();
978 unsigned int maxH = ICurrent.
getHeight();
979 for(std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it) {
980 if(maxW < it->second.getWidth()) {
981 maxW = it->second.getWidth();
984 if(maxH < it->second.getHeight()) {
985 maxH = it->second.getHeight();
1002 const vpRect &rectangle) {
1005 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1008 cv::Point leftTop((
int) rectangle.
getLeft(), (int) rectangle.
getTop()), rightBottom((
int) rectangle.
getRight(),
1010 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1012 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1015 detect(matImg, keyPoints, elapsedTime, mask);
1026 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
1027 const cv::Mat &mask) {
1031 for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin(); it != m_detectors.end(); ++it) {
1032 std::vector<cv::KeyPoint> kp;
1033 it->second->detect(matImg, kp, mask);
1034 keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
1049 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1051 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1054 for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1068 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1071 for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1086 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color) {
1088 srand((
unsigned int) time(NULL));
1091 std::vector<vpImagePoint> queryImageKeyPoints;
1093 std::vector<vpImagePoint> trainImageKeyPoints;
1097 for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1099 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1102 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1103 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1104 queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.
getWidth());
1122 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
unsigned int lineThickness) {
1123 if(m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1125 std::cerr <<
"There is no training image loaded !" << std::endl;
1130 int nbImg = (int) (m_mapOfImages.size() + 1);
1138 int nbWidth = nbImgSqrt;
1139 int nbHeight = nbImgSqrt;
1141 if(nbImgSqrt * nbImgSqrt < nbImg) {
1145 std::map<int, int> mapOfImageIdIndex;
1148 for(std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it, cpt++) {
1149 mapOfImageIdIndex[it->first] = cpt;
1151 if(maxW < it->second.getWidth()) {
1152 maxW = it->second.getWidth();
1155 if(maxH < it->second.getHeight()) {
1156 maxH = it->second.getHeight();
1161 int medianI = nbHeight / 2;
1162 int medianJ = nbWidth / 2;
1163 int medianIndex = medianI * nbWidth + medianJ;
1164 for(std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1166 int current_class_id_index = 0;
1167 if(mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1168 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1171 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1174 int indexI = current_class_id_index / nbWidth;
1175 int indexJ = current_class_id_index - (indexI * nbWidth);
1176 topLeftCorner.
set_ij((
int)maxH*indexI, (
int)maxW*indexJ);
1183 vpImagePoint topLeftCorner((
int)maxH*medianI, (
int)maxW*medianJ);
1184 for(std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1189 for(std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin();
1190 it != ransacInliers.end(); ++it) {
1195 for(std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1201 for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1202 int current_class_id = 0;
1203 if(mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t) it->trainIdx].class_id]] < medianIndex) {
1204 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t) it->trainIdx].class_id]];
1207 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t) it->trainIdx].class_id]] + 1;
1210 int indexI = current_class_id / nbWidth;
1211 int indexJ = current_class_id - (indexI * nbWidth);
1213 vpImagePoint end((
int)maxH*indexI + m_trainKeyPoints[(
size_t) it->trainIdx].pt.y,
1214 (
int)maxW*indexJ + m_trainKeyPoints[(
size_t) it->trainIdx].pt.x);
1215 vpImagePoint start((
int)maxH*medianI + m_queryFilteredKeyPoints[(
size_t) it->queryIdx].pt.y,
1216 (
int)maxW*medianJ + m_queryFilteredKeyPoints[(
size_t) it->queryIdx].pt.x);
1236 double &elapsedTime, std::vector<cv::Point3f> *trainPoints) {
1239 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1253 double &elapsedTime, std::vector<cv::Point3f> *trainPoints) {
1257 for(std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1258 itd != m_extractors.end(); ++itd) {
1262 if(trainPoints != NULL && !trainPoints->empty()) {
1264 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1267 itd->second->compute(matImg, keyPoints, descriptors);
1269 if(keyPoints.size() != keyPoints_tmp.size()) {
1272 std::map<size_t, size_t> mapOfKeypointHashes;
1274 for(std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end(); ++it, cpt++) {
1275 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1278 std::vector<cv::Point3f> trainPoints_tmp;
1279 for(std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1280 if(mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1281 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1286 *trainPoints = trainPoints_tmp;
1290 itd->second->compute(matImg, keyPoints, descriptors);
1294 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1298 itd->second->compute(matImg, keyPoints, desc);
1300 if(keyPoints.size() != keyPoints_tmp.size()) {
1303 std::map<size_t, size_t> mapOfKeypointHashes;
1305 for(std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end(); ++it, cpt++) {
1306 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1309 std::vector<cv::Point3f> trainPoints_tmp;
1310 cv::Mat descriptors_tmp;
1311 for(std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1312 if(mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1313 if(trainPoints != NULL && !trainPoints->empty()) {
1314 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1317 if(!descriptors.empty()) {
1318 descriptors_tmp.push_back(descriptors.row((
int) mapOfKeypointHashes[myKeypointHash(*it)]));
1323 if(trainPoints != NULL) {
1325 *trainPoints = trainPoints_tmp;
1328 descriptors_tmp.copyTo(descriptors);
1332 if(descriptors.empty()) {
1333 desc.copyTo(descriptors);
1335 cv::hconcat(descriptors, desc, descriptors);
1340 if(keyPoints.size() != (size_t) descriptors.rows) {
1341 std::cerr <<
"keyPoints.size() != (size_t) descriptors.rows" << std::endl;
1349 void vpKeyPoint::filterMatches() {
1350 std::vector<cv::KeyPoint> queryKpts;
1351 std::vector<cv::Point3f> trainPts;
1352 std::vector<cv::DMatch> m;
1355 double max_dist = 0;
1357 double min_dist = DBL_MAX;
1359 std::vector<double> distance_vec(m_knnMatches.size());
1362 for(
size_t i = 0; i < m_knnMatches.size(); i++) {
1363 double dist = m_knnMatches[i][0].distance;
1365 distance_vec[i] = dist;
1367 if (dist < min_dist) {
1370 if (dist > max_dist) {
1374 mean /= m_queryDescriptors.rows;
1377 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1378 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1379 double threshold = min_dist + stdev;
1381 for(
size_t i = 0; i < m_knnMatches.size(); i++) {
1382 if(m_knnMatches[i].size() >= 2) {
1384 float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
1387 double dist = m_knnMatches[i][0].distance;
1390 m.push_back(cv::DMatch((
int) queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
1392 if(!m_trainPoints.empty()) {
1393 trainPts.push_back(m_trainPoints[(
size_t)m_knnMatches[i][0].trainIdx]);
1395 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_knnMatches[i][0].queryIdx]);
1400 double max_dist = 0;
1403 double min_dist = DBL_MAX;
1405 std::vector<double> distance_vec(m_matches.size());
1406 for(
size_t i = 0; i < m_matches.size(); i++) {
1407 double dist = m_matches[i].distance;
1409 distance_vec[i] = dist;
1411 if (dist < min_dist) {
1414 if (dist > max_dist) {
1418 mean /= m_queryDescriptors.rows;
1420 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1421 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1428 for (
size_t i = 0; i < m_matches.size(); i++) {
1429 if(m_matches[i].distance <= threshold) {
1430 m.push_back(cv::DMatch((
int) queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
1432 if(!m_trainPoints.empty()) {
1433 trainPts.push_back(m_trainPoints[(
size_t)m_matches[i].trainIdx]);
1435 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_matches[i].queryIdx]);
1440 if(m_useSingleMatchFilter) {
1442 std::vector<cv::DMatch> mTmp;
1443 std::vector<cv::Point3f> trainPtsTmp;
1444 std::vector<cv::KeyPoint> queryKptsTmp;
1446 std::map<int, int> mapOfTrainIdx;
1448 for(std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1449 mapOfTrainIdx[it->trainIdx]++;
1453 for(std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1454 if(mapOfTrainIdx[it->trainIdx] == 1) {
1455 mTmp.push_back(cv::DMatch((
int) queryKptsTmp.size(), it->trainIdx, it->distance));
1457 if(!m_trainPoints.empty()) {
1458 trainPtsTmp.push_back(m_trainPoints[(
size_t) it->trainIdx]);
1460 queryKptsTmp.push_back(queryKpts[(
size_t) it->queryIdx]);
1464 m_filteredMatches = mTmp;
1465 m_objectFilteredPoints = trainPtsTmp;
1466 m_queryFilteredKeyPoints = queryKptsTmp;
1468 m_filteredMatches = m;
1469 m_objectFilteredPoints = trainPts;
1470 m_queryFilteredKeyPoints = queryKpts;
1481 objectPoints = m_objectFilteredPoints;
1500 keyPoints = m_queryFilteredKeyPoints;
1518 keyPoints = m_trainKeyPoints;
1536 points = m_trainPoints;
1545 points = m_trainVpPoints;
1551 void vpKeyPoint::init() {
1553 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
1555 if (!cv::initModule_nonfree()) {
1556 std::cerr <<
"Cannot init module non free, SIFT or SURF cannot be used."
1561 initDetectors(m_detectorNames);
1562 initExtractors(m_extractorNames);
1571 void vpKeyPoint::initDetector(
const std::string &detectorName) {
1572 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
1573 m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
1575 if(m_detectors[detectorName] == NULL) {
1576 std::stringstream ss_msg;
1577 ss_msg <<
"Fail to initialize the detector: " << detectorName <<
" or it is not available in OpenCV version: "
1578 << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
1582 std::string detectorNameTmp = detectorName;
1583 std::string pyramid =
"Pyramid";
1584 std::size_t pos = detectorName.find(pyramid);
1585 bool usePyramid =
false;
1586 if(pos != std::string::npos) {
1587 detectorNameTmp = detectorName.substr(pos + pyramid.size());
1591 if(detectorNameTmp ==
"SIFT") {
1592 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1593 cv::Ptr<cv::FeatureDetector> siftDetector = cv::xfeatures2d::SIFT::create();
1595 m_detectors[detectorNameTmp] = siftDetector;
1597 std::cerr <<
"Kind of non sense to use SIFT with Pyramid !" << std::endl;
1598 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1601 std::stringstream ss_msg;
1602 ss_msg <<
"Fail to initialize the detector: SIFT. OpenCV version "
1603 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
1606 }
else if(detectorNameTmp ==
"SURF") {
1607 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1608 cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
1610 m_detectors[detectorNameTmp] = surfDetector;
1612 std::cerr <<
"Kind of non sense to use SURF with Pyramid !" << std::endl;
1613 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
1616 std::stringstream ss_msg;
1617 ss_msg <<
"Fail to initialize the detector: SURF. OpenCV version "
1618 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
1621 }
else if(detectorNameTmp ==
"FAST") {
1622 cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
1624 m_detectors[detectorNameTmp] = fastDetector;
1627 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1629 }
else if(detectorNameTmp ==
"MSER") {
1630 cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
1632 m_detectors[detectorNameTmp] = fastDetector;
1635 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1637 }
else if(detectorNameTmp ==
"ORB") {
1638 cv::Ptr<cv::FeatureDetector> orbDetector = cv::ORB::create();
1640 m_detectors[detectorNameTmp] = orbDetector;
1642 std::cerr <<
"Kind of non sense to use ORB with Pyramid !" << std::endl;
1643 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
1645 }
else if(detectorNameTmp ==
"BRISK") {
1646 cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
1648 m_detectors[detectorNameTmp] = briskDetector;
1650 std::cerr <<
"Kind of non sense to use BRISK with Pyramid !" << std::endl;
1651 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
1653 }
else if(detectorNameTmp ==
"KAZE") {
1654 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
1656 m_detectors[detectorNameTmp] = kazeDetector;
1658 std::cerr <<
"Kind of non sense to use KAZE with Pyramid !" << std::endl;
1659 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
1661 }
else if(detectorNameTmp ==
"AKAZE") {
1662 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
1664 m_detectors[detectorNameTmp] = akazeDetector;
1666 std::cerr <<
"Kind of non sense to use AKAZE with Pyramid !" << std::endl;
1667 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
1669 }
else if(detectorNameTmp ==
"GFTT") {
1670 cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
1672 m_detectors[detectorNameTmp] = gfttDetector;
1675 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
1677 }
else if(detectorNameTmp ==
"SimpleBlob") {
1678 cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
1680 m_detectors[detectorNameTmp] = simpleBlobDetector;
1683 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
1685 }
else if(detectorNameTmp ==
"STAR") {
1686 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1687 cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
1689 m_detectors[detectorNameTmp] = starDetector;
1692 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
1695 std::stringstream ss_msg;
1696 ss_msg <<
"Fail to initialize the detector: STAR. OpenCV version "
1697 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
1700 }
else if(detectorNameTmp ==
"AGAST") {
1701 cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
1703 m_detectors[detectorNameTmp] = agastDetector;
1705 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
1708 std::cerr <<
"The detector:" << detectorNameTmp <<
" is not available." << std::endl;
1711 bool detectorInitialized =
false;
1713 detectorInitialized = (m_detectors[detectorNameTmp] != NULL);
1715 detectorInitialized = (m_detectors[detectorName] != NULL);
1718 if(!detectorInitialized) {
1719 std::stringstream ss_msg;
1720 ss_msg <<
"Fail to initialize the detector: " << detectorNameTmp <<
" or it is not available in OpenCV version: "
1721 << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// NOTE(review): mangled extraction fragment — statements are split across lines and the
// leading integers are residual source-file line numbers, not code. The loop body and
// closing braces were lost in extraction; code kept byte-identical, comments only.
// Purpose (from visible code): iterate over the given detector names; presumably each
// iteration initializes the corresponding detector — TODO confirm against upstream source.
1733 void vpKeyPoint::initDetectors(
const std::vector<std::string> &detectorNames) {
1734 for(std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
// NOTE(review): mangled extraction fragment — statements are split across lines and the
// leading integers are residual source-file line numbers, not code. Several original lines
// (apparently #else/#endif pairs, throw statements and closing braces) were lost in
// extraction; code kept byte-identical, comments only.
// Purpose (from visible code): create the descriptor extractor matching extractorName and
// store it in m_extractors. Pre-OpenCV-3 uses the string-based factory; OpenCV 3+
// dispatches on the name, with xfeatures2d-only extractors guarded by
// VISP_HAVE_OPENCV_XFEATURES2D.
1744 void vpKeyPoint::initExtractor(
const std::string &extractorName) {
// Pre-3.0 OpenCV: a single string-based factory handles every extractor name.
1745 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
1746 m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
1748 if(extractorName ==
"SIFT") {
1749 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1750 m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
// Branch building the error message when xfeatures2d is absent
// (the throw itself appears to have been lost in extraction).
1752 std::stringstream ss_msg;
1753 ss_msg <<
"Fail to initialize the extractor: SIFT. OpenCV version "
1754 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
1757 }
else if(extractorName ==
"SURF") {
1758 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
// SURF created with explicit parameters; the trailing true presumably selects
// extended (128-element) descriptors — TODO confirm against cv::xfeatures2d::SURF::create.
1760 m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3,
true);
1762 std::stringstream ss_msg;
1763 ss_msg <<
"Fail to initialize the extractor: SURF. OpenCV version "
1764 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
1767 }
else if(extractorName ==
"ORB") {
1768 m_extractors[extractorName] = cv::ORB::create();
1769 }
else if(extractorName ==
"BRISK") {
1770 m_extractors[extractorName] = cv::BRISK::create();
1771 }
else if(extractorName ==
"FREAK") {
1772 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1773 m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
1775 std::stringstream ss_msg;
1776 ss_msg <<
"Fail to initialize the extractor: FREAK. OpenCV version "
1777 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
1780 }
else if(extractorName ==
"BRIEF") {
1781 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1782 m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
1784 std::stringstream ss_msg;
1785 ss_msg <<
"Fail to initialize the extractor: BRIEF. OpenCV version "
1786 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
1789 }
else if(extractorName ==
"KAZE") {
1790 m_extractors[extractorName] = cv::KAZE::create();
1791 }
else if(extractorName ==
"AKAZE") {
1792 m_extractors[extractorName] = cv::AKAZE::create();
1793 }
else if(extractorName ==
"DAISY") {
1794 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1795 m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
1797 std::stringstream ss_msg;
1798 ss_msg <<
"Fail to initialize the extractor: DAISY. OpenCV version "
1799 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
1802 }
else if(extractorName ==
"LATCH") {
1803 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1804 m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
1806 std::stringstream ss_msg;
1807 ss_msg <<
"Fail to initialize the extractor: LATCH. OpenCV version "
1808 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
1811 }
else if(extractorName ==
"LUCID") {
1812 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
// LUCID with lucid_kernel=1, blur_kernel=2 — TODO confirm parameter meaning
// against cv::xfeatures2d::LUCID::create.
1813 m_extractors[extractorName] = cv::xfeatures2d::LUCID::create(1, 2);
1815 std::stringstream ss_msg;
1816 ss_msg <<
"Fail to initialize the extractor: LUCID. OpenCV version "
1817 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
// Fallback branch: unrecognized extractor name is only reported, not fatal here.
1821 std::cerr <<
"The extractor:" << extractorName <<
" is not available." << std::endl;
// Post-construction check: the map entry must hold a valid extractor.
1825 if(m_extractors[extractorName] == NULL) {
1826 std::stringstream ss_msg;
1827 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
" or it is not available in OpenCV version: "
1828 << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV 2.4.x only: enable SURF extended descriptors via the string property API.
1832 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1833 if(extractorName ==
"SURF") {
1835 m_extractors[extractorName]->set(
"extended", 1);
// NOTE(review): mangled extraction fragment — leading integers are residual source-file
// line numbers; the loop body and closing braces were lost in extraction. Code kept
// byte-identical, comments only.
// Purpose (from visible code): iterate over the given extractor names; presumably each
// iteration initializes the corresponding extractor — TODO confirm against upstream source.
1845 void vpKeyPoint::initExtractors(
const std::vector<std::string> &extractorNames) {
1846 for(std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
// NOTE(review): mangled extraction fragment — the enclosing function signature(s) and many
// interior lines were lost in extraction; leading integers are residual source-file line
// numbers. Code kept byte-identical, comments only.
// Visible logic: scan the configured extractors to determine a common descriptor type
// (defaulting to CV_32F), then build the descriptor matcher. The same scan appears twice
// (residual lines 1850-1858 and 1871-1879) — possibly two separate functions whose
// boundaries were dropped; TODO confirm against upstream source.
1850 int descriptorType = CV_32F;
1851 bool firstIteration =
true;
1852 for(std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
1853 it != m_extractors.end(); ++it) {
1854 if(firstIteration) {
// First extractor fixes the reference descriptor type ...
1855 firstIteration =
false;
1856 descriptorType = it->second->descriptorType();
// ... subsequent extractors are checked against it (mismatch handling lost in extraction).
1858 if(descriptorType != it->second->descriptorType()) {
1871 int descriptorType = CV_32F;
1872 bool firstIteration =
true;
1873 for(std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
1874 it != m_extractors.end(); ++it) {
1875 if(firstIteration) {
1876 firstIteration =
false;
1877 descriptorType = it->second->descriptorType();
1879 if(descriptorType != it->second->descriptorType()) {
// FLANN matcher: choose the index type from the descriptor type.
1885 if(matcherName ==
"FlannBased") {
1886 if(m_extractors.empty()) {
1887 std::cout <<
"Warning: No extractor initialized, by default use floating values (CV_32F) "
1888 "for descriptor type !" << std::endl;
// Binary (CV_8U) descriptors -> LSH index; otherwise KD-tree index.
1891 if(descriptorType == CV_8U) {
1892 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
1893 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
1895 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::LshIndexParams(12, 20, 2));
1898 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
1899 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
1901 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::KDTreeIndexParams());
// Non-FLANN matchers: delegate to the OpenCV string factory.
1905 m_matcher = cv::DescriptorMatcher::create(matcherName);
// OpenCV 2.4.x brute-force matcher: optionally enable cross-check filtering
// (only meaningful when k-NN matching is disabled).
1908 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1909 if(m_matcher != NULL && !m_useKnn && matcherName ==
"BruteForce") {
1910 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
// Post-construction check: matcher must exist (the throw was lost in extraction).
1914 if(m_matcher == NULL) {
1915 std::stringstream ss_msg;
1916 ss_msg <<
"Fail to initialize the matcher: " << matcherName <<
" or it is not available in OpenCV version: "
1917 << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// NOTE(review): mangled extraction fragment — the enclosing function signatures were lost;
// leading integers are residual source-file line numbers. Code kept byte-identical,
// comments only.
// Visible logic: compose a side-by-side "matching" image. First fragment (1933-1935)
// inserts a reference image and the current image at given offsets; second fragment
// (1947+) lays out all training images plus the current image on a near-square grid,
// placing the current image in the median cell.
1933 IMatching.
insert(IRef, topLeftCorner);
1935 IMatching.
insert(ICurrent, topLeftCorner);
// Grid layout: one cell per training image, plus one for the current image.
1947 int nbImg = (int) (m_mapOfImages.size() + 1);
1949 if(m_mapOfImages.empty()) {
1950 std::cerr <<
"There is no training image loaded !" << std::endl;
1960 int nbWidth = nbImgSqrt;
1961 int nbHeight = nbImgSqrt;
// Grow the grid when nbImg is not a perfect square (adjustment lost in extraction).
1963 if(nbImgSqrt * nbImgSqrt < nbImg) {
// Cell size = maximum training-image width/height.
1968 for(std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it) {
1969 if(maxW < it->second.getWidth()) {
1970 maxW = it->second.getWidth();
1973 if(maxH < it->second.getHeight()) {
1974 maxH = it->second.getHeight();
// The median cell is reserved for the current image.
1979 int medianI = nbHeight / 2;
1980 int medianJ = nbWidth / 2;
1981 int medianIndex = medianI * nbWidth + medianJ;
1984 for(std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it, cpt++) {
1985 int local_cpt = cpt;
// Skip over the median cell (the offset increment was lost in extraction).
1986 if(cpt >= medianIndex) {
1990 int indexI = local_cpt / nbWidth;
1991 int indexJ = local_cpt - (indexI * nbWidth);
1992 vpImagePoint topLeftCorner((
int)maxH*indexI, (
int)maxW*indexJ);
1994 IMatching.
insert(it->second, topLeftCorner);
// Finally place the current image in the median cell.
1997 vpImagePoint topLeftCorner((
int)maxH*medianI, (
int)maxW*medianJ);
1998 IMatching.
insert(ICurrent, topLeftCorner);
// NOTE(review): mangled extraction fragment — the enclosing function signature and most of
// the body were lost; leading integers are residual source-file line numbers. Code kept
// byte-identical, comments only.
// Visible logic: guarded by libxml2 availability, reset the configured detector/extractor
// names and instances, then parse an XML configuration file through an xmlp parser object
// (declared outside this fragment).
2002 #ifdef VISP_HAVE_XML2
2013 m_detectorNames.clear();
2014 m_extractorNames.clear();
2015 m_detectors.clear();
2016 m_extractors.clear();
2018 std::cout <<
" *********** Parsing XML for configuration for vpKeyPoint ************ " << std::endl;
2019 xmlp.
parse(configFile);
// NOTE(review): mangled extraction fragment — the enclosing function signature and many
// interior lines (case labels, braces, variable declarations) were lost; leading integers
// are residual source-file line numbers. Code kept byte-identical, comments only.
// Visible logic: binary-format branch of the learning-data loader. Reads training images,
// keypoints, optional 3D points and descriptors from a binary stream into the class state
// (m_trainKeyPoints, m_trainPoints, m_mapOfImageId, m_mapOfImages, m_trainDescriptors).
// NOTE(review): the reads use raw file.read on native-endian integers/floats while the
// header (not visible here) defines endianness-swap helpers — presumably the swap calls
// were lost in extraction or this code assumes little-endian hosts; TODO confirm.
2083 int startClassId = 0;
2084 int startImageId = 0;
// When not appending, previous training data is discarded ...
2086 m_trainKeyPoints.clear();
2087 m_trainPoints.clear();
2088 m_mapOfImageId.clear();
2089 m_mapOfImages.clear();
// ... otherwise offset new class/image ids past the current maxima.
2092 for(std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
2093 if(startClassId < it->first) {
2094 startClassId = it->first;
2099 for(std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it) {
2100 if(startImageId < it->first) {
2101 startImageId = it->first;
2108 if(!parent.empty()) {
2113 std::ifstream file(filename.c_str(), std::ifstream::binary);
2114 if(!file.is_open()){
// Header: number of stored training images.
2120 file.read((
char *)(&nbImgs),
sizeof(nbImgs));
2122 #if !defined(VISP_HAVE_MODULE_IO)
2124 std::cout <<
"Warning: The learning file contains image data that will not be loaded as visp_io module "
2125 "is not available !" << std::endl;
// Per-image records: id, path length, path characters.
2129 for(
int i = 0; i < nbImgs; i++) {
2132 file.read((
char *)(&
id),
sizeof(
id));
2135 file.read((
char *)(&length),
sizeof(length));
// NOTE(review): raw new[] for the path buffer; the matching delete[] is not visible
// in this fragment — presumably lost in extraction, TODO confirm no leak upstream.
2137 char* path =
new char[length + 1];
2139 for(
int cpt = 0; cpt < length; cpt++) {
2141 file.read((
char *)(&c),
sizeof(c));
2144 path[length] =
'\0';
2147 #ifdef VISP_HAVE_MODULE_IO
2155 m_mapOfImages[
id + startImageId] = I;
// Flag telling whether 3D object coordinates accompany each keypoint.
2163 int have3DInfoInt = 0;
2164 file.read((
char *)(&have3DInfoInt),
sizeof(have3DInfoInt));
2165 bool have3DInfo = have3DInfoInt != 0;
// Descriptor matrix header: rows, cols, OpenCV type id (default 5 == CV_32F).
2169 file.read((
char *)(&nRows),
sizeof(nRows));
2173 file.read((
char *)(&nCols),
sizeof(nCols));
2176 int descriptorType = 5;
2177 file.read((
char *)(&descriptorType),
sizeof(descriptorType));
2179 cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
// One record per keypoint row: 2D location, scale/orientation/score, ids.
2180 for(
int i = 0; i < nRows; i++) {
2182 float u, v, size, angle, response;
2183 int octave, class_id, image_id;
2184 file.read((
char *)(&u),
sizeof(u));
2185 file.read((
char *)(&v),
sizeof(v));
2186 file.read((
char *)(&size),
sizeof(size));
2187 file.read((
char *)(&angle),
sizeof(angle));
2188 file.read((
char *)(&response),
sizeof(response));
2189 file.read((
char *)(&octave),
sizeof(octave));
2190 file.read((
char *)(&class_id),
sizeof(class_id));
2191 file.read((
char *)(&image_id),
sizeof(image_id));
// Class ids are shifted by startClassId so appended data stays unique.
2192 cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
2193 m_trainKeyPoints.push_back(keyPoint);
2195 if(image_id != -1) {
2196 #ifdef VISP_HAVE_MODULE_IO
2198 m_mapOfImageId[class_id] = image_id + startImageId;
// Optional 3D object coordinates for the keypoint.
2205 file.read((
char *)(&oX),
sizeof(oX));
2206 file.read((
char *)(&oY),
sizeof(oY));
2207 file.read((
char *)(&oZ),
sizeof(oZ));
2208 m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
// Descriptor row: element size/type driven by descriptorType
// (the case labels of this switch were lost in extraction).
2211 for(
int j = 0; j < nCols; j++) {
2213 switch(descriptorType) {
2216 unsigned char value;
2217 file.read((
char *)(&value),
sizeof(value));
2218 trainDescriptorsTmp.at<
unsigned char>(i, j) = value;
2225 file.read((
char *)(&value),
sizeof(value));
2226 trainDescriptorsTmp.at<
char>(i, j) = value;
2232 unsigned short int value;
2233 file.read((
char *)(&value),
sizeof(value));
2234 trainDescriptorsTmp.at<
unsigned short int>(i, j) = value;
2241 file.read((
char *)(&value),
sizeof(value));
2242 trainDescriptorsTmp.at<
short int>(i, j) = value;
2249 file.read((
char *)(&value),
sizeof(value));
2250 trainDescriptorsTmp.at<
int>(i, j) = value;
2257 file.read((
char *)(&value),
sizeof(value));
2258 trainDescriptorsTmp.at<
float>(i, j) = value;
2265 file.read((
char *)(&value),
sizeof(value));
2266 trainDescriptorsTmp.at<
double>(i, j) = value;
// Default/fallback case: store as float.
2273 file.read((
char *)(&value),
sizeof(value));
2274 trainDescriptorsTmp.at<
float>(i, j) = value;
// Merge with existing descriptors when appending; otherwise replace.
2281 if(!append || m_trainDescriptors.empty()) {
2282 trainDescriptorsTmp.copyTo(m_trainDescriptors);
2284 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
// NOTE(review): mangled extraction fragment — many interior lines (xmlFreeDoc/cleanup,
// braces, some element names) were lost; leading integers are residual source-file line
// numbers. Code kept byte-identical, comments only.
// Visible logic: XML-format branch of the learning-data loader (libxml2). Walks the
// document tree: "TrainingImageInfo" nodes restore training images, "DescriptorsInfo"
// gives the descriptor matrix dimensions/type, and each "DescriptorInfo" node carries one
// keypoint (u, v, size, angle, response, octave, class_id, image_id), optional 3D point
// (oX, oY, oZ) and its descriptor values. Ends by merging descriptors and refreshing the
// matcher with the training set.
2289 #ifdef VISP_HAVE_XML2
2290 xmlDocPtr doc = NULL;
2291 xmlNodePtr root_element = NULL;
2301 doc = xmlReadFile(filename.c_str(), NULL, 0);
2307 root_element = xmlDocGetRootElement(doc);
2309 xmlNodePtr first_level_node = NULL;
2312 int descriptorType = CV_32F;
2313 int nRows = 0, nCols = 0;
2316 cv::Mat trainDescriptorsTmp;
2318 for (first_level_node = root_element->children; first_level_node;
2319 first_level_node = first_level_node->next) {
2321 std::string name((
char *) first_level_node->name);
// Section 1: stored training images (id + path).
2322 if (first_level_node->type == XML_ELEMENT_NODE && name ==
"TrainingImageInfo") {
2323 xmlNodePtr image_info_node = NULL;
2325 for (image_info_node = first_level_node->children; image_info_node; image_info_node =
2326 image_info_node->next) {
2327 name = std::string ((
char *) image_info_node->name);
2329 if(name ==
"trainImg") {
2331 xmlChar *image_id_property = xmlGetProp(image_info_node, BAD_CAST
"image_id");
2333 if(image_id_property) {
2334 id = std::atoi((
char *) image_id_property);
2336 xmlFree(image_id_property);
2339 #ifdef VISP_HAVE_MODULE_IO
2340 std::string path((
char *) image_info_node->children->content);
2349 m_mapOfImages[
id + startImageId] = I;
// Section 2: descriptor matrix shape and OpenCV type.
2353 }
else if(first_level_node->type == XML_ELEMENT_NODE && name ==
"DescriptorsInfo") {
2354 xmlNodePtr descriptors_info_node = NULL;
2355 for (descriptors_info_node = first_level_node->children; descriptors_info_node; descriptors_info_node =
2356 descriptors_info_node->next) {
2357 if (descriptors_info_node->type == XML_ELEMENT_NODE) {
2358 name = std::string ((
char *) descriptors_info_node->name);
2360 if(name ==
"nrows") {
2361 nRows = std::atoi((
char *) descriptors_info_node->children->content);
2362 }
else if(name ==
"ncols") {
2363 nCols = std::atoi((
char *) descriptors_info_node->children->content);
2364 }
else if(name ==
"type") {
2365 descriptorType = std::atoi((
char *) descriptors_info_node->children->content);
2370 trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
// Section 3: one node per training keypoint.
2371 }
else if (first_level_node->type == XML_ELEMENT_NODE && name ==
"DescriptorInfo") {
2372 xmlNodePtr point_node = NULL;
2373 double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
2374 int octave = 0, class_id = 0, image_id = 0;
2375 double oX = 0.0, oY = 0.0, oZ = 0.0;
2377 std::stringstream ss;
2379 for (point_node = first_level_node->children; point_node; point_node =
2381 if (point_node->type == XML_ELEMENT_NODE) {
2382 name = std::string ((
char *) point_node->name);
2386 u = std::strtod((
char *) point_node->children->content, &pEnd);
2387 }
else if(name ==
"v") {
2388 v = std::strtod((
char *) point_node->children->content, &pEnd);
2389 }
else if(name ==
"size") {
2390 size = std::strtod((
char *) point_node->children->content, &pEnd);
2391 }
else if(name ==
"angle") {
2392 angle = std::strtod((
char *) point_node->children->content, &pEnd);
2393 }
else if(name ==
"response") {
2394 response = std::strtod((
char *) point_node->children->content, &pEnd);
2395 }
else if(name ==
"octave") {
2396 octave = std::atoi((
char *) point_node->children->content);
2397 }
else if(name ==
"class_id") {
2398 class_id = std::atoi((
char *) point_node->children->content);
// NOTE(review): the keypoint is pushed as soon as class_id is parsed, so it
// relies on the XML emitting u/v/size/angle/response/octave before class_id.
2399 cv::KeyPoint keyPoint(cv::Point2f((
float) u, (
float) v), (
float) size,
2400 (
float) angle, (
float) response, octave, (class_id + startClassId));
2401 m_trainKeyPoints.push_back(keyPoint);
2402 }
else if(name ==
"image_id") {
2403 image_id = std::atoi((
char *) point_node->children->content);
2404 if(image_id != -1) {
2405 #ifdef VISP_HAVE_MODULE_IO
2407 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2410 }
else if (name ==
"oX") {
2411 oX = std::atof((
char *) point_node->children->content);
2412 }
else if (name ==
"oY") {
2413 oY = std::atof((
char *) point_node->children->content);
2414 }
else if (name ==
"oZ") {
// oZ is parsed last of the three, so it also triggers storing the 3D point.
2415 oZ = std::atof((
char *) point_node->children->content);
2416 m_trainPoints.push_back(cv::Point3f((
float) oX, (
float) oY, (
float) oZ));
2417 }
else if (name ==
"desc") {
// Descriptor values parsed from child nodes through the stringstream ss,
// converted per descriptorType (case labels lost in extraction).
2418 xmlNodePtr descriptor_value_node = NULL;
2421 for (descriptor_value_node = point_node->children;
2422 descriptor_value_node; descriptor_value_node =
2423 descriptor_value_node->next) {
2425 if (descriptor_value_node->type == XML_ELEMENT_NODE) {
2427 std::string parseStr((
char *) descriptor_value_node->children->content);
2432 switch(descriptorType) {
// 8-bit types go through an int parse to avoid reading a single character.
2438 trainDescriptorsTmp.at<
unsigned char>(i, j) = (
unsigned char) parseValue;
2446 trainDescriptorsTmp.at<
char>(i, j) = (
char) parseValue;
2450 ss >> trainDescriptorsTmp.at<
unsigned short int>(i, j);
2454 ss >> trainDescriptorsTmp.at<
short int>(i, j);
2458 ss >> trainDescriptorsTmp.at<
int>(i, j);
2462 ss >> trainDescriptorsTmp.at<
float>(i, j);
2466 ss >> trainDescriptorsTmp.at<
double>(i, j);
2470 ss >> trainDescriptorsTmp.at<
float>(i, j);
2474 std::cerr <<
"Error when converting:" << ss.str() << std::endl;
// Merge with existing descriptors when appending; otherwise replace.
2487 if(!append || m_trainDescriptors.empty()) {
2488 trainDescriptorsTmp.copyTo(m_trainDescriptors);
2490 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2502 std::cout <<
"Error: libxml2 is required !" << std::endl;
// Refresh the matcher with the (possibly appended) training descriptors.
2512 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
// NOTE(review): mangled extraction fragment — the start of the signature and several
// interior lines were lost; leading integers are residual source-file line numbers. Code
// kept byte-identical, comments only.
// Visible logic: match query descriptors against train descriptors, filling matches and
// (elsewhere) elapsedTime. Supports k-NN (k=2) and plain matching; when
// m_useMatchTrainToQuery is set, matching runs train->query on a cloned matcher and the
// DMatch query/train indices are swapped back so results look like query->train.
2527 std::vector<cv::DMatch> &matches,
double &elapsedTime) {
2531 m_knnMatches.clear();
2533 if(m_useMatchTrainToQuery) {
2534 std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
// Clone with emptyTrainData=true so the temporary matcher starts without a train set.
2537 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
2538 matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
// Swap queryIdx/trainIdx to restore the query->train convention.
2540 for(std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin(); it1 != knnMatchesTmp.end(); ++it1) {
2541 std::vector<cv::DMatch> tmp;
2542 for(std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
2543 tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
2545 m_knnMatches.push_back(tmp);
// Collapse each k-NN candidate list to a single best match.
2548 matches.resize(m_knnMatches.size());
2549 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
2552 m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
2553 matches.resize(m_knnMatches.size());
2554 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
// Non-k-NN path: single best match per descriptor.
2559 if(m_useMatchTrainToQuery) {
2560 std::vector<cv::DMatch> matchesTmp;
2562 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
2563 matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
2565 for(std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
2566 matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
2570 m_matcher->match(queryDescriptors, matches);
// NOTE(review): mangled extraction fragment — signatures of (apparently) two matchPoint
// overloads were partially lost; leading integers are residual source-file line numbers.
// Code kept byte-identical, comments only.
// Visible logic: detect/extract keypoints on the current image (optionally with affine
// simulation across several detectors), match against the train descriptors, then build
// the filtered keypoint/match/3D-point sets and return the number of filtered matches.
2598 const unsigned int height,
const unsigned int width) {
2611 const vpRect& rectangle) {
// Guard: matching requires a computed reference (training) set.
2612 if(m_trainDescriptors.empty()) {
2613 std::cerr <<
"Reference is empty." << std::endl;
2615 std::cerr <<
"Reference is not computed." << std::endl;
2617 std::cerr <<
"Matching is not possible." << std::endl;
// ASIFT-style path: detect/extract over simulated affine views, then flatten results.
2622 if(m_useAffineDetection) {
2623 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
2624 std::vector<cv::Mat> listOfQueryDescriptors;
2630 m_queryKeyPoints.clear();
2631 for(std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
2632 it != listOfQueryKeyPoints.end(); ++it) {
2633 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
2637 for(std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end(); ++it) {
2640 it->copyTo(m_queryDescriptors);
2642 m_queryDescriptors.push_back(*it);
// Standard path: one detection + extraction pass on the (optionally cropped) image.
2646 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
2647 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
2650 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
2653 m_queryFilteredKeyPoints.clear();
2654 m_objectFilteredPoints.clear();
2655 m_filteredMatches.clear();
// Train->query mode: rebuild matches so queryIdx indexes the filtered keypoint list.
2659 if(m_useMatchTrainToQuery) {
2661 m_queryFilteredKeyPoints.clear();
2662 m_filteredMatches.clear();
2663 for(std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
2664 m_filteredMatches.push_back(cv::DMatch((
int) m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
2665 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t) it->queryIdx]);
2668 m_queryFilteredKeyPoints = m_queryKeyPoints;
2669 m_filteredMatches = m_matches;
// Collect the 3D object points paired with each retained match, when available.
2672 if(!m_trainPoints.empty()) {
2673 m_objectFilteredPoints.clear();
2676 for(std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
2678 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t) it->trainIdx]);
2687 return static_cast<unsigned int>(m_filteredMatches.size());
// NOTE(review): mangled extraction fragment — the function signature and several interior
// lines were lost; leading integers are residual source-file line numbers. Code kept
// byte-identical, comments only.
// Visible logic: pose-estimating matchPoint overload. Repeats the detect/extract/match
// pipeline, then estimates cMo via RANSAC — either ViSP VVS pose (m_useRansacVVS) on
// vpPoint correspondences, or an OpenCV-style 2D/3D pose — and records inliers, outliers,
// reprojection error and timings.
2707 if(m_trainDescriptors.empty()) {
2708 std::cerr <<
"Reference is empty." << std::endl;
2710 std::cerr <<
"Reference is not computed." << std::endl;
2712 std::cerr <<
"Matching is not possible." << std::endl;
2717 if(m_useAffineDetection) {
2718 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
2719 std::vector<cv::Mat> listOfQueryDescriptors;
2725 m_queryKeyPoints.clear();
2726 for(std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
2727 it != listOfQueryKeyPoints.end(); ++it) {
2728 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
2732 for(std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end(); ++it) {
2735 it->copyTo(m_queryDescriptors);
2737 m_queryDescriptors.push_back(*it);
2741 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
2742 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
2745 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
2747 elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
2750 m_queryFilteredKeyPoints.clear();
2751 m_objectFilteredPoints.clear();
2752 m_filteredMatches.clear();
2756 if(m_useMatchTrainToQuery) {
2758 m_queryFilteredKeyPoints.clear();
2759 m_filteredMatches.clear();
2760 for(std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
2761 m_filteredMatches.push_back(cv::DMatch((
int) m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
2762 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t) it->queryIdx]);
2765 m_queryFilteredKeyPoints = m_queryKeyPoints;
2766 m_filteredMatches = m_matches;
2769 if(!m_trainPoints.empty()) {
2770 m_objectFilteredPoints.clear();
2773 for(std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
2775 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t) it->trainIdx]);
2786 m_ransacInliers.clear();
2787 m_ransacOutliers.clear();
// Pose path 1: ViSP virtual visual servoing pose from vpPoint correspondences.
2789 if(m_useRansacVVS) {
2790 std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
2793 for(std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin(); it != m_objectFilteredPoints.end();
// Note vpImagePoint takes (row=y, col=x) — cv::KeyPoint.pt is (x, y).
2798 vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
2800 double x = 0.0, y = 0.0;
2805 objectVpPoints[cpt] = pt;
2808 std::vector<vpPoint> inliers;
2809 std::vector<unsigned int> inlierIndex;
2811 bool res =
computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
2813 std::map<unsigned int, bool> mapOfInlierIndex;
2814 m_matchRansacKeyPointsToPoints.clear();
// Pair each inlier keypoint with its 3D point for error computation / display.
2816 for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
2817 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(m_queryFilteredKeyPoints[(
size_t)(*it)],
2818 m_objectFilteredPoints[(
size_t)(*it)]));
2819 mapOfInlierIndex[*it] =
true;
// Keypoints not flagged as inliers are reported as RANSAC outliers.
2822 for(
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
2823 if(mapOfInlierIndex.find((
unsigned int) i) == mapOfInlierIndex.end()) {
2824 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
2828 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
2830 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
2831 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(), m_ransacInliers.begin(),
2832 matchRansacToVpImage);
2834 elapsedTime += m_poseTime;
// Pose path 2: 2D image points vs 3D object points (OpenCV-style RANSAC pose).
2838 std::vector<cv::Point2f> imageFilteredPoints;
2839 cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
2840 std::vector<int> inlierIndex;
2841 bool res =
computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
2843 std::map<int, bool> mapOfInlierIndex;
2844 m_matchRansacKeyPointsToPoints.clear();
2846 for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
2847 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(m_queryFilteredKeyPoints[(
size_t)(*it)],
2848 m_objectFilteredPoints[(
size_t)(*it)]));
2849 mapOfInlierIndex[*it] =
true;
2852 for(
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
2853 if(mapOfInlierIndex.find((
int) i) == mapOfInlierIndex.end()) {
2854 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
2858 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
2860 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
2861 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(), m_ransacInliers.begin(),
2862 matchRansacToVpImage);
2864 elapsedTime += m_poseTime;
// NOTE(review): mangled extraction fragment — the start of the signature and several
// interior lines were lost; leading integers are residual source-file line numbers. Code
// kept byte-identical, comments only.
// Visible logic: decide whether the trained object is present in the current image.
// Computes a detection score (#matches / mean descriptor distance); with >= 4 matches,
// verifies geometry with a RANSAC homography (planar object, 6-px reprojection gate) or,
// with >= 8 matches, a RANSAC fundamental matrix; inliers define a bounding region and
// center of gravity. Returns presence per the configured detection criterion.
2888 const bool isPlanarObject, std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
2889 double *meanDescriptorDistance,
double *detection_score,
const vpRect& rectangle) {
2890 if(imPts1 != NULL && imPts2 != NULL) {
// Quality statistics over the filtered matches.
2897 double meanDescriptorDistanceTmp = 0.0;
2898 for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
2899 meanDescriptorDistanceTmp += (double) it->distance;
2902 meanDescriptorDistanceTmp /= (double) m_filteredMatches.size();
2903 double score = (double) m_filteredMatches.size() / meanDescriptorDistanceTmp;
2905 if(meanDescriptorDistance != NULL) {
2906 *meanDescriptorDistance = meanDescriptorDistanceTmp;
2908 if(detection_score != NULL) {
2909 *detection_score = score;
// Geometric verification requires at least 4 correspondences (homography minimum).
2912 if(m_filteredMatches.size() >= 4) {
2914 std::vector<cv::Point2f> points1(m_filteredMatches.size());
2916 std::vector<cv::Point2f> points2(m_filteredMatches.size());
2918 for(
size_t i = 0; i < m_filteredMatches.size(); i++) {
2919 points1[i] = cv::Point2f(m_trainKeyPoints[(
size_t)m_filteredMatches[i].trainIdx].pt);
2920 points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(
size_t)m_filteredMatches[i].queryIdx].pt);
2923 std::vector<vpImagePoint> inliers;
2924 if(isPlanarObject) {
// Flag name changed CV_RANSAC -> cv::RANSAC at OpenCV 3.
2925 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2926 cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
2928 cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
// Inlier test: reproject each train point through H and gate on pixel error.
2931 for(
size_t i = 0; i < m_filteredMatches.size(); i++ ) {
2933 cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
2934 realPoint.at<
double>(0,0) = points1[i].x;
2935 realPoint.at<
double>(1,0) = points1[i].y;
2936 realPoint.at<
double>(2,0) = 1.f;
2938 cv::Mat reprojectedPoint = homographyMatrix * realPoint;
2939 double err_x = (reprojectedPoint.at<
double>(0,0) / reprojectedPoint.at<
double>(2,0)) - points2[i].x;
2940 double err_y = (reprojectedPoint.at<
double>(1,0) / reprojectedPoint.at<
double>(2,0)) - points2[i].y;
2941 double reprojectionError = std::sqrt(err_x*err_x + err_y*err_y);
2943 if(reprojectionError < 6.0) {
2944 inliers.push_back(
vpImagePoint((
double) points2[i].y, (
double) points2[i].x));
2945 if(imPts1 != NULL) {
2946 imPts1->push_back(
vpImagePoint((
double) points1[i].y, (
double) points1[i].x));
2949 if(imPts2 != NULL) {
2950 imPts2->push_back(
vpImagePoint((
double) points2[i].y, (
double) points2[i].x));
// Non-planar object: fundamental-matrix RANSAC needs >= 8 correspondences.
2954 }
else if(m_filteredMatches.size() >= 8) {
2955 cv::Mat fundamentalInliers;
2956 cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
2958 for(
size_t i = 0; i < (size_t) fundamentalInliers.rows; i++) {
2959 if(fundamentalInliers.at<uchar>((
int) i, 0)) {
2960 inliers.push_back(
vpImagePoint((
double) points2[i].y, (
double) points2[i].x));
2962 if(imPts1 != NULL) {
2963 imPts1->push_back(
vpImagePoint((
double) points1[i].y, (
double) points1[i].x));
2966 if(imPts2 != NULL) {
2967 imPts2->push_back(
vpImagePoint((
double) points2[i].y, (
double) points2[i].x));
// Localize the detection: mean of the inlier image points = center of gravity.
2973 if(!inliers.empty()) {
2979 double meanU = 0.0, meanV = 0.0;
2980 for(std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
2981 meanU += it->get_u();
2982 meanV += it->get_v();
2985 meanU /= (double) inliers.size();
2986 meanV /= (double) inliers.size();
2988 centerOfGravity.
set_u(meanU);
2989 centerOfGravity.
set_v(meanV);
// Final decision: threshold on mean distance or on the score, per detection method.
2997 return meanDescriptorDistanceTmp < m_detectionThreshold;
2999 return score > m_detectionScore;
// NOTE(review): mangled extraction fragment — the start of the signature and several
// interior lines were lost; leading integers are residual source-file line numbers. Code
// kept byte-identical, comments only.
// Visible logic: pose-based detection variant. Runs the pose-estimating matchPoint, then
// (presumably when the pose succeeded) projects the trained model points to image space
// and derives a center of gravity from the RANSAC inliers.
3022 double &error,
double &elapsedTime,
vpRect &boundingBox,
vpImagePoint &centerOfGravity,
3024 bool isMatchOk =
matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
// Project each trained 3D model point into the image for the bounding region.
3029 std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
3031 for(std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
3035 modelImagePoints[cpt] = imPt;
// Center of gravity = mean of the RANSAC inlier image points.
3043 double meanU = 0.0, meanV = 0.0;
3044 for(std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end();
3046 meanU += it->get_u();
3047 meanV += it->get_v();
3050 meanU /= (double) m_ransacInliers.size();
3051 meanV /= (double) m_ransacInliers.size();
3053 centerOfGravity.
set_u(meanU);
3054 centerOfGravity.
set_v(meanV);
// NOTE(review): mangled extraction fragment — the function signature and some interior
// lines were lost; leading integers are residual source-file line numbers. Code kept
// byte-identical, comments only.
// Visible logic: ASIFT-style affine-simulation detection. For tilt levels tl=1..5
// (t = 2^(tl/2)) and rotations phi stepped by 72/t degrees, warp the image (affineSkew),
// run every configured detector on the warp, extract descriptors, then map keypoint
// coordinates back to the original frame with the inverse affine Ai.
3076 listOfKeypoints.clear();
3077 listOfDescriptors.clear();
3079 for (
int tl = 1; tl < 6; tl++) {
3080 double t = pow(2, 0.5 * tl);
3081 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3082 std::vector<cv::KeyPoint> keypoints;
3083 cv::Mat descriptors;
3085 cv::Mat timg, mask, Ai;
3088 affineSkew(t, phi, timg, mask, Ai);
// Optionally hand back the masked warped image for display/debug.
3091 if(listOfAffineI != NULL) {
3093 bitwise_and(mask, timg, img_disp);
3096 listOfAffineI->push_back(tI);
// Debug display of each simulated view (apparently in a disabled/debug section).
3100 cv::bitwise_and(mask, timg, img_disp);
3101 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
3102 cv::imshow(
"Skew", img_disp );
// Every configured detector contributes keypoints on the warped view.
3106 for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3107 it != m_detectors.end(); ++it) {
3108 std::vector<cv::KeyPoint> kp;
3109 it->second->detect(timg, kp, mask);
3110 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3114 extract(timg, keypoints, descriptors, elapsedTime);
// Warp keypoint positions back into the original image frame via Ai.
3116 for(
unsigned int i = 0; i < keypoints.size(); i++) {
3117 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3118 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3119 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3120 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
3123 listOfKeypoints.push_back(keypoints);
3124 listOfDescriptors.push_back(descriptors);
// NOTE(review): mangled extraction fragment — the function signature and some interior
// lines were lost; leading integers are residual source-file line numbers. Code kept
// byte-identical, comments only.
// Visible logic: parallel variant of the affine-simulation detection. First materializes
// the full (t, phi) parameter list, pre-sizes the output vectors so each iteration writes
// its own slot, then processes iterations in an OpenMP "parallel for" (signed int loop
// index as OpenMP requires); results are stored by index, not push_back, to stay
// thread-safe.
3133 std::vector<std::pair<double, int> > listOfAffineParams;
3134 for (
int tl = 1; tl < 6; tl++) {
3135 double t = pow(2, 0.5 * tl);
3136 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3137 listOfAffineParams.push_back(std::pair<double, int>(t, phi));
// Pre-size outputs: one slot per affine view, written independently per iteration.
3141 listOfKeypoints.resize(listOfAffineParams.size());
3142 listOfDescriptors.resize(listOfAffineParams.size());
3144 if(listOfAffineI != NULL) {
3145 listOfAffineI->resize(listOfAffineParams.size());
3148 #ifdef VISP_HAVE_OPENMP
3149 #pragma omp parallel for
3151 for(
int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
3152 std::vector<cv::KeyPoint> keypoints;
3153 cv::Mat descriptors;
3155 cv::Mat timg, mask, Ai;
3158 affineSkew(listOfAffineParams[(
size_t) cpt].first, listOfAffineParams[(
size_t) cpt].second, timg, mask, Ai);
3161 if(listOfAffineI != NULL) {
3163 bitwise_and(mask, timg, img_disp);
3166 (*listOfAffineI)[(size_t) cpt] = tI;
// Debug display of each simulated view (apparently in a disabled/debug section;
// NOTE(review): cv::imshow from worker threads is not thread-safe — confirm disabled).
3171 cv::bitwise_and(mask, timg, img_disp);
3172 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
3173 cv::imshow(
"Skew", img_disp );
3177 for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3178 it != m_detectors.end(); ++it) {
3179 std::vector<cv::KeyPoint> kp;
3180 it->second->detect(timg, kp, mask);
3181 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3185 extract(timg, keypoints, descriptors, elapsedTime);
// Map keypoints back to the original frame with the inverse affine Ai.
3187 for(
size_t i = 0; i < keypoints.size(); i++) {
3188 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3189 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3190 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3191 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
3194 listOfKeypoints[(size_t) cpt] = keypoints;
3195 listOfDescriptors[(size_t) cpt] = descriptors;
3208 m_computeCovariance =
false; m_covarianceMatrix =
vpMatrix(); m_currentImageId = 0; m_detectionMethod =
detectionScore;
3209 m_detectionScore = 0.15; m_detectionThreshold = 100.0; m_detectionTime = 0.0; m_detectorNames.clear();
3210 m_detectors.clear(); m_extractionTime = 0.0; m_extractorNames.clear(); m_extractors.clear(); m_filteredMatches.clear();
3212 m_imageFormat =
jpgImageFormat; m_knnMatches.clear(); m_mapOfImageId.clear(); m_mapOfImages.clear();
3213 m_matcher = cv::Ptr<cv::DescriptorMatcher>(); m_matcherName =
"BruteForce-Hamming";
3214 m_matches.clear(); m_matchingFactorThreshold = 2.0; m_matchingRatioThreshold = 0.85; m_matchingTime = 0.0;
3215 m_matchRansacKeyPointsToPoints.clear(); m_nbRansacIterations = 200; m_nbRansacMinInlierCount = 100;
3216 m_objectFilteredPoints.clear();
3217 m_poseTime = 0.0; m_queryDescriptors = cv::Mat(); m_queryFilteredKeyPoints.clear(); m_queryKeyPoints.clear();
3218 m_ransacConsensusPercentage = 20.0; m_ransacInliers.clear(); m_ransacOutliers.clear(); m_ransacReprojectionError = 6.0;
3219 m_ransacThreshold = 0.01; m_trainDescriptors = cv::Mat(); m_trainKeyPoints.clear(); m_trainPoints.clear();
3220 m_trainVpPoints.clear(); m_useAffineDetection =
false;
3221 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
3222 m_useBruteForceCrossCheck =
true;
3224 m_useConsensusPercentage =
false;
3226 m_useMatchTrainToQuery =
false; m_useRansacVVS =
true; m_useSingleMatchFilter =
true;
3228 m_detectorNames.push_back(
"ORB");
3229 m_extractorNames.push_back(
"ORB");
3243 if(!parent.empty()) {
3247 std::map<int, std::string> mapOfImgPath;
3248 if(saveTrainingImages) {
3249 #ifdef VISP_HAVE_MODULE_IO
3253 for(std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it, cpt++) {
3259 sprintf(buffer,
"%03d", cpt);
3260 std::stringstream ss;
3261 ss <<
"train_image_" << buffer;
3263 switch(m_imageFormat) {
3285 std::string imgFilename = ss.str();
3286 mapOfImgPath[it->first] = imgFilename;
3287 vpImageIo::write(it->second, parent + (!parent.empty() ?
"/" :
"") + imgFilename);
3290 std::cout <<
"Warning: in vpKeyPoint::saveLearningData() training images are not saved because "
3291 "visp_io module is not available !" << std::endl;
3295 bool have3DInfo = m_trainPoints.size() > 0;
3296 if(have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
3302 std::ofstream file(filename.c_str(), std::ofstream::binary);
3303 if(!file.is_open()) {
3308 int nbImgs = (int) mapOfImgPath.size();
3310 writeBinaryIntLE(file, nbImgs);
3312 #ifdef VISP_HAVE_MODULE_IO
3313 for(std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3317 writeBinaryIntLE(file,
id);
3320 std::string path = it->second;
3321 int length = (int) path.length();
3323 writeBinaryIntLE(file, length);
3325 for(
int cpt = 0; cpt < length; cpt++) {
3326 file.write((
char *) (&path[(
size_t)cpt]),
sizeof(path[(
size_t)cpt]));
3332 int have3DInfoInt = have3DInfo ? 1 : 0;
3334 writeBinaryIntLE(file, have3DInfoInt);
3337 int nRows = m_trainDescriptors.rows,
3338 nCols = m_trainDescriptors.cols;
3339 int descriptorType = m_trainDescriptors.type();
3343 writeBinaryIntLE(file, nRows);
3347 writeBinaryIntLE(file, nCols);
3351 writeBinaryIntLE(file, descriptorType);
3353 for (
int i = 0; i < nRows; i++) {
3354 unsigned int i_ = (
unsigned int) i;
3356 float u = m_trainKeyPoints[i_].pt.x;
3358 writeBinaryFloatLE(file, u);
3361 float v = m_trainKeyPoints[i_].pt.y;
3363 writeBinaryFloatLE(file, v);
3366 float size = m_trainKeyPoints[i_].size;
3368 writeBinaryFloatLE(file, size);
3371 float angle = m_trainKeyPoints[i_].angle;
3373 writeBinaryFloatLE(file, angle);
3376 float response = m_trainKeyPoints[i_].response;
3378 writeBinaryFloatLE(file, response);
3381 int octave = m_trainKeyPoints[i_].octave;
3383 writeBinaryIntLE(file, octave);
3386 int class_id = m_trainKeyPoints[i_].class_id;
3388 writeBinaryIntLE(file, class_id);
3391 #ifdef VISP_HAVE_MODULE_IO
3392 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3393 int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
3395 writeBinaryIntLE(file, image_id);
3399 writeBinaryIntLE(file, image_id);
3403 float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
3406 writeBinaryFloatLE(file, oX);
3410 writeBinaryFloatLE(file, oY);
3414 writeBinaryFloatLE(file, oZ);
3417 for (
int j = 0; j < nCols; j++) {
3419 switch(descriptorType) {
3421 file.write((
char *)(&m_trainDescriptors.at<
unsigned char>(i, j)),
sizeof(m_trainDescriptors.at<
unsigned char>(i, j)));
3425 file.write((
char *)(&m_trainDescriptors.at<
char>(i, j)),
sizeof(m_trainDescriptors.at<
char>(i, j)));
3430 writeBinaryUShortLE(file, m_trainDescriptors.at<
unsigned short int>(i, j));
3435 writeBinaryShortLE(file, m_trainDescriptors.at<
short int>(i, j));
3440 writeBinaryIntLE(file, m_trainDescriptors.at<
int>(i, j));
3445 writeBinaryFloatLE(file, m_trainDescriptors.at<
float>(i, j));
3450 writeBinaryDoubleLE(file, m_trainDescriptors.at<
double>(i, j));
3463 #ifdef VISP_HAVE_XML2
3464 xmlDocPtr doc = NULL;
3465 xmlNodePtr root_node = NULL, image_node = NULL, image_info_node = NULL, descriptors_info_node = NULL,
3466 descriptor_node = NULL, desc_node = NULL;
3475 doc = xmlNewDoc(BAD_CAST
"1.0");
3480 root_node = xmlNewNode(NULL, BAD_CAST
"LearningData");
3481 xmlDocSetRootElement(doc, root_node);
3483 std::stringstream ss;
3486 image_node = xmlNewChild(root_node, NULL, BAD_CAST
"TrainingImageInfo", NULL);
3488 #ifdef VISP_HAVE_MODULE_IO
3489 for(std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3490 image_info_node = xmlNewChild(image_node, NULL, BAD_CAST
"trainImg",
3491 BAD_CAST it->second.c_str());
3494 xmlNewProp(image_info_node, BAD_CAST
"image_id", BAD_CAST ss.str().c_str());
3499 descriptors_info_node = xmlNewChild(root_node, NULL, BAD_CAST
"DescriptorsInfo", NULL);
3501 int nRows = m_trainDescriptors.rows,
3502 nCols = m_trainDescriptors.cols;
3503 int descriptorType = m_trainDescriptors.type();
3508 xmlNewChild(descriptors_info_node, NULL, BAD_CAST
"nrows", BAD_CAST ss.str().c_str());
3513 xmlNewChild(descriptors_info_node, NULL, BAD_CAST
"ncols", BAD_CAST ss.str().c_str());
3517 ss << descriptorType;
3518 xmlNewChild(descriptors_info_node, NULL, BAD_CAST
"type", BAD_CAST ss.str().c_str());
3520 for (
int i = 0; i < nRows; i++) {
3521 unsigned int i_ = (
unsigned int) i;
3522 descriptor_node = xmlNewChild(root_node, NULL, BAD_CAST
"DescriptorInfo",
3527 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].pt.x;
3528 xmlNewChild(descriptor_node, NULL, BAD_CAST
"u",
3529 BAD_CAST ss.str().c_str());
3533 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].pt.y;
3534 xmlNewChild(descriptor_node, NULL, BAD_CAST
"v",
3535 BAD_CAST ss.str().c_str());
3539 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].size;
3540 xmlNewChild(descriptor_node, NULL, BAD_CAST
"size",
3541 BAD_CAST ss.str().c_str());
3545 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].angle;
3546 xmlNewChild(descriptor_node, NULL, BAD_CAST
"angle",
3547 BAD_CAST ss.str().c_str());
3551 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].response;
3552 xmlNewChild(descriptor_node, NULL, BAD_CAST
"response",
3553 BAD_CAST ss.str().c_str());
3556 ss << m_trainKeyPoints[i_].octave;
3557 xmlNewChild(descriptor_node, NULL, BAD_CAST
"octave",
3558 BAD_CAST ss.str().c_str());
3561 ss << m_trainKeyPoints[i_].class_id;
3562 xmlNewChild(descriptor_node, NULL, BAD_CAST
"class_id",
3563 BAD_CAST ss.str().c_str());
3566 #ifdef VISP_HAVE_MODULE_IO
3567 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3568 ss << ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
3569 xmlNewChild(descriptor_node, NULL, BAD_CAST
"image_id",
3570 BAD_CAST ss.str().c_str());
3573 xmlNewChild(descriptor_node, NULL, BAD_CAST
"image_id",
3574 BAD_CAST ss.str().c_str());
3580 ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].x;
3581 xmlNewChild(descriptor_node, NULL, BAD_CAST
"oX",
3582 BAD_CAST ss.str().c_str());
3586 ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].y;
3587 xmlNewChild(descriptor_node, NULL, BAD_CAST
"oY",
3588 BAD_CAST ss.str().c_str());
3592 ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].z;
3593 xmlNewChild(descriptor_node, NULL, BAD_CAST
"oZ",
3594 BAD_CAST ss.str().c_str());
3597 desc_node = xmlNewChild(descriptor_node, NULL, BAD_CAST
"desc", NULL);
3599 for (
int j = 0; j < nCols; j++) {
3602 switch(descriptorType) {
3609 int val_tmp = m_trainDescriptors.at<
unsigned char>(i, j);
3620 int val_tmp = m_trainDescriptors.at<
char>(i, j);
3626 ss << m_trainDescriptors.at<
unsigned short int>(i, j);
3630 ss << m_trainDescriptors.at<
short int>(i, j);
3634 ss << m_trainDescriptors.at<
int>(i, j);
3639 ss << std::fixed << std::setprecision(9) << m_trainDescriptors.at<
float>(i, j);
3644 ss << std::fixed << std::setprecision(17) << m_trainDescriptors.at<
double>(i, j);
3651 xmlNewChild(desc_node, NULL, BAD_CAST
"val",
3652 BAD_CAST ss.str().c_str());
3656 xmlSaveFormatFileEnc(filename.c_str(), doc,
"UTF-8", 1);
3667 std::cerr <<
"Error: libxml2 is required !" << std::endl;
3672 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
3674 struct KeypointResponseGreaterThanThreshold {
3675 KeypointResponseGreaterThanThreshold(
float _value) :
3678 inline bool operator()(
const cv::KeyPoint& kpt)
const {
3679 return kpt.response >= value;
3684 struct KeypointResponseGreater {
3685 inline bool operator()(
const cv::KeyPoint& kp1,
3686 const cv::KeyPoint& kp2)
const {
3687 return kp1.response > kp2.response;
3692 void vpKeyPoint::KeyPointsFilter::retainBest(
3693 std::vector<cv::KeyPoint>& keypoints,
int n_points) {
3695 if (n_points >= 0 && keypoints.size() > (size_t) n_points) {
3696 if (n_points == 0) {
3701 std::nth_element(keypoints.begin(), keypoints.begin() + n_points,
3702 keypoints.end(), KeypointResponseGreater());
3704 float ambiguous_response = keypoints[(size_t) (n_points - 1)].response;
3706 std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
3707 keypoints.begin() + n_points, keypoints.end(),
3708 KeypointResponseGreaterThanThreshold(ambiguous_response));
3710 keypoints.resize((
size_t) (new_end - keypoints.begin()));
3714 struct RoiPredicate {
3715 RoiPredicate(
const cv::Rect& _r) :
3719 bool operator()(
const cv::KeyPoint& keyPt)
const {
3720 return !r.contains(keyPt.pt);
3726 void vpKeyPoint::KeyPointsFilter::runByImageBorder(
3727 std::vector<cv::KeyPoint>& keypoints, cv::Size imageSize,
int borderSize) {
3728 if (borderSize > 0) {
3729 if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
3733 std::remove_if(keypoints.begin(), keypoints.end(),
3735 cv::Rect(cv::Point(borderSize, borderSize),
3736 cv::Point(imageSize.width - borderSize,
3737 imageSize.height - borderSize)))), keypoints.end());
3741 struct SizePredicate {
3742 SizePredicate(
float _minSize,
float _maxSize) :
3743 minSize(_minSize), maxSize(_maxSize) {
3746 bool operator()(
const cv::KeyPoint& keyPt)
const {
3747 float size = keyPt.size;
3748 return (size < minSize) || (size > maxSize);
3751 float minSize, maxSize;
3754 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(
3755 std::vector<cv::KeyPoint>& keypoints,
float minSize,
float maxSize) {
3756 CV_Assert(minSize >= 0);
3757 CV_Assert(maxSize >= 0);
3758 CV_Assert(minSize <= maxSize);
3761 std::remove_if(keypoints.begin(), keypoints.end(),
3762 SizePredicate(minSize, maxSize)), keypoints.end());
3765 class MaskPredicate {
3767 MaskPredicate(
const cv::Mat& _mask) :
3770 bool operator()(
const cv::KeyPoint& key_pt)
const {
3771 return mask.at<uchar>((int) (key_pt.pt.y + 0.5f),
3772 (int) (key_pt.pt.x + 0.5f)) == 0;
3777 MaskPredicate& operator=(
const MaskPredicate&);
3780 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(
3781 std::vector<cv::KeyPoint>& keypoints,
const cv::Mat& mask) {
3786 std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)),
3790 struct KeyPoint_LessThan {
3791 KeyPoint_LessThan(
const std::vector<cv::KeyPoint>& _kp) :
3794 bool operator()(
size_t i,
size_t j)
const {
3795 const cv::KeyPoint& kp1 = (*kp)[ i];
3796 const cv::KeyPoint& kp2 = (*kp)[ j];
3797 if (!
vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon())) {
3798 return kp1.pt.x < kp2.pt.x;
3801 if (!
vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon())) {
3802 return kp1.pt.y < kp2.pt.y;
3805 if (!
vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon())) {
3806 return kp1.size > kp2.size;
3809 if (!
vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
3810 return kp1.angle < kp2.angle;
3813 if (!
vpMath::equal(kp1.response, kp2.response, std::numeric_limits<float>::epsilon())) {
3814 return kp1.response > kp2.response;
3817 if (kp1.octave != kp2.octave) {
3818 return kp1.octave > kp2.octave;
3821 if (kp1.class_id != kp2.class_id) {
3822 return kp1.class_id > kp2.class_id;
3827 const std::vector<cv::KeyPoint>* kp;
3830 void vpKeyPoint::KeyPointsFilter::removeDuplicated(
3831 std::vector<cv::KeyPoint>& keypoints) {
3832 size_t i, j, n = keypoints.size();
3833 std::vector<size_t> kpidx(n);
3834 std::vector<uchar> mask(n, (uchar) 1);
3836 for (i = 0; i < n; i++) {
3839 std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
3840 for (i = 1, j = 0; i < n; i++) {
3841 cv::KeyPoint& kp1 = keypoints[kpidx[i]];
3842 cv::KeyPoint& kp2 = keypoints[kpidx[j]];
3844 if (!
vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
3845 !
vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
3846 !
vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
3847 !
vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
3854 for (i = j = 0; i < n; i++) {
3857 keypoints[j] = keypoints[i];
3862 keypoints.resize(j);
3868 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(
3869 const cv::Ptr<cv::FeatureDetector>& _detector,
int _maxLevel) :
3870 detector(_detector), maxLevel(_maxLevel) {
3873 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty()
const {
3874 return detector.empty() || (cv::FeatureDetector*) detector->empty();
3877 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect( cv::InputArray image, CV_OUT std::vector<cv::KeyPoint>& keypoints,
3878 cv::InputArray mask ) {
3879 detectImpl(image.getMat(), keypoints, mask.getMat());
3882 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(
const cv::Mat& image,
3883 std::vector<cv::KeyPoint>& keypoints,
const cv::Mat& mask)
const {
3884 cv::Mat src = image;
3885 cv::Mat src_mask = mask;
3887 cv::Mat dilated_mask;
3888 if (!mask.empty()) {
3889 cv::dilate(mask, dilated_mask, cv::Mat());
3890 cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
3891 mask255.setTo(cv::Scalar(255), dilated_mask != 0);
3892 dilated_mask = mask255;
3895 for (
int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2) {
3897 std::vector<cv::KeyPoint> new_pts;
3898 detector->detect(src, new_pts, src_mask);
3899 std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end =
3901 for (; it != end; ++it) {
3902 it->pt.x *= multiplier;
3903 it->pt.y *= multiplier;
3904 it->size *= multiplier;
3907 keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
3916 resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
3921 vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
3925 #elif !defined(VISP_BUILD_SHARED_LIBS)
3927 void dummy_vpKeyPoint() {};
virtual void displayCircle(const vpImagePoint ¢er, unsigned int radius, const vpColor &color, bool fill=false, unsigned int thickness=1)=0
static void write(const vpImage< unsigned char > &I, const char *filename)
void getTrainKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
Implementation of a matrix and operations on matrices.
void displayMatching(const vpImage< unsigned char > &IRef, vpImage< unsigned char > &IMatching, unsigned int crossSize, unsigned int lineThickness=1, const vpColor &color=vpColor::green)
static void compute3D(const cv::KeyPoint &candidate, const std::vector< vpPoint > &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
void set_oZ(const double oZ)
Set the point Z coordinate in the object frame.
void detectExtractAffine(const vpImage< unsigned char > &I, std::vector< std::vector< cv::KeyPoint > > &listOfKeypoints, std::vector< cv::Mat > &listOfDescriptors, std::vector< vpImage< unsigned char > > *listOfAffineI=NULL)
void getQueryKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
std::string getMatcherName() const
unsigned int getWidth() const
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
void createImageMatching(vpImage< unsigned char > &IRef, vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
Implementation of an homogeneous matrix and operations on such kind of matrices.
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, double &elapsedTime, const vpRect &rectangle=vpRect())
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
Point coordinates conversion from normalized coordinates in meter to pixel coordinates ...
Class to define colors available for display functionalities.
static bool equal(double x, double y, double s=0.001)
double getMatchingFactorThreshold() const
bool getUseRansacVVSPoseEstimation() const
static const vpColor none
Error that can be emitted by ViSP classes.
double getRansacConsensusPercentage() const
void setRansacThreshold(const double &t)
void set_x(const double x)
Set the point x coordinate in the image plane.
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
Point coordinates conversion from pixel coordinates to normalized coordinates in meter...
std::vector< unsigned int > getRansacInlierIndex() const
int getNbRansacMinInlierCount() const
double get_y() const
Get the point y coordinate in the image plane.
static const vpColor green
void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector< cv::DMatch > &matches, double &elapsedTime)
static int round(const double x)
bool getUseRansacConsensusPercentage() const
void insertImageMatching(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
Class that defines what is a point.
Defines a generic 2D polygon.
const char * what() const
vpRect getBoundingBox() const
double getRansacReprojectionError() const
void set_u(const double u)
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidate, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
bool computePose(vpPoseMethodType methode, vpHomogeneousMatrix &cMo, bool(*func)(vpHomogeneousMatrix *)=NULL)
compute the pose for a given method
bool _reference_computed
flag to indicate if the reference has been built.
virtual void displayCross(const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)=0
void set_v(const double v)
Class used for pose computation from N points (pose from point only).
Generic class defining intrinsic camera parameters.
int getNbRansacIterations() const
double getMatchingRatioThreshold() const
void set_y(const double y)
Set the point y coordinate in the image plane.
std::string getDetectorName() const
void set_oX(const double oX)
Set the point X coordinate in the object frame.
void display(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, unsigned int size=3)
unsigned int buildReference(const vpImage< unsigned char > &I)
double get_x() const
Get the point x coordinate in the image plane.
vpMatrix getCovarianceMatrix() const
vpKeyPoint(const std::string &detectorName="ORB", const std::string &extractorName="ORB", const std::string &matcherName="BruteForce-Hamming", const vpFilterMatchingType &filterType=ratioDistanceThreshold)
std::vector< vpImagePoint > referenceImagePointsList
void getObjectPoints(std::vector< cv::Point3f > &objectPoints) const
bool computePose(const std::vector< cv::Point2f > &imagePoints, const std::vector< cv::Point3f > &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector< int > &inlierIndex, double &elapsedTime, bool(*func)(vpHomogeneousMatrix *)=NULL)
unsigned int matchPoint(const vpImage< unsigned char > &I)
void setRansacMaxTrials(const int &rM)
void setRansacNbInliersToReachConsensus(const unsigned int &nbC)
VISP_EXPORT double measureTimeMs()
void loadLearningData(const std::string &filename, const bool binaryMode=false, const bool append=false)
void setWorldCoordinates(const double oX, const double oY, const double oZ)
double getRansacThreshold() const
Implementation of column vector and the associated operations.
void set_oY(const double oY)
Set the point Y coordinate in the object frame.
void saveLearningData(const std::string &filename, const bool binaryMode=false, const bool saveTrainingImages=true)
void getTrainPoints(std::vector< cv::Point3f > &points) const
void insert(const vpImage< Type > &src, const vpImagePoint topLeft)
vpHomogeneousMatrix inverse() const
void extract(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, cv::Mat &descriptors, double &elapsedTime, std::vector< cv::Point3f > *trainPoints=NULL)
bool matchPointAndDetect(const vpImage< unsigned char > &I, vpRect &boundingBox, vpImagePoint ¢erOfGravity, const bool isPlanarObject=true, std::vector< vpImagePoint > *imPts1=NULL, std::vector< vpImagePoint > *imPts2=NULL, double *meanDescriptorDistance=NULL, double *detectionScore=NULL, const vpRect &rectangle=vpRect())
std::vector< unsigned int > matchedReferencePoints
unsigned int getHeight() const
Defines a rectangle in the plane.
std::vector< vpImagePoint > currentImagePointsList
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
virtual void displayLine(const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1)=0
This class defines the container for a plane geometrical structure.
void loadConfigFile(const std::string &configFile)
static void read(vpImage< unsigned char > &I, const char *filename)
void addPoint(const vpPoint &P)
Add a new point in this array.
static void convertFromOpenCV(const cv::KeyPoint &from, vpImagePoint &to)
Class that consider the case of a translation vector.
Implementation of a rotation vector as axis-angle minimal representation.
std::vector< vpPoint > getRansacInliers() const
void setCovarianceComputation(const bool &flag)
std::string getExtractorName() const
void set_ij(const double ii, const double jj)
vpMatchingMethodEnum getMatchingMethod() const
void initMatcher(const std::string &matcherName)
void parse(const std::string &filename)