42 #include <visp3/core/vpIoTools.h> 43 #include <visp3/vision/vpKeyPoint.h> 45 #if (VISP_HAVE_OPENCV_VERSION >= 0x020101) 47 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 48 #include <opencv2/calib3d/calib3d.hpp> 51 #ifdef VISP_HAVE_PUGIXML 52 #include <pugixml.hpp> 58 inline cv::DMatch knnToDMatch(
const std::vector<cv::DMatch> &knnMatches)
60 if (knnMatches.size() > 0) {
67 inline vpImagePoint matchRansacToVpImage(
const std::pair<cv::KeyPoint, cv::Point3f> &pair)
85 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
86 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
87 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
88 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
89 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
90 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
91 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
92 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
93 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
94 m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
95 m_useAffineDetection(false),
96 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
97 m_useBruteForceCrossCheck(true),
99 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
100 m_useSingleMatchFilter(true), m_I()
104 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
105 m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);
121 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(
detectionScore),
122 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
123 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
124 m_imageFormat(
jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
125 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
126 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
127 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
128 m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
129 m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
130 m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
131 m_useAffineDetection(false),
132 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
133 m_useBruteForceCrossCheck(true),
135 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
136 m_useSingleMatchFilter(true), m_I()
140 m_detectorNames.push_back(detectorName);
141 m_extractorNames.push_back(extractorName);
157 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(
detectionScore),
158 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
159 m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
160 m_filterType(filterType), m_imageFormat(
jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
161 m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
162 m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
163 m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
164 m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(
vpPose::NO_FILTER), m_ransacInliers(),
165 m_ransacOutliers(), m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
166 m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
167 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
168 m_useBruteForceCrossCheck(true),
170 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
171 m_useSingleMatchFilter(true), m_I()
185 void vpKeyPoint::affineSkew(
double tilt,
double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
190 mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));
192 cv::Mat A = cv::Mat::eye(2, 3, CV_32F);
195 if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
200 A = (cv::Mat_<float>(2, 2) << c, -s, s, c);
202 cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
203 cv::Mat tcorners = corners * A.t();
204 cv::Mat tcorners_x, tcorners_y;
205 tcorners.col(0).copyTo(tcorners_x);
206 tcorners.col(1).copyTo(tcorners_y);
207 std::vector<cv::Mat> channels;
208 channels.push_back(tcorners_x);
209 channels.push_back(tcorners_y);
210 cv::merge(channels, tcorners);
212 cv::Rect rect = cv::boundingRect(tcorners);
213 A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
215 cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
218 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
219 double s = 0.8 * sqrt(tilt * tilt - 1);
220 cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
221 cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
222 A.row(0) = A.row(0) / tilt;
225 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
226 std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
229 cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
231 cv::invertAffineTransform(A, Ai);
260 unsigned int height,
unsigned int width)
275 unsigned int height,
unsigned int width)
291 m_trainPoints.clear();
292 m_mapOfImageId.clear();
293 m_mapOfImages.clear();
294 m_currentImageId = 1;
296 if (m_useAffineDetection) {
297 std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
298 std::vector<cv::Mat> listOfTrainDescriptors;
304 m_trainKeyPoints.clear();
305 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
306 it != listOfTrainKeyPoints.end(); ++it) {
307 m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
311 for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
315 it->copyTo(m_trainDescriptors);
317 m_trainDescriptors.push_back(*it);
321 detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
322 extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
327 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
328 m_mapOfImageId[it->class_id] = m_currentImageId;
332 m_mapOfImages[m_currentImageId] = I;
341 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
343 return static_cast<unsigned int>(m_trainKeyPoints.size());
371 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
373 cv::Mat trainDescriptors;
375 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
377 extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
379 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
383 std::map<size_t, size_t> mapOfKeypointHashes;
385 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
387 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
390 std::vector<cv::Point3f> trainPoints_tmp;
391 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
392 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
393 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
398 points3f = trainPoints_tmp;
401 buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id);
416 std::vector<cv::Point3f> &points3f,
bool append,
int class_id)
418 cv::Mat trainDescriptors;
420 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
422 extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
424 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
428 std::map<size_t, size_t> mapOfKeypointHashes;
430 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
432 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
435 std::vector<cv::Point3f> trainPoints_tmp;
436 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
437 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
438 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
443 points3f = trainPoints_tmp;
446 buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id);
462 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
463 bool append,
int class_id)
466 m_currentImageId = 0;
467 m_mapOfImageId.clear();
468 m_mapOfImages.clear();
469 this->m_trainKeyPoints.clear();
470 this->m_trainPoints.clear();
475 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
477 if (class_id != -1) {
478 for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
479 it->class_id = class_id;
485 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
487 m_mapOfImageId[it->class_id] = m_currentImageId;
491 m_mapOfImages[m_currentImageId] = I;
494 this->m_trainKeyPoints.insert(this->m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
496 trainDescriptors.copyTo(this->m_trainDescriptors);
498 this->m_trainDescriptors.push_back(trainDescriptors);
500 this->m_trainPoints.insert(this->m_trainPoints.end(), points3f.begin(), points3f.end());
508 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
526 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
527 bool append,
int class_id)
530 buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id);
552 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
559 vpPlane Po(pts[0], pts[1], pts[2]);
560 double xc = 0.0, yc = 0.0;
571 point_obj = cMo.
inverse() * point_cam;
572 point = cv::Point3f((
float)point_obj[0], (
float)point_obj[1], (
float)point_obj[2]);
594 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
601 vpPlane Po(pts[0], pts[1], pts[2]);
602 double xc = 0.0, yc = 0.0;
613 point_obj = cMo.
inverse() * point_cam;
633 std::vector<cv::KeyPoint> &candidates,
634 const std::vector<vpPolygon> &polygons,
635 const std::vector<std::vector<vpPoint> > &roisPt,
636 std::vector<cv::Point3f> &points, cv::Mat *descriptors)
639 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
646 std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
647 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
648 pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
652 std::vector<vpPolygon> polygons_tmp = polygons;
653 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
654 std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
656 while (it2 != pairOfCandidatesToCheck.end()) {
657 imPt.
set_ij(it2->first.pt.y, it2->first.pt.x);
658 if (it1->isInside(imPt)) {
659 candidates.push_back(it2->first);
661 points.push_back(pt);
663 if (descriptors != NULL) {
664 desc.push_back(descriptors->row((
int)it2->second));
668 it2 = pairOfCandidatesToCheck.erase(it2);
675 if (descriptors != NULL) {
676 desc.copyTo(*descriptors);
697 std::vector<vpImagePoint> &candidates,
698 const std::vector<vpPolygon> &polygons,
699 const std::vector<std::vector<vpPoint> > &roisPt,
700 std::vector<vpPoint> &points, cv::Mat *descriptors)
703 std::vector<vpImagePoint> candidatesToCheck = candidates;
709 std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
710 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
711 pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
715 std::vector<vpPolygon> polygons_tmp = polygons;
716 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
717 std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
719 while (it2 != pairOfCandidatesToCheck.end()) {
720 if (it1->isInside(it2->first)) {
721 candidates.push_back(it2->first);
723 points.push_back(pt);
725 if (descriptors != NULL) {
726 desc.push_back(descriptors->row((
int)it2->second));
730 it2 = pairOfCandidatesToCheck.erase(it2);
755 const std::vector<vpCylinder> &cylinders,
756 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
757 cv::Mat *descriptors)
759 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
765 size_t cpt_keypoint = 0;
766 for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
767 ++it1, cpt_keypoint++) {
768 size_t cpt_cylinder = 0;
771 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
772 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
775 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
777 candidates.push_back(*it1);
781 double xm = 0.0, ym = 0.0;
783 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
785 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
787 point_cam[0] = xm * Z;
788 point_cam[1] = ym * Z;
792 point_obj = cMo.
inverse() * point_cam;
795 points.push_back(cv::Point3f((
float)pt.
get_oX(), (float)pt.
get_oY(), (float)pt.
get_oZ()));
797 if (descriptors != NULL) {
798 desc.push_back(descriptors->row((
int)cpt_keypoint));
808 if (descriptors != NULL) {
809 desc.copyTo(*descriptors);
830 const std::vector<vpCylinder> &cylinders,
831 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
832 cv::Mat *descriptors)
834 std::vector<vpImagePoint> candidatesToCheck = candidates;
840 size_t cpt_keypoint = 0;
841 for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
842 ++it1, cpt_keypoint++) {
843 size_t cpt_cylinder = 0;
846 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
847 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
850 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
852 candidates.push_back(*it1);
856 double xm = 0.0, ym = 0.0;
858 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
860 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
862 point_cam[0] = xm * Z;
863 point_cam[1] = ym * Z;
867 point_obj = cMo.
inverse() * point_cam;
870 points.push_back(pt);
872 if (descriptors != NULL) {
873 desc.push_back(descriptors->row((
int)cpt_keypoint));
883 if (descriptors != NULL) {
884 desc.copyTo(*descriptors);
907 if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
909 std::cerr <<
"Not enough points to compute the pose (at least 4 points " 916 cv::Mat cameraMatrix =
926 cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);
929 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 931 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
932 (
float)m_ransacReprojectionError,
935 inlierIndex, cv::SOLVEPNP_ITERATIVE);
955 int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
956 if (m_useConsensusPercentage) {
957 nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
960 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
961 (
float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
963 }
catch (cv::Exception &e) {
964 std::cerr << e.what() << std::endl;
968 vpTranslationVector translationVec(tvec.at<
double>(0), tvec.at<
double>(1), tvec.at<
double>(2));
969 vpThetaUVector thetaUVector(rvec.at<
double>(0), rvec.at<
double>(1), rvec.at<
double>(2));
998 std::vector<vpPoint> &inliers,
double &elapsedTime,
bool (*func)(
const vpHomogeneousMatrix &))
1000 std::vector<unsigned int> inlierIndex;
1001 return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
1018 std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
double &elapsedTime,
1023 if (objectVpPoints.size() < 4) {
1033 for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
1037 unsigned int nbInlierToReachConsensus = (
unsigned int)m_nbRansacMinInlierCount;
1038 if (m_useConsensusPercentage) {
1039 nbInlierToReachConsensus =
1040 (
unsigned int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
1050 bool isRansacPoseEstimationOk =
false;
1057 if (m_computeCovariance) {
1061 std::cerr <<
"e=" << e.
what() << std::endl;
1079 return isRansacPoseEstimationOk;
1095 double vpKeyPoint::computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
1098 if (matchKeyPoints.size() == 0) {
1104 std::vector<double> errors(matchKeyPoints.size());
1107 for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
1108 it != matchKeyPoints.end(); ++it, cpt++) {
1113 double u = 0.0, v = 0.0;
1115 errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
1118 return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
1170 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
1172 if (m_mapOfImages.empty()) {
1173 std::cerr <<
"There is no training image loaded !" << std::endl;
1183 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
1186 unsigned int nbWidth = nbImgSqrt;
1188 unsigned int nbHeight = nbImgSqrt;
1191 if (nbImgSqrt * nbImgSqrt < nbImg) {
1195 unsigned int maxW = ICurrent.
getWidth();
1196 unsigned int maxH = ICurrent.
getHeight();
1197 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1199 if (maxW < it->second.getWidth()) {
1200 maxW = it->second.getWidth();
1203 if (maxH < it->second.getHeight()) {
1204 maxH = it->second.getHeight();
1225 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
1227 if (m_mapOfImages.empty()) {
1228 std::cerr <<
"There is no training image loaded !" << std::endl;
1238 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
1241 unsigned int nbWidth = nbImgSqrt;
1243 unsigned int nbHeight = nbImgSqrt;
1246 if (nbImgSqrt * nbImgSqrt < nbImg) {
1250 unsigned int maxW = ICurrent.
getWidth();
1251 unsigned int maxH = ICurrent.
getHeight();
1252 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1254 if (maxW < it->second.getWidth()) {
1255 maxW = it->second.getWidth();
1258 if (maxH < it->second.getHeight()) {
1259 maxH = it->second.getHeight();
1277 detect(I, keyPoints, elapsedTime, rectangle);
1290 detect(I_color, keyPoints, elapsedTime, rectangle);
1301 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
const cv::Mat &mask)
1304 detect(matImg, keyPoints, elapsedTime, mask);
1320 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1323 cv::Point leftTop((
int)rectangle.
getLeft(), (int)rectangle.
getTop()),
1325 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1327 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1330 detect(matImg, keyPoints, elapsedTime, mask);
1346 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1349 cv::Point leftTop((
int)rectangle.
getLeft(), (int)rectangle.
getTop()),
1351 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1353 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1356 detect(matImg, keyPoints, elapsedTime, mask);
1368 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
1369 const cv::Mat &mask)
1374 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
1375 it != m_detectors.end(); ++it) {
1376 std::vector<cv::KeyPoint> kp;
1377 it->second->detect(matImg, kp, mask);
1378 keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
1393 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1395 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1398 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1413 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1415 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1418 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1433 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1436 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1450 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1453 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1470 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
1473 srand((
unsigned int)time(NULL));
1476 std::vector<vpImagePoint> queryImageKeyPoints;
1478 std::vector<vpImagePoint> trainImageKeyPoints;
1482 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1484 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1487 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1488 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1489 queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.
getWidth());
1508 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
1511 srand((
unsigned int)time(NULL));
1514 std::vector<vpImagePoint> queryImageKeyPoints;
1516 std::vector<vpImagePoint> trainImageKeyPoints;
1520 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1522 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1525 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1526 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1527 queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.
getWidth());
1546 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
1549 srand((
unsigned int)time(NULL));
1552 std::vector<vpImagePoint> queryImageKeyPoints;
1554 std::vector<vpImagePoint> trainImageKeyPoints;
1558 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1560 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1563 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1564 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1565 queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.
getWidth());
1584 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1585 unsigned int lineThickness)
1587 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1589 std::cerr <<
"There is no training image loaded !" << std::endl;
1595 int nbImg = (int)(m_mapOfImages.size() + 1);
1603 int nbWidth = nbImgSqrt;
1604 int nbHeight = nbImgSqrt;
1606 if (nbImgSqrt * nbImgSqrt < nbImg) {
1610 std::map<int, int> mapOfImageIdIndex;
1613 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1615 mapOfImageIdIndex[it->first] = cpt;
1617 if (maxW < it->second.getWidth()) {
1618 maxW = it->second.getWidth();
1621 if (maxH < it->second.getHeight()) {
1622 maxH = it->second.getHeight();
1628 int medianI = nbHeight / 2;
1629 int medianJ = nbWidth / 2;
1630 int medianIndex = medianI * nbWidth + medianJ;
1631 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1633 int current_class_id_index = 0;
1634 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1635 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1639 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1642 int indexI = current_class_id_index / nbWidth;
1643 int indexJ = current_class_id_index - (indexI * nbWidth);
1644 topLeftCorner.
set_ij((
int)maxH * indexI, (
int)maxW * indexJ);
1651 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1652 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1657 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1662 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1668 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1669 int current_class_id = 0;
1670 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1671 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1675 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1678 int indexI = current_class_id / nbWidth;
1679 int indexJ = current_class_id - (indexI * nbWidth);
1681 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1682 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1683 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1684 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
// NOTE(review): fragment of a vpKeyPoint::displayMatching() overload — the function
// header and several interior lines (else branches, closing braces, display calls)
// were lost in extraction. It draws the current image surrounded by the training
// images in a mosaic, then draws keypoints, RANSAC inliers/outliers and match lines.
1705 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1706 unsigned int lineThickness)
// Nothing to display without a training database.
1708 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1710 std::cerr <<
"There is no training image loaded !" << std::endl;
// Mosaic layout: training images plus one slot for the current image,
// arranged on a near-square grid of nbWidth x nbHeight cells.
1716 int nbImg = (int)(m_mapOfImages.size() + 1);
1724 int nbWidth = nbImgSqrt;
1725 int nbHeight = nbImgSqrt;
1727 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Map each training image id to its position index in the mosaic and
// compute the maximum image dimensions (cell size). The increment of
// cpt is in an elided line — presumably once per iteration; verify.
1731 std::map<int, int> mapOfImageIdIndex;
1734 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1736 mapOfImageIdIndex[it->first] = cpt;
1738 if (maxW < it->second.getWidth()) {
1739 maxW = it->second.getWidth();
1742 if (maxH < it->second.getHeight()) {
1743 maxH = it->second.getHeight();
// The current image occupies the median (center-most) cell; training
// images at or after that index are shifted one cell to the right.
1749 int medianI = nbHeight / 2;
1750 int medianJ = nbWidth / 2;
1751 int medianIndex = medianI * nbWidth + medianJ;
// Draw each training keypoint in the cell of the image it came from.
1752 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1754 int current_class_id_index = 0;
1755 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1756 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
// else branch (elided "} else {"): skip over the center cell.
1760 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1763 int indexI = current_class_id_index / nbWidth;
1764 int indexJ = current_class_id_index - (indexI * nbWidth);
1765 topLeftCorner.
set_ij((
int)maxH * indexI, (
int)maxW * indexJ);
// Offset of the current image (center cell) inside the mosaic; query
// keypoints, inliers and outliers are drawn relative to this corner.
1772 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1773 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1778 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1783 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
// Draw a line for every filtered match, from the query keypoint in the
// center cell to the matched training keypoint in its own cell.
1789 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1790 int current_class_id = 0;
1791 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1792 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
// else branch (elided): index shifted past the center cell.
1796 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1799 int indexI = current_class_id / nbWidth;
1800 int indexJ = current_class_id - (indexI * nbWidth);
// vpImagePoint is (i=row, j=col): cell offset + keypoint position
// (note cv::Point2f .y maps to i and .x maps to j).
1802 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1803 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1804 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1805 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
// NOTE(review): fragments of five vpKeyPoint::extract(...) forwarding overloads;
// each signature head was lost in extraction. Each overload converts its image
// argument (vpImage<unsigned char>, vpImage<vpRGBa> or cv::Mat) as needed and
// forwards to the core extract(cv::Mat, keyPoints, descriptors, elapsedTime,
// trainPoints) implementation.
1825 std::vector<cv::Point3f> *trainPoints)
1828 extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
// Color-image overload: forwards I_color.
1842 std::vector<cv::Point3f> *trainPoints)
1845 extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
// cv::Mat overload without elapsedTime out-parameter.
1859 std::vector<cv::Point3f> *trainPoints)
1862 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
// Overloads that also report the extraction time via elapsedTime; the
// vpImage-to-cv::Mat conversion lines are elided.
1876 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1880 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1894 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1898 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
// NOTE(review): core vpKeyPoint::extract(const cv::Mat &, ...) — signature head
// elided. Runs every configured descriptor extractor over the keypoints; because
// extractors may DROP keypoints, the associated 3D train points (and previously
// computed descriptor rows) are re-aligned by matching keypoint hashes before
// and after each compute() call.
1912 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1917 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1918 itd != m_extractors.end(); ++itd) {
// First-extractor path (the "first" condition line is elided): keep the
// 3D points in sync with any keypoints removed by compute().
1922 if (trainPoints != NULL && !trainPoints->empty()) {
1925 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1928 itd->second->compute(matImg, keyPoints, descriptors);
1930 if (keyPoints.size() != keyPoints_tmp.size()) {
// Hash each pre-compute keypoint to its index so surviving keypoints
// can be mapped back to their original positions (cpt increment elided).
1934 std::map<size_t, size_t> mapOfKeypointHashes;
1936 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1938 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1941 std::vector<cv::Point3f> trainPoints_tmp;
1942 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1943 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1944 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1949 *trainPoints = trainPoints_tmp;
// No 3D points to re-align: plain compute.
1953 itd->second->compute(matImg, keyPoints, descriptors);
// Subsequent-extractor path: compute into a temporary `desc`, then
// filter BOTH trainPoints and already-accumulated descriptor rows
// down to the surviving keypoints.
1958 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1962 itd->second->compute(matImg, keyPoints, desc);
1964 if (keyPoints.size() != keyPoints_tmp.size()) {
1968 std::map<size_t, size_t> mapOfKeypointHashes;
1970 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1972 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1975 std::vector<cv::Point3f> trainPoints_tmp;
1976 cv::Mat descriptors_tmp;
1977 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1978 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1979 if (trainPoints != NULL && !trainPoints->empty()) {
1980 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1983 if (!descriptors.empty()) {
1984 descriptors_tmp.push_back(descriptors.row((
int)mapOfKeypointHashes[myKeypointHash(*it)]));
1989 if (trainPoints != NULL) {
1991 *trainPoints = trainPoints_tmp;
1994 descriptors_tmp.copyTo(descriptors);
// Concatenate this extractor's descriptors column-wise with the
// previous ones (first extractor just copies).
1998 if (descriptors.empty()) {
1999 desc.copyTo(descriptors);
2001 cv::hconcat(descriptors, desc, descriptors);
// Sanity check: one descriptor row per keypoint is expected here.
2006 if (keyPoints.size() != (size_t)descriptors.rows) {
2007 std::cerr <<
"keyPoints.size() != (size_t) descriptors.rows" << std::endl;
// Filter the raw matches according to m_filterType and (optionally) keep only
// train keypoints matched exactly once. Results go to m_filteredMatches,
// m_objectFilteredPoints and m_queryFilteredKeyPoints.
// NOTE(review): several interior lines (branch conditions on m_filterType,
// min_dist/mean accumulation, else keywords, closing braces) were lost in
// extraction — comments on elided logic are hedged.
2015 void vpKeyPoint::filterMatches()
2017 std::vector<cv::KeyPoint> queryKpts;
2018 std::vector<cv::Point3f> trainPts;
2019 std::vector<cv::DMatch> m;
// --- knn-match branch (ratio test / stdDist threshold) ---
// Gather the best-match distances to compute min, mean and stddev.
2025 double min_dist = DBL_MAX;
2027 std::vector<double> distance_vec(m_knnMatches.size());
2030 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
2031 double dist = m_knnMatches[i][0].distance;
2033 distance_vec[i] = dist;
// min_dist update and mean accumulation lines elided here.
2035 if (dist < min_dist) {
2042 mean /= m_queryDescriptors.rows;
2045 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2046 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
2047 double threshold = min_dist + stdev;
// Keep a knn match if it passes the filter (ratio test against the
// second-best match, or distance threshold — the test itself is elided).
2049 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
2050 if (m_knnMatches[i].size() >= 2) {
2053 float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
2058 double dist = m_knnMatches[i][0].distance;
// Accepted match: queryIdx is re-indexed to the filtered list position.
2061 m.push_back(cv::DMatch((
int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
2063 if (!m_trainPoints.empty()) {
2064 trainPts.push_back(m_trainPoints[(
size_t)m_knnMatches[i][0].trainIdx]);
2066 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_knnMatches[i][0].queryIdx]);
// --- plain-match branch (same statistics over m_matches) ---
2074 double min_dist = DBL_MAX;
2076 std::vector<double> distance_vec(m_matches.size());
2077 for (
size_t i = 0; i < m_matches.size(); i++) {
2078 double dist = m_matches[i].distance;
2080 distance_vec[i] = dist;
2082 if (dist < min_dist) {
2084 mean /= m_queryDescriptors.rows;
2091 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2092 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
// Keep matches under the (elided) threshold, re-indexing queryIdx.
2102 for (
size_t i = 0; i < m_matches.size(); i++) {
2103 if (m_matches[i].distance <= threshold) {
2104 m.push_back(cv::DMatch((
int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
2106 if (!m_trainPoints.empty()) {
2107 trainPts.push_back(m_trainPoints[(
size_t)m_matches[i].trainIdx]);
2109 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_matches[i].queryIdx]);
// --- optional single-match filter: drop train keypoints matched >1 time ---
2114 if (m_useSingleMatchFilter) {
2117 std::vector<cv::DMatch> mTmp;
2118 std::vector<cv::Point3f> trainPtsTmp;
2119 std::vector<cv::KeyPoint> queryKptsTmp;
// Count how many times each trainIdx appears among the kept matches.
2121 std::map<int, int> mapOfTrainIdx;
2123 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2124 mapOfTrainIdx[it->trainIdx]++;
// Keep only uniquely-matched train keypoints, re-indexing queryIdx again.
2128 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2129 if (mapOfTrainIdx[it->trainIdx] == 1) {
2130 mTmp.push_back(cv::DMatch((
int)queryKptsTmp.size(), it->trainIdx, it->distance));
2132 if (!m_trainPoints.empty()) {
2133 trainPtsTmp.push_back(m_trainPoints[(
size_t)it->trainIdx]);
2135 queryKptsTmp.push_back(queryKpts[(
size_t)it->queryIdx]);
2139 m_filteredMatches = mTmp;
2140 m_objectFilteredPoints = trainPtsTmp;
2141 m_queryFilteredKeyPoints = queryKptsTmp;
// else branch (elided): publish the unfiltered lists as-is.
2143 m_filteredMatches = m;
2144 m_objectFilteredPoints = trainPts;
2145 m_queryFilteredKeyPoints = queryKpts;
2158 objectPoints = m_objectFilteredPoints;
// Initialize the keypoint machinery: for OpenCV 2.4.x builds with the nonfree
// module, register SIFT/SURF via cv::initModule_nonfree() first, then create
// the configured detectors and extractors.
2223 void vpKeyPoint::init()
2226 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000) 2228 if (!cv::initModule_nonfree()) {
2229 std::cerr <<
"Cannot init module non free, SIFT or SURF cannot be used." << std::endl;
2239 initDetectors(m_detectorNames);
2240 initExtractors(m_extractorNames);
// Create one feature detector by name and store it in m_detectors.
// OpenCV < 3.0: delegate to the cv::FeatureDetector factory. OpenCV >= 3.0:
// dispatch on the name; a "Pyramid" prefix wraps the detector in a
// PyramidAdaptedFeatureDetector (multi-scale detection). Detectors from the
// xfeatures2d contrib module throw a descriptive error when the module is
// absent. NOTE(review): `throw vpException(...)` lines and several #else/#endif
// markers were lost in extraction — the collapsed "#if ... NNNN code" lines
// each contain a preprocessor directive fused with the following statement.
2249 void vpKeyPoint::initDetector(
const std::string &detectorName)
2251 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) 2252 m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
2254 if (m_detectors[detectorName] == NULL) {
2255 std::stringstream ss_msg;
2256 ss_msg <<
"Fail to initialize the detector: " << detectorName
2257 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV >= 3.0 path: strip an optional "Pyramid" prefix from the name.
2261 std::string detectorNameTmp = detectorName;
2262 std::string pyramid =
"Pyramid";
2263 std::size_t pos = detectorName.find(pyramid);
2264 bool usePyramid =
false;
2265 if (pos != std::string::npos) {
2266 detectorNameTmp = detectorName.substr(pos + pyramid.size());
2270 if (detectorNameTmp ==
"SIFT") {
2271 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2272 cv::Ptr<cv::FeatureDetector> siftDetector = cv::xfeatures2d::SIFT::create();
2274 m_detectors[detectorNameTmp] = siftDetector;
2276 std::cerr <<
"You should not use SIFT with Pyramid feature detection!" << std::endl;
2277 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2280 std::stringstream ss_msg;
2281 ss_msg <<
"Fail to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2282 <<
" was not build with xFeatures2d module.";
2285 }
else if (detectorNameTmp ==
"SURF") {
2286 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2287 cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
2289 m_detectors[detectorNameTmp] = surfDetector;
2291 std::cerr <<
"You should not use SURF with Pyramid feature detection!" << std::endl;
2292 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
2295 std::stringstream ss_msg;
2296 ss_msg <<
"Fail to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2297 <<
" was not build with xFeatures2d module.";
2300 }
else if (detectorNameTmp ==
"FAST") {
2301 cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
2303 m_detectors[detectorNameTmp] = fastDetector;
2305 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2307 }
else if (detectorNameTmp ==
"MSER") {
2308 cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
2310 m_detectors[detectorNameTmp] = fastDetector;
2312 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2314 }
else if (detectorNameTmp ==
"ORB") {
2315 cv::Ptr<cv::FeatureDetector> orbDetector = cv::ORB::create();
2317 m_detectors[detectorNameTmp] = orbDetector;
2319 std::cerr <<
"You should not use ORB with Pyramid feature detection!" << std::endl;
2320 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
2322 }
else if (detectorNameTmp ==
"BRISK") {
2323 cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
2325 m_detectors[detectorNameTmp] = briskDetector;
2327 std::cerr <<
"You should not use BRISK with Pyramid feature detection!" << std::endl;
2328 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
2330 }
else if (detectorNameTmp ==
"KAZE") {
2331 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
2333 m_detectors[detectorNameTmp] = kazeDetector;
2335 std::cerr <<
"You should not use KAZE with Pyramid feature detection!" << std::endl;
2336 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
2338 }
else if (detectorNameTmp ==
"AKAZE") {
2339 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
2341 m_detectors[detectorNameTmp] = akazeDetector;
2343 std::cerr <<
"You should not use AKAZE with Pyramid feature detection!" << std::endl;
2344 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
2346 }
else if (detectorNameTmp ==
"GFTT") {
2347 cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
2349 m_detectors[detectorNameTmp] = gfttDetector;
2351 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
2353 }
else if (detectorNameTmp ==
"SimpleBlob") {
2354 cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
2356 m_detectors[detectorNameTmp] = simpleBlobDetector;
2358 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
2360 }
else if (detectorNameTmp ==
"STAR") {
2361 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2362 cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
2364 m_detectors[detectorNameTmp] = starDetector;
2366 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
2369 std::stringstream ss_msg;
2370 ss_msg <<
"Fail to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2371 <<
" was not build with xFeatures2d module.";
2374 }
else if (detectorNameTmp ==
"AGAST") {
2375 cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
2377 m_detectors[detectorNameTmp] = agastDetector;
2379 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
2381 }
else if (detectorNameTmp ==
"MSD") {
// MSD additionally requires OpenCV >= 3.1.
2382 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) 2383 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) 2384 cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
2386 m_detectors[detectorNameTmp] = msdDetector;
2388 std::cerr <<
"You should not use MSD with Pyramid feature detection!" << std::endl;
2389 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
2392 std::stringstream ss_msg;
2393 ss_msg <<
"Fail to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2394 <<
" was not build with xFeatures2d module.";
2398 std::stringstream ss_msg;
2399 ss_msg <<
"Feature " << detectorName <<
" is not available in OpenCV version: " << std::hex
2400 << VISP_HAVE_OPENCV_VERSION <<
" (require >= OpenCV 3.1).";
// Unknown name: report but do not throw here.
2403 std::cerr <<
"The detector:" << detectorNameTmp <<
" is not available." << std::endl;
// Verify the detector was actually created (key depends on usePyramid —
// the branch condition itself is elided).
2406 bool detectorInitialized =
false;
2409 detectorInitialized = !m_detectors[detectorNameTmp].empty();
2412 detectorInitialized = !m_detectors[detectorName].empty();
2415 if (!detectorInitialized) {
2416 std::stringstream ss_msg;
2417 ss_msg <<
"Fail to initialize the detector: " << detectorNameTmp
2418 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Initialize each detector listed in detectorNames (loop body elided in
// extraction — presumably calls initDetector(*it) for each name).
2431 void vpKeyPoint::initDetectors(
const std::vector<std::string> &detectorNames)
2433 for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
// Create one descriptor extractor by name and store it in m_extractors.
// OpenCV < 3.0 uses the cv::DescriptorExtractor factory; OpenCV >= 3.0
// dispatches on the name. Extractors from the xfeatures2d contrib module
// (and VGG/BoostDesc, which also need OpenCV >= 3.2) raise a descriptive
// error when unavailable. NOTE(review): `throw vpException(...)` lines and
// several #else/#endif markers were lost in extraction; the collapsed
// "#if ... NNNN code" lines fuse a directive with the following statement.
2443 void vpKeyPoint::initExtractor(
const std::string &extractorName)
2445 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) 2446 m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
2448 if (extractorName ==
"SIFT") {
2449 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2450 m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
2452 std::stringstream ss_msg;
2453 ss_msg <<
"Fail to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2454 <<
" was not build with xFeatures2d module.";
2457 }
else if (extractorName ==
"SURF") {
// SURF is created with extended descriptors (128-float variant).
2458 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2460 m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3,
true);
2462 std::stringstream ss_msg;
2463 ss_msg <<
"Fail to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2464 <<
" was not build with xFeatures2d module.";
2467 }
else if (extractorName ==
"ORB") {
2468 m_extractors[extractorName] = cv::ORB::create();
2469 }
else if (extractorName ==
"BRISK") {
2470 m_extractors[extractorName] = cv::BRISK::create();
2471 }
else if (extractorName ==
"FREAK") {
2472 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2473 m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2475 std::stringstream ss_msg;
2476 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2477 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2480 }
else if (extractorName ==
"BRIEF") {
2481 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2482 m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2484 std::stringstream ss_msg;
2485 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2486 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2489 }
else if (extractorName ==
"KAZE") {
2490 m_extractors[extractorName] = cv::KAZE::create();
2491 }
else if (extractorName ==
"AKAZE") {
2492 m_extractors[extractorName] = cv::AKAZE::create();
2493 }
else if (extractorName ==
"DAISY") {
2494 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2495 m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2497 std::stringstream ss_msg;
2498 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2499 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2502 }
else if (extractorName ==
"LATCH") {
2503 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2504 m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2506 std::stringstream ss_msg;
2507 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2508 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2511 }
else if (extractorName ==
"LUCID") {
// NOTE(review): the LUCID creation call (original lines 2513-2516) is
// elided; only the no-xfeatures2d error path survived extraction.
2512 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2517 std::stringstream ss_msg;
2518 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2519 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2522 }
else if (extractorName ==
"VGG") {
2523 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) 2524 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) 2525 m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2527 std::stringstream ss_msg;
2528 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2529 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2533 std::stringstream ss_msg;
2534 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2535 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
2538 }
else if (extractorName ==
"BoostDesc") {
2539 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) 2540 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) 2541 m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2543 std::stringstream ss_msg;
2544 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2545 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2549 std::stringstream ss_msg;
2550 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2551 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
// Unknown name: report but do not throw here.
2555 std::cerr <<
"The extractor:" << extractorName <<
" is not available." << std::endl;
// Final verification that the extractor was created.
2559 if (!m_extractors[extractorName]) {
2560 std::stringstream ss_msg;
2561 ss_msg <<
"Fail to initialize the extractor: " << extractorName
2562 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV 2.4.x only: switch SURF to extended (128-element) descriptors
// through the generic parameter interface.
2566 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) 2567 if (extractorName ==
"SURF") {
2569 m_extractors[extractorName]->set(
"extended", 1);
// Initialize each extractor listed in extractorNames (loop body elided —
// presumably calls initExtractor(*it)), then scan the created extractors to
// detect mixed descriptor types (binary CV_8U vs floating CV_32F); the action
// taken on a mismatch is in elided lines.
2580 void vpKeyPoint::initExtractors(
const std::vector<std::string> &extractorNames)
2582 for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
2586 int descriptorType = CV_32F;
2587 bool firstIteration =
true;
2588 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2589 it != m_extractors.end(); ++it) {
// Remember the first extractor's type, then compare the rest against it.
2590 if (firstIteration) {
2591 firstIteration =
false;
2592 descriptorType = it->second->descriptorType();
2594 if (descriptorType != it->second->descriptorType()) {
// Populate m_mapOfDetectorNames / m_mapOfDescriptorNames with the string names
// matching each enum value, guarded by the OpenCV version/module combinations
// under which each feature is available. NOTE(review): most map-assignment
// lines were elided; only a few entries survived extraction, with #if
// directives fused onto the same physical line.
2601 void vpKeyPoint::initFeatureNames()
2604 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403) 2611 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D)) 2612 m_mapOfDetectorNames[DETECTOR_STAR] =
"STAR";
2614 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) 2618 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 2623 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D) 2624 m_mapOfDetectorNames[DETECTOR_MSD] =
"MSD";
2628 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403) 2631 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D)) 2632 m_mapOfDescriptorNames[DESCRIPTOR_FREAK] =
"FREAK";
2633 m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] =
"BRIEF";
2635 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) 2639 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 2642 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) 2643 m_mapOfDescriptorNames[DESCRIPTOR_DAISY] =
"DAISY";
2644 m_mapOfDescriptorNames[DESCRIPTOR_LATCH] =
"LATCH";
2647 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D) 2648 m_mapOfDescriptorNames[DESCRIPTOR_VGG] =
"VGG";
2649 m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] =
"BoostDesc";
// NOTE(review): fragment of vpKeyPoint::initMatcher(const std::string &) — the
// function header was lost in extraction. Creates the descriptor matcher: for
// "FlannBased" it selects the FLANN index from the extractors' descriptor type
// (LSH for binary CV_8U descriptors, KD-tree otherwise); any other name goes
// through the cv::DescriptorMatcher factory.
2661 int descriptorType = CV_32F;
2662 bool firstIteration =
true;
2663 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2664 it != m_extractors.end(); ++it) {
2665 if (firstIteration) {
2666 firstIteration =
false;
2667 descriptorType = it->second->descriptorType();
// Mismatch handling (elided) when extractors disagree on descriptor type.
2669 if (descriptorType != it->second->descriptorType()) {
2675 if (matcherName ==
"FlannBased") {
2676 if (m_extractors.empty()) {
2677 std::cout <<
"Warning: No extractor initialized, by default use " 2678 "floating values (CV_32F) " 2679 "for descriptor type !" 2683 if (descriptorType == CV_8U) {
// Binary descriptors: LSH index (12 tables, key size 20, probe level 2).
2684 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 2685 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2687 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::LshIndexParams(12, 20, 2));
// Floating-point descriptors: default KD-tree index.
2690 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 2691 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2693 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::KDTreeIndexParams());
2697 m_matcher = cv::DescriptorMatcher::create(matcherName);
// OpenCV 2.4.x BruteForce matcher: apply the cross-check option (only
// meaningful when knn matching is disabled).
2700 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) 2701 if (m_matcher != NULL && !m_useKnn && matcherName ==
"BruteForce") {
2702 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
2707 std::stringstream ss_msg;
2708 ss_msg <<
"Fail to initialize the matcher: " << matcherName
2709 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// NOTE(review): tails of two vpKeyPoint::insertImageMatching(IRef, ICurrent,
// IMatching) overloads (grayscale and color) — the surrounding signatures and
// topLeftCorner setup were lost in extraction. Each copies the reference image
// and the current image side by side into the matching canvas.
2726 IMatching.
insert(IRef, topLeftCorner);
2728 IMatching.
insert(ICurrent, topLeftCorner);
2743 IMatching.
insert(IRef, topLeftCorner);
2745 IMatching.
insert(ICurrent, topLeftCorner);
// NOTE(review): fragment of vpKeyPoint::insertImageMatching(ICurrent, IMatching)
// — signature elided. Builds the mosaic canvas: training images on a
// near-square grid with the current image inserted in the median (center) cell.
2759 int nbImg = (int)(m_mapOfImages.size() + 1);
2761 if (m_mapOfImages.empty()) {
2762 std::cerr <<
"There is no training image loaded !" << std::endl;
// Grid dimensions (nbImgSqrt computation elided).
2772 int nbWidth = nbImgSqrt;
2773 int nbHeight = nbImgSqrt;
2775 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Cell size = max training image dimensions.
2780 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2782 if (maxW < it->second.getWidth()) {
2783 maxW = it->second.getWidth();
2786 if (maxH < it->second.getHeight()) {
2787 maxH = it->second.getHeight();
2793 int medianI = nbHeight / 2;
2794 int medianJ = nbWidth / 2;
2795 int medianIndex = medianI * nbWidth + medianJ;
// Place each training image; cells at or past the median index shift by
// one to leave the center cell free (cpt increment elided).
2798 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2800 int local_cpt = cpt;
2801 if (cpt >= medianIndex) {
2806 int indexI = local_cpt / nbWidth;
2807 int indexJ = local_cpt - (indexI * nbWidth);
2808 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2810 IMatching.
insert(it->second, topLeftCorner);
// Finally insert the current image into the center cell.
2813 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2814 IMatching.
insert(ICurrent, topLeftCorner);
// NOTE(review): fragment of another vpKeyPoint::insertImageMatching overload —
// signature elided. Same mosaic layout as above: training images on a grid
// (IRef inserted per cell in an elided branch) with the current image in the
// median cell.
2829 int nbImg = (int)(m_mapOfImages.size() + 1);
2831 if (m_mapOfImages.empty()) {
2832 std::cerr <<
"There is no training image loaded !" << std::endl;
2844 int nbWidth = nbImgSqrt;
2845 int nbHeight = nbImgSqrt;
2847 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Cell size = max training image dimensions.
2852 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2854 if (maxW < it->second.getWidth()) {
2855 maxW = it->second.getWidth();
2858 if (maxH < it->second.getHeight()) {
2859 maxH = it->second.getHeight();
2865 int medianI = nbHeight / 2;
2866 int medianJ = nbWidth / 2;
2867 int medianIndex = medianI * nbWidth + medianJ;
// Place training cells, skipping the reserved center cell.
2870 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2872 int local_cpt = cpt;
2873 if (cpt >= medianIndex) {
2878 int indexI = local_cpt / nbWidth;
2879 int indexJ = local_cpt - (indexI * nbWidth);
2880 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2884 IMatching.
insert(IRef, topLeftCorner);
// Current image goes into the center cell.
2887 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2888 IMatching.
insert(ICurrent, topLeftCorner);
// NOTE(review): fragment of vpKeyPoint::loadConfigFile(const std::string &) —
// signature elided. Requires pugixml; resets the current detector/extractor
// configuration, then parses the XML configuration file (xmlp is presumably a
// vpXmlConfigParserKeyPoint declared in an elided line — verify).
2892 #ifdef VISP_HAVE_PUGIXML 2904 m_detectorNames.clear();
2905 m_extractorNames.clear();
2906 m_detectors.clear();
2907 m_extractors.clear();
2909 std::cout <<
" *********** Parsing XML for configuration for vpKeyPoint " 2912 xmlp.
parse(configFile);
// NOTE(review): fragment of vpKeyPoint::loadLearningData(filename, binaryMode,
// append) — signature and many interior lines (file reads, throws, else/endif
// markers) were lost in extraction. Loads a previously-saved training database:
// either clears the current data or, when appending, continues class/image ids
// after the existing maxima; then reads keypoints, optional 3D points, image
// data and descriptors from a binary stream or a pugixml document.
2977 int startClassId = 0;
2978 int startImageId = 0;
// Non-append path (condition elided): start from an empty database.
2980 m_trainKeyPoints.clear();
2981 m_trainPoints.clear();
2982 m_mapOfImageId.clear();
2983 m_mapOfImages.clear();
// Append path: continue numbering after the highest existing ids.
2986 for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
2987 if (startClassId < it->first) {
2988 startClassId = it->first;
2993 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2995 if (startImageId < it->first) {
2996 startImageId = it->first;
// --- binary learning-file path ---
3003 if (!parent.empty()) {
3008 std::ifstream file(filename.c_str(), std::ifstream::binary);
3009 if (!file.is_open()) {
// Image data is stored in the file but can only be decoded with visp_io.
3017 #if !defined(VISP_HAVE_MODULE_IO) 3019 std::cout <<
"Warning: The learning file contains image data that will " 3020 "not be loaded as visp_io module " 3021 "is not available !" 3026 for (
int i = 0; i < nbImgs; i++) {
// Read the NUL-terminated image path, one byte at a time.
3034 char *path =
new char[length + 1];
3036 for (
int cpt = 0; cpt < length; cpt++) {
3038 file.read((
char *)(&c),
sizeof(c));
3041 path[length] =
'\0';
3044 #ifdef VISP_HAVE_MODULE_IO 3052 m_mapOfImages[
id + startImageId] = I;
// Flag telling whether 3D object coordinates follow each keypoint.
3060 int have3DInfoInt = 0;
3062 bool have3DInfo = have3DInfoInt != 0;
// Descriptor matrix header: rows/cols elided; type defaults to CV_32F (5).
3073 int descriptorType = 5;
3076 cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3077 for (
int i = 0; i < nRows; i++) {
// Per-keypoint record: position, size, angle, response, octave,
// class id (offset by startClassId) and source image id.
3079 float u, v, size, angle, response;
3080 int octave, class_id, image_id;
3089 cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
3090 m_trainKeyPoints.push_back(keyPoint);
3092 if (image_id != -1) {
3093 #ifdef VISP_HAVE_MODULE_IO 3095 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
// Optional 3D object coordinates.
3105 m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
// Descriptor row: element read depends on the stored OpenCV type.
3108 for (
int j = 0; j < nCols; j++) {
3110 switch (descriptorType) {
3112 unsigned char value;
3113 file.read((
char *)(&value),
sizeof(value));
3114 trainDescriptorsTmp.at<
unsigned char>(i, j) = value;
3119 file.read((
char *)(&value),
sizeof(value));
3120 trainDescriptorsTmp.at<
char>(i, j) = value;
3124 unsigned short int value;
3126 trainDescriptorsTmp.at<
unsigned short int>(i, j) = value;
3132 trainDescriptorsTmp.at<
short int>(i, j) = value;
3138 trainDescriptorsTmp.at<
int>(i, j) = value;
3144 trainDescriptorsTmp.at<
float>(i, j) = value;
3150 trainDescriptorsTmp.at<
double>(i, j) = value;
// Default case: fall back to float storage.
3156 trainDescriptorsTmp.at<
float>(i, j) = value;
// Merge with existing descriptors when appending.
3162 if (!append || m_trainDescriptors.empty()) {
3163 trainDescriptorsTmp.copyTo(m_trainDescriptors);
3165 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
// --- XML learning-file path (requires pugixml) ---
3170 #ifdef VISP_HAVE_PUGIXML 3171 pugi::xml_document doc;
3174 if (!doc.load_file(filename.c_str())) {
3178 pugi::xml_node root_element = doc.document_element();
3180 int descriptorType = CV_32F;
3181 int nRows = 0, nCols = 0;
3184 cv::Mat trainDescriptorsTmp;
3186 for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node; first_level_node = first_level_node.next_sibling()) {
3188 std::string name(first_level_node.name());
// <TrainingImageInfo>: image ids and their file paths.
3189 if (first_level_node.type() == pugi::node_element && name ==
"TrainingImageInfo") {
3191 for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node; image_info_node = image_info_node.next_sibling()) {
3192 name = std::string(image_info_node.name());
3194 if (name ==
"trainImg") {
3196 int id = image_info_node.attribute(
"image_id").as_int();
3199 #ifdef VISP_HAVE_MODULE_IO 3200 std::string path(image_info_node.text().as_string());
3209 m_mapOfImages[
id + startImageId] = I;
// <DescriptorsInfo>: descriptor matrix dimensions and OpenCV type.
3213 }
else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorsInfo") {
3214 for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
3215 descriptors_info_node = descriptors_info_node.next_sibling()) {
3216 if (descriptors_info_node.type() == pugi::node_element) {
3217 name = std::string(descriptors_info_node.name());
3219 if (name ==
"nrows") {
3220 nRows = descriptors_info_node.text().as_int();
3221 }
else if (name ==
"ncols") {
3222 nCols = descriptors_info_node.text().as_int();
3223 }
else if (name ==
"type") {
3224 descriptorType = descriptors_info_node.text().as_int();
3229 trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
// <DescriptorInfo>: one keypoint record plus its descriptor values.
3230 }
else if (first_level_node.type() == pugi::node_element && name ==
"DescriptorInfo") {
3231 double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
3232 int octave = 0, class_id = 0, image_id = 0;
3233 double oX = 0.0, oY = 0.0, oZ = 0.0;
3235 std::stringstream ss;
3237 for (pugi::xml_node point_node = first_level_node.first_child(); point_node; point_node = point_node.next_sibling()) {
3238 if (point_node.type() == pugi::node_element) {
3239 name = std::string(point_node.name());
3243 u = point_node.text().as_double();
3244 }
else if (name ==
"v") {
3245 v = point_node.text().as_double();
3246 }
else if (name ==
"size") {
3247 size = point_node.text().as_double();
3248 }
else if (name ==
"angle") {
3249 angle = point_node.text().as_double();
3250 }
else if (name ==
"response") {
3251 response = point_node.text().as_double();
3252 }
else if (name ==
"octave") {
3253 octave = point_node.text().as_int();
3254 }
else if (name ==
"class_id") {
// class_id closes the record: the cv::KeyPoint is created here.
3255 class_id = point_node.text().as_int();
3256 cv::KeyPoint keyPoint(cv::Point2f((
float)u, (
float)v), (
float)size, (
float)angle, (
float)response, octave,
3257 (class_id + startClassId));
3258 m_trainKeyPoints.push_back(keyPoint);
3259 }
else if (name ==
"image_id") {
3260 image_id = point_node.text().as_int();
3261 if (image_id != -1) {
3262 #ifdef VISP_HAVE_MODULE_IO 3264 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3267 }
else if (name ==
"oX") {
3268 oX = point_node.text().as_double();
3269 }
else if (name ==
"oY") {
3270 oY = point_node.text().as_double();
3271 }
else if (name ==
"oZ") {
// oZ completes the 3D point record.
3272 oZ = point_node.text().as_double();
3273 m_trainPoints.push_back(cv::Point3f((
float)oX, (
float)oY, (
float)oZ));
3274 }
else if (name ==
"desc") {
// Descriptor values are parsed from text via a stringstream into
// the element type matching descriptorType.
3277 for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
3278 descriptor_value_node = descriptor_value_node.next_sibling()) {
3280 if (descriptor_value_node.type() == pugi::node_element) {
3282 std::string parseStr(descriptor_value_node.text().as_string());
3287 switch (descriptorType) {
3292 trainDescriptorsTmp.at<
unsigned char>(i, j) = (
unsigned char)parseValue;
3299 trainDescriptorsTmp.at<
char>(i, j) = (
char)parseValue;
3303 ss >> trainDescriptorsTmp.at<
unsigned short int>(i, j);
3307 ss >> trainDescriptorsTmp.at<
short int>(i, j);
3311 ss >> trainDescriptorsTmp.at<
int>(i, j);
3315 ss >> trainDescriptorsTmp.at<
float>(i, j);
3319 ss >> trainDescriptorsTmp.at<
double>(i, j);
3323 ss >> trainDescriptorsTmp.at<
float>(i, j);
3327 std::cerr <<
"Error when converting:" << ss.str() << std::endl;
// Merge with existing descriptors when appending.
3340 if (!append || m_trainDescriptors.empty()) {
3341 trainDescriptorsTmp.copyTo(m_trainDescriptors);
3343 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
// pugixml missing: loading XML learning data is impossible.
3346 std::cout <<
"Error: pugixml is not properly built!" << std::endl;
// Feed the (re)built training descriptors to the matcher and refresh
// the current image counter.
3356 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
3362 m_currentImageId = (int)m_mapOfImages.size();
3374 std::vector<cv::DMatch> &matches,
double &elapsedTime
// k-NN matching branch: keep the 2 best candidates per descriptor so a
// ratio test can be applied later by the match filter.
3379 m_knnMatches.clear();
3381 if (m_useMatchTrainToQuery) {
3382 std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
// Match train -> query on a clone of the matcher (the clone keeps the
// matcher parameters but not the stored train descriptors).
3385 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3386 matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
// Swap queryIdx/trainIdx so the result reads as query -> train,
// consistent with the default matching direction.
3388 for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
3389 it1 != knnMatchesTmp.end(); ++it1) {
3390 std::vector<cv::DMatch> tmp;
3391 for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
3392 tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
3394 m_knnMatches.push_back(tmp);
// Collapse each k-NN candidate list to its single best match.
3397 matches.resize(m_knnMatches.size());
3398 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
// Default direction: query -> train directly on the configured matcher.
3401 m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
3402 matches.resize(m_knnMatches.size());
3403 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
// Plain (non k-NN) matching branch: one best match per descriptor.
3408 if (m_useMatchTrainToQuery) {
3409 std::vector<cv::DMatch> matchesTmp;
// Same train -> query trick as above, on a parameter-preserving clone.
3411 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3412 matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
// Swap indices back to the query -> train convention.
3414 for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
3415 matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
3419 m_matcher->match(queryDescriptors, matches);
3485 if (m_trainDescriptors.empty()) {
3486 std::cerr <<
"Reference is empty." << std::endl;
3488 std::cerr <<
"Reference is not computed." << std::endl;
3490 std::cerr <<
"Matching is not possible." << std::endl;
3495 if (m_useAffineDetection) {
3496 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3497 std::vector<cv::Mat> listOfQueryDescriptors;
3503 m_queryKeyPoints.clear();
3504 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3505 it != listOfQueryKeyPoints.end(); ++it) {
3506 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3510 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3514 it->copyTo(m_queryDescriptors);
3516 m_queryDescriptors.push_back(*it);
3520 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3521 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3524 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3527 m_queryFilteredKeyPoints.clear();
3528 m_objectFilteredPoints.clear();
3529 m_filteredMatches.clear();
3533 if (m_useMatchTrainToQuery) {
3535 m_queryFilteredKeyPoints.clear();
3536 m_filteredMatches.clear();
3537 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3538 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3539 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3542 m_queryFilteredKeyPoints = m_queryKeyPoints;
3543 m_filteredMatches = m_matches;
3546 if (!m_trainPoints.empty()) {
3547 m_objectFilteredPoints.clear();
3551 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3553 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3562 return static_cast<unsigned int>(m_filteredMatches.size());
3594 double error, elapsedTime;
3595 return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3613 double error, elapsedTime;
3614 return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
3637 if (m_trainDescriptors.empty()) {
3638 std::cerr <<
"Reference is empty." << std::endl;
3640 std::cerr <<
"Reference is not computed." << std::endl;
3642 std::cerr <<
"Matching is not possible." << std::endl;
3647 if (m_useAffineDetection) {
3648 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3649 std::vector<cv::Mat> listOfQueryDescriptors;
3655 m_queryKeyPoints.clear();
3656 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3657 it != listOfQueryKeyPoints.end(); ++it) {
3658 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3662 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3666 it->copyTo(m_queryDescriptors);
3668 m_queryDescriptors.push_back(*it);
3672 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3673 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3676 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3678 elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3681 m_queryFilteredKeyPoints.clear();
3682 m_objectFilteredPoints.clear();
3683 m_filteredMatches.clear();
3687 if (m_useMatchTrainToQuery) {
3689 m_queryFilteredKeyPoints.clear();
3690 m_filteredMatches.clear();
3691 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3692 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3693 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3696 m_queryFilteredKeyPoints = m_queryKeyPoints;
3697 m_filteredMatches = m_matches;
3700 if (!m_trainPoints.empty()) {
3701 m_objectFilteredPoints.clear();
3705 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3707 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3719 m_ransacInliers.clear();
3720 m_ransacOutliers.clear();
3722 if (m_useRansacVVS) {
3723 std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3727 for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3728 it != m_objectFilteredPoints.end(); ++it, cpt++) {
3732 vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3734 double x = 0.0, y = 0.0;
3739 objectVpPoints[cpt] = pt;
3742 std::vector<vpPoint> inliers;
3743 std::vector<unsigned int> inlierIndex;
3745 bool res =
computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3747 std::map<unsigned int, bool> mapOfInlierIndex;
3748 m_matchRansacKeyPointsToPoints.clear();
3750 for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3751 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3752 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3753 mapOfInlierIndex[*it] =
true;
3756 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3757 if (mapOfInlierIndex.find((
unsigned int)i) == mapOfInlierIndex.end()) {
3758 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3762 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3764 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3765 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3766 m_ransacInliers.begin(), matchRansacToVpImage);
3768 elapsedTime += m_poseTime;
3772 std::vector<cv::Point2f> imageFilteredPoints;
3773 cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3774 std::vector<int> inlierIndex;
3775 bool res =
computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3777 std::map<int, bool> mapOfInlierIndex;
3778 m_matchRansacKeyPointsToPoints.clear();
3780 for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3781 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3782 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3783 mapOfInlierIndex[*it] =
true;
3786 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3787 if (mapOfInlierIndex.find((
int)i) == mapOfInlierIndex.end()) {
3788 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3792 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3794 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3795 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3796 m_ransacInliers.begin(), matchRansacToVpImage);
3798 elapsedTime += m_poseTime;
3824 return (
matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
3847 vpImagePoint ¢erOfGravity,
const bool isPlanarObject,
3848 std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3849 double *meanDescriptorDistance,
double *detection_score,
const vpRect &rectangle)
3851 if (imPts1 != NULL && imPts2 != NULL) {
3858 double meanDescriptorDistanceTmp = 0.0;
3859 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3860 meanDescriptorDistanceTmp += (double)it->distance;
3863 meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3864 double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3866 if (meanDescriptorDistance != NULL) {
3867 *meanDescriptorDistance = meanDescriptorDistanceTmp;
3869 if (detection_score != NULL) {
3870 *detection_score = score;
3873 if (m_filteredMatches.size() >= 4) {
3875 std::vector<cv::Point2f> points1(m_filteredMatches.size());
3877 std::vector<cv::Point2f> points2(m_filteredMatches.size());
3879 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3880 points1[i] = cv::Point2f(m_trainKeyPoints[(
size_t)m_filteredMatches[i].trainIdx].pt);
3881 points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(
size_t)m_filteredMatches[i].queryIdx].pt);
3884 std::vector<vpImagePoint> inliers;
3885 if (isPlanarObject) {
3886 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) 3887 cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3889 cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3892 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3894 cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3895 realPoint.at<
double>(0, 0) = points1[i].x;
3896 realPoint.at<
double>(1, 0) = points1[i].y;
3897 realPoint.at<
double>(2, 0) = 1.f;
3899 cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3900 double err_x = (reprojectedPoint.at<
double>(0, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].x;
3901 double err_y = (reprojectedPoint.at<
double>(1, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].y;
3902 double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3904 if (reprojectionError < 6.0) {
3905 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3906 if (imPts1 != NULL) {
3907 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3910 if (imPts2 != NULL) {
3911 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3915 }
else if (m_filteredMatches.size() >= 8) {
3916 cv::Mat fundamentalInliers;
3917 cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3919 for (
size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
3920 if (fundamentalInliers.at<uchar>((
int)i, 0)) {
3921 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3923 if (imPts1 != NULL) {
3924 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3927 if (imPts2 != NULL) {
3928 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3934 if (!inliers.empty()) {
3941 double meanU = 0.0, meanV = 0.0;
3942 for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
3943 meanU += it->get_u();
3944 meanV += it->get_v();
3947 meanU /= (double)inliers.size();
3948 meanV /= (double)inliers.size();
3950 centerOfGravity.
set_u(meanU);
3951 centerOfGravity.
set_v(meanV);
3959 return meanDescriptorDistanceTmp < m_detectionThreshold;
3961 return score > m_detectionScore;
3989 bool isMatchOk =
matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3994 std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
3996 for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
4000 modelImagePoints[cpt] = imPt;
4009 double meanU = 0.0, meanV = 0.0;
4010 for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
4011 meanU += it->get_u();
4012 meanV += it->get_v();
4015 meanU /= (double)m_ransacInliers.size();
4016 meanV /= (double)m_ransacInliers.size();
4018 centerOfGravity.
set_u(meanU);
4019 centerOfGravity.
set_v(meanV);
4038 std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
4039 std::vector<cv::Mat> &listOfDescriptors,
4045 listOfKeypoints.clear();
4046 listOfDescriptors.clear();
4048 for (
int tl = 1; tl < 6; tl++) {
4049 double t = pow(2, 0.5 * tl);
4050 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4051 std::vector<cv::KeyPoint> keypoints;
4052 cv::Mat descriptors;
4054 cv::Mat timg, mask, Ai;
4057 affineSkew(t, phi, timg, mask, Ai);
4060 if(listOfAffineI != NULL) {
4062 bitwise_and(mask, timg, img_disp);
4065 listOfAffineI->push_back(tI);
4069 cv::bitwise_and(mask, timg, img_disp);
4070 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
4071 cv::imshow(
"Skew", img_disp );
4075 for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4076 it != m_detectors.end(); ++it) {
4077 std::vector<cv::KeyPoint> kp;
4078 it->second->detect(timg, kp, mask);
4079 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4083 extract(timg, keypoints, descriptors, elapsedTime);
4085 for(
unsigned int i = 0; i < keypoints.size(); i++) {
4086 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4087 cv::Mat kpt_t = Ai * cv::Mat(kpt);
4088 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
4089 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
4092 listOfKeypoints.push_back(keypoints);
4093 listOfDescriptors.push_back(descriptors);
4102 std::vector<std::pair<double, int> > listOfAffineParams;
4103 for (
int tl = 1; tl < 6; tl++) {
4104 double t = pow(2, 0.5 * tl);
4105 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4106 listOfAffineParams.push_back(std::pair<double, int>(t, phi));
4110 listOfKeypoints.resize(listOfAffineParams.size());
4111 listOfDescriptors.resize(listOfAffineParams.size());
4113 if (listOfAffineI != NULL) {
4114 listOfAffineI->resize(listOfAffineParams.size());
4117 #ifdef VISP_HAVE_OPENMP 4118 #pragma omp parallel for 4120 for (
int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
4121 std::vector<cv::KeyPoint> keypoints;
4122 cv::Mat descriptors;
4124 cv::Mat timg, mask, Ai;
4127 affineSkew(listOfAffineParams[(
size_t)cpt].first, listOfAffineParams[(
size_t)cpt].second, timg, mask, Ai);
4129 if (listOfAffineI != NULL) {
4131 bitwise_and(mask, timg, img_disp);
4134 (*listOfAffineI)[(size_t)cpt] = tI;
4139 cv::bitwise_and(mask, timg, img_disp);
4140 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
4141 cv::imshow(
"Skew", img_disp );
4145 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4146 it != m_detectors.end(); ++it) {
4147 std::vector<cv::KeyPoint> kp;
4148 it->second->detect(timg, kp, mask);
4149 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4153 extract(timg, keypoints, descriptors, elapsedTime);
4155 for (
size_t i = 0; i < keypoints.size(); i++) {
4156 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4157 cv::Mat kpt_t = Ai * cv::Mat(kpt);
4158 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
4159 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
4162 listOfKeypoints[(size_t)cpt] = keypoints;
4163 listOfDescriptors[(size_t)cpt] = descriptors;
// Restore every member to the same defaults used by the constructors,
// releasing all learned data (train keypoints/descriptors/images).
4179 m_computeCovariance =
false;
4181 m_currentImageId = 0;
// Detection-quality defaults (score threshold and mean-distance threshold).
4183 m_detectionScore = 0.15;
4184 m_detectionThreshold = 100.0;
4185 m_detectionTime = 0.0;
4186 m_detectorNames.clear();
4187 m_detectors.clear();
4188 m_extractionTime = 0.0;
4189 m_extractorNames.clear();
4190 m_extractors.clear();
4191 m_filteredMatches.clear();
4194 m_knnMatches.clear();
// Drop the mapping between keypoint class ids and training images.
4195 m_mapOfImageId.clear();
4196 m_mapOfImages.clear();
// Release the matcher; it is re-created from m_matcherName on init.
4197 m_matcher = cv::Ptr<cv::DescriptorMatcher>();
4198 m_matcherName =
"BruteForce-Hamming";
// Match-filtering thresholds (factor of min distance / Lowe ratio).
4200 m_matchingFactorThreshold = 2.0;
4201 m_matchingRatioThreshold = 0.85;
4202 m_matchingTime = 0.0;
4203 m_matchRansacKeyPointsToPoints.clear();
// RANSAC pose-estimation defaults.
4204 m_nbRansacIterations = 200;
4205 m_nbRansacMinInlierCount = 100;
4206 m_objectFilteredPoints.clear();
4208 m_queryDescriptors = cv::Mat();
4209 m_queryFilteredKeyPoints.clear();
4210 m_queryKeyPoints.clear();
4211 m_ransacConsensusPercentage = 20.0;
4213 m_ransacInliers.clear();
4214 m_ransacOutliers.clear();
// NOTE(review): the constructor initializes m_ransacParallel to false,
// while reset() sets it to true — looks intentional in upstream ViSP,
// but worth confirming.
4215 m_ransacParallel =
true;
4216 m_ransacParallelNbThreads = 0;
4217 m_ransacReprojectionError = 6.0;
4218 m_ransacThreshold = 0.01;
// Forget all learned reference data.
4219 m_trainDescriptors = cv::Mat();
4220 m_trainKeyPoints.clear();
4221 m_trainPoints.clear();
4222 m_trainVpPoints.clear();
4223 m_useAffineDetection =
false;
// Cross-check option only exists for the OpenCV 2.4.x BruteForce matcher.
4224 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) 4225 m_useBruteForceCrossCheck =
true;
4227 m_useConsensusPercentage =
false;
4229 m_useMatchTrainToQuery =
false;
4230 m_useRansacVVS =
true;
4231 m_useSingleMatchFilter =
true;
// Default detector/extractor pair is ORB/ORB (matches the Hamming matcher).
4233 m_detectorNames.push_back(
"ORB");
4234 m_extractorNames.push_back(
"ORB");
4251 if (!parent.empty()) {
4255 std::map<int, std::string> mapOfImgPath;
4256 if (saveTrainingImages) {
4257 #ifdef VISP_HAVE_MODULE_IO 4259 unsigned int cpt = 0;
4261 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
4267 std::stringstream ss;
4268 ss <<
"train_image_" << std::setfill(
'0') << std::setw(3) << cpt;
4270 switch (m_imageFormat) {
4292 std::string imgFilename = ss.str();
4293 mapOfImgPath[it->first] = imgFilename;
4294 vpImageIo::write(it->second, parent + (!parent.empty() ?
"/" :
"") + imgFilename);
4297 std::cout <<
"Warning: in vpKeyPoint::saveLearningData() training images " 4298 "are not saved because " 4299 "visp_io module is not available !" 4304 bool have3DInfo = m_trainPoints.size() > 0;
4305 if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
4311 std::ofstream file(filename.c_str(), std::ofstream::binary);
4312 if (!file.is_open()) {
4317 int nbImgs = (int)mapOfImgPath.size();
4320 #ifdef VISP_HAVE_MODULE_IO 4321 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4327 std::string path = it->second;
4328 int length = (int)path.length();
4331 for (
int cpt = 0; cpt < length; cpt++) {
4332 file.write((
char *)(&path[(
size_t)cpt]),
sizeof(path[(
size_t)cpt]));
4338 int have3DInfoInt = have3DInfo ? 1 : 0;
4341 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4342 int descriptorType = m_trainDescriptors.type();
4353 for (
int i = 0; i < nRows; i++) {
4354 unsigned int i_ = (
unsigned int)i;
4356 float u = m_trainKeyPoints[i_].pt.x;
4360 float v = m_trainKeyPoints[i_].pt.y;
4364 float size = m_trainKeyPoints[i_].size;
4368 float angle = m_trainKeyPoints[i_].angle;
4372 float response = m_trainKeyPoints[i_].response;
4376 int octave = m_trainKeyPoints[i_].octave;
4380 int class_id = m_trainKeyPoints[i_].class_id;
4384 #ifdef VISP_HAVE_MODULE_IO 4385 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4386 int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
4395 float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
4406 for (
int j = 0; j < nCols; j++) {
4408 switch (descriptorType) {
4410 file.write((
char *)(&m_trainDescriptors.at<
unsigned char>(i, j)),
4411 sizeof(m_trainDescriptors.at<
unsigned char>(i, j)));
4415 file.write((
char *)(&m_trainDescriptors.at<
char>(i, j)),
sizeof(m_trainDescriptors.at<
char>(i, j)));
4447 #ifdef VISP_HAVE_PUGIXML 4448 pugi::xml_document doc;
4449 pugi::xml_node node = doc.append_child(pugi::node_declaration);
4450 node.append_attribute(
"version") =
"1.0";
4451 node.append_attribute(
"encoding") =
"UTF-8";
4457 pugi::xml_node root_node = doc.append_child(
"LearningData");
4460 pugi::xml_node image_node = root_node.append_child(
"TrainingImageInfo");
4462 #ifdef VISP_HAVE_MODULE_IO 4463 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4464 pugi::xml_node image_info_node = image_node.append_child(
"trainImg");
4465 image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
4466 std::stringstream ss;
4468 image_info_node.append_attribute(
"image_id") = ss.str().c_str();
4473 pugi::xml_node descriptors_info_node = root_node.append_child(
"DescriptorsInfo");
4475 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4476 int descriptorType = m_trainDescriptors.type();
4479 descriptors_info_node.append_child(
"nrows").append_child(pugi::node_pcdata).text() = nRows;
4482 descriptors_info_node.append_child(
"ncols").append_child(pugi::node_pcdata).text() = nCols;
4485 descriptors_info_node.append_child(
"type").append_child(pugi::node_pcdata).text() = descriptorType;
4487 for (
int i = 0; i < nRows; i++) {
4488 unsigned int i_ = (
unsigned int)i;
4489 pugi::xml_node descriptor_node = root_node.append_child(
"DescriptorInfo");
4491 descriptor_node.append_child(
"u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
4492 descriptor_node.append_child(
"v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
4493 descriptor_node.append_child(
"size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
4494 descriptor_node.append_child(
"angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
4495 descriptor_node.append_child(
"response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
4496 descriptor_node.append_child(
"octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
4497 descriptor_node.append_child(
"class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
4499 #ifdef VISP_HAVE_MODULE_IO 4500 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4501 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() =
4502 ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
4504 descriptor_node.append_child(
"image_id").append_child(pugi::node_pcdata).text() = -1;
4508 descriptor_node.append_child(
"oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
4509 descriptor_node.append_child(
"oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
4510 descriptor_node.append_child(
"oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
4513 pugi::xml_node desc_node = descriptor_node.append_child(
"desc");
4515 for (
int j = 0; j < nCols; j++) {
4516 switch (descriptorType) {
4522 int val_tmp = m_trainDescriptors.at<
unsigned char>(i, j);
4523 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
4531 int val_tmp = m_trainDescriptors.at<
char>(i, j);
4532 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() = val_tmp;
4536 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4537 m_trainDescriptors.at<
unsigned short int>(i, j);
4541 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4542 m_trainDescriptors.at<
short int>(i, j);
4546 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4547 m_trainDescriptors.at<
int>(i, j);
4551 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4552 m_trainDescriptors.at<
float>(i, j);
4556 desc_node.append_child(
"val").append_child(pugi::node_pcdata).text() =
4557 m_trainDescriptors.at<
double>(i, j);
4567 doc.save_file(filename.c_str(), PUGIXML_TEXT(
" "), pugi::format_default, pugi::encoding_utf8);
4569 std::cerr <<
"Error: pugixml is not properly built!" << std::endl;
4574 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000) 4575 #ifndef DOXYGEN_SHOULD_SKIP_THIS 4577 struct KeypointResponseGreaterThanThreshold {
4578 KeypointResponseGreaterThanThreshold(
float _value) : value(_value) {}
4579 inline bool operator()(
const cv::KeyPoint &kpt)
const {
return kpt.response >= value; }
4583 struct KeypointResponseGreater {
4584 inline bool operator()(
const cv::KeyPoint &kp1,
const cv::KeyPoint &kp2)
const {
return kp1.response > kp2.response; }
4588 void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints,
int n_points)
4592 if (n_points >= 0 && keypoints.size() > (size_t)n_points) {
4593 if (n_points == 0) {
4599 std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
4601 float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
4604 std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4605 keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
4608 keypoints.resize((
size_t)(new_end - keypoints.begin()));
4612 struct RoiPredicate {
4613 RoiPredicate(
const cv::Rect &_r) : r(_r) {}
4615 bool operator()(
const cv::KeyPoint &keyPt)
const {
return !r.contains(keyPt.pt); }
4620 void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4623 if (borderSize > 0) {
4624 if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4627 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4628 RoiPredicate(cv::Rect(
4629 cv::Point(borderSize, borderSize),
4630 cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4635 struct SizePredicate {
4636 SizePredicate(
float _minSize,
float _maxSize) : minSize(_minSize), maxSize(_maxSize) {}
4638 bool operator()(
const cv::KeyPoint &keyPt)
const 4640 float size = keyPt.size;
4641 return (size < minSize) || (size > maxSize);
4644 float minSize, maxSize;
4647 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints,
float minSize,
float maxSize)
4649 CV_Assert(minSize >= 0);
4650 CV_Assert(maxSize >= 0);
4651 CV_Assert(minSize <= maxSize);
4653 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4659 MaskPredicate(
const cv::Mat &_mask) : mask(_mask) {}
4660 bool operator()(
const cv::KeyPoint &key_pt)
const 4662 return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
4667 MaskPredicate &operator=(
const MaskPredicate &);
4670 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints,
const cv::Mat &mask)
4675 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4678 struct KeyPoint_LessThan {
4679 KeyPoint_LessThan(
const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) {}
4680 bool operator()(
size_t i,
size_t j)
const 4682 const cv::KeyPoint &kp1 = (*kp)[ i];
4683 const cv::KeyPoint &kp2 = (*kp)[ j];
4685 std::numeric_limits<float>::epsilon())) {
4687 return kp1.pt.x < kp2.pt.x;
4691 std::numeric_limits<float>::epsilon())) {
4693 return kp1.pt.y < kp2.pt.y;
4697 std::numeric_limits<float>::epsilon())) {
4699 return kp1.size > kp2.size;
4703 std::numeric_limits<float>::epsilon())) {
4705 return kp1.angle < kp2.angle;
4709 std::numeric_limits<float>::epsilon())) {
4711 return kp1.response > kp2.response;
4714 if (kp1.octave != kp2.octave) {
4715 return kp1.octave > kp2.octave;
4718 if (kp1.class_id != kp2.class_id) {
4719 return kp1.class_id > kp2.class_id;
4724 const std::vector<cv::KeyPoint> *kp;
4727 void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4729 size_t i, j, n = keypoints.size();
4730 std::vector<size_t> kpidx(n);
4731 std::vector<uchar> mask(n, (uchar)1);
4733 for (i = 0; i < n; i++) {
4736 std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4737 for (i = 1, j = 0; i < n; i++) {
4738 cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4739 cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4742 if (!
vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4743 !
vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4744 !
vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4745 !
vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4752 for (i = j = 0; i < n; i++) {
4755 keypoints[j] = keypoints[i];
4760 keypoints.resize(j);
4766 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(
const cv::Ptr<cv::FeatureDetector> &_detector,
4768 : detector(_detector), maxLevel(_maxLevel)
// Returns true when no usable underlying detector is available.
// NOTE(review): the C-style cast applied to detector->empty() looks
// suspicious (casting a bool) — confirm against upstream ViSP before
// touching this expression.
bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty()
const 4774 return detector.empty() || (cv::FeatureDetector *)detector->empty();
4777 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4778 CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4780 detectImpl(image.getMat(), keypoints, mask.getMat());
4783 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(
const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4784 const cv::Mat &mask)
const 4786 cv::Mat src = image;
4787 cv::Mat src_mask = mask;
4789 cv::Mat dilated_mask;
4790 if (!mask.empty()) {
4791 cv::dilate(mask, dilated_mask, cv::Mat());
4792 cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4793 mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4794 dilated_mask = mask255;
4797 for (
int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2) {
4799 std::vector<cv::KeyPoint> new_pts;
4800 detector->
detect(src, new_pts, src_mask);
4801 std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4802 for (; it != end; ++it) {
4803 it->pt.x *= multiplier;
4804 it->pt.y *= multiplier;
4805 it->size *= multiplier;
4808 keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4817 resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
4822 vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4827 #elif !defined(VISP_BUILD_SHARED_LIBS) 4830 void dummy_vpKeyPoint(){};
Used to indicate that a value is not in the allowed range.
Implementation of a matrix and operations on matrices.
void displayMatching(const vpImage< unsigned char > &IRef, vpImage< unsigned char > &IMatching, unsigned int crossSize, unsigned int lineThickness=1, const vpColor &color=vpColor::green)
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, bool(*func)(const vpHomogeneousMatrix &)=NULL)
static void compute3D(const cv::KeyPoint &candidate, const std::vector< vpPoint > &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
void loadLearningData(const std::string &filename, bool binaryMode=false, bool append=false)
double get_oY() const
Get the point Y coordinate in the object frame.
void detectExtractAffine(const vpImage< unsigned char > &I, std::vector< std::vector< cv::KeyPoint > > &listOfKeypoints, std::vector< cv::Mat > &listOfDescriptors, std::vector< vpImage< unsigned char > > *listOfAffineI=NULL)
void setWorldCoordinates(double oX, double oY, double oZ)
std::vector< unsigned int > getRansacInlierIndex() const
double getMatchingRatioThreshold() const
int getNbRansacMinInlierCount() const
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
double getRansacConsensusPercentage() const
vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType, const std::string &matcherName, const vpFilterMatchingType &filterType=ratioDistanceThreshold)
void createImageMatching(vpImage< unsigned char > &IRef, vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
vpMatchingMethodEnum getMatchingMethod() const
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
Class to define colors available for display functionalities.
static bool equal(double x, double y, double s=0.001)
static const vpColor none
error that can be emitted by ViSP classes.
void setRansacThreshold(const double &t)
vpHomogeneousMatrix inverse() const
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
void getObjectPoints(std::vector< cv::Point3f > &objectPoints) const
static const vpColor green
void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector< cv::DMatch > &matches, double &elapsedTime)
std::vector< vpPoint > getRansacInliers() const
double get_oX() const
Get the point X coordinate in the object frame.
void insertImageMatching(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
VISP_EXPORT double measureTimeMs()
void getTrainPoints(std::vector< cv::Point3f > &points) const
vpRect getBoundingBox() const
Class that defines what is a point.
void set_x(double x)
Set the point x coordinate in the image plane.
void set_y(double y)
Set the point y coordinate in the image plane.
static void write(const vpImage< unsigned char > &I, const std::string &filename)
vpMatrix getCovarianceMatrix() const
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
Defines a generic 2D polygon.
std::string getMatcherName() const
double getRansacThreshold() const
void set_oY(double oY)
Set the point Y coordinate in the object frame.
std::string getDetectorName() const
void setNbParallelRansacThreads(int nb)
void getQueryKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
void getTrainKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpCylinder > &cylinders, const std::vector< std::vector< std::vector< vpImagePoint > > > &vectorOfCylinderRois, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
bool _reference_computed
flag to indicate if the reference has been built.
bool isInside(const vpImagePoint &iP, const PointInPolygonMethod &method=PnPolyRayCasting) const
void setUseParallelRansac(bool use)
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
Generic class defining intrinsic camera parameters.
void display(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, unsigned int size=3)
unsigned int buildReference(const vpImage< unsigned char > &I)
double get_oZ() const
Get the point Z coordinate in the object frame.
std::string getExtractorName() const
static bool isNaN(double value)
void insert(const vpImage< Type > &src, const vpImagePoint &topLeft)
std::vector< vpImagePoint > referenceImagePointsList
void saveLearningData(const std::string &filename, bool binaryMode=false, bool saveTrainingImages=true)
const char * what() const
unsigned int matchPoint(const vpImage< unsigned char > &I)
double getRansacReprojectionError() const
void setRansacMaxTrials(const int &rM)
void setRansacNbInliersToReachConsensus(const unsigned int &nbC)
static int round(double x)
static void displayCircle(const vpImage< unsigned char > &I, const vpImagePoint ¢er, unsigned int radius, const vpColor &color, bool fill=false, unsigned int thickness=1)
void set_oZ(double oZ)
Set the point Z coordinate in the object frame.
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
void set_ij(double ii, double jj)
unsigned int getHeight() const
static void read(vpImage< unsigned char > &I, const std::string &filename)
Implementation of column vector and the associated operations.
void set_oX(double oX)
Set the point X coordinate in the object frame.
double get_x() const
Get the point x coordinate in the image plane.
void setRansacFilterFlag(const RANSAC_FILTER_FLAGS &flag)
bool matchPointAndDetect(const vpImage< unsigned char > &I, vpRect &boundingBox, vpImagePoint ¢erOfGravity, const bool isPlanarObject=true, std::vector< vpImagePoint > *imPts1=NULL, std::vector< vpImagePoint > *imPts2=NULL, double *meanDescriptorDistance=NULL, double *detectionScore=NULL, const vpRect &rectangle=vpRect())
int getNbRansacIterations() const
double get_y() const
Get the point y coordinate in the image plane.
std::vector< unsigned int > matchedReferencePoints
bool getUseRansacVVSPoseEstimation() const
Defines a rectangle in the plane.
std::vector< vpImagePoint > currentImagePointsList
bool computePose(const std::vector< cv::Point2f > &imagePoints, const std::vector< cv::Point3f > &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector< int > &inlierIndex, double &elapsedTime, bool(*func)(const vpHomogeneousMatrix &)=NULL)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
This class defines the container for a plane geometrical structure.
void loadConfigFile(const std::string &configFile)
void addPoint(const vpPoint &P)
unsigned int getWidth() const
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void convertFromOpenCV(const cv::KeyPoint &from, vpImagePoint &to)
Class that consider the case of a translation vector.
Implementation of a rotation vector as axis-angle minimal representation.
double getMatchingFactorThreshold() const
void setCovarianceComputation(const bool &flag)
bool getUseRansacConsensusPercentage() const
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
void initMatcher(const std::string &matcherName)
void extract(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, cv::Mat &descriptors, std::vector< cv::Point3f > *trainPoints=NULL)
void parse(const std::string &filename)
bool detect(const vpImage< unsigned char > &I)