#include <visp/vpKeyPoint.h>

#if (VISP_HAVE_OPENCV_VERSION >= 0x020101)

#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
#  include <opencv2/calib3d/calib3d.hpp>
inline cv::DMatch knnToDMatch(const std::vector<cv::DMatch> &knnMatches) {
  if(knnMatches.size() > 0) {

inline vpImagePoint matchRansacToVpImage(const std::pair<cv::KeyPoint, cv::Point3f> &pair) {
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(),
    m_detectors(), m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(), m_matcherName(matcherName), m_matches(),
    m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85), m_matchingTime(0.),
    m_matchQueryToTrainKeyPoints(), m_matchRansacKeyPointsToPoints(), m_matchRansacQueryToTrainKeyPoints(),
    m_nbRansacIterations(200), m_nbRansacMinInlierCount(100), m_objectFilteredPoints(),
    m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacInliers(), m_ransacOutliers(), m_ransacReprojectionError(6.0),
    m_ransacThreshold(0.001), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(),
    m_trainVpPoints(), m_useAffineDetection(false), m_useBruteForceCrossCheck(true),
    m_useConsensusPercentage(false), m_useKnn(false), m_useRansacVVS(false)

  m_detectorNames.push_back(detectorName);
  m_extractorNames.push_back(extractorName);
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
    m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
    m_filterType(filterType), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(), m_matcherName(matcherName),
    m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85), m_matchingTime(0.),
    m_matchQueryToTrainKeyPoints(), m_matchRansacKeyPointsToPoints(), m_matchRansacQueryToTrainKeyPoints(),
    m_nbRansacIterations(200), m_nbRansacMinInlierCount(100), m_objectFilteredPoints(),
    m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacInliers(), m_ransacOutliers(), m_ransacReprojectionError(6.0),
    m_ransacThreshold(0.001), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(),
    m_trainVpPoints(), m_useAffineDetection(false), m_useBruteForceCrossCheck(true),
    m_useConsensusPercentage(false), m_useKnn(false), m_useRansacVVS(false)
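/*
  Usage sketch (illustrative, not part of the original source): this second
  constructor aggregates the keypoints and descriptors of several detectors
  and extractors. The chosen names are assumptions and require the matching
  OpenCV modules to be available.

  \code
  std::vector<std::string> detectorNames;
  detectorNames.push_back("FAST");
  detectorNames.push_back("ORB");

  std::vector<std::string> extractorNames;
  extractorNames.push_back("ORB");

  vpKeyPoint keypoint(detectorNames, extractorNames, "BruteForce-Hamming");
  \endcode
*/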
void vpKeyPoint::affineSkew(double tilt, double phi, cv::Mat& img, cv::Mat& mask, cv::Mat& Ai) {
  mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));

  cv::Mat A = cv::Mat::eye(2, 3, CV_32F);

  if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {

    A = (cv::Mat_<float>(2, 2) << c, -s, s, c);

    cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
    cv::Mat tcorners = corners * A.t();
    cv::Mat tcorners_x, tcorners_y;
    tcorners.col(0).copyTo(tcorners_x);
    tcorners.col(1).copyTo(tcorners_y);
    std::vector<cv::Mat> channels;
    channels.push_back(tcorners_x);
    channels.push_back(tcorners_y);
    merge(channels, tcorners);

    cv::Rect rect = boundingRect(tcorners);
    A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);

    cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
  }

  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
    double s = 0.8 * sqrt(tilt * tilt - 1);
    cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
    cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
    A.row(0) = A.row(0) / tilt;
  }

  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() || std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
  }

  invertAffineTransform(A, Ai);
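/*
  Note on affineSkew (sketch of the underlying ASIFT idea): each simulated view
  composes an in-plane rotation of angle phi,

      A = [ c -s ]
          [ s  c ],   c = cos(phi), s = sin(phi),

  with a directional subsampling of factor tilt along x (x' = x / tilt). The
  anti-aliasing sigma = 0.8 * sqrt(tilt^2 - 1) applied before the 1/tilt resize
  follows the ASIFT recommendation, and Ai receives the inverse affine map so
  that keypoints detected in the warped image can be brought back to the
  original image frame (see detectExtractAffine() below).
*/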
                                         const unsigned int height, const unsigned int width) {

                                         const vpRect &rectangle) {
  m_trainPoints.clear();
  m_mapOfImageId.clear();
  m_mapOfImages.clear();
  m_currentImageId = 1;

  if(m_useAffineDetection) {
    std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
    std::vector<cv::Mat> listOfTrainDescriptors;

    m_trainKeyPoints.clear();
    for(std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
        it != listOfTrainKeyPoints.end(); ++it) {
      m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());

    for(std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end(); ++it) {

      it->copyTo(m_trainDescriptors);

      m_trainDescriptors.push_back(*it);

    detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
    extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);

  for(std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;

  m_mapOfImages[m_currentImageId] = I;

  return static_cast<unsigned int>(m_trainKeyPoints.size());
                                 std::vector<cv::Point3f> &points3f, bool append) {
  cv::Mat trainDescriptors;
  extract(I, trainKeyPoints, trainDescriptors, m_extractionTime);

  buildReference(I, trainKeyPoints, trainDescriptors, points3f, append);
                                 const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f, bool append) {

    m_currentImageId = 0;
    m_mapOfImageId.clear();
    m_mapOfImages.clear();
    this->m_trainKeyPoints.clear();
    this->m_trainPoints.clear();

  for(std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;

  m_mapOfImages[m_currentImageId] = I;

  this->m_trainKeyPoints.insert(this->m_trainKeyPoints.end(), trainKeyPoints.begin(), trainKeyPoints.end());

  trainDescriptors.copyTo(this->m_trainDescriptors);

  this->m_trainDescriptors.push_back(trainDescriptors);

  this->m_trainPoints.insert(this->m_trainPoints.end(), points3f.begin(), points3f.end());
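/*
  Usage sketch (illustrative, not part of the original source): building the
  reference database with known 3D coordinates; points3f must hold one
  cv::Point3f per keypoint in trainKeyPoints. `I` is assumed to be a
  vpImage<unsigned char> training image loaded by the caller.

  \code
  vpKeyPoint keypoint;
  std::vector<cv::KeyPoint> trainKeyPoints;
  double detectionTime;
  keypoint.detect(I, trainKeyPoints, detectionTime);

  std::vector<cv::Point3f> points3f;
  // fill points3f from the known object geometry, e.g. with
  // vpKeyPoint::compute3DForPointsInPolygons(), one 3D point per keypoint

  keypoint.buildReference(I, trainKeyPoints, points3f, false);
  \endcode
*/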
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();

  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;

  point_obj = cMo.inverse() * point_cam;
  point = cv::Point3f((float) point_obj[0], (float) point_obj[1], (float) point_obj[2]);
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();

  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;

  point_obj = cMo.inverse() * point_cam;
    std::vector<cv::KeyPoint> &candidates, std::vector<vpPolygon> &polygons, std::vector<std::vector<vpPoint> > &roisPt,
    std::vector<cv::Point3f> &points, cv::Mat *descriptors) {

  std::vector<cv::KeyPoint> candidateToCheck = candidates;

  for (std::vector<vpPolygon>::iterator it1 = polygons.begin(); it1 != polygons.end(); ++it1, cpt1++) {

    for (std::vector<cv::KeyPoint>::iterator it2 = candidateToCheck.begin(); it2 != candidateToCheck.end(); cpt2++) {
      iPt.set_ij(it2->pt.y, it2->pt.x);
      if (it1->isInside(iPt)) {
        candidates.push_back(*it2);

        points.push_back(pt);

        if(descriptors != NULL) {
          desc.push_back(descriptors->row(cpt2));

        candidateToCheck.erase(it2);

  if(descriptors != NULL) {
    desc.copyTo(*descriptors);
    std::vector<vpImagePoint> &candidates, std::vector<vpPolygon> &polygons, std::vector<std::vector<vpPoint> > &roisPt,
    std::vector<vpPoint> &points, cv::Mat *descriptors) {

  std::vector<vpImagePoint> candidateToCheck = candidates;

  for (std::vector<vpPolygon>::iterator it1 = polygons.begin(); it1 != polygons.end(); ++it1, cpt1++) {

    for (std::vector<vpImagePoint>::iterator it2 = candidateToCheck.begin(); it2 != candidateToCheck.end(); cpt2++) {
      if (it1->isInside(*it2)) {
        candidates.push_back(*it2);

        points.push_back(pt);

        if(descriptors != NULL) {
          desc.push_back(descriptors->row(cpt2));

        candidateToCheck.erase(it2);
  if(imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {

    std::cerr << "Not enough points to compute the pose (at least 4 points are needed)." << std::endl;

  cv::Mat cameraMatrix =

#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)

    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false,
                       m_nbRansacIterations, (float) m_ransacReprojectionError,

                       cv::SOLVEPNP_ITERATIVE);
    int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
    if(m_useConsensusPercentage) {
      nbInlierToReachConsensus = (int) (m_ransacConsensusPercentage / 100.0 * (double) m_queryFilteredKeyPoints.size());

    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false,
                       m_nbRansacIterations, (float) m_ransacReprojectionError, nbInlierToReachConsensus,

  } catch (cv::Exception &e) {
    std::cerr << e.what() << std::endl;

                                     tvec.at<double>(1), tvec.at<double>(2));
  vpThetaUVector thetaUVector(rvec.at<double>(0), rvec.at<double>(1),
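/*
  Sketch (an assumption: the statements above are truncated in this listing,
  and this is their obvious reading): the rvec/tvec estimated by
  cv::solvePnPRansac are turned into a ViSP pose by pairing the translation
  with the theta-u rotation vector:

  \code
  vpTranslationVector translationVec(tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2));
  vpThetaUVector thetaUVector(rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2));
  cMo = vpHomogeneousMatrix(translationVec, thetaUVector);
  \endcode
*/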
  if(objectVpPoints.size() < 4) {

    std::cerr << "Not enough points to compute the pose (at least 4 points are needed)." << std::endl;

  for(std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {

  unsigned int nbInlierToReachConsensus = (unsigned int) m_nbRansacMinInlierCount;
  if(m_useConsensusPercentage) {
    nbInlierToReachConsensus = (unsigned int) (m_ransacConsensusPercentage / 100.0 *
                                               (double) m_queryFilteredKeyPoints.size());

  bool isRansacPoseEstimationOk = false;

    if(m_computeCovariance) {

    std::cerr << "e=" << e.what() << std::endl;

  if(func != NULL && isRansacPoseEstimationOk) {

  return isRansacPoseEstimationOk;
double vpKeyPoint::computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,

  if(matchKeyPoints.size() == 0) {

  std::vector<double> errors(matchKeyPoints.size());

  for(std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
      it != matchKeyPoints.end(); ++it, cpt++) {

    double u = 0.0, v = 0.0;

    errors[cpt] = std::sqrt((u-it->first.pt.x)*(u-it->first.pt.x) + (v-it->first.pt.y)*(v-it->first.pt.y));

  return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
  unsigned int nbImg = (unsigned int) (m_mapOfImages.size() + 1);

  if(m_mapOfImages.empty()) {

  unsigned int nbImgSqrt = (unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);

  unsigned int nbWidth = nbImgSqrt;

  unsigned int nbHeight = nbImgSqrt;

  if(nbImgSqrt * nbImgSqrt < nbImg) {

  unsigned int maxW = ICurrent.getWidth();
  unsigned int maxH = ICurrent.getHeight();
  for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it) {
    if(maxW < it->second.getWidth()) {
      maxW = it->second.getWidth();

    if(maxH < it->second.getHeight()) {
      maxH = it->second.getHeight();
                       const vpRect &rectangle) {

    cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

    cv::Point leftTop((int) rectangle.getLeft(), (int) rectangle.getTop()),
              rightBottom((int) rectangle.getRight(),
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);

    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U);

  detect(matImg, keyPoints, elapsedTime, mask);
void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const cv::Mat &mask) {

  for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin(); it != m_detectors.end(); ++it) {
    std::vector<cv::KeyPoint> kp;
    it->second->detect(matImg, kp, mask);
    keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
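/*
  Usage sketch (illustrative): restricting detection to a region of interest.
  The mask built above is filled with 255 inside the rectangle and left at 0
  outside, so only keypoints inside the ROI are kept.

  \code
  std::vector<cv::KeyPoint> keyPoints;
  double elapsedTime;
  vpRect roi(10, 10, 200, 150); // left, top, width, height
  keypoint.detect(I, keyPoints, elapsedTime, roi);
  \endcode
*/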
  std::vector<vpImagePoint> vpQueryImageKeyPoints;

  std::vector<vpImagePoint> vpTrainImageKeyPoints;

  for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {

  std::vector<vpImagePoint> vpQueryImageKeyPoints;

  for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
                                unsigned int crossSize, unsigned int lineThickness, const vpColor &color) {

  srand((unsigned int) time(NULL));

  std::vector<vpImagePoint> queryImageKeyPoints;

  std::vector<vpImagePoint> trainImageKeyPoints;

  for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {

    currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
                                const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize, unsigned int lineThickness) {
  if(m_mapOfImages.empty()) {

  int nbImg = (int) (m_mapOfImages.size() + 1);

  int nbImgSqrt = (int) std::floor(std::sqrt((double) nbImg) + 0.5);
  int nbWidth = nbImgSqrt;
  int nbHeight = nbImgSqrt;

  if(nbImgSqrt * nbImgSqrt < nbImg) {

  std::map<int, int> mapOfImageIdIndex;

  for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it, cpt++) {
    mapOfImageIdIndex[it->first] = cpt;

    if(maxW < it->second.getWidth()) {
      maxW = it->second.getWidth();

    if(maxH < it->second.getHeight()) {
      maxH = it->second.getHeight();

  int medianI = nbHeight / 2;
  int medianJ = nbWidth / 2;
  int medianIndex = medianI * nbWidth + medianJ;
  for(std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {

    int current_class_id_index = 0;
    if(mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
      current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];

      current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;

    int indexI = current_class_id_index / nbWidth;
    int indexJ = current_class_id_index - (indexI * nbWidth);
    topLeftCorner.set_ij((int)maxH*indexI, (int)maxW*indexJ);

  vpImagePoint topLeftCorner((int)maxH*medianI, (int)maxW*medianJ);
  for(std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {

  for(std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin();
      it != ransacInliers.end(); ++it) {

  for(std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {

  for(std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> >::const_iterator it = m_matchQueryToTrainKeyPoints.begin();
      it != m_matchQueryToTrainKeyPoints.end(); ++it) {
    int current_class_id = 0;
    if(mapOfImageIdIndex[m_mapOfImageId[it->second.class_id]] < medianIndex) {
      current_class_id = mapOfImageIdIndex[m_mapOfImageId[it->second.class_id]];

      current_class_id = mapOfImageIdIndex[m_mapOfImageId[it->second.class_id]] + 1;

    int indexI = current_class_id / nbWidth;
    int indexJ = current_class_id - (indexI * nbWidth);

    vpImagePoint end((int)maxH*indexI + it->second.pt.y, (int)maxW*indexJ + it->second.pt.x);
    vpImagePoint start((int)maxH*medianI + it->first.pt.y, (int)maxW*medianJ + it->first.pt.x);
                        double &elapsedTime) {

  extract(matImg, keyPoints, descriptors, elapsedTime);

                        double &elapsedTime) {

  for(std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
      it != m_extractors.end(); ++it) {

      it->second->compute(matImg, keyPoints, descriptors);

      it->second->compute(matImg, keyPoints, desc);

      if(descriptors.empty()) {
        desc.copyTo(descriptors);

        cv::hconcat(descriptors, desc, descriptors);

  if(keyPoints.size() != (size_t) descriptors.rows) {
    std::cerr << "keyPoints.size() != (size_t) descriptors.rows" << std::endl;
void vpKeyPoint::filterMatches() {
  m_matchQueryToTrainKeyPoints.clear();

  std::vector<cv::KeyPoint> queryKpts;
  std::vector<cv::Point3f> trainPts;
  std::vector<cv::DMatch> m;

    double max_dist = 0;

    double min_dist = DBL_MAX;

    std::vector<double> distance_vec(m_knnMatches.size());

    for(size_t i = 0; i < m_knnMatches.size(); i++) {
      double dist = m_knnMatches[i][0].distance;

      distance_vec[i] = dist;

      if (dist < min_dist) {

      if (dist > max_dist) {

    mean /= m_queryDescriptors.rows;

    double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
    double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
    double threshold = min_dist + stdev;

    for(size_t i = 0; i < m_knnMatches.size(); i++) {
      if(m_knnMatches[i].size() >= 2) {

        float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;

        double dist = m_knnMatches[i][0].distance;

          m.push_back(cv::DMatch((int) queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));

          if(!m_trainPoints.empty()) {
            trainPts.push_back(m_trainPoints[(size_t)m_knnMatches[i][0].trainIdx]);

          queryKpts.push_back(m_queryKeyPoints[(size_t)m_knnMatches[i][0].queryIdx]);
    double max_dist = 0;

    double min_dist = DBL_MAX;

    std::vector<double> distance_vec(m_matches.size());
    for(size_t i = 0; i < m_matches.size(); i++) {
      double dist = m_matches[i].distance;

      distance_vec[i] = dist;

      if (dist < min_dist) {

      if (dist > max_dist) {

    mean /= m_queryDescriptors.rows;

    double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
    double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);

    for (size_t i = 0; i < m_matches.size(); i++) {
      if(m_matches[i].distance <= threshold) {
        m.push_back(cv::DMatch((int) queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));

        if(!m_trainPoints.empty()) {
          trainPts.push_back(m_trainPoints[(size_t)m_matches[i].trainIdx]);

        queryKpts.push_back(m_queryKeyPoints[(size_t)m_matches[i].queryIdx]);
  std::vector<cv::DMatch> mTmp;
  std::vector<cv::Point3f> trainPtsTmp;
  std::vector<cv::KeyPoint> queryKptsTmp;

  std::map<int, int> mapOfTrainIdx;

  for(std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
    mapOfTrainIdx[it->trainIdx]++;

  for(std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
    if(mapOfTrainIdx[it->trainIdx] == 1) {
      mTmp.push_back(cv::DMatch((int) queryKptsTmp.size(), it->trainIdx, it->distance));

      if(!m_trainPoints.empty()) {
        trainPtsTmp.push_back(m_trainPoints[(size_t) it->trainIdx]);

      queryKptsTmp.push_back(queryKpts[(size_t) it->queryIdx]);

      m_matchQueryToTrainKeyPoints.push_back(std::pair<cv::KeyPoint, cv::KeyPoint>(
                                               queryKpts[(size_t) it->queryIdx],
                                               m_trainKeyPoints[(size_t) it->trainIdx]));

  m_filteredMatches = mTmp;
  m_objectFilteredPoints = trainPtsTmp;
  m_queryFilteredKeyPoints = queryKptsTmp;
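/*
  Summary of the two filtering strategies implemented above (with the default
  thresholds set in the constructors):
  - ratio test (knn case): a match is kept when
        d1 / d2 < m_matchingRatioThreshold   (0.85 by default),
    d1 and d2 being the distances to the two nearest train descriptors;
  - standard-deviation test: a match is kept when d <= threshold with
        threshold = min_dist + stdev
    computed over all match distances.
  Matches whose trainIdx occurs more than once are then discarded
  (mapOfTrainIdx[it->trainIdx] == 1) so the final query/train pairing is
  one-to-one.
*/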
  objectPoints = m_objectFilteredPoints;

  keyPoints = m_queryFilteredKeyPoints;

  keyPoints = m_trainKeyPoints;

  points = m_trainPoints;

  points = m_trainVpPoints;
void vpKeyPoint::init() {
#if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000) // Require 2.4.0 <= opencv < 3.0.0

  if (!cv::initModule_nonfree()) {
    std::cerr << "Cannot init module non free, SIFT or SURF cannot be used."

  initDetectors(m_detectorNames);
  initExtractors(m_extractorNames);
void vpKeyPoint::initDetector(const std::string &detectorName) {

#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
  m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);

  if(m_detectors[detectorName] == NULL) {
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the detector: " << detectorName
           << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
#else
  std::string detectorNameTmp = detectorName;
  std::string pyramid = "Pyramid";
  std::size_t pos = detectorName.find(pyramid);
  if(pos != std::string::npos) {
    detectorNameTmp = detectorName.substr(pos + pyramid.size());

  if(detectorNameTmp == "SIFT") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_detectors[detectorNameTmp] = cv::xfeatures2d::SIFT::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the detector: SIFT. OpenCV version "
           << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with xFeatures2d module.";

  } else if(detectorNameTmp == "SURF") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_detectors[detectorNameTmp] = cv::xfeatures2d::SURF::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the detector: SURF. OpenCV version "
           << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with xFeatures2d module.";

  } else if(detectorNameTmp == "FAST") {
    m_detectors[detectorNameTmp] = cv::FastFeatureDetector::create();
  } else if(detectorNameTmp == "MSER") {
    m_detectors[detectorNameTmp] = cv::MSER::create();
  } else if(detectorNameTmp == "ORB") {
    m_detectors[detectorNameTmp] = cv::ORB::create();
  } else if(detectorNameTmp == "BRISK") {
    m_detectors[detectorNameTmp] = cv::BRISK::create();
  } else if(detectorNameTmp == "KAZE") {
    m_detectors[detectorNameTmp] = cv::KAZE::create();
  } else if(detectorNameTmp == "AKAZE") {
    m_detectors[detectorNameTmp] = cv::AKAZE::create();
  } else if(detectorNameTmp == "GFTT") {
    m_detectors[detectorNameTmp] = cv::GFTTDetector::create();
  } else if(detectorNameTmp == "SimpleBlob") {
    m_detectors[detectorNameTmp] = cv::SimpleBlobDetector::create();
  } else if(detectorNameTmp == "STAR") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_detectors[detectorNameTmp] = cv::xfeatures2d::StarDetector::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the detector: STAR. OpenCV version "
           << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with xFeatures2d module.";

    std::cerr << "The detector: " << detectorNameTmp << " is not available." << std::endl;

  if(m_detectors[detectorNameTmp] == NULL) {
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the detector: " << detectorNameTmp
           << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
void vpKeyPoint::initDetectors(const std::vector<std::string> &detectorNames) {
  for(std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
void vpKeyPoint::initExtractor(const std::string &extractorName) {
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
  m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
#else
  if(extractorName == "SIFT") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the extractor: SIFT. OpenCV version "
           << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with xFeatures2d module.";

  } else if(extractorName == "SURF") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_extractors[extractorName] = cv::xfeatures2d::SURF::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the extractor: SURF. OpenCV version "
           << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with xFeatures2d module.";

  } else if(extractorName == "ORB") {
    m_extractors[extractorName] = cv::ORB::create();
  } else if(extractorName == "BRISK") {
    m_extractors[extractorName] = cv::BRISK::create();
  } else if(extractorName == "FREAK") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the extractor: FREAK. OpenCV version "
           << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with xFeatures2d module.";

  } else if(extractorName == "BRIEF") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the extractor: BRIEF. OpenCV version "
           << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with xFeatures2d module.";

    std::cerr << "The extractor: " << extractorName << " is not available." << std::endl;

  if(m_extractors[extractorName] == NULL) {
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the extractor: " << extractorName
           << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
void vpKeyPoint::initExtractors(const std::vector<std::string> &extractorNames) {
  for(std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
  m_matcher = cv::DescriptorMatcher::create(matcherName);

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400)
  if(m_matcher != NULL && !m_useKnn && matcherName == "BruteForce") {
    m_matcher->set("crossCheck", m_useBruteForceCrossCheck);

  if(m_matcher == NULL) {
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the matcher: " << matcherName
           << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
  IMatching.insert(IRef, topLeftCorner);

  IMatching.insert(ICurrent, topLeftCorner);

  int nbImg = (int) (m_mapOfImages.size() + 1);

  int nbImgSqrt = (int) std::floor(std::sqrt((double) nbImg) + 0.5);
  int nbWidth = nbImgSqrt;
  int nbHeight = nbImgSqrt;

  if(nbImgSqrt * nbImgSqrt < nbImg) {

  for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it) {
    if(maxW < it->second.getWidth()) {
      maxW = it->second.getWidth();

    if(maxH < it->second.getHeight()) {
      maxH = it->second.getHeight();

  int medianI = nbHeight / 2;
  int medianJ = nbWidth / 2;
  int medianIndex = medianI * nbWidth + medianJ;

  for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it, cpt++) {
    int local_cpt = cpt;
    if(cpt >= medianIndex) {

    int indexI = local_cpt / nbWidth;
    int indexJ = local_cpt - (indexI * nbWidth);
    vpImagePoint topLeftCorner((int)maxH*indexI, (int)maxW*indexJ);

    IMatching.insert(it->second, topLeftCorner);

  vpImagePoint topLeftCorner((int)maxH*medianI, (int)maxW*medianJ);
  IMatching.insert(ICurrent, topLeftCorner);
#ifdef VISP_HAVE_XML2

    m_detectorNames.clear();
    m_extractorNames.clear();
    m_detectors.clear();
    m_extractors.clear();

    std::cout << " *********** Parsing XML for configuration for vpKeyPoint ************ " << std::endl;
    xmlp.parse(configFile);
  int startClassId = 0;
  int startImageId = 0;

    m_trainKeyPoints.clear();
    m_trainPoints.clear();
    m_mapOfImageId.clear();
    m_mapOfImages.clear();

    for(std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
      if(startClassId < it->first) {
        startClassId = it->first;

    for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it) {
      if(startImageId < it->first) {
        startImageId = it->first;

  if(!parent.empty()) {

    std::ifstream file(filename.c_str(), std::ifstream::binary);
    if(!file.is_open()){

    file.read((char *)(&nbImgs), sizeof(nbImgs));

    for(int i = 0; i < nbImgs; i++) {

      file.read((char *)(&id), sizeof(id));

      file.read((char *)(&length), sizeof(length));

      char* path = new char[length + 1];

      for(int cpt = 0; cpt < length; cpt++) {

        file.read((char *)(&c), sizeof(c));

      path[length] = '\0';

      m_mapOfImages[id + startImageId] = I;

    int have3DInfoInt = 0;
    file.read((char *)(&have3DInfoInt), sizeof(have3DInfoInt));
    bool have3DInfo = have3DInfoInt != 0;

    file.read((char *)(&nRows), sizeof(nRows));

    file.read((char *)(&nCols), sizeof(nCols));

    int descriptorType = 5;
    file.read((char *)(&descriptorType), sizeof(descriptorType));

    cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
    for(int i = 0; i < nRows; i++) {

      float u, v, size, angle, response;
      int octave, class_id, image_id;
      file.read((char *)(&u), sizeof(u));
      file.read((char *)(&v), sizeof(v));
      file.read((char *)(&size), sizeof(size));
      file.read((char *)(&angle), sizeof(angle));
      file.read((char *)(&response), sizeof(response));
      file.read((char *)(&octave), sizeof(octave));
      file.read((char *)(&class_id), sizeof(class_id));
      file.read((char *)(&image_id), sizeof(image_id));
      cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
      m_trainKeyPoints.push_back(keyPoint);

      if(image_id != -1) {

        m_mapOfImageId[class_id] = image_id + startImageId;

        file.read((char *)(&oX), sizeof(oX));
        file.read((char *)(&oY), sizeof(oY));
        file.read((char *)(&oZ), sizeof(oZ));
        m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));

      for(int j = 0; j < nCols; j++) {

        switch(descriptorType) {

            unsigned char value;
            file.read((char *)(&value), sizeof(value));
            trainDescriptorsTmp.at<unsigned char>(i, j) = value;

            file.read((char *)(&value), sizeof(value));
            trainDescriptorsTmp.at<char>(i, j) = value;

            unsigned short int value;
            file.read((char *)(&value), sizeof(value));
            trainDescriptorsTmp.at<unsigned short int>(i, j) = value;

            file.read((char *)(&value), sizeof(value));
            trainDescriptorsTmp.at<short int>(i, j) = value;

            file.read((char *)(&value), sizeof(value));
            trainDescriptorsTmp.at<int>(i, j) = value;

            file.read((char *)(&value), sizeof(value));
            trainDescriptorsTmp.at<float>(i, j) = value;

            file.read((char *)(&value), sizeof(value));
            trainDescriptorsTmp.at<double>(i, j) = value;

            file.read((char *)(&value), sizeof(value));
            trainDescriptorsTmp.at<float>(i, j) = value;

    if(!append || m_trainDescriptors.empty()) {
      trainDescriptorsTmp.copyTo(m_trainDescriptors);

      cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
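/*
  Reference for the binary layout parsed above (one record per descriptor row):
    nbImgs, then per image: id, path length, path characters;
    have3DInfo flag; nRows, nCols, descriptorType;
    per row: u, v, size, angle, response (float), octave, class_id, image_id (int),
    then oX, oY, oZ when have3DInfo is set, followed by nCols descriptor values
    whose width is given by descriptorType (CV_8U ... CV_64F).
*/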
#ifdef VISP_HAVE_XML2
  xmlDocPtr doc = NULL;
  xmlNodePtr root_element = NULL;

  doc = xmlReadFile(filename.c_str(), NULL, 0);

  root_element = xmlDocGetRootElement(doc);

  xmlNodePtr first_level_node = NULL;

  int descriptorType = CV_32F;
  int nRows = 0, nCols = 0;

  cv::Mat trainDescriptorsTmp;

  for (first_level_node = root_element->children; first_level_node;
       first_level_node = first_level_node->next) {

    std::string name((char *) first_level_node->name);
    if (first_level_node->type == XML_ELEMENT_NODE && name == "TrainingImageInfo") {
      xmlNodePtr image_info_node = NULL;

      for (image_info_node = first_level_node->children; image_info_node; image_info_node =
           image_info_node->next) {
        name = std::string ((char *) image_info_node->name);

        if(name == "trainImg") {

          int id = std::atoi((char *) xmlGetProp(image_info_node, BAD_CAST "image_id"));

          std::string path((char *) image_info_node->children->content);

          m_mapOfImages[id + startImageId] = I;

    } else if(first_level_node->type == XML_ELEMENT_NODE && name == "DescriptorsInfo") {
      xmlNodePtr descriptors_info_node = NULL;
      for (descriptors_info_node = first_level_node->children; descriptors_info_node; descriptors_info_node =
           descriptors_info_node->next) {
        if (descriptors_info_node->type == XML_ELEMENT_NODE) {
          name = std::string ((char *) descriptors_info_node->name);

          if(name == "nrows") {
            nRows = std::atoi((char *) descriptors_info_node->children->content);
          } else if(name == "ncols") {
            nCols = std::atoi((char *) descriptors_info_node->children->content);
          } else if(name == "type") {
            descriptorType = std::atoi((char *) descriptors_info_node->children->content);

      trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
    } else if (first_level_node->type == XML_ELEMENT_NODE && name == "DescriptorInfo") {
      xmlNodePtr point_node = NULL;
      double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
      int octave = 0, class_id = 0, image_id = 0;
      double oX = 0.0, oY = 0.0, oZ = 0.0;

      std::stringstream ss;

      for (point_node = first_level_node->children; point_node; point_node =

        if (point_node->type == XML_ELEMENT_NODE) {
          name = std::string ((char *) point_node->name);

            u = std::strtod((char *) point_node->children->content, &pEnd);
          } else if(name == "v") {
            v = std::strtod((char *) point_node->children->content, &pEnd);
          } else if(name == "size") {
            size = std::strtod((char *) point_node->children->content, &pEnd);
          } else if(name == "angle") {
            angle = std::strtod((char *) point_node->children->content, &pEnd);
          } else if(name == "response") {
            response = std::strtod((char *) point_node->children->content, &pEnd);
          } else if(name == "octave") {
            octave = std::atoi((char *) point_node->children->content);
          } else if(name == "class_id") {
            class_id = std::atoi((char *) point_node->children->content);
            cv::KeyPoint keyPoint(cv::Point2f((float) u, (float) v), (float) size,
                                  (float) angle, (float) response, octave, (class_id + startClassId));
            m_trainKeyPoints.push_back(keyPoint);
          } else if(name == "image_id") {
            image_id = std::atoi((char *) point_node->children->content);
            if(image_id != -1) {

              m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;

          } else if (name == "oX") {
            oX = std::atof((char *) point_node->children->content);
          } else if (name == "oY") {
            oY = std::atof((char *) point_node->children->content);
          } else if (name == "oZ") {
            oZ = std::atof((char *) point_node->children->content);
            m_trainPoints.push_back(cv::Point3f((float) oX, (float) oY, (float) oZ));
          } else if (name == "desc") {
            xmlNodePtr descriptor_value_node = NULL;

            for (descriptor_value_node = point_node->children;
                 descriptor_value_node; descriptor_value_node =
                 descriptor_value_node->next) {

              if (descriptor_value_node->type == XML_ELEMENT_NODE) {

                std::string parseStr((char *) descriptor_value_node->children->content);

                  switch(descriptorType) {

                      trainDescriptorsTmp.at<unsigned char>(i, j) = (unsigned char) parseValue;

                      trainDescriptorsTmp.at<char>(i, j) = (char) parseValue;

                      ss >> trainDescriptorsTmp.at<unsigned short int>(i, j);

                      ss >> trainDescriptorsTmp.at<short int>(i, j);

                      ss >> trainDescriptorsTmp.at<int>(i, j);

                      ss >> trainDescriptorsTmp.at<float>(i, j);

                      ss >> trainDescriptorsTmp.at<double>(i, j);

                      ss >> trainDescriptorsTmp.at<float>(i, j);

                    std::cerr << "Error when converting: " << ss.str() << std::endl;

  if(!append || m_trainDescriptors.empty()) {
    trainDescriptorsTmp.copyTo(m_trainDescriptors);

    cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);

#else
  std::cout << "Error: libxml2 is required!" << std::endl;
                      std::vector<cv::DMatch> &matches, double &elapsedTime) {

    m_knnMatches.clear();
    m_matcher->knnMatch(queryDescriptors, trainDescriptors, m_knnMatches, 2);
    matches.resize(m_knnMatches.size());
    std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);

    m_matcher->match(queryDescriptors, trainDescriptors, matches);
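/*
  Usage sketch (illustrative): the filter type selects between knnMatch (two
  nearest neighbours, later reduced by the ratio test in filterMatches()) and
  a plain one-to-one match. The descriptor matrices are assumed to come from
  a prior buildReference() and extract() call.

  \code
  std::vector<cv::DMatch> matches;
  double matchingTime;
  keypoint.match(trainDescriptors, queryDescriptors, matches, matchingTime);
  \endcode
*/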
                                    const unsigned int height, const unsigned int width) {

                                    const vpRect& rectangle) {
  if(m_trainDescriptors.empty()) {
    std::cerr << "Reference is empty." << std::endl;

    std::cerr << "Reference is not computed." << std::endl;

    std::cerr << "Matching is not possible." << std::endl;

  if(m_useAffineDetection) {
    std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
    std::vector<cv::Mat> listOfQueryDescriptors;

    m_queryKeyPoints.clear();
    for(std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
        it != listOfQueryKeyPoints.end(); ++it) {
      m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());

    for(std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end(); ++it) {

      it->copyTo(m_queryDescriptors);

      m_queryDescriptors.push_back(*it);

    detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
    extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);

  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);

    m_queryFilteredKeyPoints.clear();
    m_objectFilteredPoints.clear();
    m_filteredMatches.clear();

    m_queryFilteredKeyPoints = m_queryKeyPoints;
    m_objectFilteredPoints = m_trainPoints;
    m_filteredMatches = m_matches;

    m_matchQueryToTrainKeyPoints.clear();
    for (size_t i = 0; i < m_matches.size(); i++) {
      m_matchQueryToTrainKeyPoints.push_back(std::pair<cv::KeyPoint, cv::KeyPoint>(
                                               m_queryKeyPoints[(size_t) m_matches[i].queryIdx],
                                               m_trainKeyPoints[(size_t) m_matches[i].trainIdx]));

  return static_cast<unsigned int>(m_filteredMatches.size());
  if(m_trainDescriptors.empty()) {
    std::cerr << "Reference is empty." << std::endl;

    std::cerr << "Reference is not computed." << std::endl;

    std::cerr << "Matching is not possible." << std::endl;

  if(m_useAffineDetection) {
    std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
    std::vector<cv::Mat> listOfQueryDescriptors;

    m_queryKeyPoints.clear();
    for(std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
        it != listOfQueryKeyPoints.end(); ++it) {
      m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());

    for(std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end(); ++it) {

      it->copyTo(m_queryDescriptors);

      m_queryDescriptors.push_back(*it);

    detect(I, m_queryKeyPoints, m_detectionTime);
    extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);

  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);

  elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;

    m_queryFilteredKeyPoints.clear();
    m_objectFilteredPoints.clear();
    m_filteredMatches.clear();

    m_queryFilteredKeyPoints = m_queryKeyPoints;
    m_objectFilteredPoints = m_trainPoints;
    m_filteredMatches = m_matches;

    m_matchQueryToTrainKeyPoints.clear();
    for (size_t i = 0; i < m_matches.size(); i++) {
      m_matchQueryToTrainKeyPoints.push_back(std::pair<cv::KeyPoint, cv::KeyPoint>(
                                               m_queryKeyPoints[(size_t) m_matches[i].queryIdx],
                                               m_trainKeyPoints[(size_t) m_matches[i].trainIdx]));
  m_ransacInliers.clear();
  m_ransacOutliers.clear();

  if(m_useRansacVVS) {
    std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());

    for(std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin(); it != m_objectFilteredPoints.end();

      vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);

      double x = 0.0, y = 0.0;

      objectVpPoints[cpt] = pt;

    std::vector<vpPoint> inliers;
    bool res = computePose(objectVpPoints, cMo, inliers, m_poseTime, func);
    m_ransacInliers.resize(inliers.size());
    for(size_t i = 0; i < m_ransacInliers.size(); i++) {

    elapsedTime += m_poseTime;

    std::vector<cv::Point2f> imageFilteredPoints;
    cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
    std::vector<int> inlierIndex;
    bool res = computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);

    std::map<int, bool> mapOfInlierIndex;
    m_matchRansacKeyPointsToPoints.clear();
    m_matchRansacQueryToTrainKeyPoints.clear();
    for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
      m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(m_queryFilteredKeyPoints[(size_t)(*it)],
                                               m_objectFilteredPoints[(size_t)(*it)]));
      m_matchRansacQueryToTrainKeyPoints.push_back(std::pair<cv::KeyPoint, cv::KeyPoint>(m_queryFilteredKeyPoints[(size_t)(*it)],
                                               m_trainKeyPoints[(size_t)m_matches[(size_t)(*it)].trainIdx]));
      mapOfInlierIndex[*it] = true;

    for(size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
      if(mapOfInlierIndex.find((int) i) == mapOfInlierIndex.end()) {
        m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));

    error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);

    m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
    std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(), m_ransacInliers.begin(),
                   matchRansacToVpImage);

    elapsedTime += m_poseTime;
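/*
  End-to-end usage sketch (illustrative, not part of the original source):
  detection, matching and RANSAC pose estimation on a query image, assuming
  the reference was built with 3D information and `cam` holds calibrated
  camera parameters. The trailing func callback is left to its NULL default.

  \code
  vpCameraParameters cam;
  vpHomogeneousMatrix cMo;
  double error, elapsedTime;
  if(keypoint.matchPoint(I, cam, cMo, error, elapsedTime)) {
    std::cout << "cMo:\n" << cMo << "\nmean reprojection error: " << error << std::endl;
  }
  \endcode
*/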
                                    const bool isPlanarObject, std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
                                    double *meanDescriptorDistance, double *detectionScore) {
  if(imPts1 != NULL && imPts2 != NULL) {

  double meanDescriptorDistanceTmp = 0.0;
  for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    meanDescriptorDistanceTmp += (double) it->distance;

  meanDescriptorDistanceTmp /= (double) m_filteredMatches.size();
  double score = (double) m_filteredMatches.size() / meanDescriptorDistanceTmp;

  if(meanDescriptorDistance != NULL) {
    *meanDescriptorDistance = meanDescriptorDistanceTmp;

  if(detectionScore != NULL) {
    *detectionScore = score;

  if(m_filteredMatches.size() >= 4) {

    std::vector<cv::Point2f> points1(m_filteredMatches.size());

    std::vector<cv::Point2f> points2(m_filteredMatches.size());

    for(size_t i = 0; i < m_filteredMatches.size(); i++) {
      points1[i] = cv::Point2f(m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx].pt);
      points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx].pt);

    std::vector<vpImagePoint> inliers;
    if(isPlanarObject) {
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
      cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
#else
      cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
#endif

      for(size_t i = 0; i < m_filteredMatches.size(); i++ ) {

        cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
        realPoint.at<double>(0,0) = points1[i].x;
        realPoint.at<double>(1,0) = points1[i].y;
        realPoint.at<double>(2,0) = 1.0;

        cv::Mat reprojectedPoint = homographyMatrix * realPoint;
        double err_x = (reprojectedPoint.at<double>(0,0) / reprojectedPoint.at<double>(2,0)) - points2[i].x;
        double err_y = (reprojectedPoint.at<double>(1,0) / reprojectedPoint.at<double>(2,0)) - points2[i].y;
        double reprojectionError = std::sqrt(err_x*err_x + err_y*err_y);

        if(reprojectionError < 6.0) {
          inliers.push_back(vpImagePoint((double) points2[i].y, (double) points2[i].x));

          if(imPts1 != NULL) {
            imPts1->push_back(vpImagePoint((double) points1[i].y, (double) points1[i].x));

          if(imPts2 != NULL) {
            imPts2->push_back(vpImagePoint((double) points2[i].y, (double) points2[i].x));

    } else if(m_filteredMatches.size() >= 8) {
      cv::Mat fundamentalInliers;
      cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);

      for(size_t i = 0; i < (size_t) fundamentalInliers.rows; i++) {
        if(fundamentalInliers.at<uchar>((int) i, 0)) {
          inliers.push_back(vpImagePoint((double) points2[i].y, (double) points2[i].x));

          if(imPts1 != NULL) {
            imPts1->push_back(vpImagePoint((double) points1[i].y, (double) points1[i].x));

          if(imPts2 != NULL) {
            imPts2->push_back(vpImagePoint((double) points2[i].y, (double) points2[i].x));

    if(!inliers.empty()) {

      double meanU = 0.0, meanV = 0.0;
      for(std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
        meanU += it->get_u();
        meanV += it->get_v();

      meanU /= (double) inliers.size();
      meanV /= (double) inliers.size();

      centerOfGravity.set_u(meanU);
      centerOfGravity.set_v(meanV);

  return meanDescriptorDistanceTmp < m_detectionThreshold;

  return score > m_detectionScore;
                                    double &error, double &elapsedTime, vpRect &boundingBox, vpImagePoint &centerOfGravity,

  bool isMatchOk = matchPoint(I, cam, cMo, error, elapsedTime, func);

    std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());

    for(std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {

      modelImagePoints[cpt] = imPt;

    double meanU = 0.0, meanV = 0.0;
    for(std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end();

      meanU += it->get_u();
      meanV += it->get_v();

    meanU /= (double) m_ransacInliers.size();
    meanV /= (double) m_ransacInliers.size();

    centerOfGravity.set_u(meanU);
    centerOfGravity.set_v(meanV);
  listOfKeypoints.clear();
  listOfDescriptors.clear();

  for (int tl = 1; tl < 6; tl++) {
    double t = pow(2, 0.5 * tl);
    for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
      std::vector<cv::KeyPoint> keypoints;
      cv::Mat descriptors;

      cv::Mat timg, mask, Ai;

      affineSkew(t, phi, timg, mask, Ai);

      if(listOfAffineI != NULL) {

        bitwise_and(mask, timg, img_disp);

        listOfAffineI->push_back(tI);

      cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE );
      cv::imshow( "Skew", img_disp );

      for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin(); it != m_detectors.end(); ++it) {
        std::vector<cv::KeyPoint> kp;
        it->second->detect(timg, kp, mask);
        keypoints.insert(keypoints.end(), kp.begin(), kp.end());

      extract(timg, keypoints, descriptors, elapsedTime);

      for(unsigned int i = 0; i < keypoints.size(); i++) {
        cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
        cv::Mat kpt_t = Ai * cv::Mat(kpt);
        keypoints[i].pt.x = kpt_t.at<float>(0, 0);
        keypoints[i].pt.y = kpt_t.at<float>(1, 0);

      listOfKeypoints.push_back(keypoints);
      listOfDescriptors.push_back(descriptors);
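/*
  Note (sketch): the loops above implement the standard ASIFT sampling of the
  affine half-space: tilts t = 2^(tl/2) for tl = 1..5 and, for each tilt,
  rotations phi in [0, 180) degrees with step 72/t. Keypoints detected in each
  simulated view are mapped back to the original frame through the inverse
  affine transform Ai returned by affineSkew().
*/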
  if(!parent.empty()) {

  std::map<int, std::string> mapOfImgPath;
  if(saveTrainingImages) {

    for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it, cpt++) {

      sprintf(buffer, "%03d", cpt);
      std::stringstream ss;
      ss << "train_image_" << buffer << ".jpg";
      std::string filename_ = ss.str();
      mapOfImgPath[it->first] = filename_;
      vpImageIo::write(it->second, parent + (!parent.empty() ? "/" : "") + filename_);

  bool have3DInfo = m_trainPoints.size() > 0;
  if(have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {

    std::ofstream file(filename.c_str(), std::ofstream::binary);
    if(!file.is_open()) {

    int nbImgs = (int) mapOfImgPath.size();
    file.write((char *)(&nbImgs), sizeof(nbImgs));

    for(std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {

      file.write((char *)(&id), sizeof(id));

      std::string path = it->second;
      int length = (int) path.length();
      file.write((char *)(&length), sizeof(length));

      for(int cpt = 0; cpt < length; cpt++) {
        file.write((char *) (&path[(size_t)cpt]), sizeof(path[(size_t)cpt]));

    int have3DInfoInt = have3DInfo ? 1 : 0;
    file.write((char *)(&have3DInfoInt), sizeof(have3DInfoInt));

    int nRows = m_trainDescriptors.rows,
        nCols = m_trainDescriptors.cols;
    int descriptorType = m_trainDescriptors.type();

    file.write((char *)(&nRows), sizeof(nRows));

    file.write((char *)(&nCols), sizeof(nCols));

    file.write((char *)(&descriptorType), sizeof(descriptorType));

    for (int i = 0; i < nRows; i++) {
      unsigned int i_ = (unsigned int) i;

      float u = m_trainKeyPoints[i_].pt.x;
      file.write((char *)(&u), sizeof(u));

      float v = m_trainKeyPoints[i_].pt.y;
      file.write((char *)(&v), sizeof(v));

      float size = m_trainKeyPoints[i_].size;
      file.write((char *)(&size), sizeof(size));

      float angle = m_trainKeyPoints[i_].angle;
      file.write((char *)(&angle), sizeof(angle));

      float response = m_trainKeyPoints[i_].response;
      file.write((char *)(&response), sizeof(response));

      int octave = m_trainKeyPoints[i_].octave;
      file.write((char *)(&octave), sizeof(octave));

      int class_id = m_trainKeyPoints[i_].class_id;
      file.write((char *)(&class_id), sizeof(class_id));

      int image_id = (saveTrainingImages && m_mapOfImageId.size() > 0) ? m_mapOfImageId[m_trainKeyPoints[i_].class_id] : -1;
      file.write((char *)(&image_id), sizeof(image_id));

        float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;

        file.write((char *)(&oX), sizeof(oX));

        file.write((char *)(&oY), sizeof(oY));

        file.write((char *)(&oZ), sizeof(oZ));

      for (int j = 0; j < nCols; j++) {

        switch(descriptorType) {

          file.write((char *)(&m_trainDescriptors.at<unsigned char>(i, j)), sizeof(m_trainDescriptors.at<unsigned char>(i, j)));

          file.write((char *)(&m_trainDescriptors.at<char>(i, j)), sizeof(m_trainDescriptors.at<char>(i, j)));

          file.write((char *)(&m_trainDescriptors.at<unsigned short int>(i, j)), sizeof(m_trainDescriptors.at<unsigned short int>(i, j)));

          file.write((char *)(&m_trainDescriptors.at<short int>(i, j)), sizeof(m_trainDescriptors.at<short int>(i, j)));

          file.write((char *)(&m_trainDescriptors.at<int>(i, j)), sizeof(m_trainDescriptors.at<int>(i, j)));

          file.write((char *)(&m_trainDescriptors.at<float>(i, j)), sizeof(m_trainDescriptors.at<float>(i, j)));

          file.write((char *)(&m_trainDescriptors.at<double>(i, j)), sizeof(m_trainDescriptors.at<double>(i, j)));

          file.write((char *)(&m_trainDescriptors.at<float>(i, j)), sizeof(m_trainDescriptors.at<float>(i, j)));
#ifdef VISP_HAVE_XML2
  xmlDocPtr doc = NULL;
  xmlNodePtr root_node = NULL, image_node = NULL, image_info_node = NULL, descriptors_info_node = NULL,
             descriptor_node = NULL, desc_node = NULL;

  doc = xmlNewDoc(BAD_CAST "1.0");

  root_node = xmlNewNode(NULL, BAD_CAST "LearningData");
  xmlDocSetRootElement(doc, root_node);

  std::stringstream ss;

  image_node = xmlNewChild(root_node, NULL, BAD_CAST "TrainingImageInfo", NULL);

  for(std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
    image_info_node = xmlNewChild(image_node, NULL, BAD_CAST "trainImg",
                                  BAD_CAST it->second.c_str());

    xmlNewProp(image_info_node, BAD_CAST "image_id", BAD_CAST ss.str().c_str());

  descriptors_info_node = xmlNewChild(root_node, NULL, BAD_CAST "DescriptorsInfo", NULL);

  int nRows = m_trainDescriptors.rows,
      nCols = m_trainDescriptors.cols;
  int descriptorType = m_trainDescriptors.type();

  xmlNewChild(descriptors_info_node, NULL, BAD_CAST "nrows", BAD_CAST ss.str().c_str());

  xmlNewChild(descriptors_info_node, NULL, BAD_CAST "ncols", BAD_CAST ss.str().c_str());

  ss << descriptorType;
  xmlNewChild(descriptors_info_node, NULL, BAD_CAST "type", BAD_CAST ss.str().c_str());

  for (int i = 0; i < nRows; i++) {
    unsigned int i_ = (unsigned int) i;
    descriptor_node = xmlNewChild(root_node, NULL, BAD_CAST "DescriptorInfo",

    ss << m_trainKeyPoints[i_].pt.x;
    xmlNewChild(descriptor_node, NULL, BAD_CAST "u",
                BAD_CAST ss.str().c_str());

    ss << m_trainKeyPoints[i_].pt.y;
    xmlNewChild(descriptor_node, NULL, BAD_CAST "v",
                BAD_CAST ss.str().c_str());

    ss << m_trainKeyPoints[i_].size;
    xmlNewChild(descriptor_node, NULL, BAD_CAST "size",
                BAD_CAST ss.str().c_str());

    ss << m_trainKeyPoints[i_].angle;
    xmlNewChild(descriptor_node, NULL, BAD_CAST "angle",
                BAD_CAST ss.str().c_str());

    ss << m_trainKeyPoints[i_].response;
    xmlNewChild(descriptor_node, NULL, BAD_CAST "response",
                BAD_CAST ss.str().c_str());

    ss << m_trainKeyPoints[i_].octave;
    xmlNewChild(descriptor_node, NULL, BAD_CAST "octave",
                BAD_CAST ss.str().c_str());

    ss << m_trainKeyPoints[i_].class_id;
    xmlNewChild(descriptor_node, NULL, BAD_CAST "class_id",
                BAD_CAST ss.str().c_str());

    ss << ((saveTrainingImages && m_mapOfImageId.size() > 0) ? m_mapOfImageId[m_trainKeyPoints[i_].class_id] : -1);
    xmlNewChild(descriptor_node, NULL, BAD_CAST "image_id",
                BAD_CAST ss.str().c_str());

      ss << m_trainPoints[i_].x;
      xmlNewChild(descriptor_node, NULL, BAD_CAST "oX",
                  BAD_CAST ss.str().c_str());

      ss << m_trainPoints[i_].y;
      xmlNewChild(descriptor_node, NULL, BAD_CAST "oY",
                  BAD_CAST ss.str().c_str());

      ss << m_trainPoints[i_].z;
      xmlNewChild(descriptor_node, NULL, BAD_CAST "oZ",
                  BAD_CAST ss.str().c_str());

    desc_node = xmlNewChild(descriptor_node, NULL, BAD_CAST "desc", NULL);

    for (int j = 0; j < nCols; j++) {

      switch(descriptorType) {

          int val_tmp = m_trainDescriptors.at<unsigned char>(i, j);

          int val_tmp = m_trainDescriptors.at<char>(i, j);

          ss << m_trainDescriptors.at<unsigned short int>(i, j);

          ss << m_trainDescriptors.at<short int>(i, j);

          ss << m_trainDescriptors.at<int>(i, j);

          ss << m_trainDescriptors.at<float>(i, j);

          ss << m_trainDescriptors.at<double>(i, j);

          ss << m_trainDescriptors.at<float>(i, j);

      xmlNewChild(desc_node, NULL, BAD_CAST "val",
                  BAD_CAST ss.str().c_str());

  xmlSaveFormatFileEnc(filename.c_str(), doc, "UTF-8", 1);

#else
  std::cerr << "Error: libxml2 is required!" << std::endl;