42 #include <visp3/vision/vpKeyPoint.h>
43 #include <visp3/core/vpIoTools.h>
45 #if (VISP_HAVE_OPENCV_VERSION >= 0x020101)
47 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
48 # include <opencv2/calib3d/calib3d.hpp>
// Detect the endianness of the host machine at compile time.
// Reference: http://www.boost.org/doc/libs/1_36_0/boost/detail/endian.hpp
// Exactly one of VISP_LITTLE_ENDIAN / VISP_BIG_ENDIAN / VISP_PDP_ENDIAN is defined.
#if defined (__GLIBC__)
#  include <endian.h>
#  if (__BYTE_ORDER == __LITTLE_ENDIAN)
#    define VISP_LITTLE_ENDIAN
#  elif (__BYTE_ORDER == __BIG_ENDIAN)
#    define VISP_BIG_ENDIAN
#  elif (__BYTE_ORDER == __PDP_ENDIAN)
     // Middle-endian (PDP-11 style) byte order: 3412.
#    define VISP_PDP_ENDIAN
#  else
#    error Unknown machine endianness detected.
#  endif
#elif defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN) || defined(__BIG_ENDIAN__) && !defined(__LITTLE_ENDIAN__)
#  define VISP_BIG_ENDIAN
#elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN) || defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
#  define VISP_LITTLE_ENDIAN
#elif defined(__sparc) || defined(__sparc__) \
   || defined(_POWER) || defined(__powerpc__) \
   || defined(__ppc__) || defined(__hpux) \
   || defined(_MIPSEB) || defined(_POWER) \
   || defined(__s390__)
#  define VISP_BIG_ENDIAN
#elif defined(__i386__) || defined(__alpha__) \
   || defined(__ia64) || defined(__ia64__) \
   || defined(_M_IX86) || defined(_M_IA64) \
   || defined(_M_ALPHA) || defined(__amd64) \
   || defined(__amd64__) || defined(_M_AMD64) \
   || defined(__x86_64) || defined(__x86_64__) \
   || defined(_M_X64)
#  define VISP_LITTLE_ENDIAN
#else
#  error Cannot detect host machine endianness.
#endif
98 inline cv::DMatch knnToDMatch(
const std::vector<cv::DMatch> &knnMatches) {
99 if(knnMatches.size() > 0) {
100 return knnMatches[0];
112 inline vpImagePoint matchRansacToVpImage(
const std::pair<cv::KeyPoint, cv::Point3f> &pair) {
126 #ifdef VISP_BIG_ENDIAN
/*!
  Swap the byte order of a 16-bit value (endianness conversion).

  \param val : Value whose bytes must be swapped.
  \return The byte-swapped value.
*/
uint16_t swap16bits(const uint16_t val) {
  return ( ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00) );
}
/*!
  Swap the byte order of a 32-bit value (endianness conversion).

  \param val : Value whose bytes must be swapped.
  \return The byte-swapped value.
*/
uint32_t swap32bits(const uint32_t val) {
  return ( ((val >> 24) & 0x000000FF) | ((val >> 8) & 0x0000FF00) |
           ((val << 8) & 0x00FF0000) | ((val << 24) & 0xFF000000) );
}
/*!
  Swap the byte order of a float (endianness conversion) by reversing
  its 4 raw bytes through a union.

  \param f : Value whose bytes must be swapped.
  \return The byte-swapped value.
*/
float swapFloat(const float f) {
  union {
    float f;
    unsigned char b[4];
  } dat1, dat2;

  dat1.f = f;
  dat2.b[0] = dat1.b[3];
  dat2.b[1] = dat1.b[2];
  dat2.b[2] = dat1.b[1];
  dat2.b[3] = dat1.b[0];

  return dat2.f;
}
/*!
  Swap the byte order of a double (endianness conversion) by reversing
  its 8 raw bytes through a union.

  \param d : Value whose bytes must be swapped.
  \return The byte-swapped value.
*/
double swapDouble(const double d) {
  union {
    double d;
    unsigned char b[8];
  } dat1, dat2;

  dat1.d = d;
  dat2.b[0] = dat1.b[7];
  dat2.b[1] = dat1.b[6];
  dat2.b[2] = dat1.b[5];
  dat2.b[3] = dat1.b[4];
  dat2.b[4] = dat1.b[3];
  dat2.b[5] = dat1.b[2];
  dat2.b[6] = dat1.b[1];
  dat2.b[7] = dat1.b[0];

  return dat2.d;
}
/*!
  Read an unsigned short stored in little endian from a binary stream,
  swapping the bytes when the host machine is big endian.

  \param file : Input binary stream.
  \param ushort_value : Output value read from the stream.
*/
void readBinaryUShortLE(std::ifstream &file, unsigned short &ushort_value) {
  // Read data stored in little endian.
  file.read((char *)(&ushort_value), sizeof(ushort_value));

#ifdef VISP_BIG_ENDIAN
  // Convert to the big-endian host byte order.
  ushort_value = swap16bits(ushort_value);
#endif
}
/*!
  Read a (signed) short stored in little endian from a binary stream,
  swapping the bytes when the host machine is big endian.

  \param file : Input binary stream.
  \param short_value : Output value read from the stream.
*/
void readBinaryShortLE(std::ifstream &file, short &short_value) {
  // Read data stored in little endian.
  file.read((char *)(&short_value), sizeof(short_value));

#ifdef VISP_BIG_ENDIAN
  // Convert to the big-endian host byte order.
  short_value = (short) swap16bits((uint16_t) short_value);
#endif
}
/*!
  Read an int stored in little endian from a binary stream, swapping the
  bytes when the host machine is big endian.

  \param file : Input binary stream.
  \param int_value : Output value read from the stream.
*/
void readBinaryIntLE(std::ifstream &file, int &int_value) {
  // Read data stored in little endian.
  file.read((char *)(&int_value), sizeof(int_value));

#ifdef VISP_BIG_ENDIAN
  // Convert to the big-endian host byte order; handle the (exotic)
  // case where int is only 16 bits wide.
  if(sizeof(int_value) == 4) {
    int_value = (int) swap32bits((uint32_t) int_value);
  } else {
    int_value = swap16bits((uint16_t) int_value);
  }
#endif
}
/*!
  Read a float stored in little endian from a binary stream, swapping the
  bytes when the host machine is big endian.

  \param file : Input binary stream.
  \param float_value : Output value read from the stream.
*/
void readBinaryFloatLE(std::ifstream &file, float &float_value) {
  // Read data stored in little endian.
  file.read((char *)(&float_value), sizeof(float_value));

#ifdef VISP_BIG_ENDIAN
  // Convert to the big-endian host byte order.
  float_value = swapFloat(float_value);
#endif
}
/*!
  Read a double stored in little endian from a binary stream, swapping the
  bytes when the host machine is big endian.

  \param file : Input binary stream.
  \param double_value : Output value read from the stream.
*/
void readBinaryDoubleLE(std::ifstream &file, double &double_value) {
  // Read data stored in little endian.
  file.read((char *)(&double_value), sizeof(double_value));

#ifdef VISP_BIG_ENDIAN
  // Convert to the big-endian host byte order.
  double_value = swapDouble(double_value);
#endif
}
/*!
  Write an unsigned short to a binary stream in little endian, swapping the
  bytes first when the host machine is big endian.

  \param file : Output binary stream.
  \param ushort_value : Value to write.
*/
void writeBinaryUShortLE(std::ofstream &file, const unsigned short ushort_value) {
#ifdef VISP_BIG_ENDIAN
  // Swap to little endian before writing.
  uint16_t swap_ushort = swap16bits(ushort_value);
  file.write((char *)(&swap_ushort), sizeof(swap_ushort));
#else
  file.write((char *)(&ushort_value), sizeof(ushort_value));
#endif
}
/*!
  Write a (signed) short to a binary stream in little endian, swapping the
  bytes first when the host machine is big endian.

  \param file : Output binary stream.
  \param short_value : Value to write.
*/
void writeBinaryShortLE(std::ofstream &file, const short short_value) {
#ifdef VISP_BIG_ENDIAN
  // Swap to little endian before writing.
  uint16_t swap_short = swap16bits((uint16_t) short_value);
  file.write((char *)(&swap_short), sizeof(swap_short));
#else
  file.write((char *)(&short_value), sizeof(short_value));
#endif
}
/*!
  Write an int to a binary stream in little endian, swapping the bytes
  first when the host machine is big endian.

  \param file : Output binary stream.
  \param int_value : Value to write.
*/
void writeBinaryIntLE(std::ofstream &file, const int int_value) {
#ifdef VISP_BIG_ENDIAN
  // Swap to little endian before writing; handle the (exotic) case
  // where int is only 16 bits wide.
  if(sizeof(int_value) == 4) {
    uint32_t swap_int = swap32bits((uint32_t) int_value);
    file.write((char *)(&swap_int), sizeof(swap_int));
  } else {
    uint16_t swap_int = swap16bits((uint16_t) int_value);
    file.write((char *)(&swap_int), sizeof(swap_int));
  }
#else
  file.write((char *)(&int_value), sizeof(int_value));
#endif
}
/*!
  Write a float to a binary stream in little endian, swapping the bytes
  first when the host machine is big endian.

  \param file : Output binary stream.
  \param float_value : Value to write.
*/
void writeBinaryFloatLE(std::ofstream &file, const float float_value) {
#ifdef VISP_BIG_ENDIAN
  // Swap to little endian before writing.
  float swap_float = swapFloat(float_value);
  file.write((char *)(&swap_float), sizeof(swap_float));
#else
  file.write((char *)(&float_value), sizeof(float_value));
#endif
}
/*!
  Write a double to a binary stream in little endian, swapping the bytes
  first when the host machine is big endian.

  \param file : Output binary stream.
  \param double_value : Value to write.
*/
void writeBinaryDoubleLE(std::ofstream &file, const double double_value) {
#ifdef VISP_BIG_ENDIAN
  // Swap to little endian before writing.
  double swap_double = swapDouble(double_value);
  file.write((char *)(&swap_double), sizeof(swap_double));
#else
  file.write((char *)(&double_value), sizeof(double_value));
#endif
}
336 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
337 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(),
338 m_detectors(), m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
339 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
340 m_matcher(), m_matcherName(matcherName),
341 m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85), m_matchingTime(0.),
342 m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100), m_objectFilteredPoints(),
343 m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
344 m_ransacConsensusPercentage(20.0), m_ransacInliers(), m_ransacOutliers(), m_ransacReprojectionError(6.0),
345 m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(),
346 m_trainVpPoints(), m_useAffineDetection(false),
347 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
348 m_useBruteForceCrossCheck(true),
350 m_useConsensusPercentage(false),
351 m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true), m_useSingleMatchFilter(true)
355 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
356 m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);
371 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
372 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(),
373 m_detectors(), m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
374 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
375 m_matcher(), m_matcherName(matcherName),
376 m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85), m_matchingTime(0.),
377 m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100), m_objectFilteredPoints(),
378 m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
379 m_ransacConsensusPercentage(20.0), m_ransacInliers(), m_ransacOutliers(), m_ransacReprojectionError(6.0),
380 m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(),
381 m_trainVpPoints(), m_useAffineDetection(false),
382 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
383 m_useBruteForceCrossCheck(true),
385 m_useConsensusPercentage(false),
386 m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true), m_useSingleMatchFilter(true)
390 m_detectorNames.push_back(detectorName);
391 m_extractorNames.push_back(extractorName);
406 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
407 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
408 m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
409 m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
411 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85), m_matchingTime(0.),
412 m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100), m_objectFilteredPoints(),
413 m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
414 m_ransacConsensusPercentage(20.0), m_ransacInliers(), m_ransacOutliers(), m_ransacReprojectionError(6.0),
415 m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(),
416 m_trainVpPoints(), m_useAffineDetection(false),
417 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
418 m_useBruteForceCrossCheck(true),
420 m_useConsensusPercentage(false),
421 m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true), m_useSingleMatchFilter(true)
435 void vpKeyPoint::affineSkew(
double tilt,
double phi, cv::Mat& img,
436 cv::Mat& mask, cv::Mat& Ai) {
440 mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));
442 cv::Mat A = cv::Mat::eye(2, 3, CV_32F);
445 if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
450 A = (cv::Mat_<float>(2, 2) << c, -s, s, c);
452 cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
453 cv::Mat tcorners = corners * A.t();
454 cv::Mat tcorners_x, tcorners_y;
455 tcorners.col(0).copyTo(tcorners_x);
456 tcorners.col(1).copyTo(tcorners_y);
457 std::vector<cv::Mat> channels;
458 channels.push_back(tcorners_x);
459 channels.push_back(tcorners_y);
460 cv::merge(channels, tcorners);
462 cv::Rect rect = cv::boundingRect(tcorners);
463 A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
465 cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height),
466 cv::INTER_LINEAR, cv::BORDER_REPLICATE);
469 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
470 double s = 0.8 * sqrt(tilt * tilt - 1);
471 cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
472 cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
473 A.row(0) = A.row(0) / tilt;
476 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() || std::fabs(phi) > std::numeric_limits<double>::epsilon() ) {
479 cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
481 cv::invertAffineTransform(A, Ai);
505 const unsigned int height,
const unsigned int width) {
518 const vpRect &rectangle) {
521 m_trainPoints.clear();
522 m_mapOfImageId.clear();
523 m_mapOfImages.clear();
524 m_currentImageId = 1;
526 if(m_useAffineDetection) {
527 std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
528 std::vector<cv::Mat> listOfTrainDescriptors;
534 m_trainKeyPoints.clear();
535 for(std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
536 it != listOfTrainKeyPoints.end(); ++it) {
537 m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
541 for(std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end(); ++it) {
544 it->copyTo(m_trainDescriptors);
546 m_trainDescriptors.push_back(*it);
550 detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
551 extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
556 for(std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
557 m_mapOfImageId[it->class_id] = m_currentImageId;
561 m_mapOfImages[m_currentImageId] = I;
570 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
572 return static_cast<unsigned int>(m_trainKeyPoints.size());
585 std::vector<cv::Point3f> &points3f,
const bool append,
const int class_id) {
586 cv::Mat trainDescriptors;
588 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
590 extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
592 if(trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
595 std::map<size_t, size_t> mapOfKeypointHashes;
597 for(std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it, cpt++) {
598 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
601 std::vector<cv::Point3f> trainPoints_tmp;
602 for(std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
603 if(mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
604 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
609 points3f = trainPoints_tmp;
612 buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id);
626 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
627 const bool append,
const int class_id) {
629 m_currentImageId = 0;
630 m_mapOfImageId.clear();
631 m_mapOfImages.clear();
632 this->m_trainKeyPoints.clear();
633 this->m_trainPoints.clear();
638 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
641 for(std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
642 it->class_id = class_id;
648 for(std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
649 m_mapOfImageId[it->class_id] = m_currentImageId;
653 m_mapOfImages[m_currentImageId] = I;
656 this->m_trainKeyPoints.insert(this->m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
658 trainDescriptors.copyTo(this->m_trainDescriptors);
660 this->m_trainDescriptors.push_back(trainDescriptors);
662 this->m_trainPoints.insert(this->m_trainPoints.end(), points3f.begin(), points3f.end());
671 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
691 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
698 vpPlane Po(pts[0], pts[1], pts[2]);
699 double xc = 0.0, yc = 0.0;
710 point_obj = cMo.
inverse() * point_cam;
711 point = cv::Point3f((
float) point_obj[0], (
float) point_obj[1], (
float) point_obj[2]);
729 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
736 vpPlane Po(pts[0], pts[1], pts[2]);
737 double xc = 0.0, yc = 0.0;
748 point_obj = cMo.
inverse() * point_cam;
766 std::vector<cv::KeyPoint> &candidates,
const std::vector<vpPolygon> &polygons,
767 const std::vector<std::vector<vpPoint> > &roisPt, std::vector<cv::Point3f> &points, cv::Mat *descriptors) {
769 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
776 std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
777 for(
size_t i = 0; i < candidatesToCheck.size(); i++) {
778 pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
782 std::vector<vpPolygon> polygons_tmp = polygons;
783 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
784 std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
786 while(it2 != pairOfCandidatesToCheck.end()) {
787 imPt.
set_ij(it2->first.pt.y, it2->first.pt.x);
788 if (it1->isInside(imPt)) {
789 candidates.push_back(it2->first);
791 points.push_back(pt);
793 if(descriptors != NULL) {
794 desc.push_back(descriptors->row((
int) it2->second));
798 it2 = pairOfCandidatesToCheck.erase(it2);
805 if(descriptors != NULL) {
806 desc.copyTo(*descriptors);
824 std::vector<vpImagePoint> &candidates,
const std::vector<vpPolygon> &polygons,
825 const std::vector<std::vector<vpPoint> > &roisPt, std::vector<vpPoint> &points, cv::Mat *descriptors) {
827 std::vector<vpImagePoint> candidatesToCheck = candidates;
833 std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
834 for(
size_t i = 0; i < candidatesToCheck.size(); i++) {
835 pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
839 std::vector<vpPolygon> polygons_tmp = polygons;
840 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
841 std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
843 while(it2 != pairOfCandidatesToCheck.end()) {
844 if (it1->isInside(it2->first)) {
845 candidates.push_back(it2->first);
847 points.push_back(pt);
849 if(descriptors != NULL) {
850 desc.push_back(descriptors->row((
int) it2->second));
854 it2 = pairOfCandidatesToCheck.erase(it2);
875 std::vector<cv::KeyPoint> &candidates,
const std::vector<vpCylinder> &cylinders,
876 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
877 std::vector<cv::Point3f> &points, cv::Mat *descriptors) {
878 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
884 size_t cpt_keypoint = 0;
885 for(std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin();
886 it1 != candidatesToCheck.end(); ++it1, cpt_keypoint++) {
887 size_t cpt_cylinder = 0;
890 for(std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
891 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
893 for(std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
895 candidates.push_back(*it1);
898 double xm = 0.0, ym = 0.0;
900 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
902 if(!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
904 point_cam[0] = xm * Z;
905 point_cam[1] = ym * Z;
909 point_obj = cMo.
inverse() * point_cam;
912 points.push_back(cv::Point3f((
float) pt.
get_oX(), (float) pt.
get_oY(), (float) pt.
get_oZ()));
914 if(descriptors != NULL) {
915 desc.push_back(descriptors->row((
int) cpt_keypoint));
925 if(descriptors != NULL) {
926 desc.copyTo(*descriptors);
943 std::vector<vpImagePoint> &candidates,
const std::vector<vpCylinder> &cylinders,
944 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
945 std::vector<vpPoint> &points, cv::Mat *descriptors) {
946 std::vector<vpImagePoint> candidatesToCheck = candidates;
952 size_t cpt_keypoint = 0;
953 for(std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin();
954 it1 != candidatesToCheck.end(); ++it1, cpt_keypoint++) {
955 size_t cpt_cylinder = 0;
958 for(std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
959 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
961 for(std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
963 candidates.push_back(*it1);
966 double xm = 0.0, ym = 0.0;
968 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
970 if(!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
972 point_cam[0] = xm * Z;
973 point_cam[1] = ym * Z;
977 point_obj = cMo.
inverse() * point_cam;
980 points.push_back(pt);
982 if(descriptors != NULL) {
983 desc.push_back(descriptors->row((
int) cpt_keypoint));
993 if(descriptors != NULL) {
994 desc.copyTo(*descriptors);
1015 if(imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
1017 std::cerr <<
"Not enough points to compute the pose (at least 4 points are needed)." << std::endl;
1022 cv::Mat cameraMatrix =
1032 cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);
1035 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
1037 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
1038 rvec, tvec,
false, m_nbRansacIterations, (
float) m_ransacReprojectionError,
1041 cv::SOLVEPNP_ITERATIVE);
1056 int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
1057 if(m_useConsensusPercentage) {
1058 nbInlierToReachConsensus = (int) (m_ransacConsensusPercentage / 100.0 * (
double) m_queryFilteredKeyPoints.size());
1061 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
1062 rvec, tvec,
false, m_nbRansacIterations,
1063 (
float) m_ransacReprojectionError, nbInlierToReachConsensus,
1066 }
catch (cv::Exception &e) {
1067 std::cerr << e.what() << std::endl;
1072 tvec.at<
double>(1), tvec.at<
double>(2));
1073 vpThetaUVector thetaUVector(rvec.at<
double>(0), rvec.at<
double>(1),
1074 rvec.at<
double>(2));
1102 std::vector<vpPoint> &inliers,
double &elapsedTime,
bool (*func)(
vpHomogeneousMatrix *)) {
1103 std::vector<unsigned int> inlierIndex;
1104 return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
1120 std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
1124 if(objectVpPoints.size() < 4) {
1133 for(std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
1137 unsigned int nbInlierToReachConsensus = (
unsigned int) m_nbRansacMinInlierCount;
1138 if(m_useConsensusPercentage) {
1139 nbInlierToReachConsensus = (
unsigned int) (m_ransacConsensusPercentage / 100.0 *
1140 (
double) m_queryFilteredKeyPoints.size());
1147 bool isRansacPoseEstimationOk =
false;
1154 if(m_computeCovariance) {
1158 std::cerr <<
"e=" << e.
what() << std::endl;
1174 return isRansacPoseEstimationOk;
1188 double vpKeyPoint::computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
1190 if(matchKeyPoints.size() == 0) {
1195 std::vector<double> errors(matchKeyPoints.size());
1198 for(std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
1199 it != matchKeyPoints.end(); ++it, cpt++) {
1204 double u = 0.0, v = 0.0;
1206 errors[cpt] = std::sqrt((u-it->first.pt.x)*(u-it->first.pt.x) + (v-it->first.pt.y)*(v-it->first.pt.y));
1209 return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
1238 unsigned int nbImg = (
unsigned int) (m_mapOfImages.size() + 1);
1240 if(m_mapOfImages.empty()) {
1241 std::cerr <<
"There is no training image loaded !" << std::endl;
1251 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double) nbImg));
1254 unsigned int nbWidth = nbImgSqrt;
1256 unsigned int nbHeight = nbImgSqrt;
1259 if(nbImgSqrt * nbImgSqrt < nbImg) {
1263 unsigned int maxW = ICurrent.
getWidth();
1264 unsigned int maxH = ICurrent.
getHeight();
1265 for(std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it) {
1266 if(maxW < it->second.getWidth()) {
1267 maxW = it->second.getWidth();
1270 if(maxH < it->second.getHeight()) {
1271 maxH = it->second.getHeight();
1288 detect(I, keyPoints, elapsedTime, rectangle);
1298 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
const cv::Mat &mask) {
1300 detect(matImg, keyPoints, elapsedTime, mask);
1312 const vpRect &rectangle) {
1315 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1318 cv::Point leftTop((
int) rectangle.
getLeft(), (int) rectangle.
getTop()), rightBottom((
int) rectangle.
getRight(),
1320 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1322 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1325 detect(matImg, keyPoints, elapsedTime, mask);
1336 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
1337 const cv::Mat &mask) {
1341 for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin(); it != m_detectors.end(); ++it) {
1342 std::vector<cv::KeyPoint> kp;
1343 it->second->detect(matImg, kp, mask);
1344 keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
1359 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1361 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1364 for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1378 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1381 for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1396 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color) {
1398 srand((
unsigned int) time(NULL));
1401 std::vector<vpImagePoint> queryImageKeyPoints;
1403 std::vector<vpImagePoint> trainImageKeyPoints;
1407 for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1409 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1412 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1413 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1414 queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.
getWidth());
1432 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
unsigned int lineThickness) {
1433 if(m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1435 std::cerr <<
"There is no training image loaded !" << std::endl;
1440 int nbImg = (int) (m_mapOfImages.size() + 1);
1448 int nbWidth = nbImgSqrt;
1449 int nbHeight = nbImgSqrt;
1451 if(nbImgSqrt * nbImgSqrt < nbImg) {
1455 std::map<int, int> mapOfImageIdIndex;
1458 for(std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it, cpt++) {
1459 mapOfImageIdIndex[it->first] = cpt;
1461 if(maxW < it->second.getWidth()) {
1462 maxW = it->second.getWidth();
1465 if(maxH < it->second.getHeight()) {
1466 maxH = it->second.getHeight();
1471 int medianI = nbHeight / 2;
1472 int medianJ = nbWidth / 2;
1473 int medianIndex = medianI * nbWidth + medianJ;
1474 for(std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1476 int current_class_id_index = 0;
1477 if(mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1478 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1481 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1484 int indexI = current_class_id_index / nbWidth;
1485 int indexJ = current_class_id_index - (indexI * nbWidth);
1486 topLeftCorner.
set_ij((
int)maxH*indexI, (
int)maxW*indexJ);
1493 vpImagePoint topLeftCorner((
int)maxH*medianI, (
int)maxW*medianJ);
1494 for(std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1499 for(std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin();
1500 it != ransacInliers.end(); ++it) {
1505 for(std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1511 for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1512 int current_class_id = 0;
1513 if(mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t) it->trainIdx].class_id]] < medianIndex) {
1514 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t) it->trainIdx].class_id]];
1517 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t) it->trainIdx].class_id]] + 1;
1520 int indexI = current_class_id / nbWidth;
1521 int indexJ = current_class_id - (indexI * nbWidth);
1523 vpImagePoint end((
int)maxH*indexI + m_trainKeyPoints[(
size_t) it->trainIdx].pt.y,
1524 (
int)maxW*indexJ + m_trainKeyPoints[(
size_t) it->trainIdx].pt.x);
1525 vpImagePoint start((
int)maxH*medianI + m_queryFilteredKeyPoints[(
size_t) it->queryIdx].pt.y,
1526 (
int)maxW*medianJ + m_queryFilteredKeyPoints[(
size_t) it->queryIdx].pt.x);
1545 std::vector<cv::Point3f> *trainPoints) {
1547 extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1560 std::vector<cv::Point3f> *trainPoints) {
1562 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1576 double &elapsedTime, std::vector<cv::Point3f> *trainPoints) {
1579 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1593 double &elapsedTime, std::vector<cv::Point3f> *trainPoints) {
1597 for(std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1598 itd != m_extractors.end(); ++itd) {
1602 if(trainPoints != NULL && !trainPoints->empty()) {
1604 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1607 itd->second->compute(matImg, keyPoints, descriptors);
1609 if(keyPoints.size() != keyPoints_tmp.size()) {
1612 std::map<size_t, size_t> mapOfKeypointHashes;
1614 for(std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end(); ++it, cpt++) {
1615 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1618 std::vector<cv::Point3f> trainPoints_tmp;
1619 for(std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1620 if(mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1621 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1626 *trainPoints = trainPoints_tmp;
1630 itd->second->compute(matImg, keyPoints, descriptors);
1634 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1638 itd->second->compute(matImg, keyPoints, desc);
1640 if(keyPoints.size() != keyPoints_tmp.size()) {
1643 std::map<size_t, size_t> mapOfKeypointHashes;
1645 for(std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end(); ++it, cpt++) {
1646 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1649 std::vector<cv::Point3f> trainPoints_tmp;
1650 cv::Mat descriptors_tmp;
1651 for(std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1652 if(mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1653 if(trainPoints != NULL && !trainPoints->empty()) {
1654 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1657 if(!descriptors.empty()) {
1658 descriptors_tmp.push_back(descriptors.row((
int) mapOfKeypointHashes[myKeypointHash(*it)]));
1663 if(trainPoints != NULL) {
1665 *trainPoints = trainPoints_tmp;
1668 descriptors_tmp.copyTo(descriptors);
1672 if(descriptors.empty()) {
1673 desc.copyTo(descriptors);
1675 cv::hconcat(descriptors, desc, descriptors);
1680 if(keyPoints.size() != (size_t) descriptors.rows) {
1681 std::cerr <<
"keyPoints.size() != (size_t) descriptors.rows" << std::endl;
1689 void vpKeyPoint::filterMatches() {
1690 std::vector<cv::KeyPoint> queryKpts;
1691 std::vector<cv::Point3f> trainPts;
1692 std::vector<cv::DMatch> m;
1695 double max_dist = 0;
1697 double min_dist = DBL_MAX;
1699 std::vector<double> distance_vec(m_knnMatches.size());
1702 for(
size_t i = 0; i < m_knnMatches.size(); i++) {
1703 double dist = m_knnMatches[i][0].distance;
1705 distance_vec[i] = dist;
1707 if (dist < min_dist) {
1710 if (dist > max_dist) {
1714 mean /= m_queryDescriptors.rows;
1717 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1718 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1719 double threshold = min_dist + stdev;
1721 for(
size_t i = 0; i < m_knnMatches.size(); i++) {
1722 if(m_knnMatches[i].size() >= 2) {
1724 float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
1727 double dist = m_knnMatches[i][0].distance;
1730 m.push_back(cv::DMatch((
int) queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
1732 if(!m_trainPoints.empty()) {
1733 trainPts.push_back(m_trainPoints[(
size_t)m_knnMatches[i][0].trainIdx]);
1735 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_knnMatches[i][0].queryIdx]);
1740 double max_dist = 0;
1743 double min_dist = DBL_MAX;
1745 std::vector<double> distance_vec(m_matches.size());
1746 for(
size_t i = 0; i < m_matches.size(); i++) {
1747 double dist = m_matches[i].distance;
1749 distance_vec[i] = dist;
1751 if (dist < min_dist) {
1754 if (dist > max_dist) {
1758 mean /= m_queryDescriptors.rows;
1760 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1761 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1768 for (
size_t i = 0; i < m_matches.size(); i++) {
1769 if(m_matches[i].distance <= threshold) {
1770 m.push_back(cv::DMatch((
int) queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
1772 if(!m_trainPoints.empty()) {
1773 trainPts.push_back(m_trainPoints[(
size_t)m_matches[i].trainIdx]);
1775 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_matches[i].queryIdx]);
1780 if(m_useSingleMatchFilter) {
1782 std::vector<cv::DMatch> mTmp;
1783 std::vector<cv::Point3f> trainPtsTmp;
1784 std::vector<cv::KeyPoint> queryKptsTmp;
1786 std::map<int, int> mapOfTrainIdx;
1788 for(std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1789 mapOfTrainIdx[it->trainIdx]++;
1793 for(std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1794 if(mapOfTrainIdx[it->trainIdx] == 1) {
1795 mTmp.push_back(cv::DMatch((
int) queryKptsTmp.size(), it->trainIdx, it->distance));
1797 if(!m_trainPoints.empty()) {
1798 trainPtsTmp.push_back(m_trainPoints[(
size_t) it->trainIdx]);
1800 queryKptsTmp.push_back(queryKpts[(
size_t) it->queryIdx]);
1804 m_filteredMatches = mTmp;
1805 m_objectFilteredPoints = trainPtsTmp;
1806 m_queryFilteredKeyPoints = queryKptsTmp;
1808 m_filteredMatches = m;
1809 m_objectFilteredPoints = trainPts;
1810 m_queryFilteredKeyPoints = queryKpts;
1821 objectPoints = m_objectFilteredPoints;
1840 keyPoints = m_queryFilteredKeyPoints;
1858 keyPoints = m_trainKeyPoints;
1876 points = m_trainPoints;
1885 points = m_trainVpPoints;
1891 void vpKeyPoint::init() {
1893 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
1895 if (!cv::initModule_nonfree()) {
1896 std::cerr <<
"Cannot init module non free, SIFT or SURF cannot be used."
1907 initDetectors(m_detectorNames);
1908 initExtractors(m_extractorNames);
// Create one OpenCV feature detector from its textual name and store it in
// m_detectors. On OpenCV < 3.0 the factory cv::FeatureDetector::create() is
// used directly; on OpenCV >= 3.0 each known name is mapped to its ::create()
// call. A "Pyramid" prefix (e.g. "PyramidFAST") selects the
// PyramidAdaptedFeatureDetector wrapper around the base detector.
// NOTE(review): lossy extract — throw statements, #else/#endif lines and
// closing braces are missing from view; error paths only build ss_msg here.
1917 void vpKeyPoint::initDetector(
const std::string &detectorName) {
1918 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
// OpenCV 2.x path: name-based factory; NULL means unknown/unavailable.
1919 m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
1921 if(m_detectors[detectorName] == NULL) {
1922 std::stringstream ss_msg;
1923 ss_msg <<
"Fail to initialize the detector: " << detectorName <<
" or it is not available in OpenCV version: "
1924 << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV 3.x path: strip an optional "Pyramid" prefix and remember whether
// the pyramid-adapted wrapper was requested.
1928 std::string detectorNameTmp = detectorName;
1929 std::string pyramid =
"Pyramid";
1930 std::size_t pos = detectorName.find(pyramid);
1931 bool usePyramid =
false;
1932 if (pos != std::string::npos) {
1933 detectorNameTmp = detectorName.substr(pos + pyramid.size());
// Dispatch on the (prefix-stripped) detector name.
1937 if(detectorNameTmp ==
"SIFT") {
1938 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1939 cv::Ptr<cv::FeatureDetector> siftDetector = cv::xfeatures2d::SIFT::create();
1941 m_detectors[detectorNameTmp] = siftDetector;
// Pyramid wrapping is discouraged for SIFT (already scale-invariant) but
// still honored below.
1943 std::cerr <<
"You should not use SIFT with Pyramid feature detection!" << std::endl;
1944 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1947 std::stringstream ss_msg;
1948 ss_msg <<
"Fail to initialize the detector: SIFT. OpenCV version "
1949 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
1952 }
else if (detectorNameTmp ==
"SURF") {
1953 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1954 cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
1956 m_detectors[detectorNameTmp] = surfDetector;
1958 std::cerr <<
"You should not use SURF with Pyramid feature detection!" << std::endl;
1959 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
1962 std::stringstream ss_msg;
1963 ss_msg <<
"Fail to initialize the detector: SURF. OpenCV version "
1964 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
1967 }
else if (detectorNameTmp ==
"FAST") {
1968 cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
1970 m_detectors[detectorNameTmp] = fastDetector;
1972 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1974 }
else if (detectorNameTmp ==
"MSER") {
1975 cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
1977 m_detectors[detectorNameTmp] = fastDetector;
1979 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1981 }
else if (detectorNameTmp ==
"ORB") {
1982 cv::Ptr<cv::FeatureDetector> orbDetector = cv::ORB::create();
1984 m_detectors[detectorNameTmp] = orbDetector;
1986 std::cerr <<
"You should not use ORB with Pyramid feature detection!" << std::endl;
1987 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
1989 }
else if (detectorNameTmp ==
"BRISK") {
1990 cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
1992 m_detectors[detectorNameTmp] = briskDetector;
1994 std::cerr <<
"You should not use BRISK with Pyramid feature detection!" << std::endl;
1995 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
1997 }
else if (detectorNameTmp ==
"KAZE") {
1998 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
2000 m_detectors[detectorNameTmp] = kazeDetector;
2002 std::cerr <<
"You should not use KAZE with Pyramid feature detection!" << std::endl;
2003 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
2005 }
else if (detectorNameTmp ==
"AKAZE") {
2006 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
2008 m_detectors[detectorNameTmp] = akazeDetector;
2010 std::cerr <<
"You should not use AKAZE with Pyramid feature detection!" << std::endl;
2011 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
2013 }
else if (detectorNameTmp ==
"GFTT") {
2014 cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
2016 m_detectors[detectorNameTmp] = gfttDetector;
2018 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
2020 }
else if (detectorNameTmp ==
"SimpleBlob") {
2021 cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
2023 m_detectors[detectorNameTmp] = simpleBlobDetector;
2025 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
2027 }
else if (detectorNameTmp ==
"STAR") {
2028 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2029 cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
2031 m_detectors[detectorNameTmp] = starDetector;
2033 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
2036 std::stringstream ss_msg;
2037 ss_msg <<
"Fail to initialize the detector: STAR. OpenCV version "
2038 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2041 }
else if (detectorNameTmp ==
"AGAST") {
2042 cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
2044 m_detectors[detectorNameTmp] = agastDetector;
2046 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
2048 }
else if (detectorNameTmp ==
"MSD") {
// MSD requires OpenCV >= 3.1 in addition to the xfeatures2d module.
2049 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
2050 #if defined (VISP_HAVE_OPENCV_XFEATURES2D)
2051 cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
2053 m_detectors[detectorNameTmp] = msdDetector;
2055 std::cerr <<
"You should not use MSD with Pyramid feature detection!" << std::endl;
2056 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
2059 std::stringstream ss_msg;
2060 ss_msg <<
"Fail to initialize the detector: MSD. OpenCV version "
2061 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2065 std::stringstream ss_msg;
2066 ss_msg <<
"Feature " << detectorName <<
" is not available in OpenCV version: "
2067 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" (require >= OpenCV 3.1).";
// Unknown name: report on stderr (presumably a final else branch — the
// surrounding control lines are missing from this extract).
2071 std::cerr <<
"The detector:" << detectorNameTmp <<
" is not available." << std::endl;
// Verify that the map entry actually holds a detector; the key depends on
// whether the pyramid wrapper was requested.
2074 bool detectorInitialized =
false;
2076 detectorInitialized = (m_detectors[detectorNameTmp] != NULL);
2078 detectorInitialized = (m_detectors[detectorName] != NULL);
2081 if(!detectorInitialized) {
2082 std::stringstream ss_msg;
2083 ss_msg <<
"Fail to initialize the detector: " << detectorNameTmp <<
" or it is not available in OpenCV version: "
2084 << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Initialize every detector listed in detectorNames (loop body — presumably
// a call to initDetector(*it) — is missing from this lossy extract).
2096 void vpKeyPoint::initDetectors(
const std::vector<std::string> &detectorNames) {
2097 for(std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
// Create one OpenCV descriptor extractor from its textual name and store it
// in m_extractors. OpenCV < 3.0 uses the name-based factory; OpenCV >= 3.0
// maps each known name to its ::create() call, with xfeatures2d-only
// extractors guarded by VISP_HAVE_OPENCV_XFEATURES2D and VGG/BoostDesc
// additionally requiring OpenCV >= 3.2.
// NOTE(review): lossy extract — throw statements, #else/#endif lines and
// closing braces are missing; error paths only build ss_msg here.
2107 void vpKeyPoint::initExtractor(
const std::string &extractorName) {
2108 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2109 m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
2111 if(extractorName ==
"SIFT") {
2112 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2113 m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
2115 std::stringstream ss_msg;
2116 ss_msg <<
"Fail to initialize the extractor: SIFT. OpenCV version "
2117 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2120 }
else if(extractorName ==
"SURF") {
2121 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
// SURF created with extended descriptors (the trailing 'true'), i.e.
// 128-element vectors instead of 64.
2123 m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3,
true);
2125 std::stringstream ss_msg;
2126 ss_msg <<
"Fail to initialize the extractor: SURF. OpenCV version "
2127 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2130 }
else if(extractorName ==
"ORB") {
2131 m_extractors[extractorName] = cv::ORB::create();
2132 }
else if(extractorName ==
"BRISK") {
2133 m_extractors[extractorName] = cv::BRISK::create();
2134 }
else if(extractorName ==
"FREAK") {
2135 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2136 m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2138 std::stringstream ss_msg;
2139 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version "
2140 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2143 }
else if(extractorName ==
"BRIEF") {
2144 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2145 m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2147 std::stringstream ss_msg;
2148 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version "
2149 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2152 }
else if(extractorName ==
"KAZE") {
2153 m_extractors[extractorName] = cv::KAZE::create();
2154 }
else if(extractorName ==
"AKAZE") {
2155 m_extractors[extractorName] = cv::AKAZE::create();
2156 }
else if(extractorName ==
"DAISY") {
2157 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2158 m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2160 std::stringstream ss_msg;
2161 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version "
2162 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2165 }
else if(extractorName ==
"LATCH") {
2166 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2167 m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2169 std::stringstream ss_msg;
2170 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version "
2171 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2174 }
else if(extractorName ==
"LUCID") {
// NOTE(review): the LUCID creation line is missing from this extract —
// only the xfeatures2d guard and the failure message remain visible.
2175 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2180 std::stringstream ss_msg;
2181 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version "
2182 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2185 }
else if (extractorName ==
"VGG") {
2186 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2187 #if defined (VISP_HAVE_OPENCV_XFEATURES2D)
2188 m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2190 std::stringstream ss_msg;
2191 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version "
2192 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2196 std::stringstream ss_msg;
2197 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version "
2198 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
2201 }
else if (extractorName ==
"BoostDesc") {
2202 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2203 #if defined (VISP_HAVE_OPENCV_XFEATURES2D)
2204 m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2206 std::stringstream ss_msg;
2207 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version "
2208 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2212 std::stringstream ss_msg;
2213 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version "
2214 << std::hex << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
// Unknown extractor name: report on stderr.
2219 std::cerr <<
"The extractor:" << extractorName <<
" is not available." << std::endl;
// Final sanity check: the map entry must hold a valid extractor.
2223 if(m_extractors[extractorName] == NULL) {
2224 std::stringstream ss_msg;
2225 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
" or it is not available in OpenCV version: "
2226 << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV 2.4.x tuning: switch SURF to extended (128-element) descriptors.
2230 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2231 if(extractorName ==
"SURF") {
2233 m_extractors[extractorName]->set(
"extended", 1);
// Initialize every extractor listed in extractorNames, then scan the
// resulting extractors to determine a common descriptor type (CV_32F by
// default). NOTE(review): lossy extract — the initExtractor(*it) call and
// the handling of a descriptor-type mismatch are missing from view.
2243 void vpKeyPoint::initExtractors(
const std::vector<std::string> &extractorNames) {
2244 for(std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
// Record the first extractor's descriptor type and compare the rest
// against it; all configured extractors are expected to agree.
2248 int descriptorType = CV_32F;
2249 bool firstIteration =
true;
2250 for(std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2251 it != m_extractors.end(); ++it) {
2252 if(firstIteration) {
2253 firstIteration =
false;
2254 descriptorType = it->second->descriptorType();
2256 if(descriptorType != it->second->descriptorType()) {
// Populate the enum -> OpenCV-name lookup tables for detectors
// (m_mapOfDetectorNames) and descriptors (m_mapOfDescriptorNames), gated on
// the OpenCV version / optional modules available at build time.
// NOTE(review): lossy extract — the entries for the common 2.4.x-era
// detectors/descriptors (FAST, ORB, SIFT, SURF, ...) under the first guards
// are missing from view; only the OpenCV-3.x additions remain.
2263 void vpKeyPoint::initFeatureNames() {
2265 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2272 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined (VISP_HAVE_OPENCV_XFEATURES2D))
2275 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined (VISP_HAVE_OPENCV_XFEATURES2D)
2279 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2280 m_mapOfDetectorNames[DETECTOR_KAZE] =
"KAZE";
2281 m_mapOfDetectorNames[DETECTOR_AKAZE] =
"AKAZE";
2282 m_mapOfDetectorNames[DETECTOR_AGAST] =
"AGAST";
2284 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined (VISP_HAVE_OPENCV_XFEATURES2D)
2285 m_mapOfDetectorNames[DETECTOR_MSD] =
"MSD";
// Same structure for descriptor names.
2289 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2292 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined (VISP_HAVE_OPENCV_XFEATURES2D))
2296 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined (VISP_HAVE_OPENCV_XFEATURES2D)
2300 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2301 m_mapOfDescriptorNames[DESCRIPTOR_KAZE] =
"KAZE";
2302 m_mapOfDescriptorNames[DESCRIPTOR_AKAZE] =
"AKAZE";
2303 #if defined (VISP_HAVE_OPENCV_XFEATURES2D)
2304 m_mapOfDescriptorNames[DESCRIPTOR_DAISY] =
"DAISY";
2305 m_mapOfDescriptorNames[DESCRIPTOR_LATCH] =
"LATCH";
2308 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined (VISP_HAVE_OPENCV_XFEATURES2D)
2309 m_mapOfDescriptorNames[DESCRIPTOR_VGG] =
"VGG";
2310 m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] =
"BoostDesc";
// Interior of vpKeyPoint::initMatcher(matcherName) — the signature line is
// missing from this lossy extract. Determines the common descriptor type of
// the configured extractors, then builds the descriptor matcher: for
// "FlannBased", binary descriptors (CV_8U) get an LSH index and all others a
// KD-tree index; any other name goes through the OpenCV factory.
2321 int descriptorType = CV_32F;
2322 bool firstIteration =
true;
2323 for(std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2324 it != m_extractors.end(); ++it) {
2325 if(firstIteration) {
2326 firstIteration =
false;
2327 descriptorType = it->second->descriptorType();
2329 if(descriptorType != it->second->descriptorType()) {
2335 if(matcherName ==
"FlannBased") {
// Without any extractor we cannot know the descriptor type; fall back to
// the CV_32F default and warn.
2336 if(m_extractors.empty()) {
2337 std::cout <<
"Warning: No extractor initialized, by default use floating values (CV_32F) "
2338 "for descriptor type !" << std::endl;
2341 if(descriptorType == CV_8U) {
// Binary descriptors: LSH index (12 tables, key size 20, 2 probes).
2342 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2343 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2345 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::LshIndexParams(12, 20, 2));
// Floating-point descriptors: default KD-tree index.
2348 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2349 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2351 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::KDTreeIndexParams());
2355 m_matcher = cv::DescriptorMatcher::create(matcherName);
// OpenCV 2.4.x only: cross-check is configurable on BruteForce when KNN
// matching is not used.
2358 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2359 if(m_matcher != NULL && !m_useKnn && matcherName ==
"BruteForce") {
2360 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
2364 if(m_matcher == NULL) {
2365 std::stringstream ss_msg;
2366 ss_msg <<
"Fail to initialize the matcher: " << matcherName <<
" or it is not available in OpenCV version: "
2367 << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Fragment of insertImageMatching(IRef, ICurrent, IMatching): pastes the
// reference image and then the current image into the side-by-side matching
// canvas. NOTE(review): the signature and the topLeftCorner computations
// between the two inserts are missing from this extract.
2383 IMatching.
insert(IRef, topLeftCorner);
2385 IMatching.
insert(ICurrent, topLeftCorner);
// Interior of the multi-training-image insertImageMatching overload (the
// signature line is missing from this extract). Lays out all training images
// plus the current image on a roughly square grid: the grid cell at the
// median index is reserved for the current image, every other training image
// fills the remaining cells, each cell sized by the largest training image.
2397 int nbImg = (int) (m_mapOfImages.size() + 1);
2399 if(m_mapOfImages.empty()) {
2400 std::cerr <<
"There is no training image loaded !" << std::endl;
// Grid dimensions from the (integer) square root of the image count,
// growing by one column when the square is too small.
2410 int nbWidth = nbImgSqrt;
2411 int nbHeight = nbImgSqrt;
2413 if(nbImgSqrt * nbImgSqrt < nbImg) {
// Cell size = max width / height over all training images.
2418 for(std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it) {
2419 if(maxW < it->second.getWidth()) {
2420 maxW = it->second.getWidth();
2423 if(maxH < it->second.getHeight()) {
2424 maxH = it->second.getHeight();
// The middle cell of the grid is reserved for the current image.
2429 int medianI = nbHeight / 2;
2430 int medianJ = nbWidth / 2;
2431 int medianIndex = medianI * nbWidth + medianJ;
2434 for(std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it, cpt++) {
// Skip over the reserved median cell when placing training images.
2435 int local_cpt = cpt;
2436 if(cpt >= medianIndex) {
2440 int indexI = local_cpt / nbWidth;
2441 int indexJ = local_cpt - (indexI * nbWidth);
2442 vpImagePoint topLeftCorner((
int)maxH*indexI, (
int)maxW*indexJ);
2444 IMatching.
insert(it->second, topLeftCorner);
// Finally paste the current image into the median cell.
2447 vpImagePoint topLeftCorner((
int)maxH*medianI, (
int)maxW*medianJ);
2448 IMatching.
insert(ICurrent, topLeftCorner);
// Interior of loadConfigFile(configFile): requires libxml2; resets the
// current detector/extractor configuration and parses the XML configuration
// file through the xmlp parser object (declared in lines missing from this
// extract).
2452 #ifdef VISP_HAVE_XML2
2463 m_detectorNames.clear();
2464 m_extractorNames.clear();
2465 m_detectors.clear();
2466 m_extractors.clear();
2468 std::cout <<
" *********** Parsing XML for configuration for vpKeyPoint ************ " << std::endl;
2469 xmlp.
parse(configFile);
// Interior of loadLearningData(filename, binaryMode, append): restores the
// training database (keypoints, optional 3D points, optional training
// images, descriptor matrix) from either a little-endian binary file or a
// libxml2-parsed XML file. In append mode, existing class/image ids are
// offset by the current maxima so reloaded data does not collide.
// NOTE(review): lossy extract — many control lines (#else/#endif, closing
// braces, some reads) are missing from view.
2533 int startClassId = 0;
2534 int startImageId = 0;
// Non-append load starts from a clean training state.
2536 m_trainKeyPoints.clear();
2537 m_trainPoints.clear();
2538 m_mapOfImageId.clear();
2539 m_mapOfImages.clear();
// Append mode: continue class ids after the current maximum.
2542 for(std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
2543 if(startClassId < it->first) {
2544 startClassId = it->first;
// Append mode: continue image ids after the current maximum.
2549 for(std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it) {
2550 if(startImageId < it->first) {
2551 startImageId = it->first;
2558 if(!parent.empty()) {
// ---------- Binary learning file ----------
2563 std::ifstream file(filename.c_str(), std::ifstream::binary);
2564 if(!file.is_open()){
// Number of embedded training images.
2571 readBinaryIntLE(file, nbImgs);
2573 #if !defined(VISP_HAVE_MODULE_IO)
2575 std::cout <<
"Warning: The learning file contains image data that will not be loaded as visp_io module "
2576 "is not available !" << std::endl;
2580 for(
int i = 0; i < nbImgs; i++) {
// Per image: id, path length, then the path characters one by one.
2584 readBinaryIntLE(file,
id);
2588 readBinaryIntLE(file, length);
2590 char* path =
new char[length + 1];
2592 for(
int cpt = 0; cpt < length; cpt++) {
2594 file.read((
char *)(&c),
sizeof(c));
2597 path[length] =
'\0';
2600 #ifdef VISP_HAVE_MODULE_IO
// Image is read from 'path' (read call missing from extract) and stored
// under its offset id.
2608 m_mapOfImages[
id + startImageId] = I;
// Flag telling whether each keypoint carries a 3D object point.
2616 int have3DInfoInt = 0;
2618 readBinaryIntLE(file, have3DInfoInt);
2619 bool have3DInfo = have3DInfoInt != 0;
// Descriptor matrix header: rows, cols, OpenCV type code.
2624 readBinaryIntLE(file, nRows);
2629 readBinaryIntLE(file, nCols);
2632 int descriptorType = 5;
2634 readBinaryIntLE(file, descriptorType);
2636 cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2637 for(
int i = 0; i < nRows; i++) {
// Per keypoint: 2D position, size, angle, response, octave, class id
// and the id of the training image it came from.
2639 float u, v, size, angle, response;
2640 int octave, class_id, image_id;
2642 readBinaryFloatLE(file, u);
2644 readBinaryFloatLE(file, v);
2646 readBinaryFloatLE(file, size);
2648 readBinaryFloatLE(file, angle);
2650 readBinaryFloatLE(file, response);
2652 readBinaryIntLE(file, octave);
2654 readBinaryIntLE(file, class_id);
2656 readBinaryIntLE(file, image_id);
2657 cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
2658 m_trainKeyPoints.push_back(keyPoint);
2660 if(image_id != -1) {
2661 #ifdef VISP_HAVE_MODULE_IO
// Keypoint class id -> training image id (only when images are usable).
2663 m_mapOfImageId[class_id] = image_id + startImageId;
// Optional 3D object coordinates for this keypoint.
2671 readBinaryFloatLE(file, oX);
2673 readBinaryFloatLE(file, oY);
2675 readBinaryFloatLE(file, oZ);
2676 m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
// Descriptor row, element type chosen by the stored OpenCV type code.
2679 for(
int j = 0; j < nCols; j++) {
2681 switch(descriptorType) {
2684 unsigned char value;
2685 file.read((
char *)(&value),
sizeof(value));
2686 trainDescriptorsTmp.at<
unsigned char>(i, j) = value;
2693 file.read((
char *)(&value),
sizeof(value));
2694 trainDescriptorsTmp.at<
char>(i, j) = value;
2700 unsigned short int value;
2702 readBinaryUShortLE(file, value);
2703 trainDescriptorsTmp.at<
unsigned short int>(i, j) = value;
2711 readBinaryShortLE(file, value);
2712 trainDescriptorsTmp.at<
short int>(i, j) = value;
2720 readBinaryIntLE(file, value);
2721 trainDescriptorsTmp.at<
int>(i, j) = value;
2729 readBinaryFloatLE(file, value);
2730 trainDescriptorsTmp.at<
float>(i, j) = value;
2738 readBinaryDoubleLE(file, value);
2739 trainDescriptorsTmp.at<
double>(i, j) = value;
// Fallback case: treat unknown type codes as float.
2747 readBinaryFloatLE(file, value);
2748 trainDescriptorsTmp.at<
float>(i, j) = value;
// Replace or append to the existing descriptor matrix.
2755 if(!append || m_trainDescriptors.empty()) {
2756 trainDescriptorsTmp.copyTo(m_trainDescriptors);
2758 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
// ---------- XML learning file (libxml2) ----------
2763 #ifdef VISP_HAVE_XML2
2764 xmlDocPtr doc = NULL;
2765 xmlNodePtr root_element = NULL;
2775 doc = xmlReadFile(filename.c_str(), NULL, 0);
2781 root_element = xmlDocGetRootElement(doc);
2783 xmlNodePtr first_level_node = NULL;
2786 int descriptorType = CV_32F;
2787 int nRows = 0, nCols = 0;
2790 cv::Mat trainDescriptorsTmp;
// Walk the top-level children of the XML root.
2792 for (first_level_node = root_element->children; first_level_node;
2793 first_level_node = first_level_node->next) {
2795 std::string name((
char *) first_level_node->name);
// <TrainingImageInfo>: embedded training image ids and file paths.
2796 if (first_level_node->type == XML_ELEMENT_NODE && name ==
"TrainingImageInfo") {
2797 xmlNodePtr image_info_node = NULL;
2799 for (image_info_node = first_level_node->children; image_info_node; image_info_node =
2800 image_info_node->next) {
2801 name = std::string ((
char *) image_info_node->name);
2803 if(name ==
"trainImg") {
2805 xmlChar *image_id_property = xmlGetProp(image_info_node, BAD_CAST
"image_id");
2807 if(image_id_property) {
2808 id = std::atoi((
char *) image_id_property);
2810 xmlFree(image_id_property);
2813 #ifdef VISP_HAVE_MODULE_IO
2814 std::string path((
char *) image_info_node->children->content);
2823 m_mapOfImages[
id + startImageId] = I;
// <DescriptorsInfo>: matrix dimensions and element type.
2827 }
else if(first_level_node->type == XML_ELEMENT_NODE && name ==
"DescriptorsInfo") {
2828 xmlNodePtr descriptors_info_node = NULL;
2829 for (descriptors_info_node = first_level_node->children; descriptors_info_node; descriptors_info_node =
2830 descriptors_info_node->next) {
2831 if (descriptors_info_node->type == XML_ELEMENT_NODE) {
2832 name = std::string ((
char *) descriptors_info_node->name);
2834 if(name ==
"nrows") {
2835 nRows = std::atoi((
char *) descriptors_info_node->children->content);
2836 }
else if(name ==
"ncols") {
2837 nCols = std::atoi((
char *) descriptors_info_node->children->content);
2838 }
else if(name ==
"type") {
2839 descriptorType = std::atoi((
char *) descriptors_info_node->children->content);
2844 trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
// <DescriptorInfo>: one keypoint with its optional 3D point and
// descriptor values.
2845 }
else if (first_level_node->type == XML_ELEMENT_NODE && name ==
"DescriptorInfo") {
2846 xmlNodePtr point_node = NULL;
2847 double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
2848 int octave = 0, class_id = 0, image_id = 0;
2849 double oX = 0.0, oY = 0.0, oZ = 0.0;
2851 std::stringstream ss;
2853 for (point_node = first_level_node->children; point_node; point_node =
2855 if (point_node->type == XML_ELEMENT_NODE) {
2856 name = std::string ((
char *) point_node->name);
2860 u = std::strtod((
char *) point_node->children->content, &pEnd);
2861 }
else if(name ==
"v") {
2862 v = std::strtod((
char *) point_node->children->content, &pEnd);
2863 }
else if(name ==
"size") {
2864 size = std::strtod((
char *) point_node->children->content, &pEnd);
2865 }
else if(name ==
"angle") {
2866 angle = std::strtod((
char *) point_node->children->content, &pEnd);
2867 }
else if(name ==
"response") {
2868 response = std::strtod((
char *) point_node->children->content, &pEnd);
2869 }
else if(name ==
"octave") {
2870 octave = std::atoi((
char *) point_node->children->content);
2871 }
else if(name ==
"class_id") {
// class_id is the last scalar needed: the keypoint is created here.
2872 class_id = std::atoi((
char *) point_node->children->content);
2873 cv::KeyPoint keyPoint(cv::Point2f((
float) u, (
float) v), (
float) size,
2874 (
float) angle, (
float) response, octave, (class_id + startClassId));
2875 m_trainKeyPoints.push_back(keyPoint);
2876 }
else if(name ==
"image_id") {
2877 image_id = std::atoi((
char *) point_node->children->content);
2878 if(image_id != -1) {
2879 #ifdef VISP_HAVE_MODULE_IO
2881 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2884 }
else if (name ==
"oX") {
2885 oX = std::atof((
char *) point_node->children->content);
2886 }
else if (name ==
"oY") {
2887 oY = std::atof((
char *) point_node->children->content);
2888 }
else if (name ==
"oZ") {
// oZ completes the 3D point, which is pushed immediately.
2889 oZ = std::atof((
char *) point_node->children->content);
2890 m_trainPoints.push_back(cv::Point3f((
float) oX, (
float) oY, (
float) oZ));
2891 }
else if (name ==
"desc") {
// Descriptor values parsed child by child into the matrix, using the
// stringstream ss for the numeric conversions.
2892 xmlNodePtr descriptor_value_node = NULL;
2895 for (descriptor_value_node = point_node->children;
2896 descriptor_value_node; descriptor_value_node =
2897 descriptor_value_node->next) {
2899 if (descriptor_value_node->type == XML_ELEMENT_NODE) {
2901 std::string parseStr((
char *) descriptor_value_node->children->content);
2906 switch(descriptorType) {
2912 trainDescriptorsTmp.at<
unsigned char>(i, j) = (
unsigned char) parseValue;
2920 trainDescriptorsTmp.at<
char>(i, j) = (
char) parseValue;
2924 ss >> trainDescriptorsTmp.at<
unsigned short int>(i, j);
2928 ss >> trainDescriptorsTmp.at<
short int>(i, j);
2932 ss >> trainDescriptorsTmp.at<
int>(i, j);
2936 ss >> trainDescriptorsTmp.at<
float>(i, j);
2940 ss >> trainDescriptorsTmp.at<
double>(i, j);
2944 ss >> trainDescriptorsTmp.at<
float>(i, j);
2948 std::cerr <<
"Error when converting:" << ss.str() << std::endl;
// Replace or append to the existing descriptor matrix.
2961 if(!append || m_trainDescriptors.empty()) {
2962 trainDescriptorsTmp.copyTo(m_trainDescriptors);
2964 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
// Without libxml2 the XML path cannot be used at all.
2976 std::cout <<
"Error: libxml2 is required !" << std::endl;
2986 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
// Interior of vpKeyPoint::match(trainDescriptors, queryDescriptors, matches,
// elapsedTime) — the beginning of the signature is missing from this
// extract. Runs either KNN (k=2) or plain matching; when
// m_useMatchTrainToQuery is set, matching is done train->query on a cloned
// matcher and the resulting query/train indices are swapped back.
3001 std::vector<cv::DMatch> &matches,
double &elapsedTime) {
3005 m_knnMatches.clear();
3007 if(m_useMatchTrainToQuery) {
3008 std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
// Clone (with emptied train data) so the member matcher state is untouched.
3011 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3012 matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
// Swap query/train indices to express the matches query->train again.
3014 for(std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin(); it1 != knnMatchesTmp.end(); ++it1) {
3015 std::vector<cv::DMatch> tmp;
3016 for(std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
3017 tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
3019 m_knnMatches.push_back(tmp);
// Keep only the best candidate of each KNN pair (knnToDMatch).
3022 matches.resize(m_knnMatches.size());
3023 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3026 m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
3027 matches.resize(m_knnMatches.size());
3028 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
// Non-KNN branch: single best match per descriptor.
3033 if(m_useMatchTrainToQuery) {
3034 std::vector<cv::DMatch> matchesTmp;
3036 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3037 matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
3039 for(std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
3040 matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
3044 m_matcher->match(queryDescriptors, matches);
3072 const unsigned int height,
const unsigned int width) {
// Interior of matchPoint(I, rectangle): detects/extracts keypoints on the
// current image (optionally over a set of affine-simulated views when
// m_useAffineDetection is set), matches them against the reference
// descriptors, applies the configured match filtering, and returns the
// number of filtered matches. The beginning of the signature is missing
// from this lossy extract, as are the early-return lines of the guard.
3085 const vpRect& rectangle) {
// Guard: matching requires a computed reference database.
3086 if(m_trainDescriptors.empty()) {
3087 std::cerr <<
"Reference is empty." << std::endl;
3089 std::cerr <<
"Reference is not computed." << std::endl;
3091 std::cerr <<
"Matching is not possible." << std::endl;
3096 if(m_useAffineDetection) {
// ASIFT-style detection: keypoints/descriptors are gathered per simulated
// view (the detection call itself is missing from this extract) and then
// concatenated.
3097 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3098 std::vector<cv::Mat> listOfQueryDescriptors;
3104 m_queryKeyPoints.clear();
3105 for(std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3106 it != listOfQueryKeyPoints.end(); ++it) {
3107 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3111 for(std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end(); ++it) {
3114 it->copyTo(m_queryDescriptors);
3116 m_queryDescriptors.push_back(*it);
// Standard path: detect and extract on the (optionally cropped) image.
3120 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3121 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3124 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3127 m_queryFilteredKeyPoints.clear();
3128 m_objectFilteredPoints.clear();
3129 m_filteredMatches.clear();
// No-filter branch: re-index matches so queryIdx follows the filtered
// keypoint vector (indices were swapped in train-to-query mode).
3133 if(m_useMatchTrainToQuery) {
3135 m_queryFilteredKeyPoints.clear();
3136 m_filteredMatches.clear();
3137 for(std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3138 m_filteredMatches.push_back(cv::DMatch((
int) m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3139 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t) it->queryIdx]);
3142 m_queryFilteredKeyPoints = m_queryKeyPoints;
3143 m_filteredMatches = m_matches;
// Collect the 3D object points corresponding to the matched train indices.
3146 if(!m_trainPoints.empty()) {
3147 m_objectFilteredPoints.clear();
3150 for(std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3152 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t) it->trainIdx]);
3161 return static_cast<unsigned int>(m_filteredMatches.size());
// Body of the convenience matchPoint overload (signature missing from this
// extract): forwards to the full pose-estimating overload, discarding the
// reprojection error and timing outputs.
3177 double error, elapsedTime;
3178 return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
// Interior of the full matchPoint overload with pose estimation (signature
// missing from this lossy extract): detect/extract (optionally with affine
// view simulation), match against the reference, filter matches, then
// estimate the pose cMo by RANSAC — either the ViSP VVS pose solver
// (m_useRansacVVS) on vpPoint correspondences, or the OpenCV-based solver on
// 2D/3D point vectors — and record inliers, outliers, reprojection error
// and accumulated timings.
// Guard: matching requires a computed reference database.
3198 if(m_trainDescriptors.empty()) {
3199 std::cerr <<
"Reference is empty." << std::endl;
3201 std::cerr <<
"Reference is not computed." << std::endl;
3203 std::cerr <<
"Matching is not possible." << std::endl;
3208 if(m_useAffineDetection) {
// Affine-simulated detection, concatenated as in the simpler overload.
3209 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3210 std::vector<cv::Mat> listOfQueryDescriptors;
3216 m_queryKeyPoints.clear();
3217 for(std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3218 it != listOfQueryKeyPoints.end(); ++it) {
3219 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3223 for(std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end(); ++it) {
3226 it->copyTo(m_queryDescriptors);
3228 m_queryDescriptors.push_back(*it);
3232 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3233 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3236 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3238 elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3241 m_queryFilteredKeyPoints.clear();
3242 m_objectFilteredPoints.clear();
3243 m_filteredMatches.clear();
// No-filter branch, re-indexing as in the simpler overload.
3247 if(m_useMatchTrainToQuery) {
3249 m_queryFilteredKeyPoints.clear();
3250 m_filteredMatches.clear();
3251 for(std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3252 m_filteredMatches.push_back(cv::DMatch((
int) m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3253 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t) it->queryIdx]);
3256 m_queryFilteredKeyPoints = m_queryKeyPoints;
3257 m_filteredMatches = m_matches;
3260 if(!m_trainPoints.empty()) {
3261 m_objectFilteredPoints.clear();
3264 for(std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3266 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t) it->trainIdx]);
3277 m_ransacInliers.clear();
3278 m_ransacOutliers.clear();
3280 if(m_useRansacVVS) {
// ViSP VVS pose path: build vpPoint objects carrying both the 3D object
// coordinates and the normalized image coordinates of each filtered match.
3281 std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3284 for(std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin(); it != m_objectFilteredPoints.end();
// Note the (row, col) = (pt.y, pt.x) ordering of vpImagePoint.
3289 vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3291 double x = 0.0, y = 0.0;
3296 objectVpPoints[cpt] = pt;
3299 std::vector<vpPoint> inliers;
3300 std::vector<unsigned int> inlierIndex;
3302 bool res =
computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
// Record the inlier keypoint/3D-point pairs and index set.
3304 std::map<unsigned int, bool> mapOfInlierIndex;
3305 m_matchRansacKeyPointsToPoints.clear();
3307 for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3308 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(m_queryFilteredKeyPoints[(
size_t)(*it)],
3309 m_objectFilteredPoints[(
size_t)(*it)]));
3310 mapOfInlierIndex[*it] =
true;
// Everything not flagged as inlier becomes a RANSAC outlier.
3313 for(
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3314 if(mapOfInlierIndex.find((
unsigned int) i) == mapOfInlierIndex.end()) {
3315 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3319 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3321 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3322 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(), m_ransacInliers.begin(),
3323 matchRansacToVpImage);
3325 elapsedTime += m_poseTime;
// OpenCV-based pose path: 2D pixel points + 3D object points.
3329 std::vector<cv::Point2f> imageFilteredPoints;
3330 cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3331 std::vector<int> inlierIndex;
3332 bool res =
computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3334 std::map<int, bool> mapOfInlierIndex;
3335 m_matchRansacKeyPointsToPoints.clear();
3337 for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3338 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(m_queryFilteredKeyPoints[(
size_t)(*it)],
3339 m_objectFilteredPoints[(
size_t)(*it)]));
3340 mapOfInlierIndex[*it] =
true;
3343 for(
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3344 if(mapOfInlierIndex.find((
int) i) == mapOfInlierIndex.end()) {
3345 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3349 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3351 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3352 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(), m_ransacInliers.begin(),
3353 matchRansacToVpImage);
3355 elapsedTime += m_poseTime;
// NOTE(review): extraction artifact — the original file's own line numbers
// (3379, 3380, ...) are embedded in the text and several physical lines
// (closing braces, blank/comment lines) are missing, so this span is not
// compilable as-is.  The visible statements match the body of
// vpKeyPoint::matchPointAndDetect(I, boundingBox, centerOfGravity,
// isPlanarObject, imPts1, imPts2, meanDescriptorDistance, detection_score,
// rectangle) — TODO confirm against the pristine source file.
3379 const bool isPlanarObject, std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3380 double *meanDescriptorDistance,
double *detection_score,
const vpRect& rectangle) {
3381 if(imPts1 != NULL && imPts2 != NULL) {
// Mean descriptor distance over all the filtered matches (lower = better).
3388 double meanDescriptorDistanceTmp = 0.0;
3389 for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3390 meanDescriptorDistanceTmp += (double) it->distance;
3393 meanDescriptorDistanceTmp /= (double) m_filteredMatches.size();
// Score: number of matches divided by the mean distance — more matches and
// smaller distances yield a higher detection score.
3394 double score = (double) m_filteredMatches.size() / meanDescriptorDistanceTmp;
3396 if(meanDescriptorDistance != NULL) {
3397 *meanDescriptorDistance = meanDescriptorDistanceTmp;
3399 if(detection_score != NULL) {
3400 *detection_score = score;
// Geometric consistency check — a homography needs >= 4 correspondences.
3403 if(m_filteredMatches.size() >= 4) {
3405 std::vector<cv::Point2f> points1(m_filteredMatches.size());
3407 std::vector<cv::Point2f> points2(m_filteredMatches.size());
// points1 = train (reference) keypoints, points2 = query keypoints.
3409 for(
size_t i = 0; i < m_filteredMatches.size(); i++) {
3410 points1[i] = cv::Point2f(m_trainKeyPoints[(
size_t)m_filteredMatches[i].trainIdx].pt);
3411 points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(
size_t)m_filteredMatches[i].queryIdx].pt);
3414 std::vector<vpImagePoint> inliers;
3415 if(isPlanarObject) {
// RANSAC flag spelling changed between OpenCV 2.x and 3.x.
3416 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3417 cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3419 cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
// Keep the matches whose homography reprojection error is below 6 pixels.
3422 for(
size_t i = 0; i < m_filteredMatches.size(); i++ ) {
3424 cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3425 realPoint.at<
double>(0,0) = points1[i].x;
3426 realPoint.at<
double>(1,0) = points1[i].y;
3427 realPoint.at<
double>(2,0) = 1.f;
3429 cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3430 double err_x = (reprojectedPoint.at<
double>(0,0) / reprojectedPoint.at<
double>(2,0)) - points2[i].x;
3431 double err_y = (reprojectedPoint.at<
double>(1,0) / reprojectedPoint.at<
double>(2,0)) - points2[i].y;
3432 double reprojectionError = std::sqrt(err_x*err_x + err_y*err_y);
3434 if(reprojectionError < 6.0) {
3435 inliers.push_back(
vpImagePoint((
double) points2[i].y, (
double) points2[i].x));
3436 if(imPts1 != NULL) {
3437 imPts1->push_back(
vpImagePoint((
double) points1[i].y, (
double) points1[i].x));
3440 if(imPts2 != NULL) {
3441 imPts2->push_back(
vpImagePoint((
double) points2[i].y, (
double) points2[i].x));
3445 }
// Non-planar object: a fundamental matrix needs >= 8 correspondences.
else if(m_filteredMatches.size() >= 8) {
3446 cv::Mat fundamentalInliers;
3447 cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3449 for(
size_t i = 0; i < (size_t) fundamentalInliers.rows; i++) {
3450 if(fundamentalInliers.at<uchar>((
int) i, 0)) {
3451 inliers.push_back(
vpImagePoint((
double) points2[i].y, (
double) points2[i].x));
3453 if(imPts1 != NULL) {
3454 imPts1->push_back(
vpImagePoint((
double) points1[i].y, (
double) points1[i].x));
3457 if(imPts2 != NULL) {
3458 imPts2->push_back(
vpImagePoint((
double) points2[i].y, (
double) points2[i].x));
// Center of gravity = mean image position of the geometric inliers.
3464 if(!inliers.empty()) {
3470 double meanU = 0.0, meanV = 0.0;
3471 for(std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
3472 meanU += it->get_u();
3473 meanV += it->get_v();
3476 meanU /= (double) inliers.size();
3477 meanV /= (double) inliers.size();
3479 centerOfGravity.
set_u(meanU);
3480 centerOfGravity.
set_v(meanV);
// Detection decision depends on the configured method: either threshold on
// the mean descriptor distance, or threshold on the score.
3488 return meanDescriptorDistanceTmp < m_detectionThreshold;
3490 return score > m_detectionScore;
// NOTE(review): extraction artifact — fragment of the pose-based
// matchPointAndDetect overload.  It first matches + estimates the pose via
// matchPoint(), then (in missing lines) presumably projects the 3D training
// model with cMo to build the bounding box, and finally computes the center
// of gravity of the RANSAC inliers — TODO confirm against pristine source.
3513 double &error,
double &elapsedTime,
vpRect &boundingBox,
vpImagePoint &centerOfGravity,
3515 bool isMatchOk =
matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
// One projected image point per 3D training point.
3520 std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
3522 for(std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
3526 modelImagePoints[cpt] = imPt;
// Center of gravity = mean (u, v) of the RANSAC inliers.
3534 double meanU = 0.0, meanV = 0.0;
3535 for(std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end();
3537 meanU += it->get_u();
3538 meanV += it->get_v();
3541 meanU /= (double) m_ransacInliers.size();
3542 meanV /= (double) m_ransacInliers.size();
3544 centerOfGravity.
set_u(meanU);
3545 centerOfGravity.
set_v(meanV);
// NOTE(review): extraction artifact — two alternative implementations of the
// ASIFT-style affine detection/extraction (serial first, then an OpenMP
// parallel variant); preprocessor guards selecting between them are among the
// missing lines.  Each (tilt t, rotation phi) pair simulates an affine view,
// detects/extracts on the warped image, then maps keypoints back with Ai.
3567 listOfKeypoints.clear();
3568 listOfDescriptors.clear();
// Tilt levels t = 2^(0.5*tl), tl = 1..5; rotation step shrinks as t grows.
3570 for (
int tl = 1; tl < 6; tl++) {
3571 double t = pow(2, 0.5 * tl);
3572 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3573 std::vector<cv::KeyPoint> keypoints;
3574 cv::Mat descriptors;
3576 cv::Mat timg, mask, Ai;
// Warp the current image by (t, phi); Ai is the inverse affine transform.
3579 affineSkew(t, phi, timg, mask, Ai);
3582 if(listOfAffineI != NULL) {
3584 bitwise_and(mask, timg, img_disp);
3587 listOfAffineI->push_back(tI);
// Debug display of the skewed image (guard lines missing from this extract).
3591 cv::bitwise_and(mask, timg, img_disp);
3592 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
3593 cv::imshow(
"Skew", img_disp );
// Run every configured detector on the warped image.
3597 for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3598 it != m_detectors.end(); ++it) {
3599 std::vector<cv::KeyPoint> kp;
3600 it->second->detect(timg, kp, mask);
3601 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3605 extract(timg, keypoints, descriptors, elapsedTime);
// Map keypoint coordinates back into the original image frame with Ai.
3607 for(
unsigned int i = 0; i < keypoints.size(); i++) {
3608 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3609 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3610 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3611 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
3614 listOfKeypoints.push_back(keypoints);
3615 listOfDescriptors.push_back(descriptors);
// --- Parallel variant: precompute the (t, phi) parameter list so the work
// can be distributed with an OpenMP parallel-for over a plain int index.
3624 std::vector<std::pair<double, int> > listOfAffineParams;
3625 for (
int tl = 1; tl < 6; tl++) {
3626 double t = pow(2, 0.5 * tl);
3627 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3628 listOfAffineParams.push_back(std::pair<double, int>(t, phi));
// Pre-size outputs so each iteration writes its own slot (no contention).
3632 listOfKeypoints.resize(listOfAffineParams.size());
3633 listOfDescriptors.resize(listOfAffineParams.size());
3635 if(listOfAffineI != NULL) {
3636 listOfAffineI->resize(listOfAffineParams.size());
3639 #ifdef VISP_HAVE_OPENMP
3640 #pragma omp parallel for
3642 for(
int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
3643 std::vector<cv::KeyPoint> keypoints;
3644 cv::Mat descriptors;
3646 cv::Mat timg, mask, Ai;
3649 affineSkew(listOfAffineParams[(
size_t) cpt].first, listOfAffineParams[(
size_t) cpt].second, timg, mask, Ai);
3652 if(listOfAffineI != NULL) {
3654 bitwise_and(mask, timg, img_disp);
3657 (*listOfAffineI)[(size_t) cpt] = tI;
3662 cv::bitwise_and(mask, timg, img_disp);
3663 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
3664 cv::imshow(
"Skew", img_disp );
3668 for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3669 it != m_detectors.end(); ++it) {
3670 std::vector<cv::KeyPoint> kp;
3671 it->second->detect(timg, kp, mask);
3672 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3676 extract(timg, keypoints, descriptors, elapsedTime);
3678 for(
size_t i = 0; i < keypoints.size(); i++) {
3679 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3680 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3681 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3682 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
3685 listOfKeypoints[(size_t) cpt] = keypoints;
3686 listOfDescriptors[(size_t) cpt] = descriptors;
// NOTE(review): extraction artifact — body of a member-reset routine
// (init()/reset(); the signature line is missing from this extract — TODO
// confirm the exact name).  Restores every data member to its documented
// default and selects "ORB" as the default detector/extractor pair.
3699 m_computeCovariance =
false; m_covarianceMatrix =
vpMatrix(); m_currentImageId = 0; m_detectionMethod =
detectionScore;
3700 m_detectionScore = 0.15; m_detectionThreshold = 100.0; m_detectionTime = 0.0; m_detectorNames.clear();
3701 m_detectors.clear(); m_extractionTime = 0.0; m_extractorNames.clear(); m_extractors.clear(); m_filteredMatches.clear();
3703 m_imageFormat =
jpgImageFormat; m_knnMatches.clear(); m_mapOfImageId.clear(); m_mapOfImages.clear();
3704 m_matcher = cv::Ptr<cv::DescriptorMatcher>(); m_matcherName =
"BruteForce-Hamming";
3705 m_matches.clear(); m_matchingFactorThreshold = 2.0; m_matchingRatioThreshold = 0.85; m_matchingTime = 0.0;
3706 m_matchRansacKeyPointsToPoints.clear(); m_nbRansacIterations = 200; m_nbRansacMinInlierCount = 100;
3707 m_objectFilteredPoints.clear();
3708 m_poseTime = 0.0; m_queryDescriptors = cv::Mat(); m_queryFilteredKeyPoints.clear(); m_queryKeyPoints.clear();
3709 m_ransacConsensusPercentage = 20.0; m_ransacInliers.clear(); m_ransacOutliers.clear(); m_ransacReprojectionError = 6.0;
3710 m_ransacThreshold = 0.01; m_trainDescriptors = cv::Mat(); m_trainKeyPoints.clear(); m_trainPoints.clear();
3711 m_trainVpPoints.clear(); m_useAffineDetection =
false;
// Brute-force cross-check option only exists for OpenCV 2.4.x matchers.
3712 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
3713 m_useBruteForceCrossCheck =
true;
3715 m_useConsensusPercentage =
false;
3717 m_useMatchTrainToQuery =
false; m_useRansacVVS =
true; m_useSingleMatchFilter =
true;
// Default feature pipeline: ORB detector + ORB descriptor.
3719 m_detectorNames.push_back(
"ORB");
3720 m_extractorNames.push_back(
"ORB");
// NOTE(review): extraction artifact — body of vpKeyPoint::saveLearningData().
// Embedded original line numbers and missing physical lines make this span
// non-compilable as-is.  Two serialization paths are visible: a little-endian
// binary writer (writeBinary*LE helpers) and a libxml2 XML writer.
3734 if(!parent.empty()) {
// Optionally dump each training image next to the learning file; the map
// records image-id -> written filename for later reference in the data file.
3738 std::map<int, std::string> mapOfImgPath;
3739 if(saveTrainingImages) {
3740 #ifdef VISP_HAVE_MODULE_IO
3744 for(std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it, cpt++) {
// Zero-padded 3-digit counter builds names like "train_image_000".
3750 sprintf(buffer,
"%03d", cpt);
3751 std::stringstream ss;
3752 ss <<
"train_image_" << buffer;
3754 switch(m_imageFormat) {
3776 std::string imgFilename = ss.str();
3777 mapOfImgPath[it->first] = imgFilename;
3778 vpImageIo::write(it->second, parent + (!parent.empty() ?
"/" :
"") + imgFilename);
3781 std::cout <<
"Warning: in vpKeyPoint::saveLearningData() training images are not saved because "
3782 "visp_io module is not available !" << std::endl;
// 3D info is only written when every keypoint has a matching 3D point.
3786 bool have3DInfo = m_trainPoints.size() > 0;
3787 if(have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
// --- Binary mode: every scalar goes through a little-endian writer so the
// file is portable across machine endianness.
3793 std::ofstream file(filename.c_str(), std::ofstream::binary);
3794 if(!file.is_open()) {
3799 int nbImgs = (int) mapOfImgPath.size();
3801 writeBinaryIntLE(file, nbImgs);
3803 #ifdef VISP_HAVE_MODULE_IO
3804 for(std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3808 writeBinaryIntLE(file,
id);
// Image path is written as length followed by raw characters.
3811 std::string path = it->second;
3812 int length = (int) path.length();
3814 writeBinaryIntLE(file, length);
3816 for(
int cpt = 0; cpt < length; cpt++) {
3817 file.write((
char *) (&path[(
size_t)cpt]),
sizeof(path[(
size_t)cpt]));
3823 int have3DInfoInt = have3DInfo ? 1 : 0;
3825 writeBinaryIntLE(file, have3DInfoInt);
3828 int nRows = m_trainDescriptors.rows,
3829 nCols = m_trainDescriptors.cols;
3830 int descriptorType = m_trainDescriptors.type();
3834 writeBinaryIntLE(file, nRows);
3838 writeBinaryIntLE(file, nCols);
3842 writeBinaryIntLE(file, descriptorType);
// One record per train keypoint: u, v, size, angle, response, octave,
// class_id, image_id, optional (oX, oY, oZ), then the descriptor row.
3844 for (
int i = 0; i < nRows; i++) {
3845 unsigned int i_ = (
unsigned int) i;
3847 float u = m_trainKeyPoints[i_].pt.x;
3849 writeBinaryFloatLE(file, u);
3852 float v = m_trainKeyPoints[i_].pt.y;
3854 writeBinaryFloatLE(file, v);
3857 float size = m_trainKeyPoints[i_].size;
3859 writeBinaryFloatLE(file, size);
3862 float angle = m_trainKeyPoints[i_].angle;
3864 writeBinaryFloatLE(file, angle);
3867 float response = m_trainKeyPoints[i_].response;
3869 writeBinaryFloatLE(file, response);
3872 int octave = m_trainKeyPoints[i_].octave;
3874 writeBinaryIntLE(file, octave);
3877 int class_id = m_trainKeyPoints[i_].class_id;
3879 writeBinaryIntLE(file, class_id);
3882 #ifdef VISP_HAVE_MODULE_IO
// image_id is -1 when the keypoint's source image was not saved.
3883 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3884 int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
3886 writeBinaryIntLE(file, image_id);
3890 writeBinaryIntLE(file, image_id);
3894 float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
3897 writeBinaryFloatLE(file, oX);
3901 writeBinaryFloatLE(file, oY);
3905 writeBinaryFloatLE(file, oZ);
// Descriptor values are written with the element type of the cv::Mat.
3908 for (
int j = 0; j < nCols; j++) {
3910 switch(descriptorType) {
3912 file.write((
char *)(&m_trainDescriptors.at<
unsigned char>(i, j)),
sizeof(m_trainDescriptors.at<
unsigned char>(i, j)));
3916 file.write((
char *)(&m_trainDescriptors.at<
char>(i, j)),
sizeof(m_trainDescriptors.at<
char>(i, j)));
3921 writeBinaryUShortLE(file, m_trainDescriptors.at<
unsigned short int>(i, j));
3926 writeBinaryShortLE(file, m_trainDescriptors.at<
short int>(i, j));
3931 writeBinaryIntLE(file, m_trainDescriptors.at<
int>(i, j));
3936 writeBinaryFloatLE(file, m_trainDescriptors.at<
float>(i, j));
3941 writeBinaryDoubleLE(file, m_trainDescriptors.at<
double>(i, j));
// --- XML mode (requires libxml2): same data, human-readable layout.
3954 #ifdef VISP_HAVE_XML2
3955 xmlDocPtr doc = NULL;
3956 xmlNodePtr root_node = NULL, image_node = NULL, image_info_node = NULL, descriptors_info_node = NULL,
3957 descriptor_node = NULL, desc_node = NULL;
3966 doc = xmlNewDoc(BAD_CAST
"1.0");
3971 root_node = xmlNewNode(NULL, BAD_CAST
"LearningData");
3972 xmlDocSetRootElement(doc, root_node);
3974 std::stringstream ss;
3977 image_node = xmlNewChild(root_node, NULL, BAD_CAST
"TrainingImageInfo", NULL);
3979 #ifdef VISP_HAVE_MODULE_IO
3980 for(std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3981 image_info_node = xmlNewChild(image_node, NULL, BAD_CAST
"trainImg",
3982 BAD_CAST it->second.c_str());
3985 xmlNewProp(image_info_node, BAD_CAST
"image_id", BAD_CAST ss.str().c_str());
3990 descriptors_info_node = xmlNewChild(root_node, NULL, BAD_CAST
"DescriptorsInfo", NULL);
3992 int nRows = m_trainDescriptors.rows,
3993 nCols = m_trainDescriptors.cols;
3994 int descriptorType = m_trainDescriptors.type();
3999 xmlNewChild(descriptors_info_node, NULL, BAD_CAST
"nrows", BAD_CAST ss.str().c_str());
4004 xmlNewChild(descriptors_info_node, NULL, BAD_CAST
"ncols", BAD_CAST ss.str().c_str());
4008 ss << descriptorType;
4009 xmlNewChild(descriptors_info_node, NULL, BAD_CAST
"type", BAD_CAST ss.str().c_str());
// One <DescriptorInfo> element per train keypoint; floats are printed with
// 9 significant decimals, doubles with 17, to round-trip exactly.
4011 for (
int i = 0; i < nRows; i++) {
4012 unsigned int i_ = (
unsigned int) i;
4013 descriptor_node = xmlNewChild(root_node, NULL, BAD_CAST
"DescriptorInfo",
4018 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].pt.x;
4019 xmlNewChild(descriptor_node, NULL, BAD_CAST
"u",
4020 BAD_CAST ss.str().c_str());
4024 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].pt.y;
4025 xmlNewChild(descriptor_node, NULL, BAD_CAST
"v",
4026 BAD_CAST ss.str().c_str());
4030 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].size;
4031 xmlNewChild(descriptor_node, NULL, BAD_CAST
"size",
4032 BAD_CAST ss.str().c_str());
4036 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].angle;
4037 xmlNewChild(descriptor_node, NULL, BAD_CAST
"angle",
4038 BAD_CAST ss.str().c_str());
4042 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].response;
4043 xmlNewChild(descriptor_node, NULL, BAD_CAST
"response",
4044 BAD_CAST ss.str().c_str());
4047 ss << m_trainKeyPoints[i_].octave;
4048 xmlNewChild(descriptor_node, NULL, BAD_CAST
"octave",
4049 BAD_CAST ss.str().c_str());
4052 ss << m_trainKeyPoints[i_].class_id;
4053 xmlNewChild(descriptor_node, NULL, BAD_CAST
"class_id",
4054 BAD_CAST ss.str().c_str());
4057 #ifdef VISP_HAVE_MODULE_IO
4058 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4059 ss << ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
4060 xmlNewChild(descriptor_node, NULL, BAD_CAST
"image_id",
4061 BAD_CAST ss.str().c_str());
4064 xmlNewChild(descriptor_node, NULL, BAD_CAST
"image_id",
4065 BAD_CAST ss.str().c_str());
4071 ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].x;
4072 xmlNewChild(descriptor_node, NULL, BAD_CAST
"oX",
4073 BAD_CAST ss.str().c_str());
4077 ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].y;
4078 xmlNewChild(descriptor_node, NULL, BAD_CAST
"oY",
4079 BAD_CAST ss.str().c_str());
4083 ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].z;
4084 xmlNewChild(descriptor_node, NULL, BAD_CAST
"oZ",
4085 BAD_CAST ss.str().c_str());
4088 desc_node = xmlNewChild(descriptor_node, NULL, BAD_CAST
"desc", NULL);
4090 for (
int j = 0; j < nCols; j++) {
4093 switch(descriptorType) {
// char/uchar values are promoted to int so they print as numbers, not glyphs.
4100 int val_tmp = m_trainDescriptors.at<
unsigned char>(i, j);
4111 int val_tmp = m_trainDescriptors.at<
char>(i, j);
4117 ss << m_trainDescriptors.at<
unsigned short int>(i, j);
4121 ss << m_trainDescriptors.at<
short int>(i, j);
4125 ss << m_trainDescriptors.at<
int>(i, j);
4130 ss << std::fixed << std::setprecision(9) << m_trainDescriptors.at<
float>(i, j);
4135 ss << std::fixed << std::setprecision(17) << m_trainDescriptors.at<
double>(i, j);
4142 xmlNewChild(desc_node, NULL, BAD_CAST
"val",
4143 BAD_CAST ss.str().c_str());
4147 xmlSaveFormatFileEnc(filename.c_str(), doc,
"UTF-8", 1);
4158 std::cerr <<
"Error: libxml2 is required !" << std::endl;
4163 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
4165 struct KeypointResponseGreaterThanThreshold {
4166 KeypointResponseGreaterThanThreshold(
float _value) :
4169 inline bool operator()(
const cv::KeyPoint& kpt)
const {
4170 return kpt.response >= value;
4175 struct KeypointResponseGreater {
4176 inline bool operator()(
const cv::KeyPoint& kp1,
4177 const cv::KeyPoint& kp2)
const {
4178 return kp1.response > kp2.response;
4183 void vpKeyPoint::KeyPointsFilter::retainBest(
4184 std::vector<cv::KeyPoint>& keypoints,
int n_points) {
4186 if (n_points >= 0 && keypoints.size() > (size_t) n_points) {
4187 if (n_points == 0) {
4192 std::nth_element(keypoints.begin(), keypoints.begin() + n_points,
4193 keypoints.end(), KeypointResponseGreater());
4195 float ambiguous_response = keypoints[(size_t) (n_points - 1)].response;
4197 std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4198 keypoints.begin() + n_points, keypoints.end(),
4199 KeypointResponseGreaterThanThreshold(ambiguous_response));
4201 keypoints.resize((
size_t) (new_end - keypoints.begin()));
4205 struct RoiPredicate {
4206 RoiPredicate(
const cv::Rect& _r) :
4210 bool operator()(
const cv::KeyPoint& keyPt)
const {
4211 return !r.contains(keyPt.pt);
4217 void vpKeyPoint::KeyPointsFilter::runByImageBorder(
4218 std::vector<cv::KeyPoint>& keypoints, cv::Size imageSize,
int borderSize) {
4219 if (borderSize > 0) {
4220 if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4224 std::remove_if(keypoints.begin(), keypoints.end(),
4226 cv::Rect(cv::Point(borderSize, borderSize),
4227 cv::Point(imageSize.width - borderSize,
4228 imageSize.height - borderSize)))), keypoints.end());
4232 struct SizePredicate {
4233 SizePredicate(
float _minSize,
float _maxSize) :
4234 minSize(_minSize), maxSize(_maxSize) {
4237 bool operator()(
const cv::KeyPoint& keyPt)
const {
4238 float size = keyPt.size;
4239 return (size < minSize) || (size > maxSize);
4242 float minSize, maxSize;
4245 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(
4246 std::vector<cv::KeyPoint>& keypoints,
float minSize,
float maxSize) {
4247 CV_Assert(minSize >= 0);
4248 CV_Assert(maxSize >= 0);
4249 CV_Assert(minSize <= maxSize);
4252 std::remove_if(keypoints.begin(), keypoints.end(),
4253 SizePredicate(minSize, maxSize)), keypoints.end());
4256 class MaskPredicate {
4258 MaskPredicate(
const cv::Mat& _mask) :
4261 bool operator()(
const cv::KeyPoint& key_pt)
const {
4262 return mask.at<uchar>((int) (key_pt.pt.y + 0.5f),
4263 (int) (key_pt.pt.x + 0.5f)) == 0;
4268 MaskPredicate& operator=(
const MaskPredicate&);
4271 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(
4272 std::vector<cv::KeyPoint>& keypoints,
const cv::Mat& mask) {
4277 std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)),
4281 struct KeyPoint_LessThan {
4282 KeyPoint_LessThan(
const std::vector<cv::KeyPoint>& _kp) :
4285 bool operator()(
size_t i,
size_t j)
const {
4286 const cv::KeyPoint& kp1 = (*kp)[ i];
4287 const cv::KeyPoint& kp2 = (*kp)[ j];
4288 if (!
vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon())) {
4289 return kp1.pt.x < kp2.pt.x;
4292 if (!
vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon())) {
4293 return kp1.pt.y < kp2.pt.y;
4296 if (!
vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon())) {
4297 return kp1.size > kp2.size;
4300 if (!
vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4301 return kp1.angle < kp2.angle;
4304 if (!
vpMath::equal(kp1.response, kp2.response, std::numeric_limits<float>::epsilon())) {
4305 return kp1.response > kp2.response;
4308 if (kp1.octave != kp2.octave) {
4309 return kp1.octave > kp2.octave;
4312 if (kp1.class_id != kp2.class_id) {
4313 return kp1.class_id > kp2.class_id;
4318 const std::vector<cv::KeyPoint>* kp;
4321 void vpKeyPoint::KeyPointsFilter::removeDuplicated(
4322 std::vector<cv::KeyPoint>& keypoints) {
4323 size_t i, j, n = keypoints.size();
4324 std::vector<size_t> kpidx(n);
4325 std::vector<uchar> mask(n, (uchar) 1);
4327 for (i = 0; i < n; i++) {
4330 std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4331 for (i = 1, j = 0; i < n; i++) {
4332 cv::KeyPoint& kp1 = keypoints[kpidx[i]];
4333 cv::KeyPoint& kp2 = keypoints[kpidx[j]];
4335 if (!
vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4336 !
vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4337 !
vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4338 !
vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4345 for (i = j = 0; i < n; i++) {
4348 keypoints[j] = keypoints[i];
4353 keypoints.resize(j);
4359 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(
4360 const cv::Ptr<cv::FeatureDetector>& _detector,
int _maxLevel) :
4361 detector(_detector), maxLevel(_maxLevel) {
4364 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty()
const {
4365 return detector.empty() || (cv::FeatureDetector*) detector->empty();
4368 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect( cv::InputArray image, CV_OUT std::vector<cv::KeyPoint>& keypoints,
4369 cv::InputArray mask ) {
4370 detectImpl(image.getMat(), keypoints, mask.getMat());
4373 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(
const cv::Mat& image,
4374 std::vector<cv::KeyPoint>& keypoints,
const cv::Mat& mask)
const {
4375 cv::Mat src = image;
4376 cv::Mat src_mask = mask;
4378 cv::Mat dilated_mask;
4379 if (!mask.empty()) {
4380 cv::dilate(mask, dilated_mask, cv::Mat());
4381 cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4382 mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4383 dilated_mask = mask255;
4386 for (
int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2) {
4388 std::vector<cv::KeyPoint> new_pts;
4389 detector->detect(src, new_pts, src_mask);
4390 std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end =
4392 for (; it != end; ++it) {
4393 it->pt.x *= multiplier;
4394 it->pt.y *= multiplier;
4395 it->size *= multiplier;
4398 keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4407 resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
4412 vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4416 #elif !defined(VISP_BUILD_SHARED_LIBS)
// Empty translation-unit placeholder when OpenCV support is compiled out —
// presumably to avoid a "no symbols" archive warning in static builds;
// TODO confirm intent against the pristine source.
4418 void dummy_vpKeyPoint() {};
void getTrainKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
Implementation of a matrix and operations on matrices.
void displayMatching(const vpImage< unsigned char > &IRef, vpImage< unsigned char > &IMatching, unsigned int crossSize, unsigned int lineThickness=1, const vpColor &color=vpColor::green)
static void compute3D(const cv::KeyPoint &candidate, const std::vector< vpPoint > &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
void set_oZ(const double oZ)
Set the point Z coordinate in the object frame.
void detectExtractAffine(const vpImage< unsigned char > &I, std::vector< std::vector< cv::KeyPoint > > &listOfKeypoints, std::vector< cv::Mat > &listOfDescriptors, std::vector< vpImage< unsigned char > > *listOfAffineI=NULL)
void getQueryKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
std::string getMatcherName() const
unsigned int getWidth() const
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType, const std::string &matcherName, const vpFilterMatchingType &filterType=ratioDistanceThreshold)
void createImageMatching(vpImage< unsigned char > &IRef, vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
Point coordinates conversion from normalized coordinates in meter to pixel coordinates ...
static bool isNaN(const double value)
Class to define colors available for display functionalities.
static bool equal(double x, double y, double s=0.001)
double getMatchingFactorThreshold() const
bool getUseRansacVVSPoseEstimation() const
static const vpColor none
double get_oY() const
Get the point Y coordinate in the object frame.
Error that can be emitted by ViSP classes.
double getRansacConsensusPercentage() const
void setRansacThreshold(const double &t)
void set_x(const double x)
Set the point x coordinate in the image plane.
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
Point coordinates conversion from pixel coordinates to normalized coordinates in meter...
std::vector< unsigned int > getRansacInlierIndex() const
int getNbRansacMinInlierCount() const
double get_y() const
Get the point y coordinate in the image plane.
static const vpColor green
void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector< cv::DMatch > &matches, double &elapsedTime)
static int round(const double x)
bool getUseRansacConsensusPercentage() const
void insertImageMatching(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
VISP_EXPORT double measureTimeMs()
Class that defines what is a point.
bool isInside(const vpImagePoint &iP, const PointInPolygonMethod &method=PnPolyRayCasting) const
static void write(const vpImage< unsigned char > &I, const std::string &filename)
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
Defines a generic 2D polygon.
const char * what() const
vpRect getBoundingBox() const
double getRansacReprojectionError() const
void set_u(const double u)
static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpCylinder > &cylinders, const std::vector< std::vector< std::vector< vpImagePoint > > > &vectorOfCylinderRois, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
bool _reference_computed
flag to indicate if the reference has been built.
void set_v(const double v)
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
Generic class defining intrinsic camera parameters.
int getNbRansacIterations() const
double get_oZ() const
Get the point Z coordinate in the object frame.
double getMatchingRatioThreshold() const
void set_y(const double y)
Set the point y coordinate in the image plane.
std::string getDetectorName() const
void set_oX(const double oX)
Set the point X coordinate in the object frame.
void display(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, unsigned int size=3)
unsigned int buildReference(const vpImage< unsigned char > &I)
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, bool(*func)(vpHomogeneousMatrix *)=NULL)
double get_x() const
Get the point x coordinate in the image plane.
vpMatrix getCovarianceMatrix() const
std::vector< vpImagePoint > referenceImagePointsList
void getObjectPoints(std::vector< cv::Point3f > &objectPoints) const
bool computePose(const std::vector< cv::Point2f > &imagePoints, const std::vector< cv::Point3f > &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector< int > &inlierIndex, double &elapsedTime, bool(*func)(vpHomogeneousMatrix *)=NULL)
unsigned int matchPoint(const vpImage< unsigned char > &I)
void setRansacMaxTrials(const int &rM)
void setRansacNbInliersToReachConsensus(const unsigned int &nbC)
static void displayCircle(const vpImage< unsigned char > &I, const vpImagePoint ¢er, unsigned int radius, const vpColor &color, bool fill=false, unsigned int thickness=1)
double get_oX() const
Get the point X coordinate in the object frame.
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
void loadLearningData(const std::string &filename, const bool binaryMode=false, const bool append=false)
void setWorldCoordinates(const double oX, const double oY, const double oZ)
static void read(vpImage< unsigned char > &I, const std::string &filename)
double getRansacThreshold() const
Implementation of column vector and the associated operations.
void set_oY(const double oY)
Set the point Y coordinate in the object frame.
void saveLearningData(const std::string &filename, const bool binaryMode=false, const bool saveTrainingImages=true)
void getTrainPoints(std::vector< cv::Point3f > &points) const
void insert(const vpImage< Type > &src, const vpImagePoint topLeft)
vpHomogeneousMatrix inverse() const
bool matchPointAndDetect(const vpImage< unsigned char > &I, vpRect &boundingBox, vpImagePoint ¢erOfGravity, const bool isPlanarObject=true, std::vector< vpImagePoint > *imPts1=NULL, std::vector< vpImagePoint > *imPts2=NULL, double *meanDescriptorDistance=NULL, double *detectionScore=NULL, const vpRect &rectangle=vpRect())
std::vector< unsigned int > matchedReferencePoints
unsigned int getHeight() const
Defines a rectangle in the plane.
std::vector< vpImagePoint > currentImagePointsList
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1)
This class defines the container for a plane geometrical structure.
void loadConfigFile(const std::string &configFile)
void addPoint(const vpPoint &P)
static void convertFromOpenCV(const cv::KeyPoint &from, vpImagePoint &to)
Class that consider the case of a translation vector.
Implementation of a rotation vector as axis-angle minimal representation.
std::vector< vpPoint > getRansacInliers() const
void setCovarianceComputation(const bool &flag)
std::string getExtractorName() const
void set_ij(const double ii, const double jj)
vpMatchingMethodEnum getMatchingMethod() const
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
void initMatcher(const std::string &matcherName)
void extract(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, cv::Mat &descriptors, std::vector< cv::Point3f > *trainPoints=NULL)
void parse(const std::string &filename)