43 #include <visp3/core/vpIoTools.h> 44 #include <visp3/vision/vpKeyPoint.h> 46 #if (VISP_HAVE_OPENCV_VERSION >= 0x020101) 48 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 49 #include <opencv2/calib3d/calib3d.hpp> 54 #if defined(__GLIBC__) 56 #if (__BYTE_ORDER == __LITTLE_ENDIAN) 57 #define VISP_LITTLE_ENDIAN 58 #elif (__BYTE_ORDER == __BIG_ENDIAN) 59 #define VISP_BIG_ENDIAN 60 #elif (__BYTE_ORDER == __PDP_ENDIAN) 62 #define VISP_PDP_ENDIAN 64 #error Unknown machine endianness detected. 66 #elif defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN) || defined(__BIG_ENDIAN__) && !defined(__LITTLE_ENDIAN__) 67 #define VISP_BIG_ENDIAN 68 #elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN) || defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__) 69 #define VISP_LITTLE_ENDIAN 70 #elif defined(__sparc) || defined(__sparc__) || defined(_POWER) || defined(__powerpc__) || defined(__ppc__) || \ 71 defined(__hpux) || defined(_MIPSEB) || defined(_POWER) || defined(__s390__) 73 #define VISP_BIG_ENDIAN 74 #elif defined(__i386__) || defined(__alpha__) || defined(__ia64) || defined(__ia64__) || defined(_M_IX86) || \ 75 defined(_M_IA64) || defined(_M_ALPHA) || defined(__amd64) || defined(__amd64__) || defined(_M_AMD64) || \ 76 defined(__x86_64) || defined(__x86_64__) || defined(_M_X64) 78 #define VISP_LITTLE_ENDIAN 80 #error Cannot detect host machine endianness. 93 inline cv::DMatch knnToDMatch(
const std::vector<cv::DMatch> &knnMatches)
95 if (knnMatches.size() > 0) {
108 inline vpImagePoint matchRansacToVpImage(
const std::pair<cv::KeyPoint, cv::Point3f> &pair)
123 #ifdef VISP_BIG_ENDIAN 126 uint16_t swap16bits(
const uint16_t val) {
return (((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00)); }
/*!
  Reverse the byte order of a 32-bit unsigned value (endianness conversion).

  \param val : Value to byte-swap.
  \return The value with its four bytes in reverse order.
*/
uint32_t swap32bits(const uint32_t val)
{
  const uint32_t byte0 = (val >> 24) & 0x000000FF; // highest byte -> lowest position
  const uint32_t byte1 = (val >> 8) & 0x0000FF00;
  const uint32_t byte2 = (val << 8) & 0x00FF0000;
  const uint32_t byte3 = (val << 24) & 0xFF000000; // lowest byte -> highest position
  return byte0 | byte1 | byte2 | byte3;
}
/*!
  Reverse the byte order of a 32-bit float (endianness conversion).

  A union is used to access the raw bytes of the float representation.
  NOTE(review): the union declaration was reconstructed from an elided span;
  the visible byte-copy statements fix the behavior to a full 4-byte reversal.

  \param f : Value to byte-swap.
  \return The float whose 4 bytes are in reverse order.
*/
float swapFloat(const float f)
{
  union {
    float value;
    unsigned char b[4];
  } dat1, dat2;

  dat1.value = f;
  dat2.b[0] = dat1.b[3];
  dat2.b[1] = dat1.b[2];
  dat2.b[2] = dat1.b[1];
  dat2.b[3] = dat1.b[0];
  return dat2.value;
}
/*!
  Reverse the byte order of a 64-bit double (endianness conversion).

  A union is used to access the raw bytes of the double representation.
  NOTE(review): the union declaration was reconstructed from an elided span;
  the visible byte-copy statements fix the behavior to a full 8-byte reversal.

  \param d : Value to byte-swap.
  \return The double whose 8 bytes are in reverse order.
*/
double swapDouble(const double d)
{
  union {
    double value;
    unsigned char b[8];
  } dat1, dat2;

  dat1.value = d;
  dat2.b[0] = dat1.b[7];
  dat2.b[1] = dat1.b[6];
  dat2.b[2] = dat1.b[5];
  dat2.b[3] = dat1.b[4];
  dat2.b[4] = dat1.b[3];
  dat2.b[5] = dat1.b[2];
  dat2.b[6] = dat1.b[1];
  dat2.b[7] = dat1.b[0];
  return dat2.value;
}
/*!
  Read an unsigned short stored in little-endian order in a binary file and
  return it in host byte order.

  \param file : Binary input stream positioned on the value to read.
  \param ushort_value : Output value, converted to host byte order.
*/
void readBinaryUShortLE(std::ifstream &file, unsigned short &ushort_value)
{
  // Read the raw little-endian bytes from the file.
  file.read(reinterpret_cast<char *>(&ushort_value), sizeof(ushort_value));
#ifdef VISP_BIG_ENDIAN
  // Big-endian host: swap bytes to recover the host representation.
  ushort_value = swap16bits(ushort_value);
#endif
}
/*!
  Read a short stored in little-endian order in a binary file and return it
  in host byte order.

  \param file : Binary input stream positioned on the value to read.
  \param short_value : Output value, converted to host byte order.
*/
void readBinaryShortLE(std::ifstream &file, short &short_value)
{
  // Read the raw little-endian bytes from the file.
  file.read(reinterpret_cast<char *>(&short_value), sizeof(short_value));
#ifdef VISP_BIG_ENDIAN
  // Big-endian host: swap bytes to recover the host representation.
  short_value = (short)swap16bits((uint16_t)short_value);
#endif
}
/*!
  Read an int stored in little-endian order in a binary file and return it in
  host byte order.

  NOTE(review): on a big-endian host this assumes sizeof(int) is 4 (or 2 on
  exotic platforms); an 8-byte int would not be swapped correctly — confirm
  against the supported platforms.

  \param file : Binary input stream positioned on the value to read.
  \param int_value : Output value, converted to host byte order.
*/
void readBinaryIntLE(std::ifstream &file, int &int_value)
{
  // Read the raw little-endian bytes from the file.
  file.read(reinterpret_cast<char *>(&int_value), sizeof(int_value));
#ifdef VISP_BIG_ENDIAN
  // Big-endian host: swap with the width matching sizeof(int).
  if (sizeof(int_value) == 4) {
    int_value = (int)swap32bits((uint32_t)int_value);
  }
  else {
    int_value = swap16bits((uint16_t)int_value);
  }
#endif
}
/*!
  Read a float stored in little-endian order in a binary file and return it
  in host byte order.

  \param file : Binary input stream positioned on the value to read.
  \param float_value : Output value, converted to host byte order.
*/
void readBinaryFloatLE(std::ifstream &file, float &float_value)
{
  // Read the raw little-endian bytes from the file.
  file.read(reinterpret_cast<char *>(&float_value), sizeof(float_value));
#ifdef VISP_BIG_ENDIAN
  // Big-endian host: swap bytes to recover the host representation.
  float_value = swapFloat(float_value);
#endif
}
/*!
  Read a double stored in little-endian order in a binary file and return it
  in host byte order.

  \param file : Binary input stream positioned on the value to read.
  \param double_value : Output value, converted to host byte order.
*/
void readBinaryDoubleLE(std::ifstream &file, double &double_value)
{
  // Read the raw little-endian bytes from the file.
  file.read(reinterpret_cast<char *>(&double_value), sizeof(double_value));
#ifdef VISP_BIG_ENDIAN
  // Big-endian host: swap bytes to recover the host representation.
  double_value = swapDouble(double_value);
#endif
}
/*!
  Write an unsigned short to a binary file in little-endian order, regardless
  of the host byte order.

  \param file : Binary output stream.
  \param ushort_value : Host-order value to write.
*/
void writeBinaryUShortLE(std::ofstream &file, const unsigned short ushort_value)
{
#ifdef VISP_BIG_ENDIAN
  // Big-endian host: swap to little-endian before writing.
  uint16_t swap_ushort = swap16bits(ushort_value);
  file.write(reinterpret_cast<char *>(&swap_ushort), sizeof(swap_ushort));
#else
  // Little-endian host: the in-memory bytes are already in file order.
  file.write(reinterpret_cast<const char *>(&ushort_value), sizeof(ushort_value));
#endif
}
/*!
  Write a short to a binary file in little-endian order, regardless of the
  host byte order.

  \param file : Binary output stream.
  \param short_value : Host-order value to write.
*/
void writeBinaryShortLE(std::ofstream &file, const short short_value)
{
#ifdef VISP_BIG_ENDIAN
  // Big-endian host: swap to little-endian before writing.
  uint16_t swap_short = swap16bits((uint16_t)short_value);
  file.write(reinterpret_cast<char *>(&swap_short), sizeof(swap_short));
#else
  // Little-endian host: the in-memory bytes are already in file order.
  file.write(reinterpret_cast<const char *>(&short_value), sizeof(short_value));
#endif
}
/*!
  Write an int to a binary file in little-endian order, regardless of the
  host byte order.

  NOTE(review): on a big-endian host this assumes sizeof(int) is 4 (or 2 on
  exotic platforms); an 8-byte int would be truncated — confirm against the
  supported platforms.

  \param file : Binary output stream.
  \param int_value : Host-order value to write.
*/
void writeBinaryIntLE(std::ofstream &file, const int int_value)
{
#ifdef VISP_BIG_ENDIAN
  // Big-endian host: swap with the width matching sizeof(int) before writing.
  if (sizeof(int_value) == 4) {
    uint32_t swap_int = swap32bits((uint32_t)int_value);
    file.write(reinterpret_cast<char *>(&swap_int), sizeof(swap_int));
  }
  else {
    uint16_t swap_int = swap16bits((uint16_t)int_value);
    file.write(reinterpret_cast<char *>(&swap_int), sizeof(swap_int));
  }
#else
  // Little-endian host: the in-memory bytes are already in file order.
  file.write(reinterpret_cast<const char *>(&int_value), sizeof(int_value));
#endif
}
/*!
  Write a float to a binary file in little-endian order, regardless of the
  host byte order.

  \param file : Binary output stream.
  \param float_value : Host-order value to write.
*/
void writeBinaryFloatLE(std::ofstream &file, const float float_value)
{
#ifdef VISP_BIG_ENDIAN
  // Big-endian host: swap to little-endian before writing.
  float swap_float = swapFloat(float_value);
  file.write(reinterpret_cast<char *>(&swap_float), sizeof(swap_float));
#else
  // Little-endian host: the in-memory bytes are already in file order.
  file.write(reinterpret_cast<const char *>(&float_value), sizeof(float_value));
#endif
}
/*!
  Write a double to a binary file in little-endian order, regardless of the
  host byte order.

  \param file : Binary output stream.
  \param double_value : Host-order value to write.
*/
void writeBinaryDoubleLE(std::ofstream &file, const double double_value)
{
#ifdef VISP_BIG_ENDIAN
  // Big-endian host: swap to little-endian before writing.
  double swap_double = swapDouble(double_value);
  file.write(reinterpret_cast<char *>(&swap_double), sizeof(swap_double));
#else
  // Little-endian host: the in-memory bytes are already in file order.
  file.write(reinterpret_cast<const char *>(&double_value), sizeof(double_value));
#endif
}
317 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
318 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
319 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
320 m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
321 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
322 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
323 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
324 m_ransacConsensusPercentage(20.0), m_ransacInliers(), m_ransacOutliers(), m_ransacReprojectionError(6.0),
325 m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
326 m_useAffineDetection(false),
327 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
328 m_useBruteForceCrossCheck(true),
330 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
331 m_useSingleMatchFilter(true)
335 m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
336 m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);
352 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(
detectionScore),
353 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
354 m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
355 m_imageFormat(
jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
356 m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
357 m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
358 m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
359 m_ransacConsensusPercentage(20.0), m_ransacInliers(), m_ransacOutliers(), m_ransacReprojectionError(6.0),
360 m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
361 m_useAffineDetection(false),
362 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
363 m_useBruteForceCrossCheck(true),
365 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
366 m_useSingleMatchFilter(true)
370 m_detectorNames.push_back(detectorName);
371 m_extractorNames.push_back(extractorName);
387 : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(
detectionScore),
388 m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
389 m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
390 m_filterType(filterType), m_imageFormat(
jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
391 m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
392 m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
393 m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
394 m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0), m_ransacInliers(),
395 m_ransacOutliers(), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01), m_trainDescriptors(),
396 m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
397 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
398 m_useBruteForceCrossCheck(true),
400 m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
401 m_useSingleMatchFilter(true)
415 void vpKeyPoint::affineSkew(
double tilt,
double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
420 mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));
422 cv::Mat A = cv::Mat::eye(2, 3, CV_32F);
425 if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
430 A = (cv::Mat_<float>(2, 2) << c, -s, s, c);
432 cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
433 cv::Mat tcorners = corners * A.t();
434 cv::Mat tcorners_x, tcorners_y;
435 tcorners.col(0).copyTo(tcorners_x);
436 tcorners.col(1).copyTo(tcorners_y);
437 std::vector<cv::Mat> channels;
438 channels.push_back(tcorners_x);
439 channels.push_back(tcorners_y);
440 cv::merge(channels, tcorners);
442 cv::Rect rect = cv::boundingRect(tcorners);
443 A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
445 cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
448 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
449 double s = 0.8 * sqrt(tilt * tilt - 1);
450 cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
451 cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
452 A.row(0) = A.row(0) / tilt;
455 if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
456 std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
459 cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
461 cv::invertAffineTransform(A, Ai);
482 const unsigned int height,
const unsigned int width)
499 m_trainPoints.clear();
500 m_mapOfImageId.clear();
501 m_mapOfImages.clear();
502 m_currentImageId = 1;
504 if (m_useAffineDetection) {
505 std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
506 std::vector<cv::Mat> listOfTrainDescriptors;
512 m_trainKeyPoints.clear();
513 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
514 it != listOfTrainKeyPoints.end(); ++it) {
515 m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
519 for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
523 it->copyTo(m_trainDescriptors);
525 m_trainDescriptors.push_back(*it);
529 detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
530 extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
535 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
536 m_mapOfImageId[it->class_id] = m_currentImageId;
540 m_mapOfImages[m_currentImageId] = I;
549 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
551 return static_cast<unsigned int>(m_trainKeyPoints.size());
566 std::vector<cv::Point3f> &points3f,
const bool append,
const int class_id)
568 cv::Mat trainDescriptors;
570 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
572 extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
574 if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
578 std::map<size_t, size_t> mapOfKeypointHashes;
580 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
582 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
585 std::vector<cv::Point3f> trainPoints_tmp;
586 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
587 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
588 trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
593 points3f = trainPoints_tmp;
596 buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id);
612 const cv::Mat &trainDescriptors,
const std::vector<cv::Point3f> &points3f,
613 const bool append,
const int class_id)
616 m_currentImageId = 0;
617 m_mapOfImageId.clear();
618 m_mapOfImages.clear();
619 this->m_trainKeyPoints.clear();
620 this->m_trainPoints.clear();
625 std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
627 if (class_id != -1) {
628 for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
629 it->class_id = class_id;
635 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
637 m_mapOfImageId[it->class_id] = m_currentImageId;
641 m_mapOfImages[m_currentImageId] = I;
644 this->m_trainKeyPoints.insert(this->m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
646 trainDescriptors.copyTo(this->m_trainDescriptors);
648 this->m_trainDescriptors.push_back(trainDescriptors);
650 this->m_trainPoints.insert(this->m_trainPoints.end(), points3f.begin(), points3f.end());
658 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
682 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
689 vpPlane Po(pts[0], pts[1], pts[2]);
690 double xc = 0.0, yc = 0.0;
701 point_obj = cMo.
inverse() * point_cam;
702 point = cv::Point3f((
float)point_obj[0], (
float)point_obj[1], (
float)point_obj[2]);
724 std::vector<vpPoint>::const_iterator it_roi = roi.begin();
731 vpPlane Po(pts[0], pts[1], pts[2]);
732 double xc = 0.0, yc = 0.0;
743 point_obj = cMo.
inverse() * point_cam;
763 std::vector<cv::KeyPoint> &candidates,
764 const std::vector<vpPolygon> &polygons,
765 const std::vector<std::vector<vpPoint> > &roisPt,
766 std::vector<cv::Point3f> &points, cv::Mat *descriptors)
769 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
776 std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
777 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
778 pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
782 std::vector<vpPolygon> polygons_tmp = polygons;
783 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
784 std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
786 while (it2 != pairOfCandidatesToCheck.end()) {
787 imPt.
set_ij(it2->first.pt.y, it2->first.pt.x);
788 if (it1->isInside(imPt)) {
789 candidates.push_back(it2->first);
791 points.push_back(pt);
793 if (descriptors != NULL) {
794 desc.push_back(descriptors->row((
int)it2->second));
798 it2 = pairOfCandidatesToCheck.erase(it2);
805 if (descriptors != NULL) {
806 desc.copyTo(*descriptors);
827 std::vector<vpImagePoint> &candidates,
828 const std::vector<vpPolygon> &polygons,
829 const std::vector<std::vector<vpPoint> > &roisPt,
830 std::vector<vpPoint> &points, cv::Mat *descriptors)
833 std::vector<vpImagePoint> candidatesToCheck = candidates;
839 std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
840 for (
size_t i = 0; i < candidatesToCheck.size(); i++) {
841 pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
845 std::vector<vpPolygon> polygons_tmp = polygons;
846 for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
847 std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
849 while (it2 != pairOfCandidatesToCheck.end()) {
850 if (it1->isInside(it2->first)) {
851 candidates.push_back(it2->first);
853 points.push_back(pt);
855 if (descriptors != NULL) {
856 desc.push_back(descriptors->row((
int)it2->second));
860 it2 = pairOfCandidatesToCheck.erase(it2);
885 const std::vector<vpCylinder> &cylinders,
886 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
887 cv::Mat *descriptors)
889 std::vector<cv::KeyPoint> candidatesToCheck = candidates;
895 size_t cpt_keypoint = 0;
896 for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
897 ++it1, cpt_keypoint++) {
898 size_t cpt_cylinder = 0;
901 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
902 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
905 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
907 candidates.push_back(*it1);
911 double xm = 0.0, ym = 0.0;
913 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
915 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
917 point_cam[0] = xm * Z;
918 point_cam[1] = ym * Z;
922 point_obj = cMo.
inverse() * point_cam;
925 points.push_back(cv::Point3f((
float)pt.
get_oX(), (float)pt.
get_oY(), (float)pt.
get_oZ()));
927 if (descriptors != NULL) {
928 desc.push_back(descriptors->row((
int)cpt_keypoint));
938 if (descriptors != NULL) {
939 desc.copyTo(*descriptors);
960 const std::vector<vpCylinder> &cylinders,
961 const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
962 cv::Mat *descriptors)
964 std::vector<vpImagePoint> candidatesToCheck = candidates;
970 size_t cpt_keypoint = 0;
971 for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
972 ++it1, cpt_keypoint++) {
973 size_t cpt_cylinder = 0;
976 for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
977 it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
980 for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
982 candidates.push_back(*it1);
986 double xm = 0.0, ym = 0.0;
988 double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
990 if (!
vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
992 point_cam[0] = xm * Z;
993 point_cam[1] = ym * Z;
997 point_obj = cMo.
inverse() * point_cam;
1000 points.push_back(pt);
1002 if (descriptors != NULL) {
1003 desc.push_back(descriptors->row((
int)cpt_keypoint));
1013 if (descriptors != NULL) {
1014 desc.copyTo(*descriptors);
1037 if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
1039 std::cerr <<
"Not enough points to compute the pose (at least 4 points " 1046 cv::Mat cameraMatrix =
1056 cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);
1059 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 1061 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
1062 (
float)m_ransacReprojectionError,
1065 inlierIndex, cv::SOLVEPNP_ITERATIVE);
1085 int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
1086 if (m_useConsensusPercentage) {
1087 nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
1090 cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
false, m_nbRansacIterations,
1091 (
float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
1093 }
catch (cv::Exception &e) {
1094 std::cerr << e.what() << std::endl;
1098 vpTranslationVector translationVec(tvec.at<
double>(0), tvec.at<
double>(1), tvec.at<
double>(2));
1099 vpThetaUVector thetaUVector(rvec.at<
double>(0), rvec.at<
double>(1), rvec.at<
double>(2));
1130 std::vector<unsigned int> inlierIndex;
1131 return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
1149 std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
double &elapsedTime,
1154 if (objectVpPoints.size() < 4) {
1164 for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
1168 unsigned int nbInlierToReachConsensus = (
unsigned int)m_nbRansacMinInlierCount;
1169 if (m_useConsensusPercentage) {
1170 nbInlierToReachConsensus =
1171 (
unsigned int)(m_ransacConsensusPercentage / 100.0 * (
double)m_queryFilteredKeyPoints.size());
1178 bool isRansacPoseEstimationOk =
false;
1185 if (m_computeCovariance) {
1189 std::cerr <<
"e=" << e.
what() << std::endl;
1207 return isRansacPoseEstimationOk;
1223 double vpKeyPoint::computePoseEstimationError(
const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
1226 if (matchKeyPoints.size() == 0) {
1232 std::vector<double> errors(matchKeyPoints.size());
1235 for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
1236 it != matchKeyPoints.end(); ++it, cpt++) {
1241 double u = 0.0, v = 0.0;
1243 errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
1246 return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
1280 unsigned int nbImg = (
unsigned int)(m_mapOfImages.size() + 1);
1282 if (m_mapOfImages.empty()) {
1283 std::cerr <<
"There is no training image loaded !" << std::endl;
1293 unsigned int nbImgSqrt = (
unsigned int)
vpMath::round(std::sqrt((
double)nbImg));
1296 unsigned int nbWidth = nbImgSqrt;
1298 unsigned int nbHeight = nbImgSqrt;
1301 if (nbImgSqrt * nbImgSqrt < nbImg) {
1305 unsigned int maxW = ICurrent.
getWidth();
1306 unsigned int maxH = ICurrent.
getHeight();
1307 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1309 if (maxW < it->second.getWidth()) {
1310 maxW = it->second.getWidth();
1313 if (maxH < it->second.getHeight()) {
1314 maxH = it->second.getHeight();
1332 detect(I, keyPoints, elapsedTime, rectangle);
1343 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
const cv::Mat &mask)
1346 detect(matImg, keyPoints, elapsedTime, mask);
1362 cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1365 cv::Point leftTop((
int)rectangle.
getLeft(), (int)rectangle.
getTop()),
1367 cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1369 mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1372 detect(matImg, keyPoints, elapsedTime, mask);
1384 void vpKeyPoint::detect(
const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints,
double &elapsedTime,
1385 const cv::Mat &mask)
1390 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
1391 it != m_detectors.end(); ++it) {
1392 std::vector<cv::KeyPoint> kp;
1393 it->second->detect(matImg, kp, mask);
1394 keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
1409 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1411 std::vector<vpImagePoint> vpTrainImageKeyPoints;
1414 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1429 std::vector<vpImagePoint> vpQueryImageKeyPoints;
1432 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1449 unsigned int crossSize,
unsigned int lineThickness,
const vpColor &color)
1452 srand((
unsigned int)time(NULL));
1455 std::vector<vpImagePoint> queryImageKeyPoints;
1457 std::vector<vpImagePoint> trainImageKeyPoints;
1461 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1463 currentColor =
vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1466 leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1467 rightPt =
vpImagePoint(queryImageKeyPoints[(
size_t)(it->queryIdx)].get_i(),
1468 queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.
getWidth());
1487 const std::vector<vpImagePoint> &ransacInliers,
unsigned int crossSize,
1488 unsigned int lineThickness)
1490 if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1492 std::cerr <<
"There is no training image loaded !" << std::endl;
1498 int nbImg = (int)(m_mapOfImages.size() + 1);
1506 int nbWidth = nbImgSqrt;
1507 int nbHeight = nbImgSqrt;
1509 if (nbImgSqrt * nbImgSqrt < nbImg) {
1513 std::map<int, int> mapOfImageIdIndex;
1516 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1518 mapOfImageIdIndex[it->first] = cpt;
1520 if (maxW < it->second.getWidth()) {
1521 maxW = it->second.getWidth();
1524 if (maxH < it->second.getHeight()) {
1525 maxH = it->second.getHeight();
1531 int medianI = nbHeight / 2;
1532 int medianJ = nbWidth / 2;
1533 int medianIndex = medianI * nbWidth + medianJ;
1534 for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1536 int current_class_id_index = 0;
1537 if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1538 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1542 current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1545 int indexI = current_class_id_index / nbWidth;
1546 int indexJ = current_class_id_index - (indexI * nbWidth);
1547 topLeftCorner.
set_ij((
int)maxH * indexI, (
int)maxW * indexJ);
1554 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
1555 for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1560 for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1565 for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1571 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1572 int current_class_id = 0;
1573 if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(
size_t)it->trainIdx].class_id]] < medianIndex) {
1574 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1578 current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1581 int indexI = current_class_id / nbWidth;
1582 int indexJ = current_class_id - (indexI * nbWidth);
1584 vpImagePoint end((
int)maxH * indexI + m_trainKeyPoints[(
size_t)it->trainIdx].pt.y,
1585 (
int)maxW * indexJ + m_trainKeyPoints[(
size_t)it->trainIdx].pt.x);
1586 vpImagePoint start((
int)maxH * medianI + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.y,
1587 (
int)maxW * medianJ + m_queryFilteredKeyPoints[(
size_t)it->queryIdx].pt.x);
1607 std::vector<cv::Point3f> *trainPoints)
1610 extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1624 std::vector<cv::Point3f> *trainPoints)
1627 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1641 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1645 extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1659 double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1664 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1665 itd != m_extractors.end(); ++itd) {
1669 if (trainPoints != NULL && !trainPoints->empty()) {
1672 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1675 itd->second->compute(matImg, keyPoints, descriptors);
1677 if (keyPoints.size() != keyPoints_tmp.size()) {
1681 std::map<size_t, size_t> mapOfKeypointHashes;
1683 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1685 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1688 std::vector<cv::Point3f> trainPoints_tmp;
1689 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1690 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1691 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1696 *trainPoints = trainPoints_tmp;
1700 itd->second->compute(matImg, keyPoints, descriptors);
1705 std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1709 itd->second->compute(matImg, keyPoints, desc);
1711 if (keyPoints.size() != keyPoints_tmp.size()) {
1715 std::map<size_t, size_t> mapOfKeypointHashes;
1717 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1719 mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1722 std::vector<cv::Point3f> trainPoints_tmp;
1723 cv::Mat descriptors_tmp;
1724 for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1725 if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1726 if (trainPoints != NULL && !trainPoints->empty()) {
1727 trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1730 if (!descriptors.empty()) {
1731 descriptors_tmp.push_back(descriptors.row((
int)mapOfKeypointHashes[myKeypointHash(*it)]));
1736 if (trainPoints != NULL) {
1738 *trainPoints = trainPoints_tmp;
1741 descriptors_tmp.copyTo(descriptors);
1745 if (descriptors.empty()) {
1746 desc.copyTo(descriptors);
1748 cv::hconcat(descriptors, desc, descriptors);
1753 if (keyPoints.size() != (size_t)descriptors.rows) {
1754 std::cerr <<
"keyPoints.size() != (size_t) descriptors.rows" << std::endl;
1762 void vpKeyPoint::filterMatches()
1764 std::vector<cv::KeyPoint> queryKpts;
1765 std::vector<cv::Point3f> trainPts;
1766 std::vector<cv::DMatch> m;
1772 double min_dist = DBL_MAX;
1774 std::vector<double> distance_vec(m_knnMatches.size());
1777 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
1778 double dist = m_knnMatches[i][0].distance;
1780 distance_vec[i] = dist;
1782 if (dist < min_dist) {
1789 mean /= m_queryDescriptors.rows;
1792 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1793 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1794 double threshold = min_dist + stdev;
1796 for (
size_t i = 0; i < m_knnMatches.size(); i++) {
1797 if (m_knnMatches[i].size() >= 2) {
1800 float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
1805 double dist = m_knnMatches[i][0].distance;
1808 m.push_back(cv::DMatch((
int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
1810 if (!m_trainPoints.empty()) {
1811 trainPts.push_back(m_trainPoints[(
size_t)m_knnMatches[i][0].trainIdx]);
1813 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_knnMatches[i][0].queryIdx]);
1821 double min_dist = DBL_MAX;
1823 std::vector<double> distance_vec(m_matches.size());
1824 for (
size_t i = 0; i < m_matches.size(); i++) {
1825 double dist = m_matches[i].distance;
1827 distance_vec[i] = dist;
1829 if (dist < min_dist) {
1836 mean /= m_queryDescriptors.rows;
1838 double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1839 double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1849 for (
size_t i = 0; i < m_matches.size(); i++) {
1850 if (m_matches[i].distance <= threshold) {
1851 m.push_back(cv::DMatch((
int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
1853 if (!m_trainPoints.empty()) {
1854 trainPts.push_back(m_trainPoints[(
size_t)m_matches[i].trainIdx]);
1856 queryKpts.push_back(m_queryKeyPoints[(
size_t)m_matches[i].queryIdx]);
1861 if (m_useSingleMatchFilter) {
1864 std::vector<cv::DMatch> mTmp;
1865 std::vector<cv::Point3f> trainPtsTmp;
1866 std::vector<cv::KeyPoint> queryKptsTmp;
1868 std::map<int, int> mapOfTrainIdx;
1870 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1871 mapOfTrainIdx[it->trainIdx]++;
1875 for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1876 if (mapOfTrainIdx[it->trainIdx] == 1) {
1877 mTmp.push_back(cv::DMatch((
int)queryKptsTmp.size(), it->trainIdx, it->distance));
1879 if (!m_trainPoints.empty()) {
1880 trainPtsTmp.push_back(m_trainPoints[(
size_t)it->trainIdx]);
1882 queryKptsTmp.push_back(queryKpts[(
size_t)it->queryIdx]);
1886 m_filteredMatches = mTmp;
1887 m_objectFilteredPoints = trainPtsTmp;
1888 m_queryFilteredKeyPoints = queryKptsTmp;
1890 m_filteredMatches = m;
1891 m_objectFilteredPoints = trainPts;
1892 m_queryFilteredKeyPoints = queryKpts;
1905 objectPoints = m_objectFilteredPoints;
1970 void vpKeyPoint::init()
1973 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000) 1975 if (!cv::initModule_nonfree()) {
1976 std::cerr <<
"Cannot init module non free, SIFT or SURF cannot be used." << std::endl;
1986 initDetectors(m_detectorNames);
1987 initExtractors(m_extractorNames);
// Create one OpenCV feature detector from its textual name and store it in
// m_detectors. On OpenCV < 3.0 the generic factory cv::FeatureDetector::create
// is used; on OpenCV >= 3.0 each supported name is dispatched explicitly.
// A "Pyramid" prefix in the name requests wrapping the detector in a
// PyramidAdaptedFeatureDetector (multi-scale detection).
// NOTE(review): interior lines (closing braces, #else/#endif, throw statements
// raising the built ss_msg) were stripped by the extraction and are not visible.
1996 void vpKeyPoint::initDetector(
const std::string &detectorName)
// --- OpenCV 2.x path: name-based factory, with a failure check below. ---
1998 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) 1999 m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
2001 if (m_detectors[detectorName] == NULL) {
2002 std::stringstream ss_msg;
2003 ss_msg <<
"Fail to initialize the detector: " << detectorName
2004 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// --- OpenCV 3.x path: strip an optional "Pyramid" prefix from the name. ---
2008 std::string detectorNameTmp = detectorName;
2009 std::string pyramid =
"Pyramid";
2010 std::size_t pos = detectorName.find(pyramid);
2011 bool usePyramid =
false;
// When the prefix is present, detectorNameTmp keeps only the base name
// (presumably usePyramid is set true in a stripped line — TODO confirm).
2012 if (pos != std::string::npos) {
2013 detectorNameTmp = detectorName.substr(pos + pyramid.size());
// SIFT: needs the xfeatures2d contrib module; Pyramid mode is discouraged.
2017 if (detectorNameTmp ==
"SIFT") {
2018 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2019 cv::Ptr<cv::FeatureDetector> siftDetector = cv::xfeatures2d::SIFT::create();
2021 m_detectors[detectorNameTmp] = siftDetector;
2023 std::cerr <<
"You should not use SIFT with Pyramid feature detection!" << std::endl;
2024 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2027 std::stringstream ss_msg;
2028 ss_msg <<
"Fail to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2029 <<
" was not build with xFeatures2d module.";
// SURF: same xfeatures2d requirement and Pyramid warning as SIFT.
2032 }
else if (detectorNameTmp ==
"SURF") {
2033 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2034 cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
2036 m_detectors[detectorNameTmp] = surfDetector;
2038 std::cerr <<
"You should not use SURF with Pyramid feature detection!" << std::endl;
2039 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
2042 std::stringstream ss_msg;
2043 ss_msg <<
"Fail to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2044 <<
" was not build with xFeatures2d module.";
// FAST / MSER: core OpenCV detectors, Pyramid wrapping supported.
2047 }
else if (detectorNameTmp ==
"FAST") {
2048 cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
2050 m_detectors[detectorNameTmp] = fastDetector;
2052 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2054 }
else if (detectorNameTmp ==
"MSER") {
// NOTE(review): local is named fastDetector although it holds an MSER
// instance — harmless, but worth renaming when the file is restored.
2055 cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
2057 m_detectors[detectorNameTmp] = fastDetector;
2059 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
// ORB / BRISK / KAZE / AKAZE: core detectors; Pyramid mode only warned about.
2061 }
else if (detectorNameTmp ==
"ORB") {
2062 cv::Ptr<cv::FeatureDetector> orbDetector = cv::ORB::create();
2064 m_detectors[detectorNameTmp] = orbDetector;
2066 std::cerr <<
"You should not use ORB with Pyramid feature detection!" << std::endl;
2067 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
2069 }
else if (detectorNameTmp ==
"BRISK") {
2070 cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
2072 m_detectors[detectorNameTmp] = briskDetector;
2074 std::cerr <<
"You should not use BRISK with Pyramid feature detection!" << std::endl;
2075 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
2077 }
else if (detectorNameTmp ==
"KAZE") {
2078 cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
2080 m_detectors[detectorNameTmp] = kazeDetector;
2082 std::cerr <<
"You should not use KAZE with Pyramid feature detection!" << std::endl;
2083 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
2085 }
else if (detectorNameTmp ==
"AKAZE") {
2086 cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
2088 m_detectors[detectorNameTmp] = akazeDetector;
2090 std::cerr <<
"You should not use AKAZE with Pyramid feature detection!" << std::endl;
2091 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
// GFTT / SimpleBlob: Pyramid wrapping supported without warning.
2093 }
else if (detectorNameTmp ==
"GFTT") {
2094 cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
2096 m_detectors[detectorNameTmp] = gfttDetector;
2098 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
2100 }
else if (detectorNameTmp ==
"SimpleBlob") {
2101 cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
2103 m_detectors[detectorNameTmp] = simpleBlobDetector;
2105 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
// STAR: contrib-only detector (xfeatures2d).
2107 }
else if (detectorNameTmp ==
"STAR") {
2108 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2109 cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
2111 m_detectors[detectorNameTmp] = starDetector;
2113 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
2116 std::stringstream ss_msg;
2117 ss_msg <<
"Fail to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2118 <<
" was not build with xFeatures2d module.";
// AGAST: core OpenCV 3.x detector.
2121 }
else if (detectorNameTmp ==
"AGAST") {
2122 cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
2124 m_detectors[detectorNameTmp] = agastDetector;
2126 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
// MSD: requires OpenCV >= 3.1 AND the xfeatures2d contrib module.
2128 }
else if (detectorNameTmp ==
"MSD") {
2129 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) 2130 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) 2131 cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
2133 m_detectors[detectorNameTmp] = msdDetector;
2135 std::cerr <<
"You should not use MSD with Pyramid feature detection!" << std::endl;
2136 m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
2139 std::stringstream ss_msg;
2140 ss_msg <<
"Fail to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2141 <<
" was not build with xFeatures2d module.";
2145 std::stringstream ss_msg;
2146 ss_msg <<
"Feature " << detectorName <<
" is not available in OpenCV version: " << std::hex
2147 << VISP_HAVE_OPENCV_VERSION <<
" (require >= OpenCV 3.1).";
// Unrecognized name: report on stderr (no exception visible at this point).
2150 std::cerr <<
"The detector:" << detectorNameTmp <<
" is not available." << std::endl;
// Final sanity check: the map entry consulted depends on whether the Pyramid
// wrapper (full name) or the plain detector (stripped name) was stored.
2153 bool detectorInitialized =
false;
2155 detectorInitialized = (m_detectors[detectorNameTmp] != NULL);
2157 detectorInitialized = (m_detectors[detectorName] != NULL);
2160 if (!detectorInitialized) {
2161 std::stringstream ss_msg;
2162 ss_msg <<
"Fail to initialize the detector: " << detectorNameTmp
2163 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Initialize every detector listed in detectorNames.
// NOTE(review): the loop body (presumably a call to initDetector(*it)) was
// stripped by the extraction — confirm against the original file.
2176 void vpKeyPoint::initDetectors(
const std::vector<std::string> &detectorNames)
2178 for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
// Create one OpenCV descriptor extractor from its textual name and store it in
// m_extractors. OpenCV < 3.0 uses the generic name-based factory; OpenCV >= 3.0
// dispatches explicitly per name, with contrib (xfeatures2d) extractors guarded
// by build-configuration macros.
// NOTE(review): interior lines (closing braces, #else/#endif, the throw
// statements consuming each ss_msg) were stripped by the extraction.
2188 void vpKeyPoint::initExtractor(
const std::string &extractorName)
2190 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) 2191 m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
// SIFT / SURF: contrib-only extractors (xfeatures2d).
2193 if (extractorName ==
"SIFT") {
2194 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2195 m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
2197 std::stringstream ss_msg;
2198 ss_msg <<
"Fail to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2199 <<
" was not build with xFeatures2d module.";
2202 }
else if (extractorName ==
"SURF") {
// SURF is created with explicit parameters; the final `true` requests the
// extended (128-element) descriptor variant.
2203 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2205 m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3,
true);
2207 std::stringstream ss_msg;
2208 ss_msg <<
"Fail to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2209 <<
" was not build with xFeatures2d module.";
// ORB / BRISK: core OpenCV binary descriptors.
2212 }
else if (extractorName ==
"ORB") {
2213 m_extractors[extractorName] = cv::ORB::create();
2214 }
else if (extractorName ==
"BRISK") {
2215 m_extractors[extractorName] = cv::BRISK::create();
// FREAK / BRIEF: contrib-only binary descriptors.
2216 }
else if (extractorName ==
"FREAK") {
2217 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2218 m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2220 std::stringstream ss_msg;
2221 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2222 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2225 }
else if (extractorName ==
"BRIEF") {
2226 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2227 m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2229 std::stringstream ss_msg;
2230 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2231 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
// KAZE / AKAZE: core OpenCV 3.x descriptors.
2234 }
else if (extractorName ==
"KAZE") {
2235 m_extractors[extractorName] = cv::KAZE::create();
2236 }
else if (extractorName ==
"AKAZE") {
2237 m_extractors[extractorName] = cv::AKAZE::create();
// DAISY / LATCH: contrib-only descriptors.
2238 }
else if (extractorName ==
"DAISY") {
2239 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2240 m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2242 std::stringstream ss_msg;
2243 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2244 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2247 }
else if (extractorName ==
"LATCH") {
2248 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2249 m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2251 std::stringstream ss_msg;
2252 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2253 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
// LUCID: only the error path is visible here; the creation lines (orig
// 2258-2261) were stripped — confirm against the original file.
2256 }
else if (extractorName ==
"LUCID") {
2257 #ifdef VISP_HAVE_OPENCV_XFEATURES2D 2262 std::stringstream ss_msg;
2263 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2264 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
// VGG / BoostDesc: require OpenCV >= 3.2 AND the xfeatures2d module; a
// distinct message is built when the version requirement is not met.
2267 }
else if (extractorName ==
"VGG") {
2268 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) 2269 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) 2270 m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2272 std::stringstream ss_msg;
2273 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2274 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2278 std::stringstream ss_msg;
2279 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2280 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
2283 }
else if (extractorName ==
"BoostDesc") {
2284 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) 2285 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) 2286 m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2288 std::stringstream ss_msg;
2289 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2290 << VISP_HAVE_OPENCV_VERSION <<
" was not build with xFeatures2d module.";
2294 std::stringstream ss_msg;
2295 ss_msg <<
"Fail to initialize the extractor: " << extractorName <<
". OpenCV version " << std::hex
2296 << VISP_HAVE_OPENCV_VERSION <<
" but requires at least OpenCV 3.2.";
// Unrecognized name: report on stderr.
2300 std::cerr <<
"The extractor:" << extractorName <<
" is not available." << std::endl;
// Final sanity check that the extractor actually got created.
2304 if (m_extractors[extractorName] == NULL) {
2305 std::stringstream ss_msg;
2306 ss_msg <<
"Fail to initialize the extractor: " << extractorName
2307 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// OpenCV 2.4.x only: switch SURF to its extended (128-element) descriptor
// through the generic string-parameter interface.
2311 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) 2312 if (extractorName ==
"SURF") {
2314 m_extractors[extractorName]->set(
"extended", 1);
// Initialize every extractor listed in extractorNames, then scan the resulting
// m_extractors map to determine a common descriptorType (CV_32F by default).
// NOTE(review): the loop body of the first for (presumably initExtractor(*it))
// and the handling of a descriptor-type mismatch were stripped — confirm
// against the original file.
2325 void vpKeyPoint::initExtractors(
const std::vector<std::string> &extractorNames)
2327 for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
// Take the first extractor's descriptor type as reference, then check that
// every remaining extractor agrees with it.
2331 int descriptorType = CV_32F;
2332 bool firstIteration =
true;
2333 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2334 it != m_extractors.end(); ++it) {
2335 if (firstIteration) {
2336 firstIteration =
false;
2337 descriptorType = it->second->descriptorType();
2339 if (descriptorType != it->second->descriptorType()) {
// Populate the enum-to-string lookup tables m_mapOfDetectorNames and
// m_mapOfDescriptorNames, guarded by the OpenCV version/module macros under
// which each feature is actually available.
// NOTE(review): the entries for the older detectors/descriptors (FAST, ORB,
// SIFT, SURF, ...) sit on lines stripped by the extraction; only the
// OpenCV >= 3.0 entries are visible here.
2346 void vpKeyPoint::initFeatureNames()
// Detector names (OpenCV >= 3.0 block).
2349 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403) 2356 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D)) 2359 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) 2363 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 2364 m_mapOfDetectorNames[DETECTOR_KAZE] =
"KAZE";
2365 m_mapOfDetectorNames[DETECTOR_AKAZE] =
"AKAZE";
2366 m_mapOfDetectorNames[DETECTOR_AGAST] =
"AGAST";
// MSD detector needs OpenCV >= 3.1 plus the xfeatures2d module.
2368 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D) 2369 m_mapOfDetectorNames[DETECTOR_MSD] =
"MSD";
// Descriptor names, mirroring the detector availability guards.
2373 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403) 2376 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D)) 2380 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) 2384 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 2385 m_mapOfDescriptorNames[DESCRIPTOR_KAZE] =
"KAZE";
2386 m_mapOfDescriptorNames[DESCRIPTOR_AKAZE] =
"AKAZE";
2387 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) 2388 m_mapOfDescriptorNames[DESCRIPTOR_DAISY] =
"DAISY";
2389 m_mapOfDescriptorNames[DESCRIPTOR_LATCH] =
"LATCH";
// VGG and BoostDesc need OpenCV >= 3.2 plus xfeatures2d.
2392 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D) 2393 m_mapOfDescriptorNames[DESCRIPTOR_VGG] =
"VGG";
2394 m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] =
"BoostDesc";
// Matcher initialization (the enclosing signature — presumably
// vpKeyPoint::initMatcher(const std::string &matcherName) — was stripped by
// the extraction). Determines the common descriptor type of the configured
// extractors, then builds m_matcher: a FLANN matcher with an LSH index for
// binary (CV_8U) descriptors or a KD-tree index otherwise, or any other
// matcher through the generic name-based factory.
2406 int descriptorType = CV_32F;
2407 bool firstIteration =
true;
// Same consistency scan as in initExtractors: all extractors must produce
// the same descriptor type for a single matcher to work on them.
2408 for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2409 it != m_extractors.end(); ++it) {
2410 if (firstIteration) {
2411 firstIteration =
false;
2412 descriptorType = it->second->descriptorType();
2414 if (descriptorType != it->second->descriptorType()) {
2420 if (matcherName ==
"FlannBased") {
// No extractor configured: warn and fall back to the CV_32F default.
2421 if (m_extractors.empty()) {
2422 std::cout <<
"Warning: No extractor initialized, by default use " 2423 "floating values (CV_32F) " 2424 "for descriptor type !" 2428 if (descriptorType == CV_8U) {
// Binary descriptors: LSH index (12 tables, key size 20, probe level 2).
2429 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 2430 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2432 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::LshIndexParams(12, 20, 2));
// Floating-point descriptors: KD-tree index.
2435 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000) 2436 m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2438 m_matcher =
new cv::FlannBasedMatcher(
new cv::flann::KDTreeIndexParams());
// Any other matcher name goes through the generic factory.
2442 m_matcher = cv::DescriptorMatcher::create(matcherName);
// OpenCV 2.4.x only: enable cross-check filtering on a plain BruteForce
// matcher when knn matching is not used.
2445 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) 2446 if (m_matcher != NULL && !m_useKnn && matcherName ==
"BruteForce") {
2447 m_matcher->set(
"crossCheck", m_useBruteForceCrossCheck);
2451 if (m_matcher == NULL) {
2452 std::stringstream ss_msg;
2453 ss_msg <<
"Fail to initialize the matcher: " << matcherName
2454 <<
" or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION <<
".";
// Fragment of insertImageMatching (two-image variant): paste the reference
// image and then the current image into the side-by-side matching canvas.
// NOTE(review): the signature and the computation of topLeftCorner for each
// insert were stripped by the extraction — confirm against the original file.
2471 IMatching.
insert(IRef, topLeftCorner);
2473 IMatching.
insert(ICurrent, topLeftCorner);
// Multi-image variant of insertImageMatching: lay out all training images plus
// the current image on a near-square grid (nbWidth x nbHeight cells of size
// maxW x maxH), reserving the median cell of the grid for the current image.
2487 int nbImg = (int)(m_mapOfImages.size() + 1);
2489 if (m_mapOfImages.empty()) {
2490 std::cerr <<
"There is no training image loaded !" << std::endl;
// Grid dimensions start from floor(sqrt(nbImg)) (nbImgSqrt is computed on a
// stripped line) and are grown when the square does not fit all images.
2500 int nbWidth = nbImgSqrt;
2501 int nbHeight = nbImgSqrt;
2503 if (nbImgSqrt * nbImgSqrt < nbImg) {
// Cell size = max width / max height over all training images.
2508 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2510 if (maxW < it->second.getWidth()) {
2511 maxW = it->second.getWidth();
2514 if (maxH < it->second.getHeight()) {
2515 maxH = it->second.getHeight();
// The current image occupies the median grid cell; training images skip it.
2521 int medianI = nbHeight / 2;
2522 int medianJ = nbWidth / 2;
2523 int medianIndex = medianI * nbWidth + medianJ;
2526 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2528 int local_cpt = cpt;
// Shift indices past the median cell so training images flow around it.
2529 if (cpt >= medianIndex) {
2534 int indexI = local_cpt / nbWidth;
2535 int indexJ = local_cpt - (indexI * nbWidth);
2536 vpImagePoint topLeftCorner((
int)maxH * indexI, (
int)maxW * indexJ);
2538 IMatching.
insert(it->second, topLeftCorner);
// Finally paste the current image at the median cell.
2541 vpImagePoint topLeftCorner((
int)maxH * medianI, (
int)maxW * medianJ);
2542 IMatching.
insert(ICurrent, topLeftCorner);
// Fragment of loadConfigFile: when libxml2 is available, drop the current
// detector/extractor configuration and re-read it from the XML file via the
// xmlp parser object (declared on a stripped line).
2546 #ifdef VISP_HAVE_XML2 2558 m_detectorNames.clear();
2559 m_extractorNames.clear();
2560 m_detectors.clear();
2561 m_extractors.clear();
2563 std::cout <<
" *********** Parsing XML for configuration for vpKeyPoint " 2566 xmlp.
parse(configFile);
// loadLearningData: restore the training database (keypoints, optional 3D
// points, per-keypoint image ids, training images, descriptors) from a saved
// learning file, in either a custom little-endian binary format or an XML
// format (libxml2). In append mode, class/image ids are offset by the current
// maxima so the loaded data does not collide with what is already present.
// NOTE(review): many interior lines (closing braces, case labels, throws,
// append-mode guards) were stripped by the extraction.
2631 int startClassId = 0;
2632 int startImageId = 0;
2634 m_trainKeyPoints.clear();
2635 m_trainPoints.clear();
2636 m_mapOfImageId.clear();
2637 m_mapOfImages.clear();
// Append mode: continue numbering after the highest existing class/image id.
2640 for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
2641 if (startClassId < it->first) {
2642 startClassId = it->first;
2647 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2649 if (startImageId < it->first) {
2650 startImageId = it->first;
2657 if (!parent.empty()) {
// ---------- Binary format ----------
2662 std::ifstream file(filename.c_str(), std::ifstream::binary);
2663 if (!file.is_open()) {
// Number of training images stored in the file.
2669 readBinaryIntLE(file, nbImgs);
2671 #if !defined(VISP_HAVE_MODULE_IO) 2673 std::cout <<
"Warning: The learning file contains image data that will " 2674 "not be loaded as visp_io module " 2675 "is not available !" 2680 for (
int i = 0; i < nbImgs; i++) {
// Each image record: id, path length, path characters.
2683 readBinaryIntLE(file,
id);
2686 readBinaryIntLE(file, length);
2688 char *path =
new char[length + 1];
2690 for (
int cpt = 0; cpt < length; cpt++) {
2692 file.read((
char *)(&c),
sizeof(c));
2695 path[length] =
'\0';
// With visp_io the referenced image is read back into m_mapOfImages.
2698 #ifdef VISP_HAVE_MODULE_IO 2706 m_mapOfImages[
id + startImageId] = I;
// Header of the keypoint table: 3D-info flag, row/column counts, and the
// OpenCV descriptor type (default 5 == CV_32F).
2714 int have3DInfoInt = 0;
2715 readBinaryIntLE(file, have3DInfoInt);
2716 bool have3DInfo = have3DInfoInt != 0;
2720 readBinaryIntLE(file, nRows);
2724 readBinaryIntLE(file, nCols);
2727 int descriptorType = 5;
2728 readBinaryIntLE(file, descriptorType);
2730 cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
// One row per keypoint: geometry, ids, optional 3D point, descriptor row.
2731 for (
int i = 0; i < nRows; i++) {
2733 float u, v, size, angle, response;
2734 int octave, class_id, image_id;
2735 readBinaryFloatLE(file, u);
2736 readBinaryFloatLE(file, v);
2737 readBinaryFloatLE(file, size);
2738 readBinaryFloatLE(file, angle);
2739 readBinaryFloatLE(file, response);
2740 readBinaryIntLE(file, octave);
2741 readBinaryIntLE(file, class_id);
2742 readBinaryIntLE(file, image_id);
2743 cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
2744 m_trainKeyPoints.push_back(keyPoint);
// image_id == -1 marks a keypoint with no associated training image.
2746 if (image_id != -1) {
2747 #ifdef VISP_HAVE_MODULE_IO 2749 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
// Optional 3D object coordinates (only when have3DInfo — guard stripped).
2756 readBinaryFloatLE(file, oX);
2757 readBinaryFloatLE(file, oY);
2758 readBinaryFloatLE(file, oZ);
2759 m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
// Descriptor values are read element-wise, dispatched on descriptorType
// (the case labels CV_8U..CV_64F sit on stripped lines).
2762 for (
int j = 0; j < nCols; j++) {
2764 switch (descriptorType) {
2766 unsigned char value;
2767 file.read((
char *)(&value),
sizeof(value));
2768 trainDescriptorsTmp.at<
unsigned char>(i, j) = value;
2773 file.read((
char *)(&value),
sizeof(value));
2774 trainDescriptorsTmp.at<
char>(i, j) = value;
2778 unsigned short int value;
2779 readBinaryUShortLE(file, value);
2780 trainDescriptorsTmp.at<
unsigned short int>(i, j) = value;
2785 readBinaryShortLE(file, value);
2786 trainDescriptorsTmp.at<
short int>(i, j) = value;
2791 readBinaryIntLE(file, value);
2792 trainDescriptorsTmp.at<
int>(i, j) = value;
2797 readBinaryFloatLE(file, value);
2798 trainDescriptorsTmp.at<
float>(i, j) = value;
2803 readBinaryDoubleLE(file, value);
2804 trainDescriptorsTmp.at<
double>(i, j) = value;
// Default case: presumably falls back to float — TODO confirm.
2809 readBinaryFloatLE(file, value);
2810 trainDescriptorsTmp.at<
float>(i, j) = value;
// Merge loaded descriptors with any existing ones (append mode).
2816 if (!append || m_trainDescriptors.empty()) {
2817 trainDescriptorsTmp.copyTo(m_trainDescriptors);
2819 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
// ---------- XML format (libxml2) ----------
2824 #ifdef VISP_HAVE_XML2 2825 xmlDocPtr doc = NULL;
2826 xmlNodePtr root_element = NULL;
2836 doc = xmlReadFile(filename.c_str(), NULL, 0);
2842 root_element = xmlDocGetRootElement(doc);
2844 xmlNodePtr first_level_node = NULL;
2847 int descriptorType = CV_32F;
2848 int nRows = 0, nCols = 0;
2851 cv::Mat trainDescriptorsTmp;
// Walk the three kinds of first-level elements: TrainingImageInfo,
// DescriptorsInfo (matrix header) and DescriptorInfo (one keypoint each).
2853 for (first_level_node = root_element->children; first_level_node; first_level_node = first_level_node->next) {
2855 std::string name((
char *)first_level_node->name);
2856 if (first_level_node->type == XML_ELEMENT_NODE && name ==
"TrainingImageInfo") {
2857 xmlNodePtr image_info_node = NULL;
2859 for (image_info_node = first_level_node->children; image_info_node; image_info_node = image_info_node->next) {
2860 name = std::string((
char *)image_info_node->name);
2862 if (name ==
"trainImg") {
// image_id is stored as an XML attribute; xmlFree releases the copy.
2864 xmlChar *image_id_property = xmlGetProp(image_info_node, BAD_CAST
"image_id");
2866 if (image_id_property) {
2867 id = std::atoi((
char *)image_id_property);
2869 xmlFree(image_id_property);
2872 #ifdef VISP_HAVE_MODULE_IO 2873 std::string path((
char *)image_info_node->children->content);
2882 m_mapOfImages[
id + startImageId] = I;
2886 }
else if (first_level_node->type == XML_ELEMENT_NODE && name ==
"DescriptorsInfo") {
// Descriptor matrix header: row count, column count, OpenCV type code.
2887 xmlNodePtr descriptors_info_node = NULL;
2888 for (descriptors_info_node = first_level_node->children; descriptors_info_node;
2889 descriptors_info_node = descriptors_info_node->next) {
2890 if (descriptors_info_node->type == XML_ELEMENT_NODE) {
2891 name = std::string((
char *)descriptors_info_node->name);
2893 if (name ==
"nrows") {
2894 nRows = std::atoi((
char *)descriptors_info_node->children->content);
2895 }
else if (name ==
"ncols") {
2896 nCols = std::atoi((
char *)descriptors_info_node->children->content);
2897 }
else if (name ==
"type") {
2898 descriptorType = std::atoi((
char *)descriptors_info_node->children->content);
2903 trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2904 }
else if (first_level_node->type == XML_ELEMENT_NODE && name ==
"DescriptorInfo") {
// One keypoint: pixel coordinates, scale/orientation, ids, optional 3D
// point and the descriptor row parsed from text.
2905 xmlNodePtr point_node = NULL;
2906 double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
2907 int octave = 0, class_id = 0, image_id = 0;
2908 double oX = 0.0, oY = 0.0, oZ = 0.0;
2910 std::stringstream ss;
2912 for (point_node = first_level_node->children; point_node; point_node = point_node->next) {
2913 if (point_node->type == XML_ELEMENT_NODE) {
2914 name = std::string((
char *)point_node->name);
2918 u = std::strtod((
char *)point_node->children->content, &pEnd);
2919 }
else if (name ==
"v") {
2920 v = std::strtod((
char *)point_node->children->content, &pEnd);
2921 }
else if (name ==
"size") {
2922 size = std::strtod((
char *)point_node->children->content, &pEnd);
2923 }
else if (name ==
"angle") {
2924 angle = std::strtod((
char *)point_node->children->content, &pEnd);
2925 }
else if (name ==
"response") {
2926 response = std::strtod((
char *)point_node->children->content, &pEnd);
2927 }
else if (name ==
"octave") {
2928 octave = std::atoi((
char *)point_node->children->content);
2929 }
else if (name ==
"class_id") {
// class_id closes the keypoint record: the cv::KeyPoint is built here
// (relies on u/v/size/angle/response/octave appearing earlier in the
// element — order-dependent parsing).
2930 class_id = std::atoi((
char *)point_node->children->content);
2931 cv::KeyPoint keyPoint(cv::Point2f((
float)u, (
float)v), (
float)size, (
float)angle, (
float)response, octave,
2932 (class_id + startClassId));
2933 m_trainKeyPoints.push_back(keyPoint);
2934 }
else if (name ==
"image_id") {
2935 image_id = std::atoi((
char *)point_node->children->content);
2936 if (image_id != -1) {
2937 #ifdef VISP_HAVE_MODULE_IO 2939 m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2942 }
else if (name ==
"oX") {
2943 oX = std::atof((
char *)point_node->children->content);
2944 }
else if (name ==
"oY") {
2945 oY = std::atof((
char *)point_node->children->content);
2946 }
else if (name ==
"oZ") {
// oZ closes the 3D point record.
2947 oZ = std::atof((
char *)point_node->children->content);
2948 m_trainPoints.push_back(cv::Point3f((
float)oX, (
float)oY, (
float)oZ));
2949 }
else if (name ==
"desc") {
// Descriptor values are parsed per element through the stringstream,
// dispatched on descriptorType (case labels stripped, as above).
2950 xmlNodePtr descriptor_value_node = NULL;
2953 for (descriptor_value_node = point_node->children; descriptor_value_node;
2954 descriptor_value_node = descriptor_value_node->next) {
2956 if (descriptor_value_node->type == XML_ELEMENT_NODE) {
2958 std::string parseStr((
char *)descriptor_value_node->children->content);
2963 switch (descriptorType) {
// 8-bit types are parsed into an int first (parseValue, declared on a
// stripped line) to avoid reading a single character from the stream.
2968 trainDescriptorsTmp.at<
unsigned char>(i, j) = (
unsigned char)parseValue;
2975 trainDescriptorsTmp.at<
char>(i, j) = (
char)parseValue;
2979 ss >> trainDescriptorsTmp.at<
unsigned short int>(i, j);
2983 ss >> trainDescriptorsTmp.at<
short int>(i, j);
2987 ss >> trainDescriptorsTmp.at<
int>(i, j);
2991 ss >> trainDescriptorsTmp.at<
float>(i, j);
2995 ss >> trainDescriptorsTmp.at<
double>(i, j);
2999 ss >> trainDescriptorsTmp.at<
float>(i, j);
3003 std::cerr <<
"Error when converting:" << ss.str() << std::endl;
// Merge loaded descriptors with any existing ones (append mode).
3016 if (!append || m_trainDescriptors.empty()) {
3017 trainDescriptorsTmp.copyTo(m_trainDescriptors);
3019 cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3031 std::cout <<
"Error: libxml2 is required !" << std::endl;
// Hand the (possibly merged) training descriptors to the matcher.
3041 m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
3047 m_currentImageId = (int)m_mapOfImages.size();
// match(trainDescriptors, queryDescriptors, matches, elapsedTime) — earlier
// parameters of the signature were stripped by the extraction. Runs either a
// 2-NN match (when m_useKnn, for ratio-test filtering) or a plain 1-NN match.
// When m_useMatchTrainToQuery is set, matching is performed train->query on a
// cloned matcher and the resulting DMatch indices are swapped back so callers
// always see query->train matches.
3059 std::vector<cv::DMatch> &matches,
double &elapsedTime)
3064 m_knnMatches.clear();
3066 if (m_useMatchTrainToQuery) {
3067 std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
// clone(true) produces an empty copy so the temporary matcher does not
// reuse the stored train descriptors.
3070 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3071 matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
// Swap queryIdx/trainIdx so matches are expressed query->train.
3073 for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
3074 it1 != knnMatchesTmp.end(); ++it1) {
3075 std::vector<cv::DMatch> tmp;
3076 for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
3077 tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
3079 m_knnMatches.push_back(tmp);
// Collapse each knn list to its best match for the flat `matches` output.
3082 matches.resize(m_knnMatches.size());
3083 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
// Standard query->train knn matching against the stored train descriptors.
3086 m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
3087 matches.resize(m_knnMatches.size());
3088 std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
// Non-knn branch: plain 1-NN matching, with the same optional train->query
// inversion as above.
3093 if (m_useMatchTrainToQuery) {
3094 std::vector<cv::DMatch> matchesTmp;
3096 cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(
true);
3097 matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
3099 for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
3100 matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
3104 m_matcher->match(queryDescriptors, matches);
// matchPoint (no pose computation) — the signature's earlier parameters were
// stripped; only the trailing `width` parameter of an overload is visible.
// Detects and extracts keypoints on the current image (optionally with
// affine-simulated detection), matches them against the training descriptors,
// applies the configured match filtering, and returns the number of filtered
// matches.
3130 const unsigned int width)
// Guard: matching requires a previously built reference database.
3145 if (m_trainDescriptors.empty()) {
3146 std::cerr <<
"Reference is empty." << std::endl;
3148 std::cerr <<
"Reference is not computed." << std::endl;
3150 std::cerr <<
"Matching is not possible." << std::endl;
// ASIFT-style path: detect/extract over multiple affine simulations, then
// flatten the per-simulation keypoints and descriptors.
3155 if (m_useAffineDetection) {
3156 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3157 std::vector<cv::Mat> listOfQueryDescriptors;
3163 m_queryKeyPoints.clear();
3164 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3165 it != listOfQueryKeyPoints.end(); ++it) {
3166 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3170 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3174 it->copyTo(m_queryDescriptors);
3176 m_queryDescriptors.push_back(*it);
// Standard path: single detection + extraction pass.
3180 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3181 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3184 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3187 m_queryFilteredKeyPoints.clear();
3188 m_objectFilteredPoints.clear();
3189 m_filteredMatches.clear();
// No-filter branch: in train->query mode the DMatch query indices must be
// renumbered to stay consistent with the rebuilt keypoint list.
3193 if (m_useMatchTrainToQuery) {
3195 m_queryFilteredKeyPoints.clear();
3196 m_filteredMatches.clear();
3197 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3198 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3199 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3202 m_queryFilteredKeyPoints = m_queryKeyPoints;
3203 m_filteredMatches = m_matches;
// Propagate the matched 3D training points when the reference has them.
3206 if (!m_trainPoints.empty()) {
3207 m_objectFilteredPoints.clear();
3211 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3213 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3222 return static_cast<unsigned int>(m_filteredMatches.size());
// Convenience overload body: forwards to the full matchPoint with throwaway
// error/elapsedTime outputs (the overload's signature was stripped by the
// extraction).
3241 double error, elapsedTime;
3242 return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
// matchPoint with pose estimation: detect/extract/match as in the plain
// overload, then estimate cMo by RANSAC — either through ViSP's virtual
// visual servoing pose (m_useRansacVVS) or through the OpenCV-based path —
// recording inliers, outliers and the reprojection error.
// Guard: matching requires a previously built reference database.
3266 if (m_trainDescriptors.empty()) {
3267 std::cerr <<
"Reference is empty." << std::endl;
3269 std::cerr <<
"Reference is not computed." << std::endl;
3271 std::cerr <<
"Matching is not possible." << std::endl;
// ASIFT-style affine-simulated detection, flattened as in the no-pose path.
3276 if (m_useAffineDetection) {
3277 std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3278 std::vector<cv::Mat> listOfQueryDescriptors;
3284 m_queryKeyPoints.clear();
3285 for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3286 it != listOfQueryKeyPoints.end(); ++it) {
3287 m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3291 for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3295 it->copyTo(m_queryDescriptors);
3297 m_queryDescriptors.push_back(*it);
3301 detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3302 extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3305 match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3307 elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3310 m_queryFilteredKeyPoints.clear();
3311 m_objectFilteredPoints.clear();
3312 m_filteredMatches.clear();
// No-filter branch, with train->query index renumbering (see plain overload).
3316 if (m_useMatchTrainToQuery) {
3318 m_queryFilteredKeyPoints.clear();
3319 m_filteredMatches.clear();
3320 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3321 m_filteredMatches.push_back(cv::DMatch((
int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3322 m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(
size_t)it->queryIdx]);
3325 m_queryFilteredKeyPoints = m_queryKeyPoints;
3326 m_filteredMatches = m_matches;
3329 if (!m_trainPoints.empty()) {
3330 m_objectFilteredPoints.clear();
3334 for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3336 m_objectFilteredPoints.push_back(m_trainPoints[(
size_t)it->trainIdx]);
3348 m_ransacInliers.clear();
3349 m_ransacOutliers.clear();
// ---- Pose path 1: ViSP RANSAC + virtual visual servoing ----
3351 if (m_useRansacVVS) {
// Build vpPoint correspondences (3D object point + normalized image point).
3352 std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3356 for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3357 it != m_objectFilteredPoints.end(); ++it, cpt++) {
// Note the (y, x) ordering: vpImagePoint takes (row=i, col=j).
3361 vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3363 double x = 0.0, y = 0.0;
3368 objectVpPoints[cpt] = pt;
3371 std::vector<vpPoint> inliers;
3372 std::vector<unsigned int> inlierIndex;
3374 bool res =
computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
// Record inlier correspondences and collect the complement as outliers.
3376 std::map<unsigned int, bool> mapOfInlierIndex;
3377 m_matchRansacKeyPointsToPoints.clear();
3379 for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3380 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3381 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3382 mapOfInlierIndex[*it] =
true;
3385 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3386 if (mapOfInlierIndex.find((
unsigned int)i) == mapOfInlierIndex.end()) {
3387 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3391 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3393 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3394 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3395 m_ransacInliers.begin(), matchRansacToVpImage);
3397 elapsedTime += m_poseTime;
// ---- Pose path 2: OpenCV-based RANSAC pose from 2D/3D correspondences ----
3401 std::vector<cv::Point2f> imageFilteredPoints;
3402 cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3403 std::vector<int> inlierIndex;
3404 bool res =
computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
// Same inlier/outlier bookkeeping as the VVS path (int indices here).
3406 std::map<int, bool> mapOfInlierIndex;
3407 m_matchRansacKeyPointsToPoints.clear();
3409 for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3410 m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3411 m_queryFilteredKeyPoints[(
size_t)(*it)], m_objectFilteredPoints[(
size_t)(*it)]));
3412 mapOfInlierIndex[*it] =
true;
3415 for (
size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3416 if (mapOfInlierIndex.find((
int)i) == mapOfInlierIndex.end()) {
3417 m_ransacOutliers.push_back(
vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3421 error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3423 m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3424 std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3425 m_ransacInliers.begin(), matchRansacToVpImage);
3427 elapsedTime += m_poseTime;
3452 vpImagePoint ¢erOfGravity,
const bool isPlanarObject,
3453 std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3454 double *meanDescriptorDistance,
double *detection_score,
const vpRect &rectangle)
3456 if (imPts1 != NULL && imPts2 != NULL) {
3463 double meanDescriptorDistanceTmp = 0.0;
3464 for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3465 meanDescriptorDistanceTmp += (double)it->distance;
3468 meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3469 double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3471 if (meanDescriptorDistance != NULL) {
3472 *meanDescriptorDistance = meanDescriptorDistanceTmp;
3474 if (detection_score != NULL) {
3475 *detection_score = score;
3478 if (m_filteredMatches.size() >= 4) {
3480 std::vector<cv::Point2f> points1(m_filteredMatches.size());
3482 std::vector<cv::Point2f> points2(m_filteredMatches.size());
3484 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3485 points1[i] = cv::Point2f(m_trainKeyPoints[(
size_t)m_filteredMatches[i].trainIdx].pt);
3486 points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(
size_t)m_filteredMatches[i].queryIdx].pt);
3489 std::vector<vpImagePoint> inliers;
3490 if (isPlanarObject) {
3491 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) 3492 cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3494 cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3497 for (
size_t i = 0; i < m_filteredMatches.size(); i++) {
3499 cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3500 realPoint.at<
double>(0, 0) = points1[i].x;
3501 realPoint.at<
double>(1, 0) = points1[i].y;
3502 realPoint.at<
double>(2, 0) = 1.f;
3504 cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3505 double err_x = (reprojectedPoint.at<
double>(0, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].x;
3506 double err_y = (reprojectedPoint.at<
double>(1, 0) / reprojectedPoint.at<
double>(2, 0)) - points2[i].y;
3507 double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3509 if (reprojectionError < 6.0) {
3510 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3511 if (imPts1 != NULL) {
3512 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3515 if (imPts2 != NULL) {
3516 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3520 }
else if (m_filteredMatches.size() >= 8) {
3521 cv::Mat fundamentalInliers;
3522 cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3524 for (
size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
3525 if (fundamentalInliers.at<uchar>((
int)i, 0)) {
3526 inliers.push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3528 if (imPts1 != NULL) {
3529 imPts1->push_back(
vpImagePoint((
double)points1[i].y, (
double)points1[i].x));
3532 if (imPts2 != NULL) {
3533 imPts2->push_back(
vpImagePoint((
double)points2[i].y, (
double)points2[i].x));
3539 if (!inliers.empty()) {
3546 double meanU = 0.0, meanV = 0.0;
3547 for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
3548 meanU += it->get_u();
3549 meanV += it->get_v();
3552 meanU /= (double)inliers.size();
3553 meanV /= (double)inliers.size();
3555 centerOfGravity.
set_u(meanU);
3556 centerOfGravity.
set_v(meanV);
3564 return meanDescriptorDistanceTmp < m_detectionThreshold;
3566 return score > m_detectionScore;
3595 bool isMatchOk =
matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3600 std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
3602 for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
3606 modelImagePoints[cpt] = imPt;
3615 double meanU = 0.0, meanV = 0.0;
3616 for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
3617 meanU += it->get_u();
3618 meanV += it->get_v();
3621 meanU /= (double)m_ransacInliers.size();
3622 meanV /= (double)m_ransacInliers.size();
3624 centerOfGravity.
set_u(meanU);
3625 centerOfGravity.
set_v(meanV);
3644 std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
3645 std::vector<cv::Mat> &listOfDescriptors,
3651 listOfKeypoints.clear();
3652 listOfDescriptors.clear();
3654 for (
int tl = 1; tl < 6; tl++) {
3655 double t = pow(2, 0.5 * tl);
3656 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3657 std::vector<cv::KeyPoint> keypoints;
3658 cv::Mat descriptors;
3660 cv::Mat timg, mask, Ai;
3663 affineSkew(t, phi, timg, mask, Ai);
3666 if(listOfAffineI != NULL) {
3668 bitwise_and(mask, timg, img_disp);
3671 listOfAffineI->push_back(tI);
3675 cv::bitwise_and(mask, timg, img_disp);
3676 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
3677 cv::imshow(
"Skew", img_disp );
3681 for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3682 it != m_detectors.end(); ++it) {
3683 std::vector<cv::KeyPoint> kp;
3684 it->second->detect(timg, kp, mask);
3685 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3689 extract(timg, keypoints, descriptors, elapsedTime);
3691 for(
unsigned int i = 0; i < keypoints.size(); i++) {
3692 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3693 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3694 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3695 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
3698 listOfKeypoints.push_back(keypoints);
3699 listOfDescriptors.push_back(descriptors);
3708 std::vector<std::pair<double, int> > listOfAffineParams;
3709 for (
int tl = 1; tl < 6; tl++) {
3710 double t = pow(2, 0.5 * tl);
3711 for (
int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3712 listOfAffineParams.push_back(std::pair<double, int>(t, phi));
3716 listOfKeypoints.resize(listOfAffineParams.size());
3717 listOfDescriptors.resize(listOfAffineParams.size());
3719 if (listOfAffineI != NULL) {
3720 listOfAffineI->resize(listOfAffineParams.size());
3723 #ifdef VISP_HAVE_OPENMP 3724 #pragma omp parallel for 3726 for (
int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
3727 std::vector<cv::KeyPoint> keypoints;
3728 cv::Mat descriptors;
3730 cv::Mat timg, mask, Ai;
3733 affineSkew(listOfAffineParams[(
size_t)cpt].first, listOfAffineParams[(
size_t)cpt].second, timg, mask, Ai);
3735 if (listOfAffineI != NULL) {
3737 bitwise_and(mask, timg, img_disp);
3740 (*listOfAffineI)[(size_t)cpt] = tI;
3745 cv::bitwise_and(mask, timg, img_disp);
3746 cv::namedWindow(
"Skew", cv::WINDOW_AUTOSIZE );
3747 cv::imshow(
"Skew", img_disp );
3751 for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3752 it != m_detectors.end(); ++it) {
3753 std::vector<cv::KeyPoint> kp;
3754 it->second->detect(timg, kp, mask);
3755 keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3759 extract(timg, keypoints, descriptors, elapsedTime);
3761 for (
size_t i = 0; i < keypoints.size(); i++) {
3762 cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3763 cv::Mat kpt_t = Ai * cv::Mat(kpt);
3764 keypoints[i].pt.x = kpt_t.at<
float>(0, 0);
3765 keypoints[i].pt.y = kpt_t.at<
float>(1, 0);
3768 listOfKeypoints[(size_t)cpt] = keypoints;
3769 listOfDescriptors[(size_t)cpt] = descriptors;
3785 m_computeCovariance =
false;
3787 m_currentImageId = 0;
3789 m_detectionScore = 0.15;
3790 m_detectionThreshold = 100.0;
3791 m_detectionTime = 0.0;
3792 m_detectorNames.clear();
3793 m_detectors.clear();
3794 m_extractionTime = 0.0;
3795 m_extractorNames.clear();
3796 m_extractors.clear();
3797 m_filteredMatches.clear();
3800 m_knnMatches.clear();
3801 m_mapOfImageId.clear();
3802 m_mapOfImages.clear();
3803 m_matcher = cv::Ptr<cv::DescriptorMatcher>();
3804 m_matcherName =
"BruteForce-Hamming";
3806 m_matchingFactorThreshold = 2.0;
3807 m_matchingRatioThreshold = 0.85;
3808 m_matchingTime = 0.0;
3809 m_matchRansacKeyPointsToPoints.clear();
3810 m_nbRansacIterations = 200;
3811 m_nbRansacMinInlierCount = 100;
3812 m_objectFilteredPoints.clear();
3814 m_queryDescriptors = cv::Mat();
3815 m_queryFilteredKeyPoints.clear();
3816 m_queryKeyPoints.clear();
3817 m_ransacConsensusPercentage = 20.0;
3818 m_ransacInliers.clear();
3819 m_ransacOutliers.clear();
3820 m_ransacReprojectionError = 6.0;
3821 m_ransacThreshold = 0.01;
3822 m_trainDescriptors = cv::Mat();
3823 m_trainKeyPoints.clear();
3824 m_trainPoints.clear();
3825 m_trainVpPoints.clear();
3826 m_useAffineDetection =
false;
3827 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000) 3828 m_useBruteForceCrossCheck =
true;
3830 m_useConsensusPercentage =
false;
3832 m_useMatchTrainToQuery =
false;
3833 m_useRansacVVS =
true;
3834 m_useSingleMatchFilter =
true;
3836 m_detectorNames.push_back(
"ORB");
3837 m_extractorNames.push_back(
"ORB");
3853 if (!parent.empty()) {
3857 std::map<int, std::string> mapOfImgPath;
3858 if (saveTrainingImages) {
3859 #ifdef VISP_HAVE_MODULE_IO 3861 unsigned int cpt = 0;
3863 for (std::map<
int,
vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3869 std::stringstream ss;
3870 ss <<
"train_image_" << std::setfill(
'0') << std::setw(3) << cpt;
3872 switch (m_imageFormat) {
3894 std::string imgFilename = ss.str();
3895 mapOfImgPath[it->first] = imgFilename;
3896 vpImageIo::write(it->second, parent + (!parent.empty() ?
"/" :
"") + imgFilename);
3899 std::cout <<
"Warning: in vpKeyPoint::saveLearningData() training images " 3900 "are not saved because " 3901 "visp_io module is not available !" 3906 bool have3DInfo = m_trainPoints.size() > 0;
3907 if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
3913 std::ofstream file(filename.c_str(), std::ofstream::binary);
3914 if (!file.is_open()) {
3919 int nbImgs = (int)mapOfImgPath.size();
3921 writeBinaryIntLE(file, nbImgs);
3923 #ifdef VISP_HAVE_MODULE_IO 3924 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3928 writeBinaryIntLE(file,
id);
3931 std::string path = it->second;
3932 int length = (int)path.length();
3934 writeBinaryIntLE(file, length);
3936 for (
int cpt = 0; cpt < length; cpt++) {
3937 file.write((
char *)(&path[(
size_t)cpt]),
sizeof(path[(
size_t)cpt]));
3943 int have3DInfoInt = have3DInfo ? 1 : 0;
3945 writeBinaryIntLE(file, have3DInfoInt);
3947 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3948 int descriptorType = m_trainDescriptors.type();
3952 writeBinaryIntLE(file, nRows);
3956 writeBinaryIntLE(file, nCols);
3960 writeBinaryIntLE(file, descriptorType);
3962 for (
int i = 0; i < nRows; i++) {
3963 unsigned int i_ = (
unsigned int)i;
3965 float u = m_trainKeyPoints[i_].pt.x;
3967 writeBinaryFloatLE(file, u);
3970 float v = m_trainKeyPoints[i_].pt.y;
3972 writeBinaryFloatLE(file, v);
3975 float size = m_trainKeyPoints[i_].size;
3977 writeBinaryFloatLE(file, size);
3980 float angle = m_trainKeyPoints[i_].angle;
3982 writeBinaryFloatLE(file, angle);
3985 float response = m_trainKeyPoints[i_].response;
3987 writeBinaryFloatLE(file, response);
3990 int octave = m_trainKeyPoints[i_].octave;
3992 writeBinaryIntLE(file, octave);
3995 int class_id = m_trainKeyPoints[i_].class_id;
3997 writeBinaryIntLE(file, class_id);
4000 #ifdef VISP_HAVE_MODULE_IO 4001 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4002 int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
4004 writeBinaryIntLE(file, image_id);
4008 writeBinaryIntLE(file, image_id);
4012 float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
4015 writeBinaryFloatLE(file, oX);
4019 writeBinaryFloatLE(file, oY);
4023 writeBinaryFloatLE(file, oZ);
4026 for (
int j = 0; j < nCols; j++) {
4028 switch (descriptorType) {
4030 file.write((
char *)(&m_trainDescriptors.at<
unsigned char>(i, j)),
4031 sizeof(m_trainDescriptors.at<
unsigned char>(i, j)));
4035 file.write((
char *)(&m_trainDescriptors.at<
char>(i, j)),
sizeof(m_trainDescriptors.at<
char>(i, j)));
4042 writeBinaryUShortLE(file, m_trainDescriptors.at<
unsigned short int>(i, j));
4048 writeBinaryShortLE(file, m_trainDescriptors.at<
short int>(i, j));
4054 writeBinaryIntLE(file, m_trainDescriptors.at<
int>(i, j));
4060 writeBinaryFloatLE(file, m_trainDescriptors.at<
float>(i, j));
4066 writeBinaryDoubleLE(file, m_trainDescriptors.at<
double>(i, j));
4078 #ifdef VISP_HAVE_XML2 4079 xmlDocPtr doc = NULL;
4080 xmlNodePtr root_node = NULL, image_node = NULL, image_info_node = NULL, descriptors_info_node = NULL,
4081 descriptor_node = NULL, desc_node = NULL;
4090 doc = xmlNewDoc(BAD_CAST
"1.0");
4095 root_node = xmlNewNode(NULL, BAD_CAST
"LearningData");
4096 xmlDocSetRootElement(doc, root_node);
4098 std::stringstream ss;
4101 image_node = xmlNewChild(root_node, NULL, BAD_CAST
"TrainingImageInfo", NULL);
4103 #ifdef VISP_HAVE_MODULE_IO 4104 for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4105 image_info_node = xmlNewChild(image_node, NULL, BAD_CAST
"trainImg", BAD_CAST it->second.c_str());
4108 xmlNewProp(image_info_node, BAD_CAST
"image_id", BAD_CAST ss.str().c_str());
4113 descriptors_info_node = xmlNewChild(root_node, NULL, BAD_CAST
"DescriptorsInfo", NULL);
4115 int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4116 int descriptorType = m_trainDescriptors.type();
4121 xmlNewChild(descriptors_info_node, NULL, BAD_CAST
"nrows", BAD_CAST ss.str().c_str());
4126 xmlNewChild(descriptors_info_node, NULL, BAD_CAST
"ncols", BAD_CAST ss.str().c_str());
4130 ss << descriptorType;
4131 xmlNewChild(descriptors_info_node, NULL, BAD_CAST
"type", BAD_CAST ss.str().c_str());
4133 for (
int i = 0; i < nRows; i++) {
4134 unsigned int i_ = (
unsigned int)i;
4135 descriptor_node = xmlNewChild(root_node, NULL, BAD_CAST
"DescriptorInfo", NULL);
4139 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].pt.x;
4140 xmlNewChild(descriptor_node, NULL, BAD_CAST
"u", BAD_CAST ss.str().c_str());
4144 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].pt.y;
4145 xmlNewChild(descriptor_node, NULL, BAD_CAST
"v", BAD_CAST ss.str().c_str());
4149 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].size;
4150 xmlNewChild(descriptor_node, NULL, BAD_CAST
"size", BAD_CAST ss.str().c_str());
4154 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].angle;
4155 xmlNewChild(descriptor_node, NULL, BAD_CAST
"angle", BAD_CAST ss.str().c_str());
4159 ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].response;
4160 xmlNewChild(descriptor_node, NULL, BAD_CAST
"response", BAD_CAST ss.str().c_str());
4163 ss << m_trainKeyPoints[i_].octave;
4164 xmlNewChild(descriptor_node, NULL, BAD_CAST
"octave", BAD_CAST ss.str().c_str());
4167 ss << m_trainKeyPoints[i_].class_id;
4168 xmlNewChild(descriptor_node, NULL, BAD_CAST
"class_id", BAD_CAST ss.str().c_str());
4171 #ifdef VISP_HAVE_MODULE_IO 4172 std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4173 ss << ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
4174 xmlNewChild(descriptor_node, NULL, BAD_CAST
"image_id", BAD_CAST ss.str().c_str());
4177 xmlNewChild(descriptor_node, NULL, BAD_CAST
"image_id", BAD_CAST ss.str().c_str());
4183 ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].x;
4184 xmlNewChild(descriptor_node, NULL, BAD_CAST
"oX", BAD_CAST ss.str().c_str());
4188 ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].y;
4189 xmlNewChild(descriptor_node, NULL, BAD_CAST
"oY", BAD_CAST ss.str().c_str());
4193 ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].z;
4194 xmlNewChild(descriptor_node, NULL, BAD_CAST
"oZ", BAD_CAST ss.str().c_str());
4197 desc_node = xmlNewChild(descriptor_node, NULL, BAD_CAST
"desc", NULL);
4199 for (
int j = 0; j < nCols; j++) {
4202 switch (descriptorType) {
4208 int val_tmp = m_trainDescriptors.at<
unsigned char>(i, j);
4217 int val_tmp = m_trainDescriptors.at<
char>(i, j);
4222 ss << m_trainDescriptors.at<
unsigned short int>(i, j);
4226 ss << m_trainDescriptors.at<
short int>(i, j);
4230 ss << m_trainDescriptors.at<
int>(i, j);
4235 ss << std::fixed << std::setprecision(9) << m_trainDescriptors.at<
float>(i, j);
4240 ss << std::fixed << std::setprecision(17) << m_trainDescriptors.at<
double>(i, j);
4247 xmlNewChild(desc_node, NULL, BAD_CAST
"val", BAD_CAST ss.str().c_str());
4251 xmlSaveFormatFileEnc(filename.c_str(), doc,
"UTF-8", 1);
4262 std::cerr <<
"Error: libxml2 is required !" << std::endl;
4267 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000) 4269 struct KeypointResponseGreaterThanThreshold {
4270 KeypointResponseGreaterThanThreshold(
float _value) : value(_value) {}
4271 inline bool operator()(
const cv::KeyPoint &kpt)
const {
return kpt.response >= value; }
4275 struct KeypointResponseGreater {
4276 inline bool operator()(
const cv::KeyPoint &kp1,
const cv::KeyPoint &kp2)
const {
return kp1.response > kp2.response; }
4280 void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints,
int n_points)
4284 if (n_points >= 0 && keypoints.size() > (size_t)n_points) {
4285 if (n_points == 0) {
4291 std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
4293 float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
4296 std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4297 keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
4300 keypoints.resize((
size_t)(new_end - keypoints.begin()));
4304 struct RoiPredicate {
4305 RoiPredicate(
const cv::Rect &_r) : r(_r) {}
4307 bool operator()(
const cv::KeyPoint &keyPt)
const {
return !r.contains(keyPt.pt); }
4312 void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4315 if (borderSize > 0) {
4316 if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4319 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4320 RoiPredicate(cv::Rect(
4321 cv::Point(borderSize, borderSize),
4322 cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4327 struct SizePredicate {
4328 SizePredicate(
float _minSize,
float _maxSize) : minSize(_minSize), maxSize(_maxSize) {}
4330 bool operator()(
const cv::KeyPoint &keyPt)
const 4332 float size = keyPt.size;
4333 return (size < minSize) || (size > maxSize);
4336 float minSize, maxSize;
4339 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints,
float minSize,
float maxSize)
4341 CV_Assert(minSize >= 0);
4342 CV_Assert(maxSize >= 0);
4343 CV_Assert(minSize <= maxSize);
4345 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4351 MaskPredicate(
const cv::Mat &_mask) : mask(_mask) {}
4352 bool operator()(
const cv::KeyPoint &key_pt)
const 4354 return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
4359 MaskPredicate &operator=(
const MaskPredicate &);
4362 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints,
const cv::Mat &mask)
4367 keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4370 struct KeyPoint_LessThan {
4371 KeyPoint_LessThan(
const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) {}
4372 bool operator()(
size_t i,
size_t j)
const 4374 const cv::KeyPoint &kp1 = (*kp)[ i];
4375 const cv::KeyPoint &kp2 = (*kp)[ j];
4377 std::numeric_limits<float>::epsilon())) {
4379 return kp1.pt.x < kp2.pt.x;
4383 std::numeric_limits<float>::epsilon())) {
4385 return kp1.pt.y < kp2.pt.y;
4389 std::numeric_limits<float>::epsilon())) {
4391 return kp1.size > kp2.size;
4395 std::numeric_limits<float>::epsilon())) {
4397 return kp1.angle < kp2.angle;
4401 std::numeric_limits<float>::epsilon())) {
4403 return kp1.response > kp2.response;
4406 if (kp1.octave != kp2.octave) {
4407 return kp1.octave > kp2.octave;
4410 if (kp1.class_id != kp2.class_id) {
4411 return kp1.class_id > kp2.class_id;
4416 const std::vector<cv::KeyPoint> *kp;
4419 void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4421 size_t i, j, n = keypoints.size();
4422 std::vector<size_t> kpidx(n);
4423 std::vector<uchar> mask(n, (uchar)1);
4425 for (i = 0; i < n; i++) {
4428 std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4429 for (i = 1, j = 0; i < n; i++) {
4430 cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4431 cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4434 if (!
vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4435 !
vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4436 !
vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4437 !
vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4444 for (i = j = 0; i < n; i++) {
4447 keypoints[j] = keypoints[i];
4452 keypoints.resize(j);
4458 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(
const cv::Ptr<cv::FeatureDetector> &_detector,
4460 : detector(_detector), maxLevel(_maxLevel)
4464 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty()
const 4466 return detector.empty() || (cv::FeatureDetector *)detector->empty();
4469 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4470 CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4472 detectImpl(image.getMat(), keypoints, mask.getMat());
4475 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(
const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4476 const cv::Mat &mask)
const 4478 cv::Mat src = image;
4479 cv::Mat src_mask = mask;
4481 cv::Mat dilated_mask;
4482 if (!mask.empty()) {
4483 cv::dilate(mask, dilated_mask, cv::Mat());
4484 cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4485 mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4486 dilated_mask = mask255;
4489 for (
int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2) {
4491 std::vector<cv::KeyPoint> new_pts;
4492 detector->
detect(src, new_pts, src_mask);
4493 std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4494 for (; it != end; ++it) {
4495 it->pt.x *= multiplier;
4496 it->pt.y *= multiplier;
4497 it->size *= multiplier;
4500 keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4509 resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
4514 vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4518 #elif !defined(VISP_BUILD_SHARED_LIBS) 4521 void dummy_vpKeyPoint(){};
Implementation of a matrix and operations on matrices.
void displayMatching(const vpImage< unsigned char > &IRef, vpImage< unsigned char > &IMatching, unsigned int crossSize, unsigned int lineThickness=1, const vpColor &color=vpColor::green)
static void compute3D(const cv::KeyPoint &candidate, const std::vector< vpPoint > &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
void set_oZ(const double oZ)
Set the point Z coordinate in the object frame.
double get_oY() const
Get the point Y coordinate in the object frame.
void detectExtractAffine(const vpImage< unsigned char > &I, std::vector< std::vector< cv::KeyPoint > > &listOfKeypoints, std::vector< cv::Mat > &listOfDescriptors, std::vector< vpImage< unsigned char > > *listOfAffineI=NULL)
std::vector< unsigned int > getRansacInlierIndex() const
double getMatchingRatioThreshold() const
int getNbRansacMinInlierCount() const
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
double getRansacConsensusPercentage() const
vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType, const std::string &matcherName, const vpFilterMatchingType &filterType=ratioDistanceThreshold)
void createImageMatching(vpImage< unsigned char > &IRef, vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
Point coordinates conversion from normalized coordinates in meter to pixel coordinates ...
static bool isNaN(const double value)
Class to define colors available for display functionalities.
static bool equal(double x, double y, double s=0.001)
static const vpColor none
error that can be emitted by ViSP classes.
void setRansacThreshold(const double &t)
void set_x(const double x)
Set the point x coordinate in the image plane.
vpHomogeneousMatrix inverse() const
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
Point coordinates conversion from pixel coordinates to normalized coordinates in meter...
void getObjectPoints(std::vector< cv::Point3f > &objectPoints) const
static const vpColor green
void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector< cv::DMatch > &matches, double &elapsedTime)
std::vector< vpPoint > getRansacInliers() const
double get_oX() const
Get the point X coordinate in the object frame.
static int round(const double x)
void insertImageMatching(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
VISP_EXPORT double measureTimeMs()
void getTrainPoints(std::vector< cv::Point3f > &points) const
vpRect getBoundingBox() const
Class that defines what is a point.
static void write(const vpImage< unsigned char > &I, const std::string &filename)
vpMatrix getCovarianceMatrix() const
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
Defines a generic 2D polygon.
std::string getMatcherName() const
double getRansacThreshold() const
std::string getDetectorName() const
void getQueryKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
void set_u(const double u)
void getTrainKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpCylinder > &cylinders, const std::vector< std::vector< std::vector< vpImagePoint > > > &vectorOfCylinderRois, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
bool _reference_computed
flag to indicate if the reference has been built.
bool isInside(const vpImagePoint &iP, const PointInPolygonMethod &method=PnPolyRayCasting) const
void set_v(const double v)
Class used for pose computation from N points (pose from point only). Some of the algorithms implemented...
Generic class defining intrinsic camera parameters.
void set_y(const double y)
Set the point y coordinate in the image plane.
void set_oX(const double oX)
Set the point X coordinate in the object frame.
void display(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, unsigned int size=3)
unsigned int buildReference(const vpImage< unsigned char > &I)
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, bool(*func)(vpHomogeneousMatrix *)=NULL)
double get_oZ() const
Get the point Z coordinate in the object frame.
std::string getExtractorName() const
void insert(const vpImage< Type > &src, const vpImagePoint &topLeft)
std::vector< vpImagePoint > referenceImagePointsList
const char * what() const
bool computePose(const std::vector< cv::Point2f > &imagePoints, const std::vector< cv::Point3f > &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector< int > &inlierIndex, double &elapsedTime, bool(*func)(vpHomogeneousMatrix *)=NULL)
unsigned int matchPoint(const vpImage< unsigned char > &I)
double getRansacReprojectionError() const
void setRansacMaxTrials(const int &rM)
void setRansacNbInliersToReachConsensus(const unsigned int &nbC)
static void displayCircle(const vpImage< unsigned char > &I, const vpImagePoint &center, unsigned int radius, const vpColor &color, bool fill=false, unsigned int thickness=1)
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
void loadLearningData(const std::string &filename, const bool binaryMode=false, const bool append=false)
void setWorldCoordinates(const double oX, const double oY, const double oZ)
unsigned int getHeight() const
static void read(vpImage< unsigned char > &I, const std::string &filename)
Implementation of column vector and the associated operations.
double get_x() const
Get the point x coordinate in the image plane.
void set_oY(const double oY)
Set the point Y coordinate in the object frame.
void saveLearningData(const std::string &filename, const bool binaryMode=false, const bool saveTrainingImages=true)
bool matchPointAndDetect(const vpImage< unsigned char > &I, vpRect &boundingBox, vpImagePoint &centerOfGravity, const bool isPlanarObject=true, std::vector< vpImagePoint > *imPts1=NULL, std::vector< vpImagePoint > *imPts2=NULL, double *meanDescriptorDistance=NULL, double *detectionScore=NULL, const vpRect &rectangle=vpRect())
int getNbRansacIterations() const
double get_y() const
Get the point y coordinate in the image plane.
std::vector< unsigned int > matchedReferencePoints
bool getUseRansacVVSPoseEstimation() const
Defines a rectangle in the plane.
std::vector< vpImagePoint > currentImagePointsList
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1)
This class defines the container for a plane geometrical structure.
void loadConfigFile(const std::string &configFile)
void addPoint(const vpPoint &P)
unsigned int getWidth() const
static void convertFromOpenCV(const cv::KeyPoint &from, vpImagePoint &to)
Class that consider the case of a translation vector.
Implementation of a rotation vector as axis-angle minimal representation.
double getMatchingFactorThreshold() const
void setCovarianceComputation(const bool &flag)
bool getUseRansacConsensusPercentage() const
void set_ij(const double ii, const double jj)
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
vpMatchingMethodEnum getMatchingMethod() const
void initMatcher(const std::string &matcherName)
void extract(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, cv::Mat &descriptors, std::vector< cv::Point3f > *trainPoints=NULL)
void parse(const std::string &filename)
bool detect(const vpImage< unsigned char > &I)