/****************************************************************************
 *
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Key point functionalities.
 *
 * Authors:
 * Souriya Trinh
 *
 *****************************************************************************/

#include <iomanip>
#include <limits>

#include <visp3/core/vpIoTools.h>
#include <visp3/vision/vpKeyPoint.h>

#if (VISP_HAVE_OPENCV_VERSION >= 0x020101)

#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
#include <opencv2/calib3d/calib3d.hpp>
#endif

#include <pugixml.hpp>

namespace
{
// Specific type transformation functions
inline cv::DMatch knnToDMatch(const std::vector<cv::DMatch> &knnMatches)
{
  if (knnMatches.size() > 0) {
    return knnMatches[0];
  }

  return cv::DMatch();
}

inline vpImagePoint matchRansacToVpImage(const std::pair<cv::KeyPoint, cv::Point3f> &pair)
{
  return vpImagePoint(pair.first.pt.y, pair.first.pt.x);
}

} // namespace

vpKeyPoint::vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
    m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
    m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();

  m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
  m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);

  init();
}

vpKeyPoint::vpKeyPoint(const std::string &detectorName, const std::string &extractorName,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
    m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
    m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();

  m_detectorNames.push_back(detectorName);
  m_extractorNames.push_back(extractorName);

  init();
}

vpKeyPoint::vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
    m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
    m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
    m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
    m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
    m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
    m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0),
    m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(), m_ransacParallel(false),
    m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01), m_trainDescriptors(),
    m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();
  init();
}

void vpKeyPoint::affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
{
  int h = img.rows;
  int w = img.cols;

  mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));

  cv::Mat A = cv::Mat::eye(2, 3, CV_32F);

  // if (phi != 0.0) {
  if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    phi *= M_PI / 180.;
    double s = sin(phi);
    double c = cos(phi);

    A = (cv::Mat_<float>(2, 2) << c, -s, s, c);

    cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
    cv::Mat tcorners = corners * A.t();
    cv::Mat tcorners_x, tcorners_y;
    tcorners.col(0).copyTo(tcorners_x);
    tcorners.col(1).copyTo(tcorners_y);
    std::vector<cv::Mat> channels;
    channels.push_back(tcorners_x);
    channels.push_back(tcorners_y);
    cv::merge(channels, tcorners);

    cv::Rect rect = cv::boundingRect(tcorners);
    A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);

    cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
  }
  // if (tilt != 1.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
    double s = 0.8 * sqrt(tilt * tilt - 1);
    cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
    cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
    A.row(0) = A.row(0) / tilt;
  }
  // if (tilt != 1.0 || phi != 0.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
      std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    h = img.rows;
    w = img.cols;
    cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
  }
  cv::invertAffineTransform(A, Ai);
}
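
/*
  Usage sketch (illustrative only, not part of the class): affineSkew() is the
  building block of the ASIFT-style detection used when m_useAffineDetection
  is enabled, where the image is warped for a range of simulated tilts and
  in-plane rotations before running the detectors. A typical sampling loop,
  with hypothetical local variables, looks like:

    for (int tl = 1; tl < 6; tl++) {
      double t = pow(2, 0.5 * tl);                 // tilt = 2^0.5, 2^1, ...
      for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
        cv::Mat timg = img.clone(), mask, Ai;
        affineSkew(t, phi, timg, mask, Ai);        // warp image and mask, get inverse transform
        // detect keypoints on timg, then map them back to the original image with Ai
      }
    }
*/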

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color) { return buildReference(I_color, vpRect()); }

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpImagePoint &iP,
                                        unsigned int height, unsigned int width)
{
  return buildReference(I, vpRect(iP, width, height));
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP,
                                        unsigned int height, unsigned int width)
{
  return buildReference(I_color, vpRect(iP, width, height));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpRect &rectangle)
{
  // Reset variables used when dealing with 3D models.
  // Since no 3D point list is passed, we don't need these variables.
  m_trainPoints.clear();
  m_mapOfImageId.clear();
  m_mapOfImages.clear();
  m_currentImageId = 1;

  if (m_useAffineDetection) {
    std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
    std::vector<cv::Mat> listOfTrainDescriptors;

    // Detect keypoints and extract descriptors on multiple images
    detectExtractAffine(I, listOfTrainKeyPoints, listOfTrainDescriptors);

    // Flatten the different train lists
    m_trainKeyPoints.clear();
    for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
         it != listOfTrainKeyPoints.end(); ++it) {
      m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
    }

    bool first = true;
    for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
         ++it) {
      if (first) {
        first = false;
        it->copyTo(m_trainDescriptors);
      } else {
        m_trainDescriptors.push_back(*it);
      }
    }
  } else {
    detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
    extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
  }

  // Save the correspondence keypoint class_id with the training image_id in a map.
  // Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(m_trainKeyPoints, referenceImagePointsList);

  _reference_computed = true;

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}
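
/*
  Usage sketch (illustrative only): building a reference database from a whole
  grayscale image and matching a query image against it, assuming hypothetical
  images Iref and Icur:

    vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
    unsigned int nbRef = keypoint.buildReference(Iref);  // detect + extract + index
    unsigned int nbMatched = keypoint.matchPoint(Icur);  // match the current image against the reference
*/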

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
{
  vpImageConvert::convert(I_color, m_I);
  return (buildReference(m_I, rectangle));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
                                        std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed.
    // Store the hash of a keypoint as the key and the index of the keypoint as the value.
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  return (buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &trainKeyPoints,
                                        std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed.
    // Store the hash of a keypoint as the key and the index of the keypoint as the value.
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  return (buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
                                        const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                        bool append, int class_id)
{
  if (!append) {
    m_trainPoints.clear();
    m_mapOfImageId.clear();
    m_mapOfImages.clear();
    m_currentImageId = 0;
    m_trainKeyPoints.clear();
  }

  m_currentImageId++;

  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
  // Set class_id if != -1
  if (class_id != -1) {
    for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
      it->class_id = class_id;
    }
  }

  // Save the correspondence keypoint class_id with the training image_id in a map.
  // Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
       ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Append reference lists
  m_trainKeyPoints.insert(m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
  if (!append) {
    trainDescriptors.copyTo(m_trainDescriptors);
  } else {
    m_trainDescriptors.push_back(trainDescriptors);
  }
  this->m_trainPoints.insert(m_trainPoints.end(), points3f.begin(), points3f.end());

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(m_trainKeyPoints, referenceImagePointsList);
  vpConvert::convertFromOpenCV(m_trainPoints, m_trainVpPoints);

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  _reference_computed = true;

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const std::vector<cv::KeyPoint> &trainKeyPoints,
                                        const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                        bool append, int class_id)
{
  vpImageConvert::convert(I_color, m_I);
  return (buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

void vpKeyPoint::compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate.pt.x, candidate.pt.y, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point = cv::Point3f((float)point_obj[0], (float)point_obj[1], (float)point_obj[2]);
}
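
/*
  Geometry note for both compute3D() overloads: with the plane expressed in the
  camera frame as A*X + B*Y + C*Z + D = 0 and the normalized coordinates (x, y)
  of the candidate such that X = x*Z and Y = y*Z, substitution gives
  Z*(A*x + B*y + C) + D = 0, hence Z = -D / (A*x + B*y + C) as computed above.
  The 3D point is then expressed back in the object frame with oMc = cMo^-1.
*/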

void vpKeyPoint::compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, vpPoint &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point.setWorldCoordinates(point_obj);
}

void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<cv::KeyPoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<cv::Point3f> &points, cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpImagePoint imPt;
  cv::Point3f pt;
  cv::Mat desc;

  std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      imPt.set_ij(it2->first.pt.y, it2->first.pt.x);
      if (it1->isInside(imPt)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != NULL) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove candidate keypoint which is located on the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      } else {
        ++it2;
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<vpImagePoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<vpPoint> &points, cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpPoint pt;
  cv::Mat desc;

  std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      if (it1->isInside(it2->first)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != NULL) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove candidate keypoint which is located on the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      } else {
        ++it2;
      }
    }
  }

  // Copy the filtered descriptors back, as done in the cv::KeyPoint overload
  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<cv::KeyPoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
    cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->pt.y, it1->pt.x)) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on a cylinder
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->pt.x, it1->pt.y, xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(cv::Point3f((float)pt.get_oX(), (float)pt.get_oY(), (float)pt.get_oZ()));

            if (descriptors != NULL) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<vpImagePoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
    cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->get_i(), it1->get_j())) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on a cylinder
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->get_u(), it1->get_v(), xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(pt);

            if (descriptors != NULL) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

bool vpKeyPoint::computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
                             const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
                             double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    std::cerr << "Not enough points to compute the pose (at least 4 points "
                 "are needed)."
              << std::endl;

    return false;
  }

  cv::Mat cameraMatrix =
      (cv::Mat_<double>(3, 3) << cam.get_px(), 0, cam.get_u0(), 0, cam.get_py(), cam.get_v0(), 0, 0, 1);
  cv::Mat rvec, tvec;

  // Bug with OpenCV < 2.4.0 when zero distortion is provided by an empty
  // array (http://code.opencv.org/issues/1700, http://code.opencv.org/issues/1718):
  // "what(): Distortion coefficients must be 1x4, 4x1, 1x5, 5x1, 1x8 or 8x1
  // floating-point vector in function cvProjectPoints2". Fixed in OpenCV 2.4.0 (r7558).
  // cv::Mat distCoeffs;
  cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);

  try {
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
    // OpenCV 3.0.0 (2014/12/12)
    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError,
                       0.99, // confidence=0.99 (default): the probability that
                             // the algorithm produces a useful result.
                       inlierIndex, cv::SOLVEPNP_ITERATIVE);
// Available estimation methods:
// - SOLVEPNP_ITERATIVE (default): iterative method based on Levenberg-Marquardt
//   optimization. The function finds a pose that minimizes the reprojection
//   error, that is the sum of squared distances between the observed
//   projections imagePoints and the objectPoints projected with projectPoints().
// - SOLVEPNP_P3P: based on the paper of X.S. Gao, X.-R. Hou, J. Tang and
//   H.-F. Chang, "Complete Solution Classification for the
//   Perspective-Three-Point Problem". Requires exactly four object and image points.
// - SOLVEPNP_EPNP: introduced by F. Moreno-Noguer, V. Lepetit and P. Fua in the
//   paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation".
// - SOLVEPNP_DLS: based on the paper of Joel A. Hesch and Stergios I.
//   Roumeliotis, "A Direct Least-Squares (DLS) Method for PnP".
// - SOLVEPNP_UPNP: based on the paper of A. Penate-Sanchez, J. Andrade-Cetto and
//   F. Moreno-Noguer, "Exhaustive Linearization for Robust Camera Pose and Focal
//   Length Estimation". Also estimates the parameters f_x and f_y, assuming both
//   have the same value, and updates cameraMatrix with the estimated focal length.
#else
    int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
    if (m_useConsensusPercentage) {
      nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
    }

    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
#endif
  } catch (cv::Exception &e) {
    std::cerr << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }
  vpTranslationVector translationVec(tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2));
  vpThetaUVector thetaUVector(rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2));
  cMo = vpHomogeneousMatrix(translationVec, thetaUVector);

  if (func != NULL) {
    // Check the final pose returned by solvePnPRansac to discard
    // solutions which do not respect the pose criterion.
    if (!func(cMo)) {
      elapsedTime = (vpTime::measureTimeMs() - t);
      return false;
    }
  }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return true;
}
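
/*
  Usage sketch (illustrative only): estimating a pose from matched 2D/3D points
  with this OpenCV-based RANSAC solver; the point vectors and camera parameters
  below are hypothetical:

    std::vector<cv::Point2f> imagePoints;   // matched 2D points (pixel coordinates)
    std::vector<cv::Point3f> objectPoints;  // corresponding 3D points (object frame)
    vpCameraParameters cam(600.0, 600.0, 320.0, 240.0);  // px, py, u0, v0
    vpHomogeneousMatrix cMo;
    std::vector<int> inlierIndex;
    double elapsedTime;
    bool success = keypoint.computePose(imagePoints, objectPoints, cam, cMo, inlierIndex, elapsedTime);
*/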

bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, double &elapsedTime,
                             bool (*func)(const vpHomogeneousMatrix &))
{
  std::vector<unsigned int> inlierIndex;
  return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
}

bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
                             double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (objectVpPoints.size() < 4) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    // std::cerr << "Not enough points to compute the pose (at least 4 points are needed)." << std::endl;

    return false;
  }

  vpPose pose;

  for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
    pose.addPoint(*it);
  }

  unsigned int nbInlierToReachConsensus = (unsigned int)m_nbRansacMinInlierCount;
  if (m_useConsensusPercentage) {
    nbInlierToReachConsensus =
        (unsigned int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
  }

  pose.setRansacFilterFlag(m_ransacFilterFlag);
  pose.setUseParallelRansac(m_ransacParallel);
  pose.setNbParallelRansacThreads(m_ransacParallelNbThreads);
  pose.setRansacNbInliersToReachConsensus(nbInlierToReachConsensus);
  pose.setRansacThreshold(m_ransacThreshold);
  pose.setRansacMaxTrials(m_nbRansacIterations);

  bool isRansacPoseEstimationOk = false;
  try {
    pose.setCovarianceComputation(m_computeCovariance);
    isRansacPoseEstimationOk = pose.computePose(vpPose::RANSAC, cMo, func);
    inliers = pose.getRansacInliers();
    inlierIndex = pose.getRansacInlierIndex();

    if (m_computeCovariance) {
      m_covarianceMatrix = pose.getCovarianceMatrix();
    }
  } catch (const vpException &e) {
    std::cerr << "e=" << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }

  // if (func != NULL && isRansacPoseEstimationOk) {
  //   // Check the final pose returned by the RANSAC VVS pose estimation, as in
  //   // some rare cases we can converge toward a final cMo that does not respect
  //   // the pose criterion even if the 4 minimal points picked do respect it.
  //   if (!func(&cMo)) {
  //     elapsedTime = (vpTime::measureTimeMs() - t);
  //     return false;
  //   }
  // }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return isRansacPoseEstimationOk;
}

double vpKeyPoint::computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
                                              const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est)
{
  if (matchKeyPoints.size() == 0) {
    // return std::numeric_limits<double>::max(); // creates a compilation error
    // under Windows (max macro); to use it we would have to add #undef max
    return DBL_MAX;
  }

  std::vector<double> errors(matchKeyPoints.size());
  size_t cpt = 0;
  vpPoint pt;
  for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
       it != matchKeyPoints.end(); ++it, cpt++) {
    pt.set_oX(it->second.x);
    pt.set_oY(it->second.y);
    pt.set_oZ(it->second.z);
    pt.project(cMo_est);
    double u = 0.0, v = 0.0;
    vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), u, v);
    errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
  }

  return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
}
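
/*
  In other words, the returned value is the mean reprojection error in pixels:
  (1/N) * sum_i sqrt((u_i - x_i)^2 + (v_i - y_i)^2), where (u_i, v_i) is the
  projection of the i-th matched 3D point with the estimated pose cMo_est and
  (x_i, y_i) is the detected keypoint position in the image.
*/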

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &IRef, vpImage<unsigned char> &ICurrent,
                                     vpImage<unsigned char> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = ((std::max))(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<unsigned char>(height, width);
}

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &IRef, vpImage<vpRGBa> &ICurrent,
                                     vpImage<vpRGBa> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = ((std::max))(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<vpRGBa>(height, width);
}

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
{
  // Number of images in the training database + the current image in which
  // we want to detect the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so display the two images side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  } else {
    // Multiple training images, display them as a mosaic image
    // (unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Deal with a non-square mosaic grid and the total number of images
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<unsigned char>(maxH * nbHeight, maxW * nbWidth);
  }
}
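
/*
  Worked example of the mosaic sizing above: with 3 training images plus the
  current image, nbImg = 4 and nbImgSqrt = round(sqrt(4)) = 2, giving a 2x2
  grid. With 5 training images, nbImg = 6, nbImgSqrt = round(sqrt(6)) = 2 and
  2*2 < 6, so nbWidth is bumped to 3: the mosaic becomes a 3-column, 2-row
  grid of cells, each maxW x maxH pixels.
*/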

void vpKeyPoint::createImageMatching(vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
{
  // Number of images in the training database + the current image in which
  // we want to detect the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so display the two images side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  } else {
    // Multiple training images, display them as a mosaic image
    // (unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Deal with a non-square mosaic grid and the total number of images
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<vpRGBa>(maxH * nbHeight, maxW * nbWidth);
  }
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I_color, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask)
{
  double elapsedTime;
  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I, matImg, false);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
        rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  } else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I_color, matImg);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
        rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  } else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const cv::Mat &mask)
{
  double t = vpTime::measureTimeMs();
  keyPoints.clear();

  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
       it != m_detectors.end(); ++it) {
    std::vector<cv::KeyPoint> kp;
    it->second->detect(matImg, kp, mask);
    keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
  }

  elapsedTime = vpTime::measureTimeMs() - t;
}
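
/*
  Note: when several detectors are configured, the keypoints found by each
  detector are simply concatenated. A minimal sketch (illustrative only),
  assuming an already configured vpKeyPoint instance and a grayscale image I:

    std::vector<cv::KeyPoint> keyPoints;
    keypoint.detect(I, keyPoints);                             // detect on the whole image
    keypoint.detect(I, keyPoints, vpRect(10, 10, 100, 100));   // restrict detection to a ROI
*/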

void vpKeyPoint::display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<unsigned char> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<vpRGBa> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
                                 const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
                                 unsigned int lineThickness)
{
  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
    // No training images so return
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  // Number of images in the training database + the current image in which
  // we want to detect the object
  int nbImg = (int)(m_mapOfImages.size() + 1);

  if (nbImg == 2) {
    // Only one training image, so display the matching result side by side
    displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
  } else {
    // Multiple training images, display them as a mosaic image
    int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); // (int) std::floor(std::sqrt((double) nbImg) + 0.5);
    int nbWidth = nbImgSqrt;
    int nbHeight = nbImgSqrt;

    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    std::map<int, int> mapOfImageIdIndex;
    int cpt = 0;
    unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it, cpt++) {
      mapOfImageIdIndex[it->first] = cpt;

      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    // Indexes of the current image in the grid, computed to put preferably
    // the image in the center of the mosaic grid
    int medianI = nbHeight / 2;
    int medianJ = nbWidth / 2;
    int medianIndex = medianI * nbWidth + medianJ;
    for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
      vpImagePoint topLeftCorner;
      int current_class_id_index = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
      } else {
        // Shift by one the index of the training images which come after the current image
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
      }

      int indexI = current_class_id_index / nbWidth;
      int indexJ = current_class_id_index - (indexI * nbWidth);
      topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);

      // Display cross for keypoints in the learning database
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }

    vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
    for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
      // Display cross for keypoints detected in the current image
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }
    for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
      // Display green circle for RANSAC inliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
                               (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
    }
    for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
      // Display red circle for RANSAC outliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
                               (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
    }

    for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
      int current_class_id = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
      } else {
        // Shift by one the index of the training images which come after the current image
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
      }

      int indexI = current_class_id / nbWidth;
      int indexJ = current_class_id - (indexI * nbWidth);

      vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
                       (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
      vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
                         (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);

      // Draw a line between matching keypoints detected in the current image
      // and those detected in the training images
      vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
    }
  }
}
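
/*
  Worked example of the mosaic indexing above: with 6 training images,
  nbImg = 7 and nbImgSqrt = round(sqrt(7)) = 3, so nbWidth = nbHeight = 3.
  Then medianI = 1, medianJ = 1 and medianIndex = 4: the current image is
  placed in cell 4 (the center of the 3x3 grid), training images with index
  0..3 keep their cell, and training images with index 4 and 5 are shifted
  by one cell to leave the center free.
*/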
1691 
1704  const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
1705  unsigned int lineThickness)
1706 {
1707  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1708  // No training images so return
1709  std::cerr << "There is no training image loaded !" << std::endl;
1710  return;
1711  }
1712 
1713  // Nb images in the training database + the current image we want to detect
1714  // the object
1715  int nbImg = (int)(m_mapOfImages.size() + 1);
1716 
1717  if (nbImg == 2) {
1718  // Only one training image, so we display the matching result side-by-side
1719  displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
1720  } else {
1721  // Multiple training images, display them as a mosaic image
1722  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
1723  int nbWidth = nbImgSqrt;
1724  int nbHeight = nbImgSqrt;
1725 
1726  if (nbImgSqrt * nbImgSqrt < nbImg) {
1727  nbWidth++;
1728  }
1729 
1730  std::map<int, int> mapOfImageIdIndex;
1731  int cpt = 0;
1732  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
1733  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1734  ++it, cpt++) {
1735  mapOfImageIdIndex[it->first] = cpt;
1736 
1737  if (maxW < it->second.getWidth()) {
1738  maxW = it->second.getWidth();
1739  }
1740 
1741  if (maxH < it->second.getHeight()) {
1742  maxH = it->second.getHeight();
1743  }
1744  }
1745 
1746  // Indexes of the current image in the grid computed to put preferably the
1747  // image in the center of the mosaic grid
1748  int medianI = nbHeight / 2;
1749  int medianJ = nbWidth / 2;
1750  int medianIndex = medianI * nbWidth + medianJ;
1751  for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1752  vpImagePoint topLeftCorner;
1753  int current_class_id_index = 0;
1754  if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1755  current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1756  } else {
1757  // Shift of one unity the index of the training images which are after
1758  // the current image
1759  current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1760  }
1761 
1762  int indexI = current_class_id_index / nbWidth;
1763  int indexJ = current_class_id_index - (indexI * nbWidth);
1764  topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);
1765 
1766  // Display cross for keypoints in the learning database
1767  vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
1768  (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
1769  }
1770 
1771  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
1772  for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1773  // Display cross for keypoints detected in the current image
1774  vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
1775  (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
1776  }
1777  for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1778  // Display green circle for RANSAC inliers
1779  vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
1780  (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
1781  }
1782  for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1783  // Display red circle for RANSAC outliers
1784  vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
1785  (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
1786  }
1787 
1788  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1789  int current_class_id = 0;
1790  if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
1791  current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1792  } else {
1793  // Shift of one unity the index of the training images which are after
1794  // the current image
1795  current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1796  }
1797 
1798  int indexI = current_class_id / nbWidth;
1799  int indexJ = current_class_id - (indexI * nbWidth);
1800 
1801  vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
1802  (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
1803  vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
1804  (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);
1805 
1806  // Draw a line between each matching keypoint detected in the current
1807  // image and its correspondence in the training images
1808  vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
1809  }
1810  }
1811 }
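
/*
 * Note on the coordinate conventions used above: vpImagePoint stores
 * (i, j) = (row, column) while cv::KeyPoint::pt stores (x, y) =
 * (column, row). Hence pt.y feeds the i (vertical) offset and pt.x the j
 * (horizontal) offset when drawing crosses, circles and match lines, e.g.:
 *
 *   cv::KeyPoint kpt;               // kpt.pt = (x, y)
 *   vpImagePoint ip(kpt.pt.y,       // i = row    = y
 *                   kpt.pt.x);      // j = column = x
 */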
1812 
1823 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1824  std::vector<cv::Point3f> *trainPoints)
1825 {
1826  double elapsedTime;
1827  extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1828 }
1829 
1840 void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1841  std::vector<cv::Point3f> *trainPoints)
1842 {
1843  double elapsedTime;
1844  extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
1845 }
1846 
1857 void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1858  std::vector<cv::Point3f> *trainPoints)
1859 {
1860  double elapsedTime;
1861  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1862 }
1863 
1875 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1876  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1877 {
1878  cv::Mat matImg;
1879  vpImageConvert::convert(I, matImg, false);
1880  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1881 }
1882 
1894 void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1895  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1896 {
1897  cv::Mat matImg;
1898  vpImageConvert::convert(I_color, matImg);
1899  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1900 }
1901 
1913 void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1914  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1915 {
1916  double t = vpTime::measureTimeMs();
1917  bool first = true;
1918 
1919  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1920  itd != m_extractors.end(); ++itd) {
1921  if (first) {
1922  first = false;
1923  // Check if we have 3D object points information
1924  if (trainPoints != NULL && !trainPoints->empty()) {
1925  // Copy the input list of keypoints; keypoints for which a descriptor
1926  // cannot be computed are removed by the compute() function
1927  std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1928 
1929  // Extract descriptors for the given list of keypoints
1930  itd->second->compute(matImg, keyPoints, descriptors);
1931 
1932  if (keyPoints.size() != keyPoints_tmp.size()) {
1933  // Keypoints have been removed
1934  // Store the hash of a keypoint as the key and the index of the
1935  // keypoint as the value
1936  std::map<size_t, size_t> mapOfKeypointHashes;
1937  size_t cpt = 0;
1938  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1939  ++it, cpt++) {
1940  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1941  }
1942 
1943  std::vector<cv::Point3f> trainPoints_tmp;
1944  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1945  if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1946  trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1947  }
1948  }
1949 
1950  // Copy trainPoints_tmp back to *trainPoints
1951  *trainPoints = trainPoints_tmp;
1952  }
1953  } else {
1954  // Extract descriptors for the given list of keypoints
1955  itd->second->compute(matImg, keyPoints, descriptors);
1956  }
1957  } else {
1958  // Copy the input list of keypoints; keypoints for which a descriptor
1959  // cannot be computed are removed by the compute() function
1960  std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1961 
1962  cv::Mat desc;
1963  // Extract descriptors for the given list of keypoints
1964  itd->second->compute(matImg, keyPoints, desc);
1965 
1966  if (keyPoints.size() != keyPoints_tmp.size()) {
1967  // Keypoints have been removed
1968  // Store the hash of a keypoint as the key and the index of the
1969  // keypoint as the value
1970  std::map<size_t, size_t> mapOfKeypointHashes;
1971  size_t cpt = 0;
1972  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1973  ++it, cpt++) {
1974  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1975  }
1976 
1977  std::vector<cv::Point3f> trainPoints_tmp;
1978  cv::Mat descriptors_tmp;
1979  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1980  if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1981  if (trainPoints != NULL && !trainPoints->empty()) {
1982  trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1983  }
1984 
1985  if (!descriptors.empty()) {
1986  descriptors_tmp.push_back(descriptors.row((int)mapOfKeypointHashes[myKeypointHash(*it)]));
1987  }
1988  }
1989  }
1990 
1991  if (trainPoints != NULL) {
1992  // Copy trainPoints_tmp back to *trainPoints
1993  *trainPoints = trainPoints_tmp;
1994  }
1995  // Copy descriptors_tmp to descriptors
1996  descriptors_tmp.copyTo(descriptors);
1997  }
1998 
1999  // Merge descriptors horizontally
2000  if (descriptors.empty()) {
2001  desc.copyTo(descriptors);
2002  } else {
2003  cv::hconcat(descriptors, desc, descriptors);
2004  }
2005  }
2006  }
2007 
2008  if (keyPoints.size() != (size_t)descriptors.rows) {
2009  std::cerr << "keyPoints.size() != (size_t) descriptors.rows" << std::endl;
2010  }
2011  elapsedTime = vpTime::measureTimeMs() - t;
2012 }
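
/*
 * Illustrative usage sketch for the detection + extraction pipeline above.
 * The image file name and the ORB detector/extractor/matcher choice are
 * assumptions; any detector/extractor pair with compatible descriptor types
 * would work the same way:
 *
 *   vpImage<unsigned char> I;
 *   vpImageIo::read(I, "image.png"); // requires the visp_io module
 *
 *   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
 *   std::vector<cv::KeyPoint> kpts;
 *   cv::Mat descriptors;
 *   double elapsedTime;
 *   keypoint.detect(I, kpts, elapsedTime);               // detect keypoints
 *   keypoint.extract(I, kpts, descriptors, elapsedTime); // one row per kept keypoint
 */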
2013 
2017 void vpKeyPoint::filterMatches()
2018 {
2019  std::vector<cv::KeyPoint> queryKpts;
2020  std::vector<cv::Point3f> trainPts;
2021  std::vector<cv::DMatch> m;
2022 
2023  if (m_useKnn) {
2024  // double max_dist = 0;
2025  // double min_dist = std::numeric_limits<double>::max(); // creates an
2026  // error under Windows; to fix it we would have to add #undef max
2027  double min_dist = DBL_MAX;
2028  double mean = 0.0;
2029  std::vector<double> distance_vec(m_knnMatches.size());
2030 
2031  if (m_filterType == stdAndRatioDistanceThreshold) {
2032  for (size_t i = 0; i < m_knnMatches.size(); i++) {
2033  double dist = m_knnMatches[i][0].distance;
2034  mean += dist;
2035  distance_vec[i] = dist;
2036 
2037  if (dist < min_dist) {
2038  min_dist = dist;
2039  }
2040  // if (dist > max_dist) {
2041  // max_dist = dist;
2042  //}
2043  }
2044  mean /= m_queryDescriptors.rows;
2045  }
2046 
2047  double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2048  double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
2049  double threshold = min_dist + stdev;
2050 
2051  for (size_t i = 0; i < m_knnMatches.size(); i++) {
2052  if (m_knnMatches[i].size() >= 2) {
2053  // Calculate the ratio of the descriptor distances between the two
2054  // nearest neighbors of the keypoint
2055  float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
2056  // float ratio = std::sqrt((vecMatches[i][0].distance *
2057  // vecMatches[i][0].distance)
2058  // / (vecMatches[i][1].distance *
2059  // vecMatches[i][1].distance));
2060  double dist = m_knnMatches[i][0].distance;
2061 
2062  if (ratio < m_matchingRatioThreshold || (m_filterType == stdAndRatioDistanceThreshold && dist < threshold)) {
2063  m.push_back(cv::DMatch((int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
2064 
2065  if (!m_trainPoints.empty()) {
2066  trainPts.push_back(m_trainPoints[(size_t)m_knnMatches[i][0].trainIdx]);
2067  }
2068  queryKpts.push_back(m_queryKeyPoints[(size_t)m_knnMatches[i][0].queryIdx]);
2069  }
2070  }
2071  }
2072  } else {
2073  // double max_dist = 0;
2074  // std::numeric_limits<double>::max() creates an error under Windows;
2075  // to fix it we would have to add #undef max
2076  double min_dist = DBL_MAX;
2077  double mean = 0.0;
2078  std::vector<double> distance_vec(m_matches.size());
2079  for (size_t i = 0; i < m_matches.size(); i++) {
2080  double dist = m_matches[i].distance;
2081  mean += dist;
2082  distance_vec[i] = dist;
2083 
2084  if (dist < min_dist) {
2085  min_dist = dist;
2086  }
2087  // if (dist > max_dist) {
2088  // max_dist = dist;
2089  // }
2090  }
2091  mean /= m_queryDescriptors.rows;
2092 
2093  double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2094  double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
2095 
2096  // Define a threshold where we keep all keypoints whose descriptor
2097  // distance falls below a factor of the minimum descriptor distance (over
2098  // all the query keypoints) or below the minimum descriptor distance +
2099  // the standard deviation (computed over all the query descriptor
2100  // distances)
2101  double threshold =
2102  m_filterType == constantFactorDistanceThreshold ? m_matchingFactorThreshold * min_dist : min_dist + stdev;
2103 
2104  for (size_t i = 0; i < m_matches.size(); i++) {
2105  if (m_matches[i].distance <= threshold) {
2106  m.push_back(cv::DMatch((int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
2107 
2108  if (!m_trainPoints.empty()) {
2109  trainPts.push_back(m_trainPoints[(size_t)m_matches[i].trainIdx]);
2110  }
2111  queryKpts.push_back(m_queryKeyPoints[(size_t)m_matches[i].queryIdx]);
2112  }
2113  }
2114  }
2115 
2116  if (m_useSingleMatchFilter) {
2117  // Eliminate matches where multiple query keypoints are matched to the
2118  // same train keypoint
2119  std::vector<cv::DMatch> mTmp;
2120  std::vector<cv::Point3f> trainPtsTmp;
2121  std::vector<cv::KeyPoint> queryKptsTmp;
2122 
2123  std::map<int, int> mapOfTrainIdx;
2124  // Count the number of query points matched to the same train point
2125  for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2126  mapOfTrainIdx[it->trainIdx]++;
2127  }
2128 
2129  // Keep matches with only one correspondence
2130  for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2131  if (mapOfTrainIdx[it->trainIdx] == 1) {
2132  mTmp.push_back(cv::DMatch((int)queryKptsTmp.size(), it->trainIdx, it->distance));
2133 
2134  if (!m_trainPoints.empty()) {
2135  trainPtsTmp.push_back(m_trainPoints[(size_t)it->trainIdx]);
2136  }
2137  queryKptsTmp.push_back(queryKpts[(size_t)it->queryIdx]);
2138  }
2139  }
2140 
2141  m_filteredMatches = mTmp;
2142  m_objectFilteredPoints = trainPtsTmp;
2143  m_queryFilteredKeyPoints = queryKptsTmp;
2144  } else {
2145  m_filteredMatches = m;
2146  m_objectFilteredPoints = trainPts;
2147  m_queryFilteredKeyPoints = queryKpts;
2148  }
2149 }
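
/*
 * Worked example of the distance thresholds above (illustrative): for
 * descriptor distances {10, 12, 30},
 *   min_dist  = 10
 *   mean      = (10 + 12 + 30) / 3            ~ 17.33
 *   stdev     = sqrt(1144 / 3 - mean * mean)  ~  9.00
 *   threshold = min_dist + stdev              ~ 19.00
 * so the matches at distances 10 and 12 are kept and the one at 30 is
 * rejected. With constantFactorDistanceThreshold and the default
 * m_matchingFactorThreshold = 2.0 the threshold would be 2.0 * 10 = 20.
 */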
2150 
2158 void vpKeyPoint::getObjectPoints(std::vector<cv::Point3f> &objectPoints) const
2159 {
2160  objectPoints = m_objectFilteredPoints;
2161 }
2162 
2170 void vpKeyPoint::getObjectPoints(std::vector<vpPoint> &objectPoints) const
2171 {
2172  vpConvert::convertFromOpenCV(m_objectFilteredPoints, objectPoints);
2173 }
2174 
2183 void vpKeyPoint::getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints, bool matches) const
2184 {
2185  if (matches) {
2186  keyPoints = m_queryFilteredKeyPoints;
2187  }
2188  else {
2189  keyPoints = m_queryKeyPoints;
2190  }
2191 }
2192 
2201 void vpKeyPoint::getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints, bool matches) const
2202 {
2203  if (matches) {
2204  keyPoints = currentImagePointsList;
2205  }
2206  else {
2207  vpConvert::convertFromOpenCV(m_queryKeyPoints, keyPoints);
2208  }
2209 }
2210 
2216 void vpKeyPoint::getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const { keyPoints = m_trainKeyPoints; }
2217 
2223 void vpKeyPoint::getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const { keyPoints = referenceImagePointsList; }
2224 
2231 void vpKeyPoint::getTrainPoints(std::vector<cv::Point3f> &points) const { points = m_trainPoints; }
2232 
2239 void vpKeyPoint::getTrainPoints(std::vector<vpPoint> &points) const { points = m_trainVpPoints; }
2240 
2245 void vpKeyPoint::init()
2246 {
2247 // Requires 2.4.0 <= OpenCV < 3.0.0
2248 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
2249  // The following line must be called in order to use SIFT or SURF
2250  if (!cv::initModule_nonfree()) {
2251  std::cerr << "Cannot init the nonfree module; SIFT and SURF cannot be used." << std::endl;
2252  }
2253 #endif
2254 
2255  // Use k-nearest neighbors (knn) to retrieve the two best matches for a
2256  // keypoint, so this is needed only for the ratio-based filtering methods
2257  if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
2258  m_useKnn = true;
2259  }
2260 
2261  initDetectors(m_detectorNames);
2262  initExtractors(m_extractorNames);
2263  initMatcher(m_matcherName);
2264 }
2265 
2271 void vpKeyPoint::initDetector(const std::string &detectorName)
2272 {
2273 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2274  m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
2275 
2276  if (m_detectors[detectorName] == NULL) {
2277  std::stringstream ss_msg;
2278  ss_msg << "Failed to initialize the detector: " << detectorName
2279  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2280  throw vpException(vpException::fatalError, ss_msg.str());
2281  }
2282 #else
2283  std::string detectorNameTmp = detectorName;
2284  std::string pyramid = "Pyramid";
2285  std::size_t pos = detectorName.find(pyramid);
2286  bool usePyramid = false;
2287  if (pos != std::string::npos) {
2288  detectorNameTmp = detectorName.substr(pos + pyramid.size());
2289  usePyramid = true;
2290  }
2291 
2292  if (detectorNameTmp == "SIFT") {
2293 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2294  (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2295  // SIFT is no longer patented since 09/03/2020
2296  cv::Ptr<cv::FeatureDetector> siftDetector;
2297  if (m_maxFeatures > 0) {
2298 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2299  siftDetector = cv::SIFT::create(m_maxFeatures);
2300 #else
2301  siftDetector = cv::xfeatures2d::SIFT::create(m_maxFeatures);
2302 #endif
2303  } else {
2304 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2305  siftDetector = cv::SIFT::create();
2306 #else
2307  siftDetector = cv::xfeatures2d::SIFT::create();
2308 #endif
2309  }
2310  if (!usePyramid) {
2311  m_detectors[detectorNameTmp] = siftDetector;
2312  } else {
2313  std::cerr << "You should not use SIFT with Pyramid feature detection!" << std::endl;
2314  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2315  }
2316 #else
2317  std::stringstream ss_msg;
2318  ss_msg << "Failed to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2319  << " was not built with the xFeatures2d module.";
2320  throw vpException(vpException::fatalError, ss_msg.str());
2321 #endif
2322  } else if (detectorNameTmp == "SURF") {
2323 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2324  cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
2325  if (!usePyramid) {
2326  m_detectors[detectorNameTmp] = surfDetector;
2327  } else {
2328  std::cerr << "You should not use SURF with Pyramid feature detection!" << std::endl;
2329  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
2330  }
2331 #else
2332  std::stringstream ss_msg;
2333  ss_msg << "Failed to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2334  << " was not built with the xFeatures2d module.";
2335  throw vpException(vpException::fatalError, ss_msg.str());
2336 #endif
2337  } else if (detectorNameTmp == "FAST") {
2338  cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
2339  if (!usePyramid) {
2340  m_detectors[detectorNameTmp] = fastDetector;
2341  } else {
2342  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2343  }
2344  } else if (detectorNameTmp == "MSER") {
2345  cv::Ptr<cv::FeatureDetector> mserDetector = cv::MSER::create();
2346  if (!usePyramid) {
2347  m_detectors[detectorNameTmp] = mserDetector;
2348  } else {
2349  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(mserDetector);
2350  }
2351  } else if (detectorNameTmp == "ORB") {
2352  cv::Ptr<cv::FeatureDetector> orbDetector;
2353  if (m_maxFeatures > 0) {
2354  orbDetector = cv::ORB::create(m_maxFeatures);
2355  }
2356  else {
2357  orbDetector = cv::ORB::create();
2358  }
2359  if (!usePyramid) {
2360  m_detectors[detectorNameTmp] = orbDetector;
2361  } else {
2362  std::cerr << "You should not use ORB with Pyramid feature detection!" << std::endl;
2363  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
2364  }
2365  } else if (detectorNameTmp == "BRISK") {
2366  cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
2367  if (!usePyramid) {
2368  m_detectors[detectorNameTmp] = briskDetector;
2369  } else {
2370  std::cerr << "You should not use BRISK with Pyramid feature detection!" << std::endl;
2371  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
2372  }
2373  } else if (detectorNameTmp == "KAZE") {
2374  cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
2375  if (!usePyramid) {
2376  m_detectors[detectorNameTmp] = kazeDetector;
2377  } else {
2378  std::cerr << "You should not use KAZE with Pyramid feature detection!" << std::endl;
2379  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
2380  }
2381  } else if (detectorNameTmp == "AKAZE") {
2382  cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
2383  if (!usePyramid) {
2384  m_detectors[detectorNameTmp] = akazeDetector;
2385  } else {
2386  std::cerr << "You should not use AKAZE with Pyramid feature detection!" << std::endl;
2387  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
2388  }
2389  } else if (detectorNameTmp == "GFTT") {
2390  cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
2391  if (!usePyramid) {
2392  m_detectors[detectorNameTmp] = gfttDetector;
2393  } else {
2394  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
2395  }
2396  } else if (detectorNameTmp == "SimpleBlob") {
2397  cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
2398  if (!usePyramid) {
2399  m_detectors[detectorNameTmp] = simpleBlobDetector;
2400  } else {
2401  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
2402  }
2403  } else if (detectorNameTmp == "STAR") {
2404 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2405  cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
2406  if (!usePyramid) {
2407  m_detectors[detectorNameTmp] = starDetector;
2408  } else {
2409  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
2410  }
2411 #else
2412  std::stringstream ss_msg;
2413  ss_msg << "Failed to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2414  << " was not built with the xFeatures2d module.";
2415  throw vpException(vpException::fatalError, ss_msg.str());
2416 #endif
2417  } else if (detectorNameTmp == "AGAST") {
2418  cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
2419  if (!usePyramid) {
2420  m_detectors[detectorNameTmp] = agastDetector;
2421  } else {
2422  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
2423  }
2424  } else if (detectorNameTmp == "MSD") {
2425 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
2426 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2427  cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
2428  if (!usePyramid) {
2429  m_detectors[detectorNameTmp] = msdDetector;
2430  } else {
2431  std::cerr << "You should not use MSD with Pyramid feature detection!" << std::endl;
2432  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
2433  }
2434 #else
2435  std::stringstream ss_msg;
2436  ss_msg << "Failed to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2437  << " was not built with the xFeatures2d module.";
2438  throw vpException(vpException::fatalError, ss_msg.str());
2439 #endif
2440 #else
2441  std::stringstream ss_msg;
2442  ss_msg << "Feature " << detectorName << " is not available in OpenCV version: " << std::hex
2443  << VISP_HAVE_OPENCV_VERSION << " (requires >= OpenCV 3.1).";
      throw vpException(vpException::fatalError, ss_msg.str());
2444 #endif
2445  } else {
2446  std::cerr << "The detector: " << detectorNameTmp << " is not available." << std::endl;
2447  }
2448 
2449  bool detectorInitialized = false;
2450  if (!usePyramid) {
2451  //if not null and to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
2452  detectorInitialized = !m_detectors[detectorNameTmp].empty();
2453  } else {
2454  //if not null and to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
2455  detectorInitialized = !m_detectors[detectorName].empty();
2456  }
2457 
2458  if (!detectorInitialized) {
2459  std::stringstream ss_msg;
2460  ss_msg << "Failed to initialize the detector: " << detectorNameTmp
2461  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2462  throw vpException(vpException::fatalError, ss_msg.str());
2463  }
2464 #endif
2465 }
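
/*
 * Illustrative sketch of the name-based dispatch above: prefixing a detector
 * name with "Pyramid" wraps it in PyramidAdaptedFeatureDetector so that
 * detection runs on several levels of an image pyramid (useful mainly for
 * detectors that are not scale-invariant, such as FAST):
 *
 *   vpKeyPoint kp;
 *   kp.setDetector("FAST");        // plain FAST detector
 *   kp.setDetector("PyramidFAST"); // FAST applied to an image pyramid
 */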
2466 
2473 void vpKeyPoint::initDetectors(const std::vector<std::string> &detectorNames)
2474 {
2475  for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
2476  initDetector(*it);
2477  }
2478 }
2479 
2485 void vpKeyPoint::initExtractor(const std::string &extractorName)
2486 {
2487 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2488  m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
2489 #else
2490  if (extractorName == "SIFT") {
2491 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2492  (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2493  // SIFT is no longer patented since 09/03/2020
2494 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2495  m_extractors[extractorName] = cv::SIFT::create();
2496 #else
2497  m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
2498 #endif
2499 #else
2500  std::stringstream ss_msg;
2501  ss_msg << "Failed to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2502  << " was not built with the xFeatures2d module.";
2503  throw vpException(vpException::fatalError, ss_msg.str());
2504 #endif
2505  } else if (extractorName == "SURF") {
2506 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2507  // Use extended set of SURF descriptors (128 instead of 64)
2508  m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3, true);
2509 #else
2510  std::stringstream ss_msg;
2511  ss_msg << "Failed to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2512  << " was not built with the xFeatures2d module.";
2513  throw vpException(vpException::fatalError, ss_msg.str());
2514 #endif
2515  } else if (extractorName == "ORB") {
2516  m_extractors[extractorName] = cv::ORB::create();
2517  } else if (extractorName == "BRISK") {
2518  m_extractors[extractorName] = cv::BRISK::create();
2519  } else if (extractorName == "FREAK") {
2520 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2521  m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2522 #else
2523  std::stringstream ss_msg;
2524  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2525  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2526  throw vpException(vpException::fatalError, ss_msg.str());
2527 #endif
2528  } else if (extractorName == "BRIEF") {
2529 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2530  m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2531 #else
2532  std::stringstream ss_msg;
2533  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2534  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2535  throw vpException(vpException::fatalError, ss_msg.str());
2536 #endif
2537  } else if (extractorName == "KAZE") {
2538  m_extractors[extractorName] = cv::KAZE::create();
2539  } else if (extractorName == "AKAZE") {
2540  m_extractors[extractorName] = cv::AKAZE::create();
2541  } else if (extractorName == "DAISY") {
2542 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2543  m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2544 #else
2545  std::stringstream ss_msg;
2546  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2547  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2548  throw vpException(vpException::fatalError, ss_msg.str());
2549 #endif
2550  } else if (extractorName == "LATCH") {
2551 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2552  m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2553 #else
2554  std::stringstream ss_msg;
2555  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2556  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2557  throw vpException(vpException::fatalError, ss_msg.str());
2558 #endif
2559  } else if (extractorName == "LUCID") {
2560 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2561  // m_extractors[extractorName] = cv::xfeatures2d::LUCID::create(1, 2);
2562  // Not possible currently, need a color image
2563  throw vpException(vpException::badValue, "Not possible currently as it needs a color image.");
2564 #else
2565  std::stringstream ss_msg;
2566  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2567  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2568  throw vpException(vpException::fatalError, ss_msg.str());
2569 #endif
2570  } else if (extractorName == "VGG") {
2571 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2572 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2573  m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2574 #else
2575  std::stringstream ss_msg;
2576  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2577  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2578  throw vpException(vpException::fatalError, ss_msg.str());
2579 #endif
2580 #else
2581  std::stringstream ss_msg;
2582  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2583  << VISP_HAVE_OPENCV_VERSION << " but it requires at least OpenCV 3.2.";
2584  throw vpException(vpException::fatalError, ss_msg.str());
2585 #endif
2586  } else if (extractorName == "BoostDesc") {
2587 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2588 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2589  m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2590 #else
2591  std::stringstream ss_msg;
2592  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2593  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2594  throw vpException(vpException::fatalError, ss_msg.str());
2595 #endif
2596 #else
2597  std::stringstream ss_msg;
2598  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2599  << VISP_HAVE_OPENCV_VERSION << " but it requires at least OpenCV 3.2.";
2600  throw vpException(vpException::fatalError, ss_msg.str());
2601 #endif
2602  } else {
2603  std::cerr << "The extractor: " << extractorName << " is not available." << std::endl;
2604  }
2605 #endif
2606 
2607  if (!m_extractors[extractorName]) { //if null
2608  std::stringstream ss_msg;
2609  ss_msg << "Failed to initialize the extractor: " << extractorName
2610  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2611  throw vpException(vpException::fatalError, ss_msg.str());
2612  }
2613 
2614 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2615  if (extractorName == "SURF") {
2616  // Use extended set of SURF descriptors (128 instead of 64)
2617  m_extractors[extractorName]->set("extended", 1);
2618  }
2619 #endif
2620 }
2621 
2628 void vpKeyPoint::initExtractors(const std::vector<std::string> &extractorNames)
2629 {
2630  for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
2631  initExtractor(*it);
2632  }
2633 
2634  int descriptorType = CV_32F;
2635  bool firstIteration = true;
2636  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2637  it != m_extractors.end(); ++it) {
2638  if (firstIteration) {
2639  firstIteration = false;
2640  descriptorType = it->second->descriptorType();
2641  } else {
2642  if (descriptorType != it->second->descriptorType()) {
2643  throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2644  }
2645  }
2646  }
2647 }
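
/*
 * Consequence of the type check above: all configured extractors must
 * produce descriptors of the same depth. Combining two binary descriptors
 * works, while mixing binary (CV_8U) and floating-point (CV_32F) descriptors
 * throws a vpException. Illustrative sketch:
 *
 *   std::vector<std::string> names;
 *   names.push_back("ORB");      // binary, CV_8U
 *   names.push_back("BRIEF");    // binary, CV_8U: accepted
 *   // names.push_back("SIFT");  // CV_32F: would throw
 *   vpKeyPoint kp;
 *   kp.setExtractors(names);
 */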
2648 
2649 void vpKeyPoint::initFeatureNames()
2650 {
2651 // Create the maps from enum values to string names
2652 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2653  m_mapOfDetectorNames[DETECTOR_FAST] = "FAST";
2654  m_mapOfDetectorNames[DETECTOR_MSER] = "MSER";
2655  m_mapOfDetectorNames[DETECTOR_ORB] = "ORB";
2656  m_mapOfDetectorNames[DETECTOR_BRISK] = "BRISK";
2657  m_mapOfDetectorNames[DETECTOR_GFTT] = "GFTT";
2658  m_mapOfDetectorNames[DETECTOR_SimpleBlob] = "SimpleBlob";
2659 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2660  m_mapOfDetectorNames[DETECTOR_STAR] = "STAR";
2661 #endif
2662 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2663  (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2664  m_mapOfDetectorNames[DETECTOR_SIFT] = "SIFT";
2665 #endif
2666 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2667  m_mapOfDetectorNames[DETECTOR_SURF] = "SURF";
2668 #endif
2669 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2670  m_mapOfDetectorNames[DETECTOR_KAZE] = "KAZE";
2671  m_mapOfDetectorNames[DETECTOR_AKAZE] = "AKAZE";
2672  m_mapOfDetectorNames[DETECTOR_AGAST] = "AGAST";
2673 #endif
2674 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2675  m_mapOfDetectorNames[DETECTOR_MSD] = "MSD";
2676 #endif
2677 #endif
2678 
2679 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2680  m_mapOfDescriptorNames[DESCRIPTOR_ORB] = "ORB";
2681  m_mapOfDescriptorNames[DESCRIPTOR_BRISK] = "BRISK";
2682 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2683  m_mapOfDescriptorNames[DESCRIPTOR_FREAK] = "FREAK";
2684  m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] = "BRIEF";
2685 #endif
2686 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2687  (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2688  m_mapOfDescriptorNames[DESCRIPTOR_SIFT] = "SIFT";
2689 #endif
2690 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2691  m_mapOfDescriptorNames[DESCRIPTOR_SURF] = "SURF";
2692 #endif
2693 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2694  m_mapOfDescriptorNames[DESCRIPTOR_KAZE] = "KAZE";
2695  m_mapOfDescriptorNames[DESCRIPTOR_AKAZE] = "AKAZE";
2696 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2697  m_mapOfDescriptorNames[DESCRIPTOR_DAISY] = "DAISY";
2698  m_mapOfDescriptorNames[DESCRIPTOR_LATCH] = "LATCH";
2699 #endif
2700 #endif
2701 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2702  m_mapOfDescriptorNames[DESCRIPTOR_VGG] = "VGG";
2703  m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] = "BoostDesc";
2704 #endif
2705 #endif
2706 }
2707 
2713 void vpKeyPoint::initMatcher(const std::string &matcherName)
2714 {
2715  int descriptorType = CV_32F;
2716  bool firstIteration = true;
2717  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2718  it != m_extractors.end(); ++it) {
2719  if (firstIteration) {
2720  firstIteration = false;
2721  descriptorType = it->second->descriptorType();
2722  } else {
2723  if (descriptorType != it->second->descriptorType()) {
2724  throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2725  }
2726  }
2727  }
2728 
2729  if (matcherName == "FlannBased") {
2730  if (m_extractors.empty()) {
2731  std::cout << "Warning: no extractor initialized; using "
2732  "floating-point values (CV_32F) "
2733  "for the descriptor type by default!"
2734  << std::endl;
2735  }
2736 
2737  if (descriptorType == CV_8U) {
2738 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2739  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2740 #else
2741  m_matcher = new cv::FlannBasedMatcher(new cv::flann::LshIndexParams(12, 20, 2));
2742 #endif
2743  } else {
2744 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2745  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2746 #else
2747  m_matcher = new cv::FlannBasedMatcher(new cv::flann::KDTreeIndexParams());
2748 #endif
2749  }
2750  } else {
2751  m_matcher = cv::DescriptorMatcher::create(matcherName);
2752  }
2753 
2754 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2755  if (m_matcher != NULL && !m_useKnn && matcherName == "BruteForce") {
2756  m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
2757  }
2758 #endif
2759 
2760  if (!m_matcher) { //if null
2761  std::stringstream ss_msg;
2762  ss_msg << "Failed to initialize the matcher: " << matcherName
2763  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2764  throw vpException(vpException::fatalError, ss_msg.str());
2765  }
2766 }
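
/*
 * Note on the FLANN branch above: binary descriptors (CV_8U, e.g. ORB,
 * BRISK, BRIEF) are indexed with LSH (12 hash tables, 20-bit keys,
 * multi-probe level 2), while floating-point descriptors use a KD-tree.
 * Illustrative sketch using the standard OpenCV matcher names:
 *
 *   vpKeyPoint kp;
 *   kp.setMatcher("FlannBased");         // approximate nearest-neighbor search
 *   kp.setMatcher("BruteForce-Hamming"); // exact search for binary descriptors
 */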
2767 
2776 void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
2777  vpImage<unsigned char> &IMatching)
2778 {
2779  vpImagePoint topLeftCorner(0, 0);
2780  IMatching.insert(IRef, topLeftCorner);
2781  topLeftCorner = vpImagePoint(0, IRef.getWidth());
2782  IMatching.insert(ICurrent, topLeftCorner);
2783 }
2784 
2793 void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent,
2794  vpImage<vpRGBa> &IMatching)
2795 {
2796  vpImagePoint topLeftCorner(0, 0);
2797  IMatching.insert(IRef, topLeftCorner);
2798  topLeftCorner = vpImagePoint(0, IRef.getWidth());
2799  IMatching.insert(ICurrent, topLeftCorner);
2800 }
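
/*
 * Both overloads above place the reference image at (0, 0) and the current
 * image directly to its right; the caller is expected to allocate IMatching
 * with width >= IRef.getWidth() + ICurrent.getWidth() and height >= the
 * larger of the two image heights (illustrative note, the allocation is not
 * checked here).
 */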
2801 
2809 void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
2810 {
2811  // Number of images in the training database plus the current image in
2812  // which we want to detect the object
2813  int nbImg = (int)(m_mapOfImages.size() + 1);
2814 
2815  if (m_mapOfImages.empty()) {
2816  std::cerr << "There is no training image loaded!" << std::endl;
2817  return;
2818  }
2819 
2820  if (nbImg == 2) {
2821  // Only one training image, so we display them side by side
2822  insertImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
2823  } else {
2824  // Multiple training images, display them as a mosaic image
2825  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg));
2826  int nbWidth = nbImgSqrt;
2827  int nbHeight = nbImgSqrt;
2828 
2829  if (nbImgSqrt * nbImgSqrt < nbImg) {
2830  nbWidth++;
2831  }
2832 
2833  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2834  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2835  ++it) {
2836  if (maxW < it->second.getWidth()) {
2837  maxW = it->second.getWidth();
2838  }
2839 
2840  if (maxH < it->second.getHeight()) {
2841  maxH = it->second.getHeight();
2842  }
2843  }
2844 
2845  // Indices of the current image in the grid, arranged so that the
2846  // current image lies in the center cell of the mosaic grid
2847  int medianI = nbHeight / 2;
2848  int medianJ = nbWidth / 2;
2849  int medianIndex = medianI * nbWidth + medianJ;
2850 
2851  int cpt = 0;
2852  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2853  ++it, cpt++) {
2854  int local_cpt = cpt;
2855  if (cpt >= medianIndex) {
2856  // Shift by one the index of the training images that come after
2857  // the current image
2858  local_cpt++;
2859  }
2860  int indexI = local_cpt / nbWidth;
2861  int indexJ = local_cpt - (indexI * nbWidth);
2862  vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2863 
2864  IMatching.insert(it->second, topLeftCorner);
2865  }
2866 
2867  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2868  IMatching.insert(ICurrent, topLeftCorner);
2869  }
2870 }
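
/*
 * Worked example of the mosaic layout above (illustrative): with 5 training
 * images, nbImg = 6 and nbImgSqrt = round(sqrt(6)) = 2; since 2 * 2 < 6,
 * nbWidth becomes 3, giving a 2 x 3 grid. Then medianI = 1, medianJ = 1 and
 * medianIndex = 4: training images 0..3 keep their cells, the fifth one is
 * shifted to cell 5, and the current image takes the center cell 4
 * (row 1, column 1).
 */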
2871 
2879 void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
2880 {
2881  // Number of images in the training database plus the current image in
2882  // which we want to detect the object
2883  int nbImg = (int)(m_mapOfImages.size() + 1);
2884 
2885  if (m_mapOfImages.empty()) {
2886  std::cerr << "There is no training image loaded!" << std::endl;
2887  return;
2888  }
2889 
2890  if (nbImg == 2) {
2891  // Only one training image, so we display them side by side
2892  vpImage<vpRGBa> IRef;
2893  vpImageConvert::convert(m_mapOfImages.begin()->second, IRef);
2894  insertImageMatching(IRef, ICurrent, IMatching);
2895  } else {
2896  // Multiple training images, display them as a mosaic image
2897  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg));
2898  int nbWidth = nbImgSqrt;
2899  int nbHeight = nbImgSqrt;
2900 
2901  if (nbImgSqrt * nbImgSqrt < nbImg) {
2902  nbWidth++;
2903  }
2904 
2905  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2906  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2907  ++it) {
2908  if (maxW < it->second.getWidth()) {
2909  maxW = it->second.getWidth();
2910  }
2911 
2912  if (maxH < it->second.getHeight()) {
2913  maxH = it->second.getHeight();
2914  }
2915  }
2916 
2917  // Indices of the current image in the grid, arranged so that the
2918  // current image lies in the center cell of the mosaic grid
2919  int medianI = nbHeight / 2;
2920  int medianJ = nbWidth / 2;
2921  int medianIndex = medianI * nbWidth + medianJ;
2922 
2923  int cpt = 0;
2924  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2925  ++it, cpt++) {
2926  int local_cpt = cpt;
2927  if (cpt >= medianIndex) {
2928  // Shift by one the index of the training images that come after
2929  // the current image
2930  local_cpt++;
2931  }
2932  int indexI = local_cpt / nbWidth;
2933  int indexJ = local_cpt - (indexI * nbWidth);
2934  vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2935 
2936  vpImage<vpRGBa> IRef;
2937  vpImageConvert::convert(it->second, IRef);
2938  IMatching.insert(IRef, topLeftCorner);
2939  }
2940 
2941  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2942  IMatching.insert(ICurrent, topLeftCorner);
2943  }
2944 }
2945 
2951 void vpKeyPoint::loadConfigFile(const std::string &configFile)
2952 {
2953  vpXmlConfigParserKeyPoint xmlp;
2954 
2955  try {
2956  // Reset detector and extractor
2957  m_detectorNames.clear();
2958  m_extractorNames.clear();
2959  m_detectors.clear();
2960  m_extractors.clear();
2961 
2962  std::cout << " *********** Parsing XML configuration for vpKeyPoint "
2963  "************ "
2964  << std::endl;
2965  xmlp.parse(configFile);
2966 
2967  m_detectorNames.push_back(xmlp.getDetectorName());
2968  m_extractorNames.push_back(xmlp.getExtractorName());
2969  m_matcherName = xmlp.getMatcherName();
2970 
2971  switch (xmlp.getMatchingMethod()) {
2972  case vpXmlConfigParserKeyPoint::constantFactorDistanceThreshold:
2973  m_filterType = constantFactorDistanceThreshold;
2974  break;
2975 
2976  case vpXmlConfigParserKeyPoint::stdDistanceThreshold:
2977  m_filterType = stdDistanceThreshold;
2978  break;
2979 
2980  case vpXmlConfigParserKeyPoint::ratioDistanceThreshold:
2981  m_filterType = ratioDistanceThreshold;
2982  break;
2983 
2984  case vpXmlConfigParserKeyPoint::stdAndRatioDistanceThreshold:
2985  m_filterType = stdAndRatioDistanceThreshold;
2986  break;
2987 
2988  case vpXmlConfigParserKeyPoint::noFilterMatching:
2989  m_filterType = noFilterMatching;
2990  break;
2991 
2992  default:
2993  break;
2994  }
2995 
2996  m_matchingFactorThreshold = xmlp.getMatchingFactorThreshold();
2997  m_matchingRatioThreshold = xmlp.getMatchingRatioThreshold();
2998 
2999  m_useRansacVVS = xmlp.getUseRansacVVSPoseEstimation();
3000  m_useConsensusPercentage = xmlp.getUseRansacConsensusPercentage();
3001  m_nbRansacIterations = xmlp.getNbRansacIterations();
3002  m_ransacReprojectionError = xmlp.getRansacReprojectionError();
3003  m_nbRansacMinInlierCount = xmlp.getNbRansacMinInlierCount();
3004  m_ransacThreshold = xmlp.getRansacThreshold();
3005  m_ransacConsensusPercentage = xmlp.getRansacConsensusPercentage();
3006 
3007  if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
3008  m_useKnn = true;
3009  } else {
3010  m_useKnn = false;
3011  }
3012 
3013  init();
3014  } catch (...) {
3015  throw vpException(vpException::ioError, "Can't open XML file \"%s\"\n ", configFile.c_str());
3016  }
3017 }
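
/*
 * Illustrative usage sketch (the XML file name is an assumption): the parsed
 * file selects the detector, extractor, matcher and match-filtering method
 * in a single call:
 *
 *   vpKeyPoint kp;
 *   kp.loadConfigFile("keypoint-config.xml");
 */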
3018 
3027 void vpKeyPoint::loadLearningData(const std::string &filename, bool binaryMode, bool append)
3028 {
3029  int startClassId = 0;
3030  int startImageId = 0;
3031  if (!append) {
3032  m_trainKeyPoints.clear();
3033  m_trainPoints.clear();
3034  m_mapOfImageId.clear();
3035  m_mapOfImages.clear();
3036  } else {
3037  // In append case, find the max index of keypoint class Id
3038  for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
3039  if (startClassId < it->first) {
3040  startClassId = it->first;
3041  }
3042  }
3043 
3044  // In append case, find the max index of images Id
3045  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3046  ++it) {
3047  if (startImageId < it->first) {
3048  startImageId = it->first;
3049  }
3050  }
3051  }
3052 
3053  // Get parent directory
3054  std::string parent = vpIoTools::getParent(filename);
3055  if (!parent.empty()) {
3056  parent += "/";
3057  }
3058 
3059  if (binaryMode) {
3060  std::ifstream file(filename.c_str(), std::ifstream::binary);
3061  if (!file.is_open()) {
3062  throw vpException(vpException::ioError, "Cannot open the file.");
3063  }
3064 
3065  // Read info about training images
3066  int nbImgs = 0;
3067  vpIoTools::readBinaryValueLE(file, nbImgs);
3068 
3069 #if !defined(VISP_HAVE_MODULE_IO)
3070  if (nbImgs > 0) {
3071  std::cout << "Warning: the learning file contains image data that will "
3072  "not be loaded as the visp_io module "
3073  "is not available!"
3074  << std::endl;
3075  }
3076 #endif
3077 
3078  for (int i = 0; i < nbImgs; i++) {
3079  // Read image_id
3080  int id = 0;
3081  vpIoTools::readBinaryValueLE(file, id);
3082 
3083  int length = 0;
3084  vpIoTools::readBinaryValueLE(file, length);
3085  // Will contain the path to the training images
3086  char *path = new char[length + 1];
3087 
3088  for (int cpt = 0; cpt < length; cpt++) {
3089  char c;
3090  file.read((char *)(&c), sizeof(c));
3091  path[cpt] = c;
3092  }
3093  path[length] = '\0';
3094 
3095  vpImage<unsigned char> I;
3096 #ifdef VISP_HAVE_MODULE_IO
3097  if (vpIoTools::isAbsolutePathname(std::string(path))) {
3098  vpImageIo::read(I, path);
3099  } else {
3100  vpImageIo::read(I, parent + path);
3101  }
3102 
3103  // Add the image previously loaded only if VISP_HAVE_MODULE_IO
3104  m_mapOfImages[id + startImageId] = I;
3105 #endif
3106 
3107  // Delete path
3108  delete[] path;
3109  }
3110 
3111  // Read whether 3D point information is saved or not
3112  int have3DInfoInt = 0;
3113  vpIoTools::readBinaryValueLE(file, have3DInfoInt);
3114  bool have3DInfo = have3DInfoInt != 0;
3115 
3116  // Read the number of descriptors
3117  int nRows = 0;
3118  vpIoTools::readBinaryValueLE(file, nRows);
3119 
3120  // Read the size of the descriptor
3121  int nCols = 0;
3122  vpIoTools::readBinaryValueLE(file, nCols);
3123 
3124  // Read the type of the descriptor
3125  int descriptorType = 5; // CV_32F
3126  vpIoTools::readBinaryValueLE(file, descriptorType);
3127 
3128  cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3129  for (int i = 0; i < nRows; i++) {
3130  // Read information about keyPoint
3131  float u, v, size, angle, response;
3132  int octave, class_id, image_id;
3133  vpIoTools::readBinaryValueLE(file, u);
3134  vpIoTools::readBinaryValueLE(file, v);
3135  vpIoTools::readBinaryValueLE(file, size);
3136  vpIoTools::readBinaryValueLE(file, angle);
3137  vpIoTools::readBinaryValueLE(file, response);
3138  vpIoTools::readBinaryValueLE(file, octave);
3139  vpIoTools::readBinaryValueLE(file, class_id);
3140  vpIoTools::readBinaryValueLE(file, image_id);
3141  cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
3142  m_trainKeyPoints.push_back(keyPoint);
3143 
3144  if (image_id != -1) {
3145 #ifdef VISP_HAVE_MODULE_IO
3146  // No training images if image_id == -1
3147  m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3148 #endif
3149  }
3150 
3151  if (have3DInfo) {
3152  // Read oX, oY, oZ
3153  float oX, oY, oZ;
3154  vpIoTools::readBinaryValueLE(file, oX);
3155  vpIoTools::readBinaryValueLE(file, oY);
3156  vpIoTools::readBinaryValueLE(file, oZ);
3157  m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
3158  }
3159 
3160  for (int j = 0; j < nCols; j++) {
3161  // Read the value of the descriptor
3162  switch (descriptorType) {
3163  case CV_8U: {
3164  unsigned char value;
3165  file.read((char *)(&value), sizeof(value));
3166  trainDescriptorsTmp.at<unsigned char>(i, j) = value;
3167  } break;
3168 
3169  case CV_8S: {
3170  char value;
3171  file.read((char *)(&value), sizeof(value));
3172  trainDescriptorsTmp.at<char>(i, j) = value;
3173  } break;
3174 
3175  case CV_16U: {
3176  unsigned short int value;
3177  vpIoTools::readBinaryValueLE(file, value);
3178  trainDescriptorsTmp.at<unsigned short int>(i, j) = value;
3179  } break;
3180 
3181  case CV_16S: {
3182  short int value;
3183  vpIoTools::readBinaryValueLE(file, value);
3184  trainDescriptorsTmp.at<short int>(i, j) = value;
3185  } break;
3186 
3187  case CV_32S: {
3188  int value;
3189  vpIoTools::readBinaryValueLE(file, value);
3190  trainDescriptorsTmp.at<int>(i, j) = value;
3191  } break;
3192 
3193  case CV_32F: {
3194  float value;
3195  vpIoTools::readBinaryValueLE(file, value);
3196  trainDescriptorsTmp.at<float>(i, j) = value;
3197  } break;
3198 
3199  case CV_64F: {
3200  double value;
3201  vpIoTools::readBinaryValueLE(file, value);
3202  trainDescriptorsTmp.at<double>(i, j) = value;
3203  } break;
3204 
3205  default: {
3206  float value;
3207  vpIoTools::readBinaryValueLE(file, value);
3208  trainDescriptorsTmp.at<float>(i, j) = value;
3209  } break;
3210  }
3211  }
3212  }
3213 
3214  if (!append || m_trainDescriptors.empty()) {
3215  trainDescriptorsTmp.copyTo(m_trainDescriptors);
3216  } else {
3217  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3218  }
3219 
3220  file.close();
3221  } else {
3222  pugi::xml_document doc;
3223 
3224  /* Parse the file and get the DOM */
3225  if (!doc.load_file(filename.c_str())) {
3226  throw vpException(vpException::ioError, "Error with file: %s", filename.c_str());
3227  }
3228 
3229  pugi::xml_node root_element = doc.document_element();
3230 
3231  int descriptorType = CV_32F; // float
3232  int nRows = 0, nCols = 0;
3233  int i = 0, j = 0;
3234 
3235  cv::Mat trainDescriptorsTmp;
3236 
3237  for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node; first_level_node = first_level_node.next_sibling()) {
3238 
3239  std::string name(first_level_node.name());
3240  if (first_level_node.type() == pugi::node_element && name == "TrainingImageInfo") {
3241 
3242  for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node; image_info_node = image_info_node.next_sibling()) {
3243  name = std::string(image_info_node.name());
3244 
3245  if (name == "trainImg") {
3246  // Read image_id
3247  int id = image_info_node.attribute("image_id").as_int();
3248 
3249  vpImage<unsigned char> I;
3250 #ifdef VISP_HAVE_MODULE_IO
3251  std::string path(image_info_node.text().as_string());
3252  // Read path to the training images
3253  if (vpIoTools::isAbsolutePathname(std::string(path))) {
3254  vpImageIo::read(I, path);
3255  } else {
3256  vpImageIo::read(I, parent + path);
3257  }
3258 
3259  // Add the image previously loaded only if VISP_HAVE_MODULE_IO
3260  m_mapOfImages[id + startImageId] = I;
3261 #endif
3262  }
3263  }
3264  } else if (first_level_node.type() == pugi::node_element && name == "DescriptorsInfo") {
3265  for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
3266  descriptors_info_node = descriptors_info_node.next_sibling()) {
3267  if (descriptors_info_node.type() == pugi::node_element) {
3268  name = std::string(descriptors_info_node.name());
3269 
3270  if (name == "nrows") {
3271  nRows = descriptors_info_node.text().as_int();
3272  } else if (name == "ncols") {
3273  nCols = descriptors_info_node.text().as_int();
3274  } else if (name == "type") {
3275  descriptorType = descriptors_info_node.text().as_int();
3276  }
3277  }
3278  }
3279 
3280  trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3281  } else if (first_level_node.type() == pugi::node_element && name == "DescriptorInfo") {
3282  double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
3283  int octave = 0, class_id = 0, image_id = 0;
3284  double oX = 0.0, oY = 0.0, oZ = 0.0;
3285 
3286  std::stringstream ss;
3287 
3288  for (pugi::xml_node point_node = first_level_node.first_child(); point_node; point_node = point_node.next_sibling()) {
3289  if (point_node.type() == pugi::node_element) {
3290  name = std::string(point_node.name());
3291 
3292  // Read information about keypoints
3293  if (name == "u") {
3294  u = point_node.text().as_double();
3295  } else if (name == "v") {
3296  v = point_node.text().as_double();
3297  } else if (name == "size") {
3298  size = point_node.text().as_double();
3299  } else if (name == "angle") {
3300  angle = point_node.text().as_double();
3301  } else if (name == "response") {
3302  response = point_node.text().as_double();
3303  } else if (name == "octave") {
3304  octave = point_node.text().as_int();
3305  } else if (name == "class_id") {
3306  class_id = point_node.text().as_int();
3307  cv::KeyPoint keyPoint(cv::Point2f((float)u, (float)v), (float)size, (float)angle, (float)response, octave,
3308  (class_id + startClassId));
3309  m_trainKeyPoints.push_back(keyPoint);
3310  } else if (name == "image_id") {
3311  image_id = point_node.text().as_int();
3312  if (image_id != -1) {
3313 #ifdef VISP_HAVE_MODULE_IO
3314  // No training images if image_id == -1
3315  m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3316 #endif
3317  }
3318  } else if (name == "oX") {
3319  oX = point_node.text().as_double();
3320  } else if (name == "oY") {
3321  oY = point_node.text().as_double();
3322  } else if (name == "oZ") {
3323  oZ = point_node.text().as_double();
3324  m_trainPoints.push_back(cv::Point3f((float)oX, (float)oY, (float)oZ));
3325  } else if (name == "desc") {
3326  j = 0;
3327 
3328  for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
3329  descriptor_value_node = descriptor_value_node.next_sibling()) {
3330 
3331  if (descriptor_value_node.type() == pugi::node_element) {
3332  // Read descriptor values
3333  std::string parseStr(descriptor_value_node.text().as_string());
3334  ss.clear();
3335  ss.str(parseStr);
3336 
3337  if (!ss.fail()) {
3338  switch (descriptorType) {
3339  case CV_8U: {
3340  // Parse the numeric value [0 ; 255] to an int
3341  int parseValue;
3342  ss >> parseValue;
3343  trainDescriptorsTmp.at<unsigned char>(i, j) = (unsigned char)parseValue;
3344  } break;
3345 
3346  case CV_8S:
3347  // Parse the numeric value [-128 ; 127] to an int
3348  int parseValue;
3349  ss >> parseValue;
3350  trainDescriptorsTmp.at<char>(i, j) = (char)parseValue;
3351  break;
3352 
3353  case CV_16U:
3354  ss >> trainDescriptorsTmp.at<unsigned short int>(i, j);
3355  break;
3356 
3357  case CV_16S:
3358  ss >> trainDescriptorsTmp.at<short int>(i, j);
3359  break;
3360 
3361  case CV_32S:
3362  ss >> trainDescriptorsTmp.at<int>(i, j);
3363  break;
3364 
3365  case CV_32F:
3366  ss >> trainDescriptorsTmp.at<float>(i, j);
3367  break;
3368 
3369  case CV_64F:
3370  ss >> trainDescriptorsTmp.at<double>(i, j);
3371  break;
3372 
3373  default:
3374  ss >> trainDescriptorsTmp.at<float>(i, j);
3375  break;
3376  }
3377  } else {
3378  std::cerr << "Error when converting: " << ss.str() << std::endl;
3379  }
3380 
3381  j++;
3382  }
3383  }
3384  }
3385  }
3386  }
3387  i++;
3388  }
3389  }
3390 
3391  if (!append || m_trainDescriptors.empty()) {
3392  trainDescriptorsTmp.copyTo(m_trainDescriptors);
3393  } else {
3394  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3395  }
3396  }
3397 
3398  // Convert OpenCV type to ViSP type for compatibility
3399  vpConvert::convertFromOpenCV(m_trainKeyPoints, referenceImagePointsList);
3400  vpConvert::convertFromOpenCV(this->m_trainPoints, m_trainVpPoints);
3401 
3402  // Add train descriptors to the matcher object
3403  m_matcher->clear();
3404  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
3405 
3406  // Set _reference_computed to true as we load a learning file
3407  _reference_computed = true;
3408 
3409  // Set m_currentImageId
3410  m_currentImageId = (int)m_mapOfImages.size();
3411 }
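
/*
 * Illustrative usage sketch (file names are assumptions). A database saved
 * with saveLearningData() can be reloaded in binary or XML form, and several
 * files can be accumulated with append = true:
 *
 *   vpKeyPoint kp;
 *   kp.loadLearningData("object1.bin", true);       // binary mode, reset database
 *   kp.loadLearningData("object2.bin", true, true); // binary mode, append
 */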
3412 
3421 void vpKeyPoint::match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors,
3422  std::vector<cv::DMatch> &matches, double &elapsedTime)
3423 {
3424  double t = vpTime::measureTimeMs();
3425 
3426  if (m_useKnn) {
3427  m_knnMatches.clear();
3428 
3429  if (m_useMatchTrainToQuery) {
3430  std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
3431 
3432  // Match train descriptors to query descriptors
3433  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
3434  matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
3435 
3436  for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
3437  it1 != knnMatchesTmp.end(); ++it1) {
3438  std::vector<cv::DMatch> tmp;
3439  for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
3440  tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
3441  }
3442  m_knnMatches.push_back(tmp);
3443  }
3444 
3445  matches.resize(m_knnMatches.size());
3446  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3447  } else {
3448  // Match query descriptors to train descriptors
3449  m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
3450  matches.resize(m_knnMatches.size());
3451  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3452  }
3453  } else {
3454  matches.clear();
3455 
3456  if (m_useMatchTrainToQuery) {
3457  std::vector<cv::DMatch> matchesTmp;
3458  // Match train descriptors to query descriptors
3459  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
3460  matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
3461 
3462  for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
3463  matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
3464  }
3465  } else {
3466  // Match query descriptors to train descriptors
3467  m_matcher->match(queryDescriptors, matches);
3468  }
3469  }
3470  elapsedTime = vpTime::measureTimeMs() - t;
3471 }
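
/*
 * Note on the index swap above: when m_useMatchTrainToQuery is true the
 * matcher runs from train to query, so each resulting cv::DMatch is rebuilt
 * with its queryIdx and trainIdx exchanged. This preserves the class
 * convention that queryIdx indexes the current image and trainIdx indexes
 * the training database, whichever direction was matched.
 */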
3472 
3480 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I) { return matchPoint(I, vpRect()); }
3481 
3489 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color) { return matchPoint(I_color, vpRect()); }
3490 
3501 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
3502  unsigned int width)
3503 {
3504  return matchPoint(I, vpRect(iP, width, height));
3505 }
3506 
3517 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
3518  unsigned int width)
3519 {
3520  return matchPoint(I_color, vpRect(iP, width, height));
3521 }
3522 
3531 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpRect &rectangle)
3532 {
3533  if (m_trainDescriptors.empty()) {
3534  std::cerr << "Reference is empty." << std::endl;
3535  if (!_reference_computed) {
3536  std::cerr << "Reference is not computed." << std::endl;
3537  }
3538  std::cerr << "Matching is not possible." << std::endl;
3539 
3540  return 0;
3541  }
3542 
3543  if (m_useAffineDetection) {
3544  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3545  std::vector<cv::Mat> listOfQueryDescriptors;
3546 
3547  // Detect keypoints and extract descriptors on multiple images
3548  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3549 
3550  // Flatten the lists of query keypoints
3551  m_queryKeyPoints.clear();
3552  for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3553  it != listOfQueryKeyPoints.end(); ++it) {
3554  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3555  }
3556 
3557  bool first = true;
3558  for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3559  ++it) {
3560  if (first) {
3561  first = false;
3562  it->copyTo(m_queryDescriptors);
3563  } else {
3564  m_queryDescriptors.push_back(*it);
3565  }
3566  }
3567  } else {
3568  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3569  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3570  }
3571 
3572  return matchPoint(m_queryKeyPoints, m_queryDescriptors);
3573 }
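
// Usage sketch (illustrative, with hypothetical image file names): the method
// above expects the reference to be built beforehand with buildReference() or
// loadLearningData(). Reading images requires the visp_io module.
#if 0
static void exampleMatchPoint()
{
  vpImage<unsigned char> Iref, Icur;
  vpImageIo::read(Iref, "reference.png"); // hypothetical file names
  vpImageIo::read(Icur, "query.png");

  vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
  keypoint.buildReference(Iref);
  unsigned int nbMatches = keypoint.matchPoint(Icur);
  std::cout << nbMatches << " keypoints matched" << std::endl;
}
#endif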
3574 
3583 unsigned int vpKeyPoint::matchPoint(const std::vector<cv::KeyPoint> &queryKeyPoints, const cv::Mat &queryDescriptors)
3584 {
3585  m_queryKeyPoints = queryKeyPoints;
3586  m_queryDescriptors = queryDescriptors;
3587 
3588  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3589 
3590  if (m_filterType != noFilterMatching) {
3591  m_queryFilteredKeyPoints.clear();
3592  m_objectFilteredPoints.clear();
3593  m_filteredMatches.clear();
3594 
3595  filterMatches();
3596  } else {
3597  if (m_useMatchTrainToQuery) {
3598  // Keep only the query keypoints matched with a train keypoint
3599  m_queryFilteredKeyPoints.clear();
3600  m_filteredMatches.clear();
3601  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3602  m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3603  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3604  }
3605  } else {
3606  m_queryFilteredKeyPoints = m_queryKeyPoints;
3607  m_filteredMatches = m_matches;
3608  }
3609 
3610  if (!m_trainPoints.empty()) {
3611  m_objectFilteredPoints.clear();
3612  // Add 3D object points so that the same index in
3613  // m_queryFilteredKeyPoints and in m_objectFilteredPoints
3614  // refers to the same training point
3615  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3616  // m_matches is normally ordered following the queryDescriptor index
3617  m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3618  }
3619  }
3620  }
3621 
3622  // Convert OpenCV type to ViSP type for compatibility
3623  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
3624  vpConvert::convertFromOpenCV(m_filteredMatches, matchedReferencePoints);
3625 
3626  return static_cast<unsigned int>(m_filteredMatches.size());
3627 }
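
// Sketch (not in the original file): this overload lets the caller supply
// keypoints detected and described externally, e.g. with an OpenCV detector,
// provided the descriptor type is compatible with the configured matcher.
#if 0
static unsigned int exampleExternalDetection(vpKeyPoint &keypoint, const cv::Mat &img)
{
  cv::Ptr<cv::ORB> orb = cv::ORB::create(); // OpenCV >= 3.0
  std::vector<cv::KeyPoint> kpts;
  cv::Mat descriptors;
  orb->detectAndCompute(img, cv::noArray(), kpts, descriptors);
  return keypoint.matchPoint(kpts, descriptors);
}
#endif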
3628 
3637 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
3638 {
3639  vpImageConvert::convert(I_color, m_I);
3640  return matchPoint(m_I, rectangle);
3641 }
3642 
3655 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3656  bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3657 {
3658  double error, elapsedTime;
3659  return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3660 }
3661 
3674 bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3675  bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3676 {
3677  double error, elapsedTime;
3678  return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
3679 }
3680 
3696 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3697  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3698  const vpRect &rectangle)
3699 {
3700  // Check if we have training descriptors
3701  if (m_trainDescriptors.empty()) {
3702  std::cerr << "Reference is empty." << std::endl;
3703  if (!_reference_computed) {
3704  std::cerr << "Reference is not computed." << std::endl;
3705  }
3706  std::cerr << "Matching is not possible." << std::endl;
3707 
3708  return false;
3709  }
3710 
3711  if (m_useAffineDetection) {
3712  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3713  std::vector<cv::Mat> listOfQueryDescriptors;
3714 
3715  // Detect keypoints and extract descriptors on multiple images
3716  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3717 
3718  // Flatten the lists of query keypoints
3719  m_queryKeyPoints.clear();
3720  for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3721  it != listOfQueryKeyPoints.end(); ++it) {
3722  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3723  }
3724 
3725  bool first = true;
3726  for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3727  ++it) {
3728  if (first) {
3729  first = false;
3730  it->copyTo(m_queryDescriptors);
3731  } else {
3732  m_queryDescriptors.push_back(*it);
3733  }
3734  }
3735  } else {
3736  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3737  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3738  }
3739 
3740  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3741 
3742  elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3743 
3744  if (m_filterType != noFilterMatching) {
3745  m_queryFilteredKeyPoints.clear();
3746  m_objectFilteredPoints.clear();
3747  m_filteredMatches.clear();
3748 
3749  filterMatches();
3750  } else {
3751  if (m_useMatchTrainToQuery) {
3752  // Keep only the query keypoints matched with a train keypoint
3753  m_queryFilteredKeyPoints.clear();
3754  m_filteredMatches.clear();
3755  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3756  m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3757  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3758  }
3759  } else {
3760  m_queryFilteredKeyPoints = m_queryKeyPoints;
3761  m_filteredMatches = m_matches;
3762  }
3763 
3764  if (!m_trainPoints.empty()) {
3765  m_objectFilteredPoints.clear();
3766  // Add 3D object points so that the same index in
3767  // m_queryFilteredKeyPoints and in m_objectFilteredPoints
3768  // refers to the same training point
3769  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3770  // m_matches is normally ordered following the queryDescriptor index
3771  m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3772  }
3773  }
3774  }
3775 
3776  // Convert OpenCV type to ViSP type for compatibility
3777  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
3778  vpConvert::convertFromOpenCV(m_filteredMatches, matchedReferencePoints);
3779 
3780  // error = std::numeric_limits<double>::max(); // triggers a compilation error
3781  // under Windows (max is defined as a macro); to use it we would have to add #undef max
3782  error = DBL_MAX;
3783  m_ransacInliers.clear();
3784  m_ransacOutliers.clear();
3785 
3786  if (m_useRansacVVS) {
3787  std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3788  size_t cpt = 0;
3789  // Create a list of vpPoint with 2D coordinates (current keypoint
3790  // location) + 3D coordinates (world/object coordinates)
3791  for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3792  it != m_objectFilteredPoints.end(); ++it, cpt++) {
3793  vpPoint pt;
3794  pt.setWorldCoordinates(it->x, it->y, it->z);
3795 
3796  vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3797 
3798  double x = 0.0, y = 0.0;
3799  vpPixelMeterConversion::convertPoint(cam, imP, x, y);
3800  pt.set_x(x);
3801  pt.set_y(y);
3802 
3803  objectVpPoints[cpt] = pt;
3804  }
3805 
3806  std::vector<vpPoint> inliers;
3807  std::vector<unsigned int> inlierIndex;
3808 
3809  bool res = computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3810 
3811  std::map<unsigned int, bool> mapOfInlierIndex;
3812  m_matchRansacKeyPointsToPoints.clear();
3813 
3814  for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3815  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3816  m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3817  mapOfInlierIndex[*it] = true;
3818  }
3819 
3820  for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3821  if (mapOfInlierIndex.find((unsigned int)i) == mapOfInlierIndex.end()) {
3822  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3823  }
3824  }
3825 
3826  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3827 
3828  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3829  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3830  m_ransacInliers.begin(), matchRansacToVpImage);
3831 
3832  elapsedTime += m_poseTime;
3833 
3834  return res;
3835  } else {
3836  std::vector<cv::Point2f> imageFilteredPoints;
3837  cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3838  std::vector<int> inlierIndex;
3839  bool res = computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3840 
3841  std::map<int, bool> mapOfInlierIndex;
3842  m_matchRansacKeyPointsToPoints.clear();
3843 
3844  for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3845  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3846  m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3847  mapOfInlierIndex[*it] = true;
3848  }
3849 
3850  for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3851  if (mapOfInlierIndex.find((int)i) == mapOfInlierIndex.end()) {
3852  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3853  }
3854  }
3855 
3856  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3857 
3858  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3859  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3860  m_ransacInliers.begin(), matchRansacToVpImage);
3861 
3862  elapsedTime += m_poseTime;
3863 
3864  return res;
3865  }
3866 }
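
// Usage sketch (illustrative; the intrinsics below are assumed values, not
// calibration results): matching with pose estimation requires 3D information
// attached to the training keypoints, e.g. from buildReference() with 3D
// points or from a learning file.
#if 0
static void exampleMatchPointPose(vpKeyPoint &keypoint, const vpImage<unsigned char> &I)
{
  vpCameraParameters cam(600.0, 600.0, 320.0, 240.0); // assumed px, py, u0, v0
  vpHomogeneousMatrix cMo;
  double error = 0.0, elapsedTime = 0.0;
  if (keypoint.matchPoint(I, cam, cMo, error, elapsedTime)) {
    std::cout << "cMo:\n" << cMo << "\nreprojection error: " << error << std::endl;
  }
}
#endif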
3867 
3883 bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3884  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3885  const vpRect &rectangle)
3886 {
3887  vpImageConvert::convert(I_color, m_I);
3888  return (matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
3889 }
3890 
3891 
3912 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox,
3913  vpImagePoint &centerOfGravity, const bool isPlanarObject,
3914  std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3915  double *meanDescriptorDistance, double *detection_score, const vpRect &rectangle)
3916 {
3917  if (imPts1 != NULL && imPts2 != NULL) {
3918  imPts1->clear();
3919  imPts2->clear();
3920  }
3921 
3922  matchPoint(I, rectangle);
3923 
3924  double meanDescriptorDistanceTmp = 0.0;
3925  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3926  meanDescriptorDistanceTmp += (double)it->distance;
3927  }
3928 
3929  meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3930  double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3931 
3932  if (meanDescriptorDistance != NULL) {
3933  *meanDescriptorDistance = meanDescriptorDistanceTmp;
3934  }
3935  if (detection_score != NULL) {
3936  *detection_score = score;
3937  }
3938 
3939  if (m_filteredMatches.size() >= 4) {
3940  // Training / Reference 2D points
3941  std::vector<cv::Point2f> points1(m_filteredMatches.size());
3942  // Query / Current 2D points
3943  std::vector<cv::Point2f> points2(m_filteredMatches.size());
3944 
3945  for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3946  points1[i] = cv::Point2f(m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx].pt);
3947  points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx].pt);
3948  }
3949 
3950  std::vector<vpImagePoint> inliers;
3951  if (isPlanarObject) {
3952 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3953  cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3954 #else
3955  cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3956 #endif
3957 
3958  for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3959  // Compute reprojection error
3960  cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3961  realPoint.at<double>(0, 0) = points1[i].x;
3962  realPoint.at<double>(1, 0) = points1[i].y;
3963  realPoint.at<double>(2, 0) = 1.f;
3964 
3965  cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3966  double err_x = (reprojectedPoint.at<double>(0, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].x;
3967  double err_y = (reprojectedPoint.at<double>(1, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].y;
3968  double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3969 
3970  if (reprojectionError < 6.0) {
3971  inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3972  if (imPts1 != NULL) {
3973  imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
3974  }
3975 
3976  if (imPts2 != NULL) {
3977  imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3978  }
3979  }
3980  }
3981  } else if (m_filteredMatches.size() >= 8) {
3982  cv::Mat fundamentalInliers;
3983  cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3984 
3985  for (size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
3986  if (fundamentalInliers.at<uchar>((int)i, 0)) {
3987  inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3988 
3989  if (imPts1 != NULL) {
3990  imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
3991  }
3992 
3993  if (imPts2 != NULL) {
3994  imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3995  }
3996  }
3997  }
3998  }
3999 
4000  if (!inliers.empty()) {
4001  // Build a polygon with the list of inlier keypoints detected in the
4002  // current image to get the bounding box
4003  vpPolygon polygon(inliers);
4004  boundingBox = polygon.getBoundingBox();
4005 
4006  // Compute the center of gravity
4007  double meanU = 0.0, meanV = 0.0;
4008  for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
4009  meanU += it->get_u();
4010  meanV += it->get_v();
4011  }
4012 
4013  meanU /= (double)inliers.size();
4014  meanV /= (double)inliers.size();
4015 
4016  centerOfGravity.set_u(meanU);
4017  centerOfGravity.set_v(meanV);
4018  }
4019  } else {
4020  // Too few matches
4021  return false;
4022  }
4023 
4024  if (m_detectionMethod == detectionThreshold) {
4025  return meanDescriptorDistanceTmp < m_detectionThreshold;
4026  } else {
4027  return score > m_detectionScore;
4028  }
4029 }
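
// Sketch (added for exposition): detect the presence of the learned object and
// retrieve its bounding box and center of gravity; whether the distance
// threshold or the score criterion is used depends on the configured
// detection method.
#if 0
static void exampleDetect(vpKeyPoint &keypoint, const vpImage<unsigned char> &I)
{
  vpRect boundingBox;
  vpImagePoint centerOfGravity;
  bool isPlanar = true; // use the homography model for a planar object
  if (keypoint.matchPointAndDetect(I, boundingBox, centerOfGravity, isPlanar)) {
    std::cout << "Object detected, center of gravity: " << centerOfGravity << std::endl;
  }
}
#endif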
4030 
4050 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam,
4051  vpHomogeneousMatrix &cMo, double &error, double &elapsedTime, vpRect &boundingBox,
4052  vpImagePoint &centerOfGravity, bool (*func)(const vpHomogeneousMatrix &),
4053  const vpRect &rectangle)
4054 {
4055  bool isMatchOk = matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
4056  if (isMatchOk) {
4057  // Use the pose estimated to project the model points in the image
4058  vpPoint pt;
4059  vpImagePoint imPt;
4060  std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
4061  size_t cpt = 0;
4062  for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
4063  pt = *it;
4064  pt.project(cMo);
4065  vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), imPt);
4066  modelImagePoints[cpt] = imPt;
4067  }
4068 
4069  // Build a polygon with the list of model image points to get the bounding
4070  // box
4071  vpPolygon polygon(modelImagePoints);
4072  boundingBox = polygon.getBoundingBox();
4073 
4074  // Compute the center of gravity of the current inlier keypoints
4075  double meanU = 0.0, meanV = 0.0;
4076  for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
4077  meanU += it->get_u();
4078  meanV += it->get_v();
4079  }
4080 
4081  meanU /= (double)m_ransacInliers.size();
4082  meanV /= (double)m_ransacInliers.size();
4083 
4084  centerOfGravity.set_u(meanU);
4085  centerOfGravity.set_v(meanV);
4086  }
4087 
4088  return isMatchOk;
4089 }
4090 
4105 void vpKeyPoint::detectExtractAffine(const vpImage<unsigned char> &I,
4106  std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
4107  std::vector<cv::Mat> &listOfDescriptors,
4108  std::vector<vpImage<unsigned char> > *listOfAffineI)
4109 {
4110 #if 0
4111  cv::Mat img;
4112  vpImageConvert::convert(I, img);
4113  listOfKeypoints.clear();
4114  listOfDescriptors.clear();
4115 
4116  for (int tl = 1; tl < 6; tl++) {
4117  double t = pow(2, 0.5 * tl);
4118  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4119  std::vector<cv::KeyPoint> keypoints;
4120  cv::Mat descriptors;
4121 
4122  cv::Mat timg, mask, Ai;
4123  img.copyTo(timg);
4124 
4125  affineSkew(t, phi, timg, mask, Ai);
4126 
4127 
4128  if(listOfAffineI != NULL) {
4129  cv::Mat img_disp;
4130  bitwise_and(mask, timg, img_disp);
4131  vpImage<unsigned char> tI;
4132  vpImageConvert::convert(img_disp, tI);
4133  listOfAffineI->push_back(tI);
4134  }
4135 #if 0
4136  cv::Mat img_disp;
4137  cv::bitwise_and(mask, timg, img_disp);
4138  cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE ); // Create a window for display.
4139  cv::imshow( "Skew", img_disp );
4140  cv::waitKey(0);
4141 #endif
4142 
4143  for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4144  it != m_detectors.end(); ++it) {
4145  std::vector<cv::KeyPoint> kp;
4146  it->second->detect(timg, kp, mask);
4147  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4148  }
4149 
4150  double elapsedTime;
4151  extract(timg, keypoints, descriptors, elapsedTime);
4152 
4153  for(unsigned int i = 0; i < keypoints.size(); i++) {
4154  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4155  cv::Mat kpt_t = Ai * cv::Mat(kpt);
4156  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
4157  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
4158  }
4159 
4160  listOfKeypoints.push_back(keypoints);
4161  listOfDescriptors.push_back(descriptors);
4162  }
4163  }
4164 
4165 #else
4166  cv::Mat img;
4167  vpImageConvert::convert(I, img);
4168 
4169  // Create a vector for storing the affine skew parameters
4170  std::vector<std::pair<double, int> > listOfAffineParams;
4171  for (int tl = 1; tl < 6; tl++) {
4172  double t = pow(2, 0.5 * tl);
4173  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4174  listOfAffineParams.push_back(std::pair<double, int>(t, phi));
4175  }
4176  }
4177 
4178  listOfKeypoints.resize(listOfAffineParams.size());
4179  listOfDescriptors.resize(listOfAffineParams.size());
4180 
4181  if (listOfAffineI != NULL) {
4182  listOfAffineI->resize(listOfAffineParams.size());
4183  }
4184 
4185 #ifdef VISP_HAVE_OPENMP
4186 #pragma omp parallel for
4187 #endif
4188  for (int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
4189  std::vector<cv::KeyPoint> keypoints;
4190  cv::Mat descriptors;
4191 
4192  cv::Mat timg, mask, Ai;
4193  img.copyTo(timg);
4194 
4195  affineSkew(listOfAffineParams[(size_t)cpt].first, listOfAffineParams[(size_t)cpt].second, timg, mask, Ai);
4196 
4197  if (listOfAffineI != NULL) {
4198  cv::Mat img_disp;
4199  bitwise_and(mask, timg, img_disp);
4200  vpImage<unsigned char> tI;
4201  vpImageConvert::convert(img_disp, tI);
4202  (*listOfAffineI)[(size_t)cpt] = tI;
4203  }
4204 
4205 #if 0
4206  cv::Mat img_disp;
4207  cv::bitwise_and(mask, timg, img_disp);
4208  cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE ); // Create a window for display.
4209  cv::imshow( "Skew", img_disp );
4210  cv::waitKey(0);
4211 #endif
4212 
4213  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4214  it != m_detectors.end(); ++it) {
4215  std::vector<cv::KeyPoint> kp;
4216  it->second->detect(timg, kp, mask);
4217  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4218  }
4219 
4220  double elapsedTime;
4221  extract(timg, keypoints, descriptors, elapsedTime);
4222 
4223  for (size_t i = 0; i < keypoints.size(); i++) {
4224  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4225  cv::Mat kpt_t = Ai * cv::Mat(kpt);
4226  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
4227  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
4228  }
4229 
4230  listOfKeypoints[(size_t)cpt] = keypoints;
4231  listOfDescriptors[(size_t)cpt] = descriptors;
4232  }
4233 #endif
4234 }
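
// Sketch (illustrative): the ASIFT-like simulation above can also be called
// directly to obtain one keypoint list and one descriptor matrix per
// simulated affine view.
#if 0
static void exampleAffineDetection(vpKeyPoint &keypoint, const vpImage<unsigned char> &I)
{
  std::vector<std::vector<cv::KeyPoint> > listOfKeypoints;
  std::vector<cv::Mat> listOfDescriptors;
  keypoint.detectExtractAffine(I, listOfKeypoints, listOfDescriptors);
  std::cout << listOfKeypoints.size() << " simulated affine views" << std::endl;
}
#endif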
4235 
4239 void vpKeyPoint::reset()
4240 {
4241  // vpBasicKeyPoint class
4242  referenceImagePointsList.clear();
4243  currentImagePointsList.clear();
4244  matchedReferencePoints.clear();
4245  _reference_computed = false;
4246 
4247  m_computeCovariance = false;
4248  m_covarianceMatrix = vpMatrix();
4249  m_currentImageId = 0;
4250  m_detectionMethod = detectionScore;
4251  m_detectionScore = 0.15;
4252  m_detectionThreshold = 100.0;
4253  m_detectionTime = 0.0;
4254  m_detectorNames.clear();
4255  m_detectors.clear();
4256  m_extractionTime = 0.0;
4257  m_extractorNames.clear();
4258  m_extractors.clear();
4259  m_filteredMatches.clear();
4260  m_filterType = ratioDistanceThreshold;
4261  m_imageFormat = jpgImageFormat;
4262  m_knnMatches.clear();
4263  m_mapOfImageId.clear();
4264  m_mapOfImages.clear();
4265  m_matcher = cv::Ptr<cv::DescriptorMatcher>();
4266  m_matcherName = "BruteForce-Hamming";
4267  m_matches.clear();
4268  m_matchingFactorThreshold = 2.0;
4269  m_matchingRatioThreshold = 0.85;
4270  m_matchingTime = 0.0;
4271  m_matchRansacKeyPointsToPoints.clear();
4272  m_nbRansacIterations = 200;
4273  m_nbRansacMinInlierCount = 100;
4274  m_objectFilteredPoints.clear();
4275  m_poseTime = 0.0;
4276  m_queryDescriptors = cv::Mat();
4277  m_queryFilteredKeyPoints.clear();
4278  m_queryKeyPoints.clear();
4279  m_ransacConsensusPercentage = 20.0;
4280  m_ransacFilterFlag = vpPose::NO_FILTER;
4281  m_ransacInliers.clear();
4282  m_ransacOutliers.clear();
4283  m_ransacParallel = true;
4284  m_ransacParallelNbThreads = 0;
4285  m_ransacReprojectionError = 6.0;
4286  m_ransacThreshold = 0.01;
4287  m_trainDescriptors = cv::Mat();
4288  m_trainKeyPoints.clear();
4289  m_trainPoints.clear();
4290  m_trainVpPoints.clear();
4291  m_useAffineDetection = false;
4292 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
4293  m_useBruteForceCrossCheck = true;
4294 #endif
4295  m_useConsensusPercentage = false;
4296  m_useKnn = true; // as m_filterType == ratioDistanceThreshold
4297  m_useMatchTrainToQuery = false;
4298  m_useRansacVVS = true;
4299  m_useSingleMatchFilter = true;
4300 
4301  m_detectorNames.push_back("ORB");
4302  m_extractorNames.push_back("ORB");
4303 
4304  init();
4305 }
4306 
4315 void vpKeyPoint::saveLearningData(const std::string &filename, bool binaryMode, bool saveTrainingImages)
4316 {
4317  std::string parent = vpIoTools::getParent(filename);
4318  if (!parent.empty()) {
4319  vpIoTools::makeDirectory(parent);
4320  }
4321 
4322  std::map<int, std::string> mapOfImgPath;
4323  if (saveTrainingImages) {
4324 #ifdef VISP_HAVE_MODULE_IO
4325  // Save the training image files in the same directory
4326  unsigned int cpt = 0;
4327 
4328  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
4329  ++it, cpt++) {
4330  if (cpt > 999) {
4331  throw vpException(vpException::fatalError, "The number of training images to save is too big !");
4332  }
4333 
4334  std::stringstream ss;
4335  ss << "train_image_" << std::setfill('0') << std::setw(3) << cpt;
4336 
4337  switch (m_imageFormat) {
4338  case jpgImageFormat:
4339  ss << ".jpg";
4340  break;
4341 
4342  case pngImageFormat:
4343  ss << ".png";
4344  break;
4345 
4346  case ppmImageFormat:
4347  ss << ".ppm";
4348  break;
4349 
4350  case pgmImageFormat:
4351  ss << ".pgm";
4352  break;
4353 
4354  default:
4355  ss << ".png";
4356  break;
4357  }
4358 
4359  std::string imgFilename = ss.str();
4360  mapOfImgPath[it->first] = imgFilename;
4361  vpImageIo::write(it->second, parent + (!parent.empty() ? "/" : "") + imgFilename);
4362  }
4363 #else
4364  std::cout << "Warning: in vpKeyPoint::saveLearningData() training images "
4365  "are not saved because "
4366  "visp_io module is not available !"
4367  << std::endl;
4368 #endif
4369  }
4370 
4371  bool have3DInfo = m_trainPoints.size() > 0;
4372  if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
4373  throw vpException(vpException::fatalError, "List of keypoints and list of 3D points have different size !");
4374  }
4375 
4376  if (binaryMode) {
4377  // Save the learning data into a little endian binary file.
4378  std::ofstream file(filename.c_str(), std::ofstream::binary);
4379  if (!file.is_open()) {
4380  throw vpException(vpException::ioError, "Cannot create the file.");
4381  }
4382 
4383  // Write info about training images
4384  int nbImgs = (int)mapOfImgPath.size();
4385  vpIoTools::writeBinaryValueLE(file, nbImgs);
4386 
4387 #ifdef VISP_HAVE_MODULE_IO
4388  for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4389  // Write image_id
4390  int id = it->first;
4391  vpIoTools::writeBinaryValueLE(file, id);
4392 
4393  // Write image path
4394  std::string path = it->second;
4395  int length = (int)path.length();
4396  vpIoTools::writeBinaryValueLE(file, length);
4397 
4398  for (int cpt = 0; cpt < length; cpt++) {
4399  file.write((char *)(&path[(size_t)cpt]), sizeof(path[(size_t)cpt]));
4400  }
4401  }
4402 #endif
4403 
4404  // Write if we have 3D point information
4405  int have3DInfoInt = have3DInfo ? 1 : 0;
4406  vpIoTools::writeBinaryValueLE(file, have3DInfoInt);
4407 
4408  int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4409  int descriptorType = m_trainDescriptors.type();
4410 
4411  // Write the number of descriptors
4412  vpIoTools::writeBinaryValueLE(file, nRows);
4413 
4414  // Write the size of the descriptor
4415  vpIoTools::writeBinaryValueLE(file, nCols);
4416 
4417  // Write the type of the descriptor
4418  vpIoTools::writeBinaryValueLE(file, descriptorType);
4419 
4420  for (int i = 0; i < nRows; i++) {
4421  unsigned int i_ = (unsigned int)i;
4422  // Write u
4423  float u = m_trainKeyPoints[i_].pt.x;
4424  vpIoTools::writeBinaryValueLE(file, u);
4425 
4426  // Write v
4427  float v = m_trainKeyPoints[i_].pt.y;
4428  vpIoTools::writeBinaryValueLE(file, v);
4429 
4430  // Write size
4431  float size = m_trainKeyPoints[i_].size;
4432  vpIoTools::writeBinaryValueLE(file, size);
4433 
4434  // Write angle
4435  float angle = m_trainKeyPoints[i_].angle;
4436  vpIoTools::writeBinaryValueLE(file, angle);
4437 
4438  // Write response
4439  float response = m_trainKeyPoints[i_].response;
4440  vpIoTools::writeBinaryValueLE(file, response);
4441 
4442  // Write octave
4443  int octave = m_trainKeyPoints[i_].octave;
4444  vpIoTools::writeBinaryValueLE(file, octave);
4445 
4446  // Write class_id
4447  int class_id = m_trainKeyPoints[i_].class_id;
4448  vpIoTools::writeBinaryValueLE(file, class_id);
4449 
4450 // Write image_id
4451 #ifdef VISP_HAVE_MODULE_IO
4452  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4453  int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
4454  vpIoTools::writeBinaryValueLE(file, image_id);
4455 #else
4456  int image_id = -1;
4457  // file.write((char *)(&image_id), sizeof(image_id));
4458  vpIoTools::writeBinaryValueLE(file, image_id);
4459 #endif
4460 
4461  if (have3DInfo) {
4462  float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
4463  // Write oX
4464  vpIoTools::writeBinaryValueLE(file, oX);
4465 
4466  // Write oY
4467  vpIoTools::writeBinaryValueLE(file, oY);
4468 
4469  // Write oZ
4470  vpIoTools::writeBinaryValueLE(file, oZ);
4471  }
4472 
4473  for (int j = 0; j < nCols; j++) {
4474  // Write the descriptor value
4475  switch (descriptorType) {
4476  case CV_8U:
4477  file.write((char *)(&m_trainDescriptors.at<unsigned char>(i, j)),
4478  sizeof(m_trainDescriptors.at<unsigned char>(i, j)));
4479  break;
4480 
4481  case CV_8S:
4482  file.write((char *)(&m_trainDescriptors.at<char>(i, j)), sizeof(m_trainDescriptors.at<char>(i, j)));
4483  break;
4484 
4485  case CV_16U:
4486  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<unsigned short int>(i, j));
4487  break;
4488 
4489  case CV_16S:
4490  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<short int>(i, j));
4491  break;
4492 
4493  case CV_32S:
4494  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<int>(i, j));
4495  break;
4496 
4497  case CV_32F:
4498  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<float>(i, j));
4499  break;
4500 
4501  case CV_64F:
4502  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<double>(i, j));
4503  break;
4504 
4505  default:
4506  throw vpException(vpException::fatalError, "Problem with the data type of descriptors !");
4507  break;
4508  }
4509  }
4510  }
4511 
4512  file.close();
4513  } else {
4514  pugi::xml_document doc;
4515  pugi::xml_node node = doc.append_child(pugi::node_declaration);
4516  node.append_attribute("version") = "1.0";
4517  node.append_attribute("encoding") = "UTF-8";
4518 
4519  if (!doc) {
4520  throw vpException(vpException::ioError, "Error with file: ", filename.c_str());
4521  }
4522 
4523  pugi::xml_node root_node = doc.append_child("LearningData");
4524 
4525  // Write the training images info
4526  pugi::xml_node image_node = root_node.append_child("TrainingImageInfo");
4527 
4528 #ifdef VISP_HAVE_MODULE_IO
4529  for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4530  pugi::xml_node image_info_node = image_node.append_child("trainImg");
4531  image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
4532  std::stringstream ss;
4533  ss << it->first;
4534  image_info_node.append_attribute("image_id") = ss.str().c_str();
4535  }
4536 #endif
4537 
4538  // Write information about descriptors
4539  pugi::xml_node descriptors_info_node = root_node.append_child("DescriptorsInfo");
4540 
4541  int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4542  int descriptorType = m_trainDescriptors.type();
4543 
4544  // Write the number of rows
4545  descriptors_info_node.append_child("nrows").append_child(pugi::node_pcdata).text() = nRows;
4546 
4547  // Write the number of cols
4548  descriptors_info_node.append_child("ncols").append_child(pugi::node_pcdata).text() = nCols;
4549 
4550  // Write the descriptors type
4551  descriptors_info_node.append_child("type").append_child(pugi::node_pcdata).text() = descriptorType;
4552 
4553  for (int i = 0; i < nRows; i++) {
4554  unsigned int i_ = (unsigned int)i;
4555  pugi::xml_node descriptor_node = root_node.append_child("DescriptorInfo");
4556 
4557  descriptor_node.append_child("u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
4558  descriptor_node.append_child("v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
4559  descriptor_node.append_child("size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
4560  descriptor_node.append_child("angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
4561  descriptor_node.append_child("response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
4562  descriptor_node.append_child("octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
4563  descriptor_node.append_child("class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
4564 
4565 #ifdef VISP_HAVE_MODULE_IO
4566  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4567  descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() =
4568  ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
4569 #else
4570  descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() = -1;
4571 #endif
4572 
4573  if (have3DInfo) {
4574  descriptor_node.append_child("oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
4575  descriptor_node.append_child("oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
4576  descriptor_node.append_child("oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
4577  }
4578 
4579  pugi::xml_node desc_node = descriptor_node.append_child("desc");
4580 
4581  for (int j = 0; j < nCols; j++) {
4582  switch (descriptorType) {
4583  case CV_8U: {
4584  // Promote an unsigned char to an int
4585  // val_tmp holds the numeric value that will be written
4586  // We save the value in numeric form, otherwise the xml library will
4587  // not be able to parse it. A better solution could be possible.
4588  int val_tmp = m_trainDescriptors.at<unsigned char>(i, j);
4589  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
4590  } break;
4591 
4592  case CV_8S: {
4593  // Promote a char to an int
4594  // val_tmp holds the numeric value that will be written
4595  // We save the value in numeric form, otherwise the xml library will
4596  // not be able to parse it. A better solution could be possible.
4597  int val_tmp = m_trainDescriptors.at<char>(i, j);
4598  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
4599  } break;
4600 
4601  case CV_16U:
4602  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4603  m_trainDescriptors.at<unsigned short int>(i, j);
4604  break;
4605 
4606  case CV_16S:
4607  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4608  m_trainDescriptors.at<short int>(i, j);
4609  break;
4610 
4611  case CV_32S:
4612  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4613  m_trainDescriptors.at<int>(i, j);
4614  break;
4615 
4616  case CV_32F:
4617  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4618  m_trainDescriptors.at<float>(i, j);
4619  break;
4620 
4621  case CV_64F:
4622  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4623  m_trainDescriptors.at<double>(i, j);
4624  break;
4625 
4626  default:
4627  throw vpException(vpException::fatalError, "Problem with the data type of descriptors !");
4628  break;
4629  }
4630  }
4631  }
4632 
4633  doc.save_file(filename.c_str(), PUGIXML_TEXT(" "), pugi::format_default, pugi::encoding_utf8);
4634  }
4635 }
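
// Usage sketch (hypothetical file name): a typical save / load round trip.
// Binary mode writes a little endian file that loadLearningData() reads back
// with the matching readBinaryValueLE() helpers.
#if 0
static void exampleLearningFile(vpKeyPoint &keypoint, const vpImage<unsigned char> &Iref)
{
  keypoint.buildReference(Iref);
  keypoint.saveLearningData("learning_data.bin", true); // binaryMode = true

  vpKeyPoint keypoint2;
  keypoint2.loadLearningData("learning_data.bin", true);
}
#endif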
4636 
4637 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
4638 #ifndef DOXYGEN_SHOULD_SKIP_THIS
4639 // From OpenCV 2.4.11 source code.
4640 struct KeypointResponseGreaterThanThreshold {
4641  KeypointResponseGreaterThanThreshold(float _value) : value(_value) {}
4642  inline bool operator()(const cv::KeyPoint &kpt) const { return kpt.response >= value; }
4643  float value;
4644 };
4645 
4646 struct KeypointResponseGreater {
4647  inline bool operator()(const cv::KeyPoint &kp1, const cv::KeyPoint &kp2) const { return kp1.response > kp2.response; }
4648 };
4649 
4650 // takes keypoints and culls them by the response
4651 void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints, int n_points)
4652 {
4653  // this is only necessary if the keypoints size is greater than the number
4654  // of desired points.
4655  if (n_points >= 0 && keypoints.size() > (size_t)n_points) {
4656  if (n_points == 0) {
4657  keypoints.clear();
4658  return;
4659  }
4660  // first use nth element to partition the keypoints into the best and
4661  // worst.
4662  std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
4663  // this is the boundary response, and in the case of FAST may be ambiguous
4664  float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
4665  // use std::partition to grab all of the keypoints with the boundary
4666  // response.
4667  std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4668  keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
4669  // resize the keypoints, given this new end point. nth_element and
4670  // partition reordered the points in place
4671  keypoints.resize((size_t)(new_end - keypoints.begin()));
4672  }
4673 }
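
// Sketch (not in the original source): keep only the strongest detections,
// e.g. to bound matching time. Ties at the boundary response are kept, as
// implemented above.
#if 0
static void exampleRetainBest(std::vector<cv::KeyPoint> &keypoints)
{
  vpKeyPoint::KeyPointsFilter::retainBest(keypoints, 500); // keep ~500 best
}
#endif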
4674 
4675 struct RoiPredicate {
4676  RoiPredicate(const cv::Rect &_r) : r(_r) {}
4677 
4678  bool operator()(const cv::KeyPoint &keyPt) const { return !r.contains(keyPt.pt); }
4679 
4680  cv::Rect r;
4681 };
4682 
4683 void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4684  int borderSize)
4685 {
4686  if (borderSize > 0) {
4687  if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4688  keypoints.clear();
4689  else
4690  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4691  RoiPredicate(cv::Rect(
4692  cv::Point(borderSize, borderSize),
4693  cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4694  keypoints.end());
4695  }
4696 }
4697 
4698 struct SizePredicate {
4699  SizePredicate(float _minSize, float _maxSize) : minSize(_minSize), maxSize(_maxSize) {}
4700 
4701  bool operator()(const cv::KeyPoint &keyPt) const
4702  {
4703  float size = keyPt.size;
4704  return (size < minSize) || (size > maxSize);
4705  }
4706 
4707  float minSize, maxSize;
4708 };
4709 
4710 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize)
4711 {
4712  CV_Assert(minSize >= 0);
4713  CV_Assert(maxSize >= 0);
4714  CV_Assert(minSize <= maxSize);
4715 
4716  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4717 }
4718 
4719 class MaskPredicate
4720 {
4721 public:
4722  MaskPredicate(const cv::Mat &_mask) : mask(_mask) {}
4723  bool operator()(const cv::KeyPoint &key_pt) const
4724  {
4725  return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
4726  }
4727 
4728 private:
4729  const cv::Mat mask;
4730  MaskPredicate &operator=(const MaskPredicate &);
4731 };
4732 
4733 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask)
4734 {
4735  if (mask.empty())
4736  return;
4737 
4738  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4739 }
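
// Sketch (illustrative): discard keypoints outside a region of interest given
// as a binary mask, here a centered rectangle built with OpenCV calls.
#if 0
static void exampleMaskFilter(std::vector<cv::KeyPoint> &keypoints, const cv::Size &imgSize)
{
  cv::Mat mask = cv::Mat::zeros(imgSize, CV_8UC1);
  cv::Rect roi(imgSize.width / 4, imgSize.height / 4, imgSize.width / 2, imgSize.height / 2);
  mask(roi).setTo(cv::Scalar(255)); // keypoints on non-zero pixels are kept
  vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
}
#endif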
4740 
4741 struct KeyPoint_LessThan {
4742  KeyPoint_LessThan(const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) {}
4743  bool operator()(/*int i, int j*/ size_t i, size_t j) const
4744  {
4745  const cv::KeyPoint &kp1 = (*kp)[/*(size_t)*/ i];
4746  const cv::KeyPoint &kp2 = (*kp)[/*(size_t)*/ j];
4747  if (!vpMath::equal(kp1.pt.x, kp2.pt.x,
4748  std::numeric_limits<float>::epsilon())) { // if (kp1.pt.x !=
4749  // kp2.pt.x) {
4750  return kp1.pt.x < kp2.pt.x;
4751  }
4752 
4753  if (!vpMath::equal(kp1.pt.y, kp2.pt.y,
4754  std::numeric_limits<float>::epsilon())) { // if (kp1.pt.y !=
4755  // kp2.pt.y) {
4756  return kp1.pt.y < kp2.pt.y;
4757  }
4758 
4759  if (!vpMath::equal(kp1.size, kp2.size,
4760  std::numeric_limits<float>::epsilon())) { // if (kp1.size !=
4761  // kp2.size) {
4762  return kp1.size > kp2.size;
4763  }
4764 
4765  if (!vpMath::equal(kp1.angle, kp2.angle,
4766  std::numeric_limits<float>::epsilon())) { // if (kp1.angle !=
4767  // kp2.angle) {
4768  return kp1.angle < kp2.angle;
4769  }
4770 
4771  if (!vpMath::equal(kp1.response, kp2.response,
4772  std::numeric_limits<float>::epsilon())) { // if (kp1.response !=
4773  // kp2.response) {
4774  return kp1.response > kp2.response;
4775  }
4776 
4777  if (kp1.octave != kp2.octave) {
4778  return kp1.octave > kp2.octave;
4779  }
4780 
4781  if (kp1.class_id != kp2.class_id) {
4782  return kp1.class_id > kp2.class_id;
4783  }
4784 
4785  return i < j;
4786  }
4787  const std::vector<cv::KeyPoint> *kp;
4788 };
4789 
4790 void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4791 {
4792  size_t i, j, n = keypoints.size();
4793  std::vector<size_t> kpidx(n);
4794  std::vector<uchar> mask(n, (uchar)1);
4795 
4796  for (i = 0; i < n; i++) {
4797  kpidx[i] = i;
4798  }
4799  std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4800  for (i = 1, j = 0; i < n; i++) {
4801  cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4802  cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4803  // if (kp1.pt.x != kp2.pt.x || kp1.pt.y != kp2.pt.y || kp1.size !=
4804  // kp2.size || kp1.angle != kp2.angle) {
4805  if (!vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4806  !vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4807  !vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4808  !vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4809  j = i;
4810  } else {
4811  mask[kpidx[i]] = 0;
4812  }
4813  }
4814 
4815  for (i = j = 0; i < n; i++) {
4816  if (mask[i]) {
4817  if (i != j) {
4818  keypoints[j] = keypoints[i];
4819  }
4820  j++;
4821  }
4822  }
4823  keypoints.resize(j);
4824 }
4825 
4826 /*
4827  * PyramidAdaptedFeatureDetector
4828  */
4829 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &_detector,
4830  int _maxLevel)
4831  : detector(_detector), maxLevel(_maxLevel)
4832 {
4833 }
4834 
4835 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty() const
4836 {
4837  return detector.empty() || detector->empty();
4838 }
4839 
4840 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4841  CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4842 {
4843  detectImpl(image.getMat(), keypoints, mask.getMat());
4844 }
4845 
4846 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4847  const cv::Mat &mask) const
4848 {
4849  cv::Mat src = image;
4850  cv::Mat src_mask = mask;
4851 
4852  cv::Mat dilated_mask;
4853  if (!mask.empty()) {
4854  cv::dilate(mask, dilated_mask, cv::Mat());
4855  cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4856  mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4857  dilated_mask = mask255;
4858  }
4859 
4860  for (int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2) {
4861  // Detect on current level of the pyramid
4862  std::vector<cv::KeyPoint> new_pts;
4863  detector->detect(src, new_pts, src_mask);
4864  std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4865  for (; it != end; ++it) {
4866  it->pt.x *= multiplier;
4867  it->pt.y *= multiplier;
4868  it->size *= multiplier;
4869  it->octave = l;
4870  }
4871  keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4872 
4873  // Downsample
4874  if (l < maxLevel) {
4875  cv::Mat dst;
4876  pyrDown(src, dst);
4877  src = dst;
4878 
4879  if (!mask.empty())
4880  resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
4881  }
4882  }
4883 
4884  if (!mask.empty())
4885  vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4886 }
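
// Sketch (illustrative, assumes this nested class is accessible from the
// caller and OpenCV >= 3.0): wrap a detector so that it runs on a Gaussian
// pyramid; coordinates, sizes and octaves are rescaled to the original image
// as done above.
#if 0
static void examplePyramidDetector(const cv::Mat &img, std::vector<cv::KeyPoint> &keypoints)
{
  cv::Ptr<cv::FeatureDetector> fast = cv::FastFeatureDetector::create();
  cv::Ptr<vpKeyPoint::PyramidAdaptedFeatureDetector> detector =
      cv::makePtr<vpKeyPoint::PyramidAdaptedFeatureDetector>(fast, 3); // 3 pyramid levels
  detector->detect(img, keypoints);
}
#endif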
4887 #endif
4888 #endif
4889 
4890 #elif !defined(VISP_BUILD_SHARED_LIBS)
4891 // Work around to avoid warning: libvisp_vision.a(vpKeyPoint.cpp.o) has no
4892 // symbols
4893 void dummy_vpKeyPoint() {}
4894 #endif