vpKeyPoint.cpp
/****************************************************************************
 *
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Key point functionalities.
 *
 * Authors:
 * Souriya Trinh
 *
 *****************************************************************************/

#include <iomanip>
#include <limits>

#include <visp3/core/vpIoTools.h>
#include <visp3/vision/vpKeyPoint.h>

#if (VISP_HAVE_OPENCV_VERSION >= 0x020101)

#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
#include <opencv2/calib3d/calib3d.hpp>
#endif

#ifdef VISP_HAVE_PUGIXML
#include <pugixml.hpp>
#endif

namespace
{
// Specific type transformation functions
inline cv::DMatch knnToDMatch(const std::vector<cv::DMatch> &knnMatches)
{
  if (knnMatches.size() > 0) {
    return knnMatches[0];
  }

  return cv::DMatch();
}

inline vpImagePoint matchRansacToVpImage(const std::pair<cv::KeyPoint, cv::Point3f> &pair)
{
  return vpImagePoint(pair.first.pt.y, pair.first.pt.x);
}

} // namespace

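/*!
   Constructor to initialize the specified detector, descriptor, matcher and
   filtering method.

   \param detectorType : Type of feature detector.
   \param descriptorType : Type of descriptor extractor.
   \param matcherName : Name of the matcher.
   \param filterType : Filtering matching method chosen.
 */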
vpKeyPoint::vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
    m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
    m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();

  m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
  m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);

  init();
}

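/*!
   Constructor to initialize the specified detector, descriptor, matcher and
   filtering method.

   \param detectorName : Name of the detector.
   \param extractorName : Name of the extractor.
   \param matcherName : Name of the matcher.
   \param filterType : Filtering matching method chosen.
 */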
vpKeyPoint::vpKeyPoint(const std::string &detectorName, const std::string &extractorName,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
    m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
    m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();

  m_detectorNames.push_back(detectorName);
  m_extractorNames.push_back(extractorName);

  init();
}

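/*!
   Constructor to initialize multiple detectors and extractors, and the
   specified matcher and filtering method.

   \param detectorNames : List of detector names.
   \param extractorNames : List of extractor names.
   \param matcherName : Name of the matcher.
   \param filterType : Filtering matching method chosen.
 */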
vpKeyPoint::vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
    m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
    m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
    m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
    m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
    m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
    m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0),
    m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(), m_ransacParallel(false),
    m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01), m_trainDescriptors(),
    m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();
  init();
}

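/*!
   Compute the affine transformation of the image corresponding to the given
   tilt and rotation (used for the simulated affine, ASIFT-like, detection),
   together with the mask of valid pixels and the inverse transformation.

   \param tilt : Tilt value in the direction of the x-axis.
   \param phi : Rotation value (in degrees).
   \param img : Image transformed in place.
   \param mask : Mask containing the location of the image pixels after the transformation.
   \param Ai : Inverse affine transformation matrix.
 */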
void vpKeyPoint::affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
{
  int h = img.rows;
  int w = img.cols;

  mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));

  cv::Mat A = cv::Mat::eye(2, 3, CV_32F);

  // if (phi != 0.0) {
  if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    phi *= M_PI / 180.;
    double s = sin(phi);
    double c = cos(phi);

    A = (cv::Mat_<float>(2, 2) << c, -s, s, c);

    cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
    cv::Mat tcorners = corners * A.t();
    cv::Mat tcorners_x, tcorners_y;
    tcorners.col(0).copyTo(tcorners_x);
    tcorners.col(1).copyTo(tcorners_y);
    std::vector<cv::Mat> channels;
    channels.push_back(tcorners_x);
    channels.push_back(tcorners_y);
    cv::merge(channels, tcorners);

    cv::Rect rect = cv::boundingRect(tcorners);
    A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);

    cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
  }
  // if (tilt != 1.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
    double s = 0.8 * sqrt(tilt * tilt - 1);
    cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
    cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
    A.row(0) = A.row(0) / tilt;
  }
  // if (tilt != 1.0 || phi != 0.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
      std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    h = img.rows;
    w = img.cols;
    cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
  }
  cv::invertAffineTransform(A, Ai);
}

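/*!
   Build the reference keypoints list.

   \param I : Input reference image.
   \return The number of detected keypoints in the image \p I.
 */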
unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I) { return buildReference(I, vpRect()); }

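/*!
   Build the reference keypoints list.

   \param I_color : Input reference color image.
   \return The number of detected keypoints in the image \p I_color.
 */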
unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color) { return buildReference(I_color, vpRect()); }

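/*!
   Build the reference keypoints list in a region of interest in the image.

   \param I : Input reference image.
   \param iP : Position of the top-left corner of the region of interest.
   \param height : Height of the region of interest.
   \param width : Width of the region of interest.
   \return The number of detected keypoints in the region of interest.
 */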
unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpImagePoint &iP,
                                        unsigned int height, unsigned int width)
{
  return buildReference(I, vpRect(iP, width, height));
}

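/*!
   Build the reference keypoints list in a region of interest in the image.

   \param I_color : Input reference color image.
   \param iP : Position of the top-left corner of the region of interest.
   \param height : Height of the region of interest.
   \param width : Width of the region of interest.
   \return The number of detected keypoints in the region of interest.
 */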
unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP,
                                        unsigned int height, unsigned int width)
{
  return buildReference(I_color, vpRect(iP, width, height));
}

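/*!
   Build the reference keypoints list in a region of interest in the image.

   \param I : Input reference image.
   \param rectangle : Rectangle of the region of interest.
   \return The number of detected keypoints in the region of interest.

   A minimal usage sketch (the detector, extractor and matcher names are only
   examples among those accepted by the constructor, and the image file name
   is hypothetical):
   \code
   vpImage<unsigned char> I;
   vpImageIo::read(I, "reference.pgm");

   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
   unsigned int nbRef = keypoint.buildReference(I, vpRect(vpImagePoint(0, 0), I.getWidth() / 2, I.getHeight() / 2));
   \endcode
 */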
unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpRect &rectangle)
{
  // Reset variables used when dealing with 3D models
  // Since no 3D point list is passed, we don't need these variables
  m_trainPoints.clear();
  m_mapOfImageId.clear();
  m_mapOfImages.clear();
  m_currentImageId = 1;

  if (m_useAffineDetection) {
    std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
    std::vector<cv::Mat> listOfTrainDescriptors;

    // Detect keypoints and extract descriptors on multiple images
    detectExtractAffine(I, listOfTrainKeyPoints, listOfTrainDescriptors);

    // Flatten the different train lists
    m_trainKeyPoints.clear();
    for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
         it != listOfTrainKeyPoints.end(); ++it) {
      m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
    }

    bool first = true;
    for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
         ++it) {
      if (first) {
        first = false;
        it->copyTo(m_trainDescriptors);
      } else {
        m_trainDescriptors.push_back(*it);
      }
    }
  } else {
    detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
    extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
  }

  // Save the correspondence keypoint class_id with the training image_id in a map.
  // Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(this->m_trainKeyPoints, referenceImagePointsList);

  _reference_computed = true;

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}

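/*!
   Build the reference keypoints list in a region of interest in the image.

   \param I_color : Input reference color image.
   \param rectangle : Rectangle of the region of interest.
   \return The number of detected keypoints in the region of interest.
 */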
unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
{
  vpImageConvert::convert(I_color, m_I);
  return (buildReference(m_I, rectangle));
}

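/*!
   Build the reference keypoints list and compute the descriptors for a given
   list of keypoints with their corresponding 3D object coordinates. Keypoints
   discarded during the descriptor extraction are also removed from \p points3f.

   \param I : Input image.
   \param trainKeyPoints : List of the train keypoints.
   \param points3f : Object points corresponding to the train keypoints.
   \param append : If true, append the supplied keypoints to the current
   training database, otherwise reset it.
   \param class_id : Class id to assign to the keypoints (-1 to keep the existing ids).
   \return The number of keypoints in the current training database.
 */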
unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
                                        std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed
    // Store the hash of a keypoint as the key and the index of the keypoint
    // as the value
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  return (buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &trainKeyPoints,
                                        std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed
    // Store the hash of a keypoint as the key and the index of the keypoint
    // as the value
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  return (buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

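/*!
   Build the reference keypoints list with a given list of keypoints, their
   descriptors and their corresponding 3D object coordinates.

   \param I : Input image.
   \param trainKeyPoints : List of the train keypoints.
   \param trainDescriptors : List of the train descriptors.
   \param points3f : Object points corresponding to the train keypoints.
   \param append : If true, append the supplied data to the current training
   database, otherwise reset it.
   \param class_id : Class id to assign to the keypoints (-1 to keep the existing ids).
   \return The number of keypoints in the current training database.
 */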
unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
                                        const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                        bool append, int class_id)
{
  if (!append) {
    m_trainPoints.clear();
    m_mapOfImageId.clear();
    m_mapOfImages.clear();
    m_currentImageId = 0;
    m_trainKeyPoints.clear();
  }

  m_currentImageId++;

  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
  // Set class_id if != -1
  if (class_id != -1) {
    for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
      it->class_id = class_id;
    }
  }

  // Save the correspondence keypoint class_id with the training image_id in a map.
  // Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
       ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Append reference lists
  m_trainKeyPoints.insert(m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
  if (!append) {
    trainDescriptors.copyTo(m_trainDescriptors);
  } else {
    m_trainDescriptors.push_back(trainDescriptors);
  }
  this->m_trainPoints.insert(m_trainPoints.end(), points3f.begin(), points3f.end());

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(this->m_trainKeyPoints, referenceImagePointsList);
  vpConvert::convertFromOpenCV(m_trainPoints, m_trainVpPoints);

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  _reference_computed = true;

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const std::vector<cv::KeyPoint> &trainKeyPoints,
                                        const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                        bool append, int class_id)
{
  vpImageConvert::convert(I_color, m_I);
  return (buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

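/*!
   Compute the 3D coordinates, in the object frame, of a keypoint located on a
   planar face of the object. The plane equation is estimated from the first
   three points of \p roi, then the depth of the keypoint is obtained by
   intersecting its line of sight with this plane.

   \param candidate : Keypoint detected in the image.
   \param roi : List of 3D points in the camera frame representing a planar face.
   \param cam : Camera parameters.
   \param cMo : Homogeneous matrix between the object frame and the camera frame.
   \param point : Computed 3D coordinates in the object frame.
 */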
void vpKeyPoint::compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate.pt.x, candidate.pt.y, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point = cv::Point3f((float)point_obj[0], (float)point_obj[1], (float)point_obj[2]);
}

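/*!
   Compute the 3D coordinates, in the object frame, of an image point located
   on a planar face of the object.

   \param candidate : Image point.
   \param roi : List of 3D points in the camera frame representing a planar face.
   \param cam : Camera parameters.
   \param cMo : Homogeneous matrix between the object frame and the camera frame.
   \param point : Computed 3D coordinates in the object frame.
 */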
void vpKeyPoint::compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, vpPoint &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point.setWorldCoordinates(point_obj);
}

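/*!
   Keep only the keypoints located on faces of the object and compute their
   3D coordinates in the object frame, given the current pose.

   \param cMo : Homogeneous matrix between the object frame and the camera frame.
   \param cam : Camera parameters.
   \param candidates : In input, the list of detected keypoints; in output,
   only those located on a face.
   \param polygons : List of 2D polygons representing the projection of the
   faces in the image plane.
   \param roisPt : For each face, the list of its 3D points in the camera frame.
   \param points : Computed 3D coordinates, in the object frame, of the kept keypoints.
   \param descriptors : Optional descriptors; if not NULL, the rows are
   filtered consistently with the kept keypoints.
 */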
void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<cv::KeyPoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<cv::Point3f> &points, cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpImagePoint imPt;
  cv::Point3f pt;
  cv::Mat desc;

  std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      imPt.set_ij(it2->first.pt.y, it2->first.pt.x);
      if (it1->isInside(imPt)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != NULL) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove candidate keypoint which is located on the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      } else {
        ++it2;
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

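/*!
   Keep only the image points located on faces of the object and compute their
   3D coordinates in the object frame, given the current pose.

   \param cMo : Homogeneous matrix between the object frame and the camera frame.
   \param cam : Camera parameters.
   \param candidates : In input, the list of image points; in output, only
   those located on a face.
   \param polygons : List of 2D polygons representing the projection of the
   faces in the image plane.
   \param roisPt : For each face, the list of its 3D points in the camera frame.
   \param points : Computed 3D coordinates, in the object frame, of the kept points.
   \param descriptors : Optional descriptors; if not NULL, the rows are
   filtered consistently with the kept points.
 */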
void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<vpImagePoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<vpPoint> &points, cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpPoint pt;
  cv::Mat desc;

  std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      if (it1->isInside(it2->first)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != NULL) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove candidate keypoint which is located on the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      } else {
        ++it2;
      }
    }
  }

  // Copy back the filtered descriptors, as done in the cv::KeyPoint overload
  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

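/*!
   Keep only the keypoints located on cylinders of the object and compute their
   3D coordinates in the object frame, given the current pose.

   \param cMo : Homogeneous matrix between the object frame and the camera frame.
   \param cam : Camera parameters.
   \param candidates : In input, the list of detected keypoints; in output,
   only those located on a cylinder.
   \param cylinders : List of vpCylinder corresponding to the cylinders of the model.
   \param vectorOfCylinderRois : For each cylinder, the list of the bounding
   boxes of its visible parts in the image.
   \param points : Computed 3D coordinates, in the object frame, of the kept keypoints.
   \param descriptors : Optional descriptors; if not NULL, the rows are
   filtered consistently with the kept keypoints.
 */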
void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<cv::KeyPoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
    cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current
      // vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->pt.y, it1->pt.x)) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on
          // cylinders
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->pt.x, it1->pt.y, xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(cv::Point3f((float)pt.get_oX(), (float)pt.get_oY(), (float)pt.get_oZ()));

            if (descriptors != NULL) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

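/*!
   Keep only the image points located on cylinders of the object and compute
   their 3D coordinates in the object frame, given the current pose.

   \param cMo : Homogeneous matrix between the object frame and the camera frame.
   \param cam : Camera parameters.
   \param candidates : In input, the list of image points; in output, only
   those located on a cylinder.
   \param cylinders : List of vpCylinder corresponding to the cylinders of the model.
   \param vectorOfCylinderRois : For each cylinder, the list of the bounding
   boxes of its visible parts in the image.
   \param points : Computed 3D coordinates, in the object frame, of the kept points.
   \param descriptors : Optional descriptors; if not NULL, the rows are
   filtered consistently with the kept points.
 */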
void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<vpImagePoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
    cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current
      // vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->get_i(), it1->get_j())) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on
          // cylinders
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->get_u(), it1->get_v(), xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(pt);

            if (descriptors != NULL) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

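/*!
   Compute the pose using the RANSAC scheme implemented in OpenCV
   (cv::solvePnPRansac), from 2D/3D point correspondences.

   \param imagePoints : List of 2D points corresponding to the location of the detected keypoints.
   \param objectPoints : List of the matched 3D points in the object frame.
   \param cam : Camera parameters.
   \param cMo : Computed homogeneous matrix between the object frame and the camera frame.
   \param inlierIndex : List of the indexes of the inliers.
   \param elapsedTime : Elapsed time.
   \param func : Optional function pointer to filter out poses that do not
   respect some criterion.
   \return True if the pose has been computed, false otherwise.
 */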
bool vpKeyPoint::computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
                             const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
                             double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    std::cerr << "Not enough points to compute the pose (at least 4 points "
                 "are needed)."
              << std::endl;

    return false;
  }

  cv::Mat cameraMatrix =
      (cv::Mat_<double>(3, 3) << cam.get_px(), 0, cam.get_u0(), 0, cam.get_py(), cam.get_v0(), 0, 0, 1);
  cv::Mat rvec, tvec;

  // Bug with OpenCV < 2.4.0 when zero distortion is provided by an empty
  // array. http://code.opencv.org/issues/1700 ;
  // http://code.opencv.org/issues/1718 what(): Distortion coefficients must
  // be 1x4, 4x1, 1x5, 5x1, 1x8 or 8x1 floating-point vector in function
  // cvProjectPoints2. Fixed in OpenCV 2.4.0 (r7558).
  // cv::Mat distCoeffs;
  cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);

  try {
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
    // OpenCV 3.0.0 (2014/12/12)
    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError,
                       0.99, // confidence=0.99 (default): the probability
                             // that the algorithm produces a useful result.
                       inlierIndex, cv::SOLVEPNP_ITERATIVE);
    // SOLVEPNP_ITERATIVE (default): iterative method based on Levenberg-Marquardt
    // optimization. The function finds a pose that minimizes the reprojection
    // error, that is the sum of squared distances between the observed
    // projections imagePoints and the objectPoints projected with projectPoints().
    // SOLVEPNP_P3P: method based on the paper of X.S. Gao, X.-R. Hou, J. Tang,
    // H.-F. Chang, "Complete Solution Classification for the
    // Perspective-Three-Point Problem". It requires exactly four object and
    // image points.
    // SOLVEPNP_EPNP: method introduced by F. Moreno-Noguer, V. Lepetit and
    // P. Fua in the paper "EPnP: Efficient Perspective-n-Point Camera Pose
    // Estimation".
    // SOLVEPNP_DLS: method based on the paper of Joel A. Hesch and Stergios I.
    // Roumeliotis, "A Direct Least-Squares (DLS) Method for PnP".
    // SOLVEPNP_UPNP: method based on the paper of A. Penate-Sanchez,
    // J. Andrade-Cetto, F. Moreno-Noguer, "Exhaustive Linearization for Robust
    // Camera Pose and Focal Length Estimation". It also estimates the
    // parameters f_x and f_y, assuming both have the same value, and updates
    // cameraMatrix with the estimated focal length.
#else
    int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
    if (m_useConsensusPercentage) {
      nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
    }

    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
#endif
  } catch (cv::Exception &e) {
    std::cerr << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }
  vpTranslationVector translationVec(tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2));
  vpThetaUVector thetaUVector(rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2));
  cMo = vpHomogeneousMatrix(translationVec, thetaUVector);

  if (func != NULL) {
    // Check the final pose returned by solvePnPRansac to discard
    // solutions which do not respect the pose criterion.
    if (!func(cMo)) {
      elapsedTime = (vpTime::measureTimeMs() - t);
      return false;
    }
  }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return true;
}

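/*!
   Compute the pose using the ViSP RANSAC scheme implemented in vpPose.

   \param objectVpPoints : List of vpPoint with 2D coordinates (in the image
   plane) and 3D coordinates (in the object frame).
   \param cMo : Computed homogeneous matrix between the object frame and the camera frame.
   \param inliers : List of the inlier points.
   \param elapsedTime : Elapsed time.
   \param func : Optional function pointer to filter out poses that do not
   respect some criterion.
   \return True if the pose has been computed, false otherwise.
 */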
bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, double &elapsedTime,
                             bool (*func)(const vpHomogeneousMatrix &))
{
  std::vector<unsigned int> inlierIndex;
  return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
}

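/*!
   Compute the pose using the ViSP RANSAC scheme implemented in vpPose, and
   also return the indexes of the inliers.

   \param objectVpPoints : List of vpPoint with 2D coordinates (in the image
   plane) and 3D coordinates (in the object frame).
   \param cMo : Computed homogeneous matrix between the object frame and the camera frame.
   \param inliers : List of the inlier points.
   \param inlierIndex : List of the indexes of the inliers.
   \param elapsedTime : Elapsed time.
   \param func : Optional function pointer to filter out poses that do not
   respect some criterion.
   \return True if the pose has been computed, false otherwise.
 */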
bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
                             double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (objectVpPoints.size() < 4) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    // std::cerr << "Not enough points to compute the pose (at least 4
    // points are needed)." << std::endl;

    return false;
  }

  vpPose pose;

  for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
    pose.addPoint(*it);
  }

  unsigned int nbInlierToReachConsensus = (unsigned int)m_nbRansacMinInlierCount;
  if (m_useConsensusPercentage) {
    nbInlierToReachConsensus =
        (unsigned int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
  }

  pose.setRansacFilterFlag(m_ransacFilterFlag);
  pose.setUseParallelRansac(m_ransacParallel);
  pose.setNbParallelRansacThreads(m_ransacParallelNbThreads);
  pose.setRansacNbInliersToReachConsensus(nbInlierToReachConsensus);
  pose.setRansacThreshold(m_ransacThreshold);
  pose.setRansacMaxTrials(m_nbRansacIterations);

  bool isRansacPoseEstimationOk = false;
  try {
    pose.setCovarianceComputation(m_computeCovariance);
    isRansacPoseEstimationOk = pose.computePose(vpPose::RANSAC, cMo, func);
    inliers = pose.getRansacInliers();
    inlierIndex = pose.getRansacInlierIndex();

    if (m_computeCovariance) {
      m_covarianceMatrix = pose.getCovarianceMatrix();
    }
  } catch (const vpException &e) {
    std::cerr << "e=" << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }

  //  if (func != NULL && isRansacPoseEstimationOk) {
  //    // Check the final pose returned by the RANSAC VVS pose estimation,
  //    // as in some rare cases we can converge toward a final cMo that does
  //    // not respect the pose criterion even if the 4 minimal points picked
  //    // respect the pose criterion.
  //    if (!func(&cMo)) {
  //      elapsedTime = (vpTime::measureTimeMs() - t);
  //      return false;
  //    }
  //  }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return isRansacPoseEstimationOk;
}

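/*!
   Compute the pose estimation error: the mean distance (in pixel) between the
   location of the detected keypoints and the projection of the matched 3D
   model points with the estimated pose.

   \param matchKeyPoints : List of pairs between the detected keypoints and
   the corresponding 3D points.
   \param cam : Camera parameters.
   \param cMo_est : Estimated pose of the object.
   \return The mean reprojection error (in pixel).
 */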
double vpKeyPoint::computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
                                              const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est)
{
  if (matchKeyPoints.size() == 0) {
    // return std::numeric_limits<double>::max(); // create an error under
    // Windows. To fix it we have to add #undef max
    return DBL_MAX;
  }

  std::vector<double> errors(matchKeyPoints.size());
  size_t cpt = 0;
  vpPoint pt;
  for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
       it != matchKeyPoints.end(); ++it, cpt++) {
    pt.set_oX(it->second.x);
    pt.set_oY(it->second.y);
    pt.set_oZ(it->second.z);
    pt.project(cMo_est);
    double u = 0.0, v = 0.0;
    vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), u, v);
    errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
  }

  return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
}

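/*!
   Initialize the size of the matching image (case of a matching side by side
   between the reference image and the current image).

   \param IRef : Reference image.
   \param ICurrent : Current image.
   \param IMatching : Image initialized with the size of the reference image
   and the current image side by side.
 */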
void vpKeyPoint::createImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
                                     vpImage<unsigned char> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = ((std::max))(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<unsigned char>(height, width);
}

void vpKeyPoint::createImageMatching(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent,
                                     vpImage<vpRGBa> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = ((std::max))(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<vpRGBa>(height, width);
}

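/*!
   Initialize the size of the matching image, with an appropriate size
   according to the number of training images (the training images and the
   current image are displayed as a mosaic, with the current image in the
   center).

   \param ICurrent : Current image.
   \param IMatching : Image initialized with the appropriate size.
 */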
void vpKeyPoint::createImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
{
  // Nb images in the training database + the current image we want to detect
  // the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  } else {
    // Multiple training images, display them as a mosaic image
    // (unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Deals with non square mosaic grid and the total number of images
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<unsigned char>(maxH * nbHeight, maxW * nbWidth);
  }
}

void vpKeyPoint::createImageMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
{
  // Nb images in the training database + the current image we want to detect
  // the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  } else {
    // Multiple training images, display them as a mosaic image
    // (unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Deals with non square mosaic grid and the total number of images
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<vpRGBa>(maxH * nbHeight, maxW * nbWidth);
  }
}

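/*!
   Detect keypoints in the image (the overloads below delegate to the cv::Mat
   version, which runs every configured detector and concatenates the results).

   \param I : Input image.
   \param keyPoints : Output list of the detected keypoints.
   \param rectangle : Optional rectangle of the region of interest.
 */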
void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I_color, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask)
{
  double elapsedTime;
  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I, matImg, false);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
        rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  } else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I_color, matImg);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
        rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  } else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const cv::Mat &mask)
{
  double t = vpTime::measureTimeMs();
  keyPoints.clear();

  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
       it != m_detectors.end(); ++it) {
    std::vector<cv::KeyPoint> kp;
    it->second->detect(matImg, kp, mask);
    keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
  }

  elapsedTime = vpTime::measureTimeMs() - t;
}

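/*!
   Display the matched keypoints: crosses in the reference image (train
   keypoints, in red) and in the current image (query keypoints, in green).

   \param IRef : Reference image.
   \param ICurrent : Current image.
   \param size : Size of the displayed crosses.
 */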
void vpKeyPoint::display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<unsigned char> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

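/*!
   Display the matching lines between the keypoints detected in the current
   image and those detected in one training image, displayed side by side.

   \param IRef : Reference image, used to compute the x-offset of the query points.
   \param IMatching : Resulting matching image.
   \param crossSize : Size of the displayed crosses.
   \param lineThickness : Thickness of the displayed lines.
   \param color : Color to use; with vpColor::none, a random color is picked
   for each match.
 */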
void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<vpRGBa> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

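/*!
   Display the matching lines between the current image and the training
   images in a mosaic, together with the RANSAC inliers (green circles) and
   outliers (red circles).

   \param ICurrent : Current image.
   \param IMatching : Resulting matching image, with the training images
   around the current image located at the center of the mosaic.
   \param ransacInliers : List of the RANSAC inliers.
   \param crossSize : Size of the displayed crosses.
   \param lineThickness : Thickness of the displayed lines.
 */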
void vpKeyPoint::displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
                                 const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
                                 unsigned int lineThickness)
{
  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
    // No training images so return
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  // Nb images in the training database + the current image we want to detect
  // the object
  int nbImg = (int)(m_mapOfImages.size() + 1);

  if (nbImg == 2) {
    // Only one training image, so we display the matching result side-by-side
    displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
  } else {
    // Multiple training images, display them as a mosaic image
    int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); // (int) std::floor(std::sqrt((double) nbImg) + 0.5);
    int nbWidth = nbImgSqrt;
    int nbHeight = nbImgSqrt;

    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    std::map<int, int> mapOfImageIdIndex;
    int cpt = 0;
    unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it, cpt++) {
      mapOfImageIdIndex[it->first] = cpt;

      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    // Indexes of the current image in the grid, chosen so that the current
    // image is preferably in the center of the mosaic grid
    int medianI = nbHeight / 2;
    int medianJ = nbWidth / 2;
    int medianIndex = medianI * nbWidth + medianJ;
    for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
      vpImagePoint topLeftCorner;
      int current_class_id_index = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
      } else {
        // Shift by one the index of the training images located after the
        // current image
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
      }

      int indexI = current_class_id_index / nbWidth;
      int indexJ = current_class_id_index - (indexI * nbWidth);
      topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);

      // Display cross for keypoints in the learning database
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }

    vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
    for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
      // Display cross for keypoints detected in the current image
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }
    for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
      // Display green circle for RANSAC inliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
                               (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
    }
    for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
      // Display red circle for RANSAC outliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
                               (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
    }

    for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
      int current_class_id = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
      } else {
        // Shift by one the index of the training images located after the
        // current image
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
      }

      int indexI = current_class_id / nbWidth;
      int indexJ = current_class_id - (indexI * nbWidth);

      vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
                       (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
      vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
                         (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);

      // Draw line for matching keypoints detected in the current image and
      // those detected in the training images
      vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
    }
  }
}

1706  const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
1707  unsigned int lineThickness)
1708 {
1709  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1710  // No training images so return
1711  std::cerr << "There is no training image loaded !" << std::endl;
1712  return;
1713  }
1714 
1715  // Nb images in the training database + the current image we want to detect
1716  // the object
1717  int nbImg = (int)(m_mapOfImages.size() + 1);
1718 
1719  if (nbImg == 2) {
1720  // Only one training image, so we display the matching result side-by-side
1721  displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
1722  } else {
1723  // Multiple training images, display them as a mosaic image
1724  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
1725  int nbWidth = nbImgSqrt;
1726  int nbHeight = nbImgSqrt;
1727 
1728  if (nbImgSqrt * nbImgSqrt < nbImg) {
1729  nbWidth++;
1730  }
1731 
1732  std::map<int, int> mapOfImageIdIndex;
1733  int cpt = 0;
1734  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
1735  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1736  ++it, cpt++) {
1737  mapOfImageIdIndex[it->first] = cpt;
1738 
1739  if (maxW < it->second.getWidth()) {
1740  maxW = it->second.getWidth();
1741  }
1742 
1743  if (maxH < it->second.getHeight()) {
1744  maxH = it->second.getHeight();
1745  }
1746  }
1747 
1748  // Indexes of the current image in the grid computed to put preferably the
1749  // image in the center of the mosaic grid
1750  int medianI = nbHeight / 2;
1751  int medianJ = nbWidth / 2;
1752  int medianIndex = medianI * nbWidth + medianJ;
1753  for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1754  vpImagePoint topLeftCorner;
1755  int current_class_id_index = 0;
1756  if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1757  current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1758  } else {
1759  // Shift of one unity the index of the training images which are after
1760  // the current image
1761  current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1762  }
1763 
1764  int indexI = current_class_id_index / nbWidth;
1765  int indexJ = current_class_id_index - (indexI * nbWidth);
1766  topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);
1767 
1768  // Display cross for keypoints in the learning database
1769  vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
1770  (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
1771  }
1772 
1773  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
1774  for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1775  // Display cross for keypoints detected in the current image
1776  vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
1777  (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
1778  }
1779  for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1780  // Display green circle for RANSAC inliers
1781  vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
1782  (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
1783  }
1784  for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1785  // Display red circle for RANSAC outliers
1786  vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
1787  (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
1788  }
1789 
1790  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1791  int current_class_id = 0;
1792  if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
1793  current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1794  } else {
1795  // Shift of one unity the index of the training images which are after
1796  // the current image
1797  current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1798  }
1799 
1800  int indexI = current_class_id / nbWidth;
1801  int indexJ = current_class_id - (indexI * nbWidth);
1802 
1803  vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
1804  (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
1805  vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
1806  (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);
1807 
1808  // Draw line for matching keypoints detected in the current image and
1809  // those detected in the training images
1810  vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
1811  }
1812  }
1813 }
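/*
 * Worked example of the mosaic grid arithmetic above (an illustration, not
 * part of the original source): with 8 training images, nbImg = 8 + 1 = 9,
 * so nbImgSqrt = round(sqrt(9)) = 3 and the mosaic is a 3 x 3 grid. The
 * current image occupies the center cell: medianI = 3 / 2 = 1,
 * medianJ = 3 / 2 = 1, medianIndex = 1 * 3 + 1 = 4. Training images with an
 * index >= 4 are shifted by one cell (to cells 5..8), leaving cell 4 free
 * for the current image.
 */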
1814 
1825 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1826  std::vector<cv::Point3f> *trainPoints)
1827 {
1828  double elapsedTime;
1829  extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1830 }
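/*
 * Usage sketch for the extract() overloads above (a minimal illustration;
 * the image file name is hypothetical):
 * \code
 * #include <visp3/io/vpImageIo.h>
 * #include <visp3/vision/vpKeyPoint.h>
 *
 * vpImage<unsigned char> I;
 * vpImageIo::read(I, "image.png");
 *
 * vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
 * std::vector<cv::KeyPoint> keyPoints;
 * cv::Mat descriptors;
 * keypoint.detect(I, keyPoints);               // detect keypoints first
 * keypoint.extract(I, keyPoints, descriptors); // then compute their descriptors
 * \endcode
 */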
1831 
1842 void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1843  std::vector<cv::Point3f> *trainPoints)
1844 {
1845  double elapsedTime;
1846  extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
1847 }
1848 
1859 void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1860  std::vector<cv::Point3f> *trainPoints)
1861 {
1862  double elapsedTime;
1863  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1864 }
1865 
1877 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1878  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1879 {
1880  cv::Mat matImg;
1881  vpImageConvert::convert(I, matImg, false);
1882  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1883 }
1884 
1896 void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1897  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1898 {
1899  cv::Mat matImg;
1900  vpImageConvert::convert(I_color, matImg);
1901  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1902 }
1903 
1915 void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1916  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1917 {
1918  double t = vpTime::measureTimeMs();
1919  bool first = true;
1920 
1921  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1922  itd != m_extractors.end(); ++itd) {
1923  if (first) {
1924  first = false;
1925  // Check if we have 3D object point information
1926  if (trainPoints != NULL && !trainPoints->empty()) {
1927  // Copy the input list of keypoints; keypoints for which a descriptor
1928  // cannot be computed are removed by the compute() function
1929  std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1930 
1931  // Extract descriptors for the given list of keypoints
1932  itd->second->compute(matImg, keyPoints, descriptors);
1933 
1934  if (keyPoints.size() != keyPoints_tmp.size()) {
1935  // Keypoints have been removed
1936  // Store the hash of a keypoint as the key and the index of the
1937  // keypoint as the value
1938  std::map<size_t, size_t> mapOfKeypointHashes;
1939  size_t cpt = 0;
1940  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1941  ++it, cpt++) {
1942  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1943  }
1944 
1945  std::vector<cv::Point3f> trainPoints_tmp;
1946  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1947  if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1948  trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1949  }
1950  }
1951 
1952  // Copy trainPoints_tmp to m_trainPoints
1953  *trainPoints = trainPoints_tmp;
1954  }
1955  } else {
1956  // Extract descriptors for the given list of keypoints
1957  itd->second->compute(matImg, keyPoints, descriptors);
1958  }
1959  } else {
1960  // Copy the input list of keypoints; keypoints for which a descriptor
1961  // cannot be computed are removed by the compute() function
1962  std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1963 
1964  cv::Mat desc;
1965  // Extract descriptors for the given list of keypoints
1966  itd->second->compute(matImg, keyPoints, desc);
1967 
1968  if (keyPoints.size() != keyPoints_tmp.size()) {
1969  // Keypoints have been removed
1970  // Store the hash of a keypoint as the key and the index of the
1971  // keypoint as the value
1972  std::map<size_t, size_t> mapOfKeypointHashes;
1973  size_t cpt = 0;
1974  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1975  ++it, cpt++) {
1976  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1977  }
1978 
1979  std::vector<cv::Point3f> trainPoints_tmp;
1980  cv::Mat descriptors_tmp;
1981  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1982  if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1983  if (trainPoints != NULL && !trainPoints->empty()) {
1984  trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1985  }
1986 
1987  if (!descriptors.empty()) {
1988  descriptors_tmp.push_back(descriptors.row((int)mapOfKeypointHashes[myKeypointHash(*it)]));
1989  }
1990  }
1991  }
1992 
1993  if (trainPoints != NULL) {
1994  // Copy trainPoints_tmp to m_trainPoints
1995  *trainPoints = trainPoints_tmp;
1996  }
1997  // Copy descriptors_tmp to descriptors
1998  descriptors_tmp.copyTo(descriptors);
1999  }
2000 
2001  // Merge descriptors horizontally
2002  if (descriptors.empty()) {
2003  desc.copyTo(descriptors);
2004  } else {
2005  cv::hconcat(descriptors, desc, descriptors);
2006  }
2007  }
2008  }
2009 
2010  if (keyPoints.size() != (size_t)descriptors.rows) {
2011  std::cerr << "keyPoints.size() != (size_t) descriptors.rows" << std::endl;
2012  }
2013  elapsedTime = vpTime::measureTimeMs() - t;
2014 }
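/*
 * The loop above chains several extractors on the same keypoints and glues
 * their descriptors side by side with cv::hconcat(). A minimal OpenCV-level
 * sketch of the same idea (an illustration only; it assumes both extractors
 * output the same descriptor depth, as enforced by initExtractors(), and
 * guards against keypoints pruned between the two compute() calls):
 * \code
 * cv::Mat img;                                 // grayscale input, assumed loaded elsewhere
 * std::vector<cv::KeyPoint> kpts;
 * cv::ORB::create()->detect(img, kpts);
 *
 * cv::Mat d1, d2, merged;
 * cv::ORB::create()->compute(img, kpts, d1);   // 32-byte CV_8U descriptors
 * cv::BRISK::create()->compute(img, kpts, d2); // 64-byte CV_8U; compute() may prune kpts
 * if (d1.rows == d2.rows)
 *   cv::hconcat(d1, d2, merged);               // one longer descriptor per keypoint
 * \endcode
 */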
2015 
2019 void vpKeyPoint::filterMatches()
2020 {
2021  std::vector<cv::KeyPoint> queryKpts;
2022  std::vector<cv::Point3f> trainPts;
2023  std::vector<cv::DMatch> m;
2024 
2025  if (m_useKnn) {
2026  // double max_dist = 0;
2027  // double min_dist = std::numeric_limits<double>::max(); // creates an
2028  // error under Windows; to fix it we would have to add #undef max
2029  double min_dist = DBL_MAX;
2030  double mean = 0.0;
2031  std::vector<double> distance_vec(m_knnMatches.size());
2032 
2033  if (m_filterType == stdAndRatioDistanceThreshold) {
2034  for (size_t i = 0; i < m_knnMatches.size(); i++) {
2035  double dist = m_knnMatches[i][0].distance;
2036  mean += dist;
2037  distance_vec[i] = dist;
2038 
2039  if (dist < min_dist) {
2040  min_dist = dist;
2041  }
2042  // if (dist > max_dist) {
2043  // max_dist = dist;
2044  //}
2045  }
2046  mean /= m_queryDescriptors.rows;
2047  }
2048 
2049  double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2050  double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
2051  double threshold = min_dist + stdev;
2052 
2053  for (size_t i = 0; i < m_knnMatches.size(); i++) {
2054  if (m_knnMatches[i].size() >= 2) {
2055  // Calculate ratio of the descriptor distance between the two nearest
2056  // neighbors of the keypoint
2057  float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
2058  // float ratio = std::sqrt((vecMatches[i][0].distance *
2059  // vecMatches[i][0].distance)
2060  // / (vecMatches[i][1].distance *
2061  // vecMatches[i][1].distance));
2062  double dist = m_knnMatches[i][0].distance;
2063 
2064  if (ratio < m_matchingRatioThreshold || (m_filterType == stdAndRatioDistanceThreshold && dist < threshold)) {
2065  m.push_back(cv::DMatch((int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
2066 
2067  if (!m_trainPoints.empty()) {
2068  trainPts.push_back(m_trainPoints[(size_t)m_knnMatches[i][0].trainIdx]);
2069  }
2070  queryKpts.push_back(m_queryKeyPoints[(size_t)m_knnMatches[i][0].queryIdx]);
2071  }
2072  }
2073  }
2074  } else {
2075  // double max_dist = 0;
2076  // creates an error under Windows; to fix it we would have to add #undef max
2077  // double min_dist = std::numeric_limits<double>::max();
2078  double min_dist = DBL_MAX;
2079  double mean = 0.0;
2080  std::vector<double> distance_vec(m_matches.size());
2081  for (size_t i = 0; i < m_matches.size(); i++) {
2082  double dist = m_matches[i].distance;
2083  mean += dist;
2084  distance_vec[i] = dist;
2085 
2086  if (dist < min_dist) {
2087  min_dist = dist;
2088  }
2089  // if (dist > max_dist) {
2090  // max_dist = dist;
2091  // }
2092  }
2093  mean /= m_queryDescriptors.rows;
2094 
2095  double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2096  double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
2097 
2098  // Define a threshold where we keep all keypoints whose the descriptor
2099  // distance falls below a factor of the minimum descriptor distance (for
2100  // all the query keypoints) or below the minimum descriptor distance +
2101  // the standard deviation (calculated on all the query descriptor
2102  // distances)
2103  double threshold =
2104  m_filterType == constantFactorDistanceThreshold ? m_matchingFactorThreshold * min_dist : min_dist + stdev;
2105 
2106  for (size_t i = 0; i < m_matches.size(); i++) {
2107  if (m_matches[i].distance <= threshold) {
2108  m.push_back(cv::DMatch((int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
2109 
2110  if (!m_trainPoints.empty()) {
2111  trainPts.push_back(m_trainPoints[(size_t)m_matches[i].trainIdx]);
2112  }
2113  queryKpts.push_back(m_queryKeyPoints[(size_t)m_matches[i].queryIdx]);
2114  }
2115  }
2116  }
2117 
2118  if (m_useSingleMatchFilter) {
2119  // Eliminate matches where multiple query keypoints are matched to the
2120  // same train keypoint
2121  std::vector<cv::DMatch> mTmp;
2122  std::vector<cv::Point3f> trainPtsTmp;
2123  std::vector<cv::KeyPoint> queryKptsTmp;
2124 
2125  std::map<int, int> mapOfTrainIdx;
2126  // Count the number of query points matched to the same train point
2127  for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2128  mapOfTrainIdx[it->trainIdx]++;
2129  }
2130 
2131  // Keep matches with only one correspondence
2132  for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2133  if (mapOfTrainIdx[it->trainIdx] == 1) {
2134  mTmp.push_back(cv::DMatch((int)queryKptsTmp.size(), it->trainIdx, it->distance));
2135 
2136  if (!m_trainPoints.empty()) {
2137  trainPtsTmp.push_back(m_trainPoints[(size_t)it->trainIdx]);
2138  }
2139  queryKptsTmp.push_back(queryKpts[(size_t)it->queryIdx]);
2140  }
2141  }
2142 
2143  m_filteredMatches = mTmp;
2144  m_objectFilteredPoints = trainPtsTmp;
2145  m_queryFilteredKeyPoints = queryKptsTmp;
2146  } else {
2147  m_filteredMatches = m;
2148  m_objectFilteredPoints = trainPts;
2149  m_queryFilteredKeyPoints = queryKpts;
2150  }
2151 }
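/*
 * The knn branch above implements Lowe's ratio test: a match is kept only
 * when the best distance is clearly smaller than the second best one. A
 * standalone OpenCV sketch of that test (an illustration; assumes cv::Mat
 * queryDescriptors and trainDescriptors filled elsewhere; 0.85 mirrors the
 * default m_matchingRatioThreshold):
 * \code
 * cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");
 * std::vector<std::vector<cv::DMatch> > knnMatches;
 * matcher->knnMatch(queryDescriptors, trainDescriptors, knnMatches, 2);
 *
 * std::vector<cv::DMatch> goodMatches;
 * for (size_t i = 0; i < knnMatches.size(); i++) {
 *   if (knnMatches[i].size() >= 2 &&
 *       knnMatches[i][0].distance < 0.85f * knnMatches[i][1].distance) {
 *     goodMatches.push_back(knnMatches[i][0]);
 *   }
 * }
 * \endcode
 */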
2152 
2160 void vpKeyPoint::getObjectPoints(std::vector<cv::Point3f> &objectPoints) const
2161 {
2162  objectPoints = m_objectFilteredPoints;
2163 }
2164 
2172 void vpKeyPoint::getObjectPoints(std::vector<vpPoint> &objectPoints) const
2173 {
2174  vpConvert::convertFromOpenCV(m_objectFilteredPoints, objectPoints);
2175 }
2176 
2185 void vpKeyPoint::getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints, bool matches) const
2186 {
2187  if (matches) {
2188  keyPoints = m_queryFilteredKeyPoints;
2189  }
2190  else {
2191  keyPoints = m_queryKeyPoints;
2192  }
2193 }
2194 
2201 void vpKeyPoint::getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints, bool matches) const
2202 {
2203  if (matches) {
2204  keyPoints = currentImagePointsList;
2205  }
2206  else {
2207  vpConvert::convertFromOpenCV(m_queryKeyPoints, keyPoints);
2208  }
2209 }
2210 
2216 void vpKeyPoint::getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const { keyPoints = m_trainKeyPoints; }
2217 
2223 void vpKeyPoint::getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const { keyPoints = referenceImagePointsList; }
2224 
2231 void vpKeyPoint::getTrainPoints(std::vector<cv::Point3f> &points) const { points = m_trainPoints; }
2232 
2239 void vpKeyPoint::getTrainPoints(std::vector<vpPoint> &points) const { points = m_trainVpPoints; }
2240 
2245 void vpKeyPoint::init()
2246 {
2247 // Require 2.4.0 <= opencv < 3.0.0
2248 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
2249  // The following line must be called in order to use SIFT or SURF
2250  if (!cv::initModule_nonfree()) {
2251  std::cerr << "Cannot init module non free, SIFT or SURF cannot be used." << std::endl;
2252  }
2253 #endif
2254 
2255  // Use k-nearest neighbors (knn) to retrieve the two best matches for a
2256  // keypoint, so this is only needed for the ratio-based filtering methods
2257  if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
2258  m_useKnn = true;
2259  }
2260 
2261  initDetectors(m_detectorNames);
2262  initExtractors(m_extractorNames);
2263  initMatcher(m_matcherName);
2264 }
2265 
2271 void vpKeyPoint::initDetector(const std::string &detectorName)
2272 {
2273 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2274  m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
2275 
2276  if (m_detectors[detectorName] == NULL) {
2277  std::stringstream ss_msg;
2278  ss_msg << "Fail to initialize the detector: " << detectorName
2279  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2280  throw vpException(vpException::fatalError, ss_msg.str());
2281  }
2282 #else
2283  std::string detectorNameTmp = detectorName;
2284  std::string pyramid = "Pyramid";
2285  std::size_t pos = detectorName.find(pyramid);
2286  bool usePyramid = false;
2287  if (pos != std::string::npos) {
2288  detectorNameTmp = detectorName.substr(pos + pyramid.size());
2289  usePyramid = true;
2290  }
2291 
2292  if (detectorNameTmp == "SIFT") {
2293 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2294  cv::Ptr<cv::FeatureDetector> siftDetector;
2295  if (m_maxFeatures > 0) {
2296  siftDetector = cv::xfeatures2d::SIFT::create(m_maxFeatures);
2297  }
2298  else {
2299  siftDetector = cv::xfeatures2d::SIFT::create();
2300  }
2301  if (!usePyramid) {
2302  m_detectors[detectorNameTmp] = siftDetector;
2303  } else {
2304  std::cerr << "You should not use SIFT with Pyramid feature detection!" << std::endl;
2305  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2306  }
2307 #else
2308  std::stringstream ss_msg;
2309  ss_msg << "Fail to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2310  << " was not built with the xFeatures2d module.";
2311  throw vpException(vpException::fatalError, ss_msg.str());
2312 #endif
2313  } else if (detectorNameTmp == "SURF") {
2314 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2315  cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
2316  if (!usePyramid) {
2317  m_detectors[detectorNameTmp] = surfDetector;
2318  } else {
2319  std::cerr << "You should not use SURF with Pyramid feature detection!" << std::endl;
2320  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
2321  }
2322 #else
2323  std::stringstream ss_msg;
2324  ss_msg << "Fail to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2325  << " was not built with the xFeatures2d module.";
2326  throw vpException(vpException::fatalError, ss_msg.str());
2327 #endif
2328  } else if (detectorNameTmp == "FAST") {
2329  cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
2330  if (!usePyramid) {
2331  m_detectors[detectorNameTmp] = fastDetector;
2332  } else {
2333  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2334  }
2335  } else if (detectorNameTmp == "MSER") {
2336  cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
2337  if (!usePyramid) {
2338  m_detectors[detectorNameTmp] = fastDetector;
2339  } else {
2340  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2341  }
2342  } else if (detectorNameTmp == "ORB") {
2343  cv::Ptr<cv::FeatureDetector> orbDetector;
2344  if (m_maxFeatures > 0) {
2345  orbDetector = cv::ORB::create(m_maxFeatures);
2346  }
2347  else {
2348  orbDetector = cv::ORB::create();
2349  }
2350  if (!usePyramid) {
2351  m_detectors[detectorNameTmp] = orbDetector;
2352  } else {
2353  std::cerr << "You should not use ORB with Pyramid feature detection!" << std::endl;
2354  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
2355  }
2356  } else if (detectorNameTmp == "BRISK") {
2357  cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
2358  if (!usePyramid) {
2359  m_detectors[detectorNameTmp] = briskDetector;
2360  } else {
2361  std::cerr << "You should not use BRISK with Pyramid feature detection!" << std::endl;
2362  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
2363  }
2364  } else if (detectorNameTmp == "KAZE") {
2365  cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
2366  if (!usePyramid) {
2367  m_detectors[detectorNameTmp] = kazeDetector;
2368  } else {
2369  std::cerr << "You should not use KAZE with Pyramid feature detection!" << std::endl;
2370  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
2371  }
2372  } else if (detectorNameTmp == "AKAZE") {
2373  cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
2374  if (!usePyramid) {
2375  m_detectors[detectorNameTmp] = akazeDetector;
2376  } else {
2377  std::cerr << "You should not use AKAZE with Pyramid feature detection!" << std::endl;
2378  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
2379  }
2380  } else if (detectorNameTmp == "GFTT") {
2381  cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
2382  if (!usePyramid) {
2383  m_detectors[detectorNameTmp] = gfttDetector;
2384  } else {
2385  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
2386  }
2387  } else if (detectorNameTmp == "SimpleBlob") {
2388  cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
2389  if (!usePyramid) {
2390  m_detectors[detectorNameTmp] = simpleBlobDetector;
2391  } else {
2392  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
2393  }
2394  } else if (detectorNameTmp == "STAR") {
2395 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2396  cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
2397  if (!usePyramid) {
2398  m_detectors[detectorNameTmp] = starDetector;
2399  } else {
2400  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
2401  }
2402 #else
2403  std::stringstream ss_msg;
2404  ss_msg << "Fail to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2405  << " was not built with the xFeatures2d module.";
2406  throw vpException(vpException::fatalError, ss_msg.str());
2407 #endif
2408  } else if (detectorNameTmp == "AGAST") {
2409  cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
2410  if (!usePyramid) {
2411  m_detectors[detectorNameTmp] = agastDetector;
2412  } else {
2413  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
2414  }
2415  } else if (detectorNameTmp == "MSD") {
2416 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
2417 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2418  cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
2419  if (!usePyramid) {
2420  m_detectors[detectorNameTmp] = msdDetector;
2421  } else {
2422  std::cerr << "You should not use MSD with Pyramid feature detection!" << std::endl;
2423  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
2424  }
2425 #else
2426  std::stringstream ss_msg;
2427  ss_msg << "Fail to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2428  << " was not built with the xFeatures2d module.";
2429  throw vpException(vpException::fatalError, ss_msg.str());
2430 #endif
2431 #else
2432  std::stringstream ss_msg;
2433  ss_msg << "Feature " << detectorName << " is not available in OpenCV version: " << std::hex
2434  << VISP_HAVE_OPENCV_VERSION << " (require >= OpenCV 3.1).";
  throw vpException(vpException::fatalError, ss_msg.str());
2435 #endif
2436  } else {
2437  std::cerr << "The detector: " << detectorNameTmp << " is not available." << std::endl;
2438  }
2439 
2440  bool detectorInitialized = false;
2441  if (!usePyramid) {
2442  //if not null and to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
2443  detectorInitialized = !m_detectors[detectorNameTmp].empty();
2444  } else {
2445  //if not null and to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
2446  detectorInitialized = !m_detectors[detectorName].empty();
2447  }
2448 
2449  if (!detectorInitialized) {
2450  std::stringstream ss_msg;
2451  ss_msg << "Fail to initialize the detector: " << detectorNameTmp
2452  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2453  throw vpException(vpException::fatalError, ss_msg.str());
2454  }
2455 #endif
2456 }
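/*
 * Usage sketch: detector names map to the OpenCV factories above, and the
 * "Pyramid" prefix wraps the detector in a PyramidAdaptedFeatureDetector
 * (an illustration only):
 * \code
 * vpKeyPoint keypoint;
 * keypoint.setDetector("PyramidFAST"); // FAST run on an image pyramid
 * keypoint.setDetector("ORB");         // plain ORB detector
 * \endcode
 */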
2457 
2464 void vpKeyPoint::initDetectors(const std::vector<std::string> &detectorNames)
2465 {
2466  for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
2467  initDetector(*it);
2468  }
2469 }
2470 
2476 void vpKeyPoint::initExtractor(const std::string &extractorName)
2477 {
2478 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2479  m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
2480 #else
2481  if (extractorName == "SIFT") {
2482 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2483  m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
2484 #else
2485  std::stringstream ss_msg;
2486  ss_msg << "Fail to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2487  << " was not built with the xFeatures2d module.";
2488  throw vpException(vpException::fatalError, ss_msg.str());
2489 #endif
2490  } else if (extractorName == "SURF") {
2491 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2492  // Use extended set of SURF descriptors (128 instead of 64)
2493  m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3, true);
2494 #else
2495  std::stringstream ss_msg;
2496  ss_msg << "Fail to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2497  << " was not built with the xFeatures2d module.";
2498  throw vpException(vpException::fatalError, ss_msg.str());
2499 #endif
2500  } else if (extractorName == "ORB") {
2501  m_extractors[extractorName] = cv::ORB::create();
2502  } else if (extractorName == "BRISK") {
2503  m_extractors[extractorName] = cv::BRISK::create();
2504  } else if (extractorName == "FREAK") {
2505 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2506  m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2507 #else
2508  std::stringstream ss_msg;
2509  ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2510  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2511  throw vpException(vpException::fatalError, ss_msg.str());
2512 #endif
2513  } else if (extractorName == "BRIEF") {
2514 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2515  m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2516 #else
2517  std::stringstream ss_msg;
2518  ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2519  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2520  throw vpException(vpException::fatalError, ss_msg.str());
2521 #endif
2522  } else if (extractorName == "KAZE") {
2523  m_extractors[extractorName] = cv::KAZE::create();
2524  } else if (extractorName == "AKAZE") {
2525  m_extractors[extractorName] = cv::AKAZE::create();
2526  } else if (extractorName == "DAISY") {
2527 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2528  m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2529 #else
2530  std::stringstream ss_msg;
2531  ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2532  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2533  throw vpException(vpException::fatalError, ss_msg.str());
2534 #endif
2535  } else if (extractorName == "LATCH") {
2536 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2537  m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2538 #else
2539  std::stringstream ss_msg;
2540  ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2541  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2542  throw vpException(vpException::fatalError, ss_msg.str());
2543 #endif
2544  } else if (extractorName == "LUCID") {
2545 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2546  // m_extractors[extractorName] = cv::xfeatures2d::LUCID::create(1, 2);
2547  // Not possible currently, need a color image
2548  throw vpException(vpException::badValue, "Not possible currently as it needs a color image.");
2549 #else
2550  std::stringstream ss_msg;
2551  ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2552  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2553  throw vpException(vpException::fatalError, ss_msg.str());
2554 #endif
2555  } else if (extractorName == "VGG") {
2556 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2557 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2558  m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2559 #else
2560  std::stringstream ss_msg;
2561  ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2562  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2563  throw vpException(vpException::fatalError, ss_msg.str());
2564 #endif
2565 #else
2566  std::stringstream ss_msg;
2567  ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2568  << VISP_HAVE_OPENCV_VERSION << " but requires at least OpenCV 3.2.";
2569  throw vpException(vpException::fatalError, ss_msg.str());
2570 #endif
2571  } else if (extractorName == "BoostDesc") {
2572 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2573 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2574  m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2575 #else
2576  std::stringstream ss_msg;
2577  ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2578  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2579  throw vpException(vpException::fatalError, ss_msg.str());
2580 #endif
2581 #else
2582  std::stringstream ss_msg;
2583  ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2584  << VISP_HAVE_OPENCV_VERSION << " but requires at least OpenCV 3.2.";
2585  throw vpException(vpException::fatalError, ss_msg.str());
2586 #endif
2587  } else {
2588  std::cerr << "The extractor: " << extractorName << " is not available." << std::endl;
2589  }
2590 #endif
2591 
2592  if (!m_extractors[extractorName]) { //if null
2593  std::stringstream ss_msg;
2594  ss_msg << "Fail to initialize the extractor: " << extractorName
2595  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2596  throw vpException(vpException::fatalError, ss_msg.str());
2597  }
2598 
2599 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2600  if (extractorName == "SURF") {
2601  // Use extended set of SURF descriptors (128 instead of 64)
2602  m_extractors[extractorName]->set("extended", 1);
2603  }
2604 #endif
2605 }
2606 
2613 void vpKeyPoint::initExtractors(const std::vector<std::string> &extractorNames)
2614 {
2615  for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
2616  initExtractor(*it);
2617  }
2618 
2619  int descriptorType = CV_32F;
2620  bool firstIteration = true;
2621  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2622  it != m_extractors.end(); ++it) {
2623  if (firstIteration) {
2624  firstIteration = false;
2625  descriptorType = it->second->descriptorType();
2626  } else {
2627  if (descriptorType != it->second->descriptorType()) {
2628  throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2629  }
2630  }
2631  }
2632 }
2633 
2634 void vpKeyPoint::initFeatureNames()
2635 {
2636 // Create the maps between the feature enum values and their string names
2637 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2638  m_mapOfDetectorNames[DETECTOR_FAST] = "FAST";
2639  m_mapOfDetectorNames[DETECTOR_MSER] = "MSER";
2640  m_mapOfDetectorNames[DETECTOR_ORB] = "ORB";
2641  m_mapOfDetectorNames[DETECTOR_BRISK] = "BRISK";
2642  m_mapOfDetectorNames[DETECTOR_GFTT] = "GFTT";
2643  m_mapOfDetectorNames[DETECTOR_SimpleBlob] = "SimpleBlob";
2644 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2645  m_mapOfDetectorNames[DETECTOR_STAR] = "STAR";
2646 #endif
2647 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2648  m_mapOfDetectorNames[DETECTOR_SIFT] = "SIFT";
2649  m_mapOfDetectorNames[DETECTOR_SURF] = "SURF";
2650 #endif
2651 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2652  m_mapOfDetectorNames[DETECTOR_KAZE] = "KAZE";
2653  m_mapOfDetectorNames[DETECTOR_AKAZE] = "AKAZE";
2654  m_mapOfDetectorNames[DETECTOR_AGAST] = "AGAST";
2655 #endif
2656 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2657  m_mapOfDetectorNames[DETECTOR_MSD] = "MSD";
2658 #endif
2659 #endif
2660 
2661 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2662  m_mapOfDescriptorNames[DESCRIPTOR_ORB] = "ORB";
2663  m_mapOfDescriptorNames[DESCRIPTOR_BRISK] = "BRISK";
2664 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2665  m_mapOfDescriptorNames[DESCRIPTOR_FREAK] = "FREAK";
2666  m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] = "BRIEF";
2667 #endif
2668 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2669  m_mapOfDescriptorNames[DESCRIPTOR_SIFT] = "SIFT";
2670  m_mapOfDescriptorNames[DESCRIPTOR_SURF] = "SURF";
2671 #endif
2672 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2673  m_mapOfDescriptorNames[DESCRIPTOR_KAZE] = "KAZE";
2674  m_mapOfDescriptorNames[DESCRIPTOR_AKAZE] = "AKAZE";
2675 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2676  m_mapOfDescriptorNames[DESCRIPTOR_DAISY] = "DAISY";
2677  m_mapOfDescriptorNames[DESCRIPTOR_LATCH] = "LATCH";
2678 #endif
2679 #endif
2680 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2681  m_mapOfDescriptorNames[DESCRIPTOR_VGG] = "VGG";
2682  m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] = "BoostDesc";
2683 #endif
2684 #endif
2685 }
2686 
2692 void vpKeyPoint::initMatcher(const std::string &matcherName)
2693 {
2694  int descriptorType = CV_32F;
2695  bool firstIteration = true;
2696  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2697  it != m_extractors.end(); ++it) {
2698  if (firstIteration) {
2699  firstIteration = false;
2700  descriptorType = it->second->descriptorType();
2701  } else {
2702  if (descriptorType != it->second->descriptorType()) {
2703  throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2704  }
2705  }
2706  }
2707 
2708  if (matcherName == "FlannBased") {
2709  if (m_extractors.empty()) {
2710  std::cout << "Warning: No extractor initialized; using floating "
2711  "point values (CV_32F) "
2712  "for the descriptor type by default!"
2713  << std::endl;
2714  }
2715 
2716  if (descriptorType == CV_8U) {
2717 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2718  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2719 #else
2720  m_matcher = new cv::FlannBasedMatcher(new cv::flann::LshIndexParams(12, 20, 2));
2721 #endif
2722  } else {
2723 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2724  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2725 #else
2726  m_matcher = new cv::FlannBasedMatcher(new cv::flann::KDTreeIndexParams());
2727 #endif
2728  }
2729  } else {
2730  m_matcher = cv::DescriptorMatcher::create(matcherName);
2731  }
2732 
2733 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2734  if (m_matcher != NULL && !m_useKnn && matcherName == "BruteForce") {
2735  m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
2736  }
2737 #endif
2738 
2739  if (!m_matcher) { //if null
2740  std::stringstream ss_msg;
2741  ss_msg << "Fail to initialize the matcher: " << matcherName
2742  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2743  throw vpException(vpException::fatalError, ss_msg.str());
2744  }
2745 }
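/*
 * The FLANN branch above selects the index from the descriptor depth: LSH
 * for binary descriptors (CV_8U), KD-tree for floating point ones. The same
 * choice written standalone (an illustration; the OpenCV 3 flavor is shown):
 * \code
 * int descriptorType = CV_8U; // e.g. obtained from extractor->descriptorType()
 * cv::Ptr<cv::DescriptorMatcher> matcher;
 * if (descriptorType == CV_8U)  // binary descriptors: ORB, BRISK, BRIEF, ...
 *   matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
 * else                          // floating point descriptors: SIFT, SURF, ...
 *   matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
 * \endcode
 */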
2746 
2755 void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
2756  vpImage<unsigned char> &IMatching)
2757 {
2758  vpImagePoint topLeftCorner(0, 0);
2759  IMatching.insert(IRef, topLeftCorner);
2760  topLeftCorner = vpImagePoint(0, IRef.getWidth());
2761  IMatching.insert(ICurrent, topLeftCorner);
2762 }
2763 
2772 void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent,
2773  vpImage<vpRGBa> &IMatching)
2774 {
2775  vpImagePoint topLeftCorner(0, 0);
2776  IMatching.insert(IRef, topLeftCorner);
2777  topLeftCorner = vpImagePoint(0, IRef.getWidth());
2778  IMatching.insert(ICurrent, topLeftCorner);
2779 }
2780 
2788 void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
2789 {
2790  // Number of images in the training database + the current image
2791  // in which we want to detect the object
2792  int nbImg = (int)(m_mapOfImages.size() + 1);
2793 
2794  if (m_mapOfImages.empty()) {
2795  std::cerr << "There is no training image loaded!" << std::endl;
2796  return;
2797  }
2798 
2799  if (nbImg == 2) {
2800  // Only one training image, so we display them side by side
2801  insertImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
2802  } else {
2803  // Multiple training images, display them as a mosaic image
2804  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
2805  int nbWidth = nbImgSqrt;
2806  int nbHeight = nbImgSqrt;
2807 
2808  if (nbImgSqrt * nbImgSqrt < nbImg) {
2809  nbWidth++;
2810  }
2811 
2812  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2813  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2814  ++it) {
2815  if (maxW < it->second.getWidth()) {
2816  maxW = it->second.getWidth();
2817  }
2818 
2819  if (maxH < it->second.getHeight()) {
2820  maxH = it->second.getHeight();
2821  }
2822  }
2823 
2824  // Index of the current image in the grid, chosen so that the current
2825  // image sits in the center cell of the mosaic grid
2826  int medianI = nbHeight / 2;
2827  int medianJ = nbWidth / 2;
2828  int medianIndex = medianI * nbWidth + medianJ;
2829 
2830  int cpt = 0;
2831  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2832  ++it, cpt++) {
2833  int local_cpt = cpt;
2834  if (cpt >= medianIndex) {
2835  // Shift by one the index of the training images that come after
2836  // the current image
2837  local_cpt++;
2838  }
2839  int indexI = local_cpt / nbWidth;
2840  int indexJ = local_cpt - (indexI * nbWidth);
2841  vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2842 
2843  IMatching.insert(it->second, topLeftCorner);
2844  }
2845 
2846  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2847  IMatching.insert(ICurrent, topLeftCorner);
2848  }
2849 }
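/*
 * Usage sketch tying the mosaic helpers together (an illustration; the
 * learning file name is hypothetical). createImageMatching() sizes IMatching
 * for the mosaic, insertImageMatching() pastes the training and current
 * images into it, and displayMatching() can then draw the crosses and match
 * lines as in the code further above:
 * \code
 * vpImage<unsigned char> ICurrent, IMatching;
 * vpKeyPoint keypoint;
 * keypoint.loadLearningData("learning_data.bin", true); // binary mode
 * keypoint.createImageMatching(ICurrent, IMatching);    // allocate the mosaic
 * keypoint.matchPoint(ICurrent);                        // detect + match
 * keypoint.insertImageMatching(ICurrent, IMatching);    // paste the images
 * \endcode
 */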
2850 
2858 void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
2859 {
2860  // Number of images in the training database + the current image
2861  // in which we want to detect the object
2862  int nbImg = (int)(m_mapOfImages.size() + 1);
2863 
2864  if (m_mapOfImages.empty()) {
2865  std::cerr << "There is no training image loaded!" << std::endl;
2866  return;
2867  }
2868 
2869  if (nbImg == 2) {
2870  // Only one training image, so we display them side by side
2871  vpImage<vpRGBa> IRef;
2872  vpImageConvert::convert(m_mapOfImages.begin()->second, IRef);
2873  insertImageMatching(IRef, ICurrent, IMatching);
2874  } else {
2875  // Multiple training images, display them as a mosaic image
2876  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
2877  int nbWidth = nbImgSqrt;
2878  int nbHeight = nbImgSqrt;
2879 
2880  if (nbImgSqrt * nbImgSqrt < nbImg) {
2881  nbWidth++;
2882  }
2883 
2884  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2885  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2886  ++it) {
2887  if (maxW < it->second.getWidth()) {
2888  maxW = it->second.getWidth();
2889  }
2890 
2891  if (maxH < it->second.getHeight()) {
2892  maxH = it->second.getHeight();
2893  }
2894  }
2895 
2896  // Index of the current image in the grid, chosen so that the current
2897  // image sits in the center cell of the mosaic grid
2898  int medianI = nbHeight / 2;
2899  int medianJ = nbWidth / 2;
2900  int medianIndex = medianI * nbWidth + medianJ;
2901 
2902  int cpt = 0;
2903  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2904  ++it, cpt++) {
2905  int local_cpt = cpt;
2906  if (cpt >= medianIndex) {
2907  // Shift by one the index of the training images that come after
2908  // the current image
2909  local_cpt++;
2910  }
2911  int indexI = local_cpt / nbWidth;
2912  int indexJ = local_cpt - (indexI * nbWidth);
2913  vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2914 
2915  vpImage<vpRGBa> IRef;
2916  vpImageConvert::convert(it->second, IRef);
2917  IMatching.insert(IRef, topLeftCorner);
2918  }
2919 
2920  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2921  IMatching.insert(ICurrent, topLeftCorner);
2922  }
2923 }
2924 
2925 #ifdef VISP_HAVE_PUGIXML
2926 
2931 void vpKeyPoint::loadConfigFile(const std::string &configFile)
2932 {
2933  vpXmlConfigParserKeyPoint xmlp;
2934 
2935  try {
2936  // Reset detector and extractor
2937  m_detectorNames.clear();
2938  m_extractorNames.clear();
2939  m_detectors.clear();
2940  m_extractors.clear();
2941 
2942  std::cout << " *********** Parsing XML for configuration for vpKeyPoint "
2943  "************ "
2944  << std::endl;
2945  xmlp.parse(configFile);
2946 
2947  m_detectorNames.push_back(xmlp.getDetectorName());
2948  m_extractorNames.push_back(xmlp.getExtractorName());
2949  m_matcherName = xmlp.getMatcherName();
2950 
2951  switch (xmlp.getMatchingMethod()) {
2952  case vpXmlConfigParserKeyPoint::constantFactorDistanceThreshold:
2953  m_filterType = constantFactorDistanceThreshold;
2954  break;
2955 
2956  case vpXmlConfigParserKeyPoint::stdDistanceThreshold:
2957  m_filterType = stdDistanceThreshold;
2958  break;
2959 
2960  case vpXmlConfigParserKeyPoint::ratioDistanceThreshold:
2961  m_filterType = ratioDistanceThreshold;
2962  break;
2963 
2964  case vpXmlConfigParserKeyPoint::stdAndRatioDistanceThreshold:
2965  m_filterType = stdAndRatioDistanceThreshold;
2966  break;
2967 
2968  case vpXmlConfigParserKeyPoint::noFilterMatching:
2969  m_filterType = noFilterMatching;
2970  break;
2971 
2972  default:
2973  break;
2974  }
2975 
2976  m_matchingFactorThreshold = xmlp.getMatchingFactorThreshold();
2977  m_matchingRatioThreshold = xmlp.getMatchingRatioThreshold();
2978 
2979  m_useRansacVVS = xmlp.getUseRansacVVSPoseEstimation();
2980  m_useConsensusPercentage = xmlp.getUseRansacConsensusPercentage();
2981  m_nbRansacIterations = xmlp.getNbRansacIterations();
2982  m_ransacReprojectionError = xmlp.getRansacReprojectionError();
2983  m_nbRansacMinInlierCount = xmlp.getNbRansacMinInlierCount();
2984  m_ransacThreshold = xmlp.getRansacThreshold();
2985  m_ransacConsensusPercentage = xmlp.getRansacConsensusPercentage();
2986 
2987  if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
2988  m_useKnn = true;
2989  } else {
2990  m_useKnn = false;
2991  }
2992 
2993  init();
2994  } catch (...) {
2995  throw vpException(vpException::ioError, "Can't open XML file \"%s\"\n ", configFile.c_str());
2996  }
2997 }
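/*
 * Usage sketch for loadConfigFile() (an illustration; the XML file name is
 * hypothetical and its schema is the one handled by
 * vpXmlConfigParserKeyPoint, available only when pugixml is built in):
 * \code
 * vpKeyPoint keypoint;
 * keypoint.loadConfigFile("detection-config.xml"); // detector, extractor,
 *                                                  // matcher and RANSAC settings
 * \endcode
 */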
2998 #endif
2999 
3008 void vpKeyPoint::loadLearningData(const std::string &filename, bool binaryMode, bool append)
3009 {
3010  int startClassId = 0;
3011  int startImageId = 0;
3012  if (!append) {
3013  m_trainKeyPoints.clear();
3014  m_trainPoints.clear();
3015  m_mapOfImageId.clear();
3016  m_mapOfImages.clear();
3017  } else {
3018  // In append case, find the max index of keypoint class Id
3019  for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
3020  if (startClassId < it->first) {
3021  startClassId = it->first;
3022  }
3023  }
3024 
3025  // In append case, find the max index of images Id
3026  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3027  ++it) {
3028  if (startImageId < it->first) {
3029  startImageId = it->first;
3030  }
3031  }
3032  }
3033 
3034  // Get parent directory
3035  std::string parent = vpIoTools::getParent(filename);
3036  if (!parent.empty()) {
3037  parent += "/";
3038  }
3039 
3040  if (binaryMode) {
3041  std::ifstream file(filename.c_str(), std::ifstream::binary);
3042  if (!file.is_open()) {
3043  throw vpException(vpException::ioError, "Cannot open the file.");
3044  }
3045 
3046  // Read info about training images
3047  int nbImgs = 0;
3048  vpIoTools::readBinaryValueLE(file, nbImgs);
3049 
3050 #if !defined(VISP_HAVE_MODULE_IO)
3051  if (nbImgs > 0) {
3052  std::cout << "Warning: The learning file contains image data that will "
3053  "not be loaded because the visp_io module "
3054  "is not available!"
3055  << std::endl;
3056  }
3057 #endif
3058 
3059  for (int i = 0; i < nbImgs; i++) {
3060  // Read image_id
3061  int id = 0;
3062  vpIoTools::readBinaryValueLE(file, id);
3063 
3064  int length = 0;
3065  vpIoTools::readBinaryValueLE(file, length);
3066  // Will contain the path to the training images
3067  char *path = new char[length + 1]; // variable-length arrays are not standard C++, hence the heap allocation
3068 
3069  for (int cpt = 0; cpt < length; cpt++) {
3070  char c;
3071  file.read((char *)(&c), sizeof(c));
3072  path[cpt] = c;
3073  }
3074  path[length] = '\0';
3075 
3076  vpImage<unsigned char> I;
3077 #ifdef VISP_HAVE_MODULE_IO
3078  if (vpIoTools::isAbsolutePathname(std::string(path))) {
3079  vpImageIo::read(I, path);
3080  } else {
3081  vpImageIo::read(I, parent + path);
3082  }
3083 
3084  // Add the image previously loaded only if VISP_HAVE_MODULE_IO
3085  m_mapOfImages[id + startImageId] = I;
3086 #endif
3087 
3088  // Delete path
3089  delete[] path;
3090  }
3091 
3092  // Read whether the 3D point information is saved or not
3093  int have3DInfoInt = 0;
3094  vpIoTools::readBinaryValueLE(file, have3DInfoInt);
3095  bool have3DInfo = have3DInfoInt != 0;
3096 
3097  // Read the number of descriptors
3098  int nRows = 0;
3099  vpIoTools::readBinaryValueLE(file, nRows);
3100 
3101  // Read the size of the descriptor
3102  int nCols = 0;
3103  vpIoTools::readBinaryValueLE(file, nCols);
3104 
3105  // Read the type of the descriptor
3106  int descriptorType = 5; // CV_32F
3107  vpIoTools::readBinaryValueLE(file, descriptorType);
3108 
3109  cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3110  for (int i = 0; i < nRows; i++) {
3111  // Read information about keyPoint
3112  float u, v, size, angle, response;
3113  int octave, class_id, image_id;
3114  vpIoTools::readBinaryValueLE(file, u);
3115  vpIoTools::readBinaryValueLE(file, v);
3116  vpIoTools::readBinaryValueLE(file, size);
3117  vpIoTools::readBinaryValueLE(file, angle);
3118  vpIoTools::readBinaryValueLE(file, response);
3119  vpIoTools::readBinaryValueLE(file, octave);
3120  vpIoTools::readBinaryValueLE(file, class_id);
3121  vpIoTools::readBinaryValueLE(file, image_id);
3122  cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
3123  m_trainKeyPoints.push_back(keyPoint);
3124 
3125  if (image_id != -1) {
3126 #ifdef VISP_HAVE_MODULE_IO
3127  // No training images if image_id == -1
3128  m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3129 #endif
3130  }
3131 
3132  if (have3DInfo) {
3133  // Read oX, oY, oZ
3134  float oX, oY, oZ;
3135  vpIoTools::readBinaryValueLE(file, oX);
3136  vpIoTools::readBinaryValueLE(file, oY);
3137  vpIoTools::readBinaryValueLE(file, oZ);
3138  m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
3139  }
3140 
3141  for (int j = 0; j < nCols; j++) {
3142  // Read the value of the descriptor
3143  switch (descriptorType) {
3144  case CV_8U: {
3145  unsigned char value;
3146  file.read((char *)(&value), sizeof(value));
3147  trainDescriptorsTmp.at<unsigned char>(i, j) = value;
3148  } break;
3149 
3150  case CV_8S: {
3151  char value;
3152  file.read((char *)(&value), sizeof(value));
3153  trainDescriptorsTmp.at<char>(i, j) = value;
3154  } break;
3155 
3156  case CV_16U: {
3157  unsigned short int value;
3158  vpIoTools::readBinaryValueLE(file, value);
3159  trainDescriptorsTmp.at<unsigned short int>(i, j) = value;
3160  } break;
3161 
3162  case CV_16S: {
3163  short int value;
3164  vpIoTools::readBinaryValueLE(file, value);
3165  trainDescriptorsTmp.at<short int>(i, j) = value;
3166  } break;
3167 
3168  case CV_32S: {
3169  int value;
3170  vpIoTools::readBinaryValueLE(file, value);
3171  trainDescriptorsTmp.at<int>(i, j) = value;
3172  } break;
3173 
3174  case CV_32F: {
3175  float value;
3176  vpIoTools::readBinaryValueLE(file, value);
3177  trainDescriptorsTmp.at<float>(i, j) = value;
3178  } break;
3179 
3180  case CV_64F: {
3181  double value;
3182  vpIoTools::readBinaryValueLE(file, value);
3183  trainDescriptorsTmp.at<double>(i, j) = value;
3184  } break;
3185 
3186  default: {
3187  float value;
3188  vpIoTools::readBinaryValueLE(file, value);
3189  trainDescriptorsTmp.at<float>(i, j) = value;
3190  } break;
3191  }
3192  }
3193  }
3194 
3195  if (!append || m_trainDescriptors.empty()) {
3196  trainDescriptorsTmp.copyTo(m_trainDescriptors);
3197  } else {
3198  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3199  }
3200 
3201  file.close();
3202  } else {
3203 #ifdef VISP_HAVE_PUGIXML
3204  pugi::xml_document doc;
3205 
3206  /*parse the file and get the DOM */
3207  if (!doc.load_file(filename.c_str())) {
3208  throw vpException(vpException::ioError, "Error with file: %s", filename.c_str());
3209  }
3210 
3211  pugi::xml_node root_element = doc.document_element();
3212 
3213  int descriptorType = CV_32F; // float
3214  int nRows = 0, nCols = 0;
3215  int i = 0, j = 0;
3216 
3217  cv::Mat trainDescriptorsTmp;
3218 
3219  for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node; first_level_node = first_level_node.next_sibling()) {
3220 
3221  std::string name(first_level_node.name());
3222  if (first_level_node.type() == pugi::node_element && name == "TrainingImageInfo") {
3223 
3224  for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node; image_info_node = image_info_node.next_sibling()) {
3225  name = std::string(image_info_node.name());
3226 
3227  if (name == "trainImg") {
3228  // Read image_id
3229  int id = image_info_node.attribute("image_id").as_int();
3230 
3231  vpImage<unsigned char> I;
3232 #ifdef VISP_HAVE_MODULE_IO
3233  std::string path(image_info_node.text().as_string());
3234  // Read path to the training images
3235  if (vpIoTools::isAbsolutePathname(std::string(path))) {
3236  vpImageIo::read(I, path);
3237  } else {
3238  vpImageIo::read(I, parent + path);
3239  }
3240 
3241  // Add the image previously loaded only if VISP_HAVE_MODULE_IO
3242  m_mapOfImages[id + startImageId] = I;
3243 #endif
3244  }
3245  }
3246  } else if (first_level_node.type() == pugi::node_element && name == "DescriptorsInfo") {
3247  for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
3248  descriptors_info_node = descriptors_info_node.next_sibling()) {
3249  if (descriptors_info_node.type() == pugi::node_element) {
3250  name = std::string(descriptors_info_node.name());
3251 
3252  if (name == "nrows") {
3253  nRows = descriptors_info_node.text().as_int();
3254  } else if (name == "ncols") {
3255  nCols = descriptors_info_node.text().as_int();
3256  } else if (name == "type") {
3257  descriptorType = descriptors_info_node.text().as_int();
3258  }
3259  }
3260  }
3261 
3262  trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3263  } else if (first_level_node.type() == pugi::node_element && name == "DescriptorInfo") {
3264  double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
3265  int octave = 0, class_id = 0, image_id = 0;
3266  double oX = 0.0, oY = 0.0, oZ = 0.0;
3267 
3268  std::stringstream ss;
3269 
3270  for (pugi::xml_node point_node = first_level_node.first_child(); point_node; point_node = point_node.next_sibling()) {
3271  if (point_node.type() == pugi::node_element) {
3272  name = std::string(point_node.name());
3273 
3274  // Read information about keypoints
3275  if (name == "u") {
3276  u = point_node.text().as_double();
3277  } else if (name == "v") {
3278  v = point_node.text().as_double();
3279  } else if (name == "size") {
3280  size = point_node.text().as_double();
3281  } else if (name == "angle") {
3282  angle = point_node.text().as_double();
3283  } else if (name == "response") {
3284  response = point_node.text().as_double();
3285  } else if (name == "octave") {
3286  octave = point_node.text().as_int();
3287  } else if (name == "class_id") {
3288  class_id = point_node.text().as_int();
3289  cv::KeyPoint keyPoint(cv::Point2f((float)u, (float)v), (float)size, (float)angle, (float)response, octave,
3290  (class_id + startClassId));
3291  m_trainKeyPoints.push_back(keyPoint);
3292  } else if (name == "image_id") {
3293  image_id = point_node.text().as_int();
3294  if (image_id != -1) {
3295 #ifdef VISP_HAVE_MODULE_IO
3296  // No training images if image_id == -1
3297  m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3298 #endif
3299  }
3300  } else if (name == "oX") {
3301  oX = point_node.text().as_double();
3302  } else if (name == "oY") {
3303  oY = point_node.text().as_double();
3304  } else if (name == "oZ") {
3305  oZ = point_node.text().as_double();
3306  m_trainPoints.push_back(cv::Point3f((float)oX, (float)oY, (float)oZ));
3307  } else if (name == "desc") {
3308  j = 0;
3309 
3310  for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
3311  descriptor_value_node = descriptor_value_node.next_sibling()) {
3312 
3313  if (descriptor_value_node.type() == pugi::node_element) {
3314  // Read descriptors values
3315  std::string parseStr(descriptor_value_node.text().as_string());
3316  ss.clear();
3317  ss.str(parseStr);
3318 
3319  if (!ss.fail()) {
3320  switch (descriptorType) {
3321  case CV_8U: {
3322  // Parse the numeric value [0 ; 255] to an int
3323  int parseValue;
3324  ss >> parseValue;
3325  trainDescriptorsTmp.at<unsigned char>(i, j) = (unsigned char)parseValue;
3326  } break;
3327 
3328  case CV_8S:
3329  // Parse the numeric value [-128 ; 127] to an int
3330  int parseValue;
3331  ss >> parseValue;
3332  trainDescriptorsTmp.at<char>(i, j) = (char)parseValue;
3333  break;
3334 
3335  case CV_16U:
3336  ss >> trainDescriptorsTmp.at<unsigned short int>(i, j);
3337  break;
3338 
3339  case CV_16S:
3340  ss >> trainDescriptorsTmp.at<short int>(i, j);
3341  break;
3342 
3343  case CV_32S:
3344  ss >> trainDescriptorsTmp.at<int>(i, j);
3345  break;
3346 
3347  case CV_32F:
3348  ss >> trainDescriptorsTmp.at<float>(i, j);
3349  break;
3350 
3351  case CV_64F:
3352  ss >> trainDescriptorsTmp.at<double>(i, j);
3353  break;
3354 
3355  default:
3356  ss >> trainDescriptorsTmp.at<float>(i, j);
3357  break;
3358  }
3359  } else {
3360  std::cerr << "Error when converting: " << ss.str() << std::endl;
3361  }
3362 
3363  j++;
3364  }
3365  }
3366  }
3367  }
3368  }
3369  i++;
3370  }
3371  }
3372 
3373  if (!append || m_trainDescriptors.empty()) {
3374  trainDescriptorsTmp.copyTo(m_trainDescriptors);
3375  } else {
3376  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3377  }
3378 #else
3379  std::cout << "Error: pugixml is not properly built!" << std::endl;
3380 #endif
3381  }
3382 
3383  // Convert OpenCV type to ViSP type for compatibility
3384  vpConvert::convertFromOpenCV(m_trainKeyPoints, referenceImagePointsList);
3385  vpConvert::convertFromOpenCV(this->m_trainPoints, m_trainVpPoints);
3386 
3387  // Add train descriptors in matcher object
3388  m_matcher->clear();
3389  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
3390 
3391  // Set _reference_computed to true as we load a learning file
3392  _reference_computed = true;
3393 
3394  // Set m_currentImageId
3395  m_currentImageId = (int)m_mapOfImages.size();
3396 }
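/*
 * Usage sketch for the learning-data round trip (an illustration; the file
 * name is hypothetical):
 * \code
 * vpKeyPoint keypoint;
 * vpImage<unsigned char> Iref;
 * // ... load Iref, then build and save the reference ...
 * keypoint.buildReference(Iref);
 * keypoint.saveLearningData("learning_data.bin", true); // binary mode
 * // ... later, possibly in another run ...
 * keypoint.loadLearningData("learning_data.bin", true); // binary mode
 * \endcode
 */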
3397 
3406 void vpKeyPoint::match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors,
3407  std::vector<cv::DMatch> &matches, double &elapsedTime)
3408 {
3409  double t = vpTime::measureTimeMs();
3410 
3411  if (m_useKnn) {
3412  m_knnMatches.clear();
3413 
3414  if (m_useMatchTrainToQuery) {
3415  std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
3416 
3417  // Match train descriptors to query descriptors
3418  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
3419  matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
3420 
3421  for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
3422  it1 != knnMatchesTmp.end(); ++it1) {
3423  std::vector<cv::DMatch> tmp;
3424  for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
3425  tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
3426  }
3427  m_knnMatches.push_back(tmp);
3428  }
3429 
3430  matches.resize(m_knnMatches.size());
3431  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3432  } else {
3433  // Match query descriptors to train descriptors
3434  m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
3435  matches.resize(m_knnMatches.size());
3436  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3437  }
3438  } else {
3439  matches.clear();
3440 
3441  if (m_useMatchTrainToQuery) {
3442  std::vector<cv::DMatch> matchesTmp;
3443  // Match train descriptors to query descriptors
3444  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
3445  matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
3446 
3447  for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
3448  matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
3449  }
3450  } else {
3451  // Match query descriptors to train descriptors
3452  m_matcher->match(queryDescriptors, matches);
3453  }
3454  }
3455  elapsedTime = vpTime::measureTimeMs() - t;
3456 }
3457 
3465 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I) { return matchPoint(I, vpRect()); }
3466 
3474 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color) { return matchPoint(I_color, vpRect()); }
3475 
3486 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
3487  unsigned int width)
3488 {
3489  return matchPoint(I, vpRect(iP, width, height));
3490 }
3491 
3502 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
3503  unsigned int width)
3504 {
3505  return matchPoint(I_color, vpRect(iP, width, height));
3506 }
3507 
3516 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpRect &rectangle)
3517 {
3518  if (m_trainDescriptors.empty()) {
3519  std::cerr << "Reference is empty." << std::endl;
3520  if (!_reference_computed) {
3521  std::cerr << "Reference is not computed." << std::endl;
3522  }
3523  std::cerr << "Matching is not possible." << std::endl;
3524 
3525  return 0;
3526  }
3527 
3528  if (m_useAffineDetection) {
3529  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3530  std::vector<cv::Mat> listOfQueryDescriptors;
3531 
3532  // Detect keypoints and extract descriptors on multiple images
3533  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3534 
3535  // Flatten the different query keypoint lists
3536  m_queryKeyPoints.clear();
3537  for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3538  it != listOfQueryKeyPoints.end(); ++it) {
3539  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3540  }
3541 
3542  bool first = true;
3543  for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3544  ++it) {
3545  if (first) {
3546  first = false;
3547  it->copyTo(m_queryDescriptors);
3548  } else {
3549  m_queryDescriptors.push_back(*it);
3550  }
3551  }
3552  } else {
3553  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3554  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3555  }
3556 
3557  return matchPoint(m_queryKeyPoints, m_queryDescriptors);
3558 }
3559 
3568 unsigned int vpKeyPoint::matchPoint(const std::vector<cv::KeyPoint> &queryKeyPoints, const cv::Mat &queryDescriptors)
3569 {
3570  m_queryKeyPoints = queryKeyPoints;
3571  m_queryDescriptors = queryDescriptors;
3572 
3573  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3574 
3575  if (m_filterType != noFilterMatching) {
3576  m_queryFilteredKeyPoints.clear();
3577  m_objectFilteredPoints.clear();
3578  m_filteredMatches.clear();
3579 
3580  filterMatches();
3581  } else {
3582  if (m_useMatchTrainToQuery) {
3583  // Add only query keypoints matched with a train keypoint
3584  m_queryFilteredKeyPoints.clear();
3585  m_filteredMatches.clear();
3586  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3587  m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3588  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3589  }
3590  } else {
3591  m_queryFilteredKeyPoints = m_queryKeyPoints;
3592  m_filteredMatches = m_matches;
3593  }
3594 
3595  if (!m_trainPoints.empty()) {
3596  m_objectFilteredPoints.clear();
3597  // Add 3D object points so that the same index in
3598  // m_queryFilteredKeyPoints and in m_objectFilteredPoints
3599  // refers to the same train object point
3600  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3601  // m_matches is normally ordered following the query descriptor index
3602  m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3603  }
3604  }
3605  }
3606 
3607  // Convert OpenCV type to ViSP type for compatibility
3608  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
3609  vpConvert::convertFromOpenCV(m_filteredMatches, matchedReferencePoints);
3610 
3611  return static_cast<unsigned int>(m_filteredMatches.size());
3612 }
3613 
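/* Sketch (assumes OpenCV >= 3.0 and a keypoint object whose reference has
   been built, as in the previous sketch): keypoints and descriptors computed
   externally can be passed directly, bypassing the internal detection and
   extraction steps.
*/
#if 0
cv::Mat img;
vpImageConvert::convert(Icur, img);
cv::Ptr<cv::ORB> orb = cv::ORB::create();
std::vector<cv::KeyPoint> kps;
cv::Mat descriptors;
orb->detectAndCompute(img, cv::noArray(), kps, descriptors);
unsigned int nbMatches = keypoint.matchPoint(kps, descriptors);
#endif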
3622 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
3623 {
3624  vpImageConvert::convert(I_color, m_I);
3625  return matchPoint(m_I, rectangle);
3626 }
3627 
3640 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3641  bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3642 {
3643  double error, elapsedTime;
3644  return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3645 }
3646 
3659 bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3660  bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3661 {
3662  double error, elapsedTime;
3663  return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
3664 }
3665 
3681 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3682  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3683  const vpRect &rectangle)
3684 {
3685  // Check if we have training descriptors
3686  if (m_trainDescriptors.empty()) {
3687  std::cerr << "Reference is empty." << std::endl;
3688  if (!_reference_computed) {
3689  std::cerr << "Reference is not computed." << std::endl;
3690  }
3691  std::cerr << "Matching is not possible." << std::endl;
3692 
3693  return false;
3694  }
3695 
3696  if (m_useAffineDetection) {
3697  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3698  std::vector<cv::Mat> listOfQueryDescriptors;
3699 
3700  // Detect keypoints and extract descriptors on multiple images
3701  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3702 
3703  // Flatten the different query lists
3704  m_queryKeyPoints.clear();
3705  for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3706  it != listOfQueryKeyPoints.end(); ++it) {
3707  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3708  }
3709 
3710  bool first = true;
3711  for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3712  ++it) {
3713  if (first) {
3714  first = false;
3715  it->copyTo(m_queryDescriptors);
3716  } else {
3717  m_queryDescriptors.push_back(*it);
3718  }
3719  }
3720  } else {
3721  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3722  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3723  }
3724 
3725  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3726 
3727  elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3728 
3729  if (m_filterType != noFilterMatching) {
3730  m_queryFilteredKeyPoints.clear();
3731  m_objectFilteredPoints.clear();
3732  m_filteredMatches.clear();
3733 
3734  filterMatches();
3735  } else {
3736  if (m_useMatchTrainToQuery) {
3737  // Add only query keypoints matched with a train keypoint
3738  m_queryFilteredKeyPoints.clear();
3739  m_filteredMatches.clear();
3740  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3741  m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3742  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3743  }
3744  } else {
3745  m_queryFilteredKeyPoints = m_queryKeyPoints;
3746  m_filteredMatches = m_matches;
3747  }
3748 
3749  if (!m_trainPoints.empty()) {
3750  m_objectFilteredPoints.clear();
3751  // Add 3D object points so that the same index in
3752  // m_queryFilteredKeyPoints and in m_objectFilteredPoints
3753  // refers to the same train object point
3754  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3755  // m_matches is normally ordered following the query descriptor index
3756  m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3757  }
3758  }
3759  }
3760 
3761  // Convert OpenCV type to ViSP type for compatibility
3762  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
3763  vpConvert::convertFromOpenCV(m_filteredMatches, matchedReferencePoints);
3764 
3765  // error = std::numeric_limits<double>::max(); // create an error under
3766  // Windows. To fix it we have to add #undef max
3767  error = DBL_MAX;
3768  m_ransacInliers.clear();
3769  m_ransacOutliers.clear();
3770 
3771  if (m_useRansacVVS) {
3772  std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3773  size_t cpt = 0;
3774  // Create a list of vpPoint with 2D coordinates (current keypoint
3775  // location) + 3D coordinates (world/object coordinates)
3776  for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3777  it != m_objectFilteredPoints.end(); ++it, cpt++) {
3778  vpPoint pt;
3779  pt.setWorldCoordinates(it->x, it->y, it->z);
3780 
3781  vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3782 
3783  double x = 0.0, y = 0.0;
3784  vpPixelMeterConversion::convertPoint(cam, imP, x, y);
3785  pt.set_x(x);
3786  pt.set_y(y);
3787 
3788  objectVpPoints[cpt] = pt;
3789  }
3790 
3791  std::vector<vpPoint> inliers;
3792  std::vector<unsigned int> inlierIndex;
3793 
3794  bool res = computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3795 
3796  std::map<unsigned int, bool> mapOfInlierIndex;
3797  m_matchRansacKeyPointsToPoints.clear();
3798 
3799  for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3800  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3801  m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3802  mapOfInlierIndex[*it] = true;
3803  }
3804 
3805  for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3806  if (mapOfInlierIndex.find((unsigned int)i) == mapOfInlierIndex.end()) {
3807  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3808  }
3809  }
3810 
3811  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3812 
3813  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3814  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3815  m_ransacInliers.begin(), matchRansacToVpImage);
3816 
3817  elapsedTime += m_poseTime;
3818 
3819  return res;
3820  } else {
3821  std::vector<cv::Point2f> imageFilteredPoints;
3822  cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3823  std::vector<int> inlierIndex;
3824  bool res = computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3825 
3826  std::map<int, bool> mapOfInlierIndex;
3827  m_matchRansacKeyPointsToPoints.clear();
3828 
3829  for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3830  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3831  m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3832  mapOfInlierIndex[*it] = true;
3833  }
3834 
3835  for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3836  if (mapOfInlierIndex.find((int)i) == mapOfInlierIndex.end()) {
3837  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3838  }
3839  }
3840 
3841  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3842 
3843  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3844  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3845  m_ransacInliers.begin(), matchRansacToVpImage);
3846 
3847  elapsedTime += m_poseTime;
3848 
3849  return res;
3850  }
3851 }
3852 
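/* Usage sketch: pose estimation from the 2D/3D correspondences. It assumes
   the reference was built or loaded together with the 3D coordinates of the
   train keypoints (e.g. via loadLearningData()); the intrinsics below are
   illustrative values.
*/
#if 0
vpCameraParameters cam(600.0, 600.0, 320.0, 240.0); // px, py, u0, v0
vpHomogeneousMatrix cMo;
double error = 0.0, elapsedTime = 0.0;
if (keypoint.matchPoint(Icur, cam, cMo, error, elapsedTime)) {
  std::cout << "Pose found in " << elapsedTime << " ms, estimation error: " << error << std::endl;
}
#endif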
3868 bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3869  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3870  const vpRect &rectangle)
3871 {
3872  vpImageConvert::convert(I_color, m_I);
3873  return (matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
3874 }
3875 
3876 
3897 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox,
3898  vpImagePoint &centerOfGravity, const bool isPlanarObject,
3899  std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3900  double *meanDescriptorDistance, double *detection_score, const vpRect &rectangle)
3901 {
3902  if (imPts1 != NULL && imPts2 != NULL) {
3903  imPts1->clear();
3904  imPts2->clear();
3905  }
3906 
3907  matchPoint(I, rectangle);
3908 
3909  double meanDescriptorDistanceTmp = 0.0;
3910  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3911  meanDescriptorDistanceTmp += (double)it->distance;
3912  }
3913 
3914  meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3915  double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3916 
3917  if (meanDescriptorDistance != NULL) {
3918  *meanDescriptorDistance = meanDescriptorDistanceTmp;
3919  }
3920  if (detection_score != NULL) {
3921  *detection_score = score;
3922  }
3923 
3924  if (m_filteredMatches.size() >= 4) {
3925  // Training / Reference 2D points
3926  std::vector<cv::Point2f> points1(m_filteredMatches.size());
3927  // Query / Current 2D points
3928  std::vector<cv::Point2f> points2(m_filteredMatches.size());
3929 
3930  for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3931  points1[i] = cv::Point2f(m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx].pt);
3932  points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx].pt);
3933  }
3934 
3935  std::vector<vpImagePoint> inliers;
3936  if (isPlanarObject) {
3937 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3938  cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3939 #else
3940  cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3941 #endif
3942 
3943  for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3944  // Compute reprojection error
3945  cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3946  realPoint.at<double>(0, 0) = points1[i].x;
3947  realPoint.at<double>(1, 0) = points1[i].y;
3948  realPoint.at<double>(2, 0) = 1.0;
3949 
3950  cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3951  double err_x = (reprojectedPoint.at<double>(0, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].x;
3952  double err_y = (reprojectedPoint.at<double>(1, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].y;
3953  double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3954 
3955  if (reprojectionError < 6.0) {
3956  inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3957  if (imPts1 != NULL) {
3958  imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
3959  }
3960 
3961  if (imPts2 != NULL) {
3962  imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3963  }
3964  }
3965  }
3966  } else if (m_filteredMatches.size() >= 8) {
3967  cv::Mat fundamentalInliers;
3968  cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3969 
3970  for (size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
3971  if (fundamentalInliers.at<uchar>((int)i, 0)) {
3972  inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3973 
3974  if (imPts1 != NULL) {
3975  imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
3976  }
3977 
3978  if (imPts2 != NULL) {
3979  imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3980  }
3981  }
3982  }
3983  }
3984 
3985  if (!inliers.empty()) {
3986  // Build a polygon with the list of inlier keypoints detected in the
3987  // current image to get the bounding box
3988  vpPolygon polygon(inliers);
3989  boundingBox = polygon.getBoundingBox();
3990 
3991  // Compute the center of gravity
3992  double meanU = 0.0, meanV = 0.0;
3993  for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
3994  meanU += it->get_u();
3995  meanV += it->get_v();
3996  }
3997 
3998  meanU /= (double)inliers.size();
3999  meanV /= (double)inliers.size();
4000 
4001  centerOfGravity.set_u(meanU);
4002  centerOfGravity.set_v(meanV);
4003  }
4004  } else {
4005  // Too few matches
4006  return false;
4007  }
4008 
4009  if (m_detectionMethod == detectionThreshold) {
4010  return meanDescriptorDistanceTmp < m_detectionThreshold;
4011  } else {
4012  return score > m_detectionScore;
4013  }
4014 }
4015 
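/* Usage sketch: 2D detection without pose computation. The inliers of the
   homography (planar object) or of the fundamental matrix give a bounding
   box and a center of gravity in the current image (the display calls assume
   a display is attached to Icur).
*/
#if 0
vpRect boundingBox;
vpImagePoint cog;
if (keypoint.matchPointAndDetect(Icur, boundingBox, cog)) {
  vpDisplay::displayRectangle(Icur, boundingBox, vpColor::green);
  vpDisplay::displayCross(Icur, cog, 15, vpColor::red);
}
#endif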
4035 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam,
4036  vpHomogeneousMatrix &cMo, double &error, double &elapsedTime, vpRect &boundingBox,
4037  vpImagePoint &centerOfGravity, bool (*func)(const vpHomogeneousMatrix &),
4038  const vpRect &rectangle)
4039 {
4040  bool isMatchOk = matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
4041  if (isMatchOk) {
4042  // Use the pose estimated to project the model points in the image
4043  vpPoint pt;
4044  vpImagePoint imPt;
4045  std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
4046  size_t cpt = 0;
4047  for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
4048  pt = *it;
4049  pt.project(cMo);
4050  vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), imPt);
4051  modelImagePoints[cpt] = imPt;
4052  }
4053 
4054  // Build a polygon with the list of model image points to get the bounding
4055  // box
4056  vpPolygon polygon(modelImagePoints);
4057  boundingBox = polygon.getBoundingBox();
4058 
4059  // Compute the center of gravity of the current inlier keypoints
4060  double meanU = 0.0, meanV = 0.0;
4061  for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
4062  meanU += it->get_u();
4063  meanV += it->get_v();
4064  }
4065 
4066  meanU /= (double)m_ransacInliers.size();
4067  meanV /= (double)m_ransacInliers.size();
4068 
4069  centerOfGravity.set_u(meanU);
4070  centerOfGravity.set_v(meanV);
4071  }
4072 
4073  return isMatchOk;
4074 }
4075 
4090 void vpKeyPoint::detectExtractAffine(const vpImage<unsigned char> &I,
4091  std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
4092  std::vector<cv::Mat> &listOfDescriptors,
4093  std::vector<vpImage<unsigned char> > *listOfAffineI)
4094 {
4095 #if 0
4096  cv::Mat img;
4097  vpImageConvert::convert(I, img);
4098  listOfKeypoints.clear();
4099  listOfDescriptors.clear();
4100 
4101  for (int tl = 1; tl < 6; tl++) {
4102  double t = pow(2, 0.5 * tl);
4103  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4104  std::vector<cv::KeyPoint> keypoints;
4105  cv::Mat descriptors;
4106 
4107  cv::Mat timg, mask, Ai;
4108  img.copyTo(timg);
4109 
4110  affineSkew(t, phi, timg, mask, Ai);
4111 
4112 
4113  if(listOfAffineI != NULL) {
4114  cv::Mat img_disp;
4115  bitwise_and(mask, timg, img_disp);
4116  vpImage<unsigned char> tI;
4117  vpImageConvert::convert(img_disp, tI);
4118  listOfAffineI->push_back(tI);
4119  }
4120 #if 0
4121  cv::Mat img_disp;
4122  cv::bitwise_and(mask, timg, img_disp);
4123  cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE ); // Create a window for display.
4124  cv::imshow( "Skew", img_disp );
4125  cv::waitKey(0);
4126 #endif
4127 
4128  for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4129  it != m_detectors.end(); ++it) {
4130  std::vector<cv::KeyPoint> kp;
4131  it->second->detect(timg, kp, mask);
4132  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4133  }
4134 
4135  double elapsedTime;
4136  extract(timg, keypoints, descriptors, elapsedTime);
4137 
4138  for(unsigned int i = 0; i < keypoints.size(); i++) {
4139  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4140  cv::Mat kpt_t = Ai * cv::Mat(kpt);
4141  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
4142  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
4143  }
4144 
4145  listOfKeypoints.push_back(keypoints);
4146  listOfDescriptors.push_back(descriptors);
4147  }
4148  }
4149 
4150 #else
4151  cv::Mat img;
4152  vpImageConvert::convert(I, img);
4153 
4154  // Create a vector for storing the affine skew parameters
4155  std::vector<std::pair<double, int> > listOfAffineParams;
4156  for (int tl = 1; tl < 6; tl++) {
4157  double t = pow(2, 0.5 * tl);
4158  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4159  listOfAffineParams.push_back(std::pair<double, int>(t, phi));
4160  }
4161  }
4162 
4163  listOfKeypoints.resize(listOfAffineParams.size());
4164  listOfDescriptors.resize(listOfAffineParams.size());
4165 
4166  if (listOfAffineI != NULL) {
4167  listOfAffineI->resize(listOfAffineParams.size());
4168  }
4169 
4170 #ifdef VISP_HAVE_OPENMP
4171 #pragma omp parallel for
4172 #endif
4173  for (int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
4174  std::vector<cv::KeyPoint> keypoints;
4175  cv::Mat descriptors;
4176 
4177  cv::Mat timg, mask, Ai;
4178  img.copyTo(timg);
4179 
4180  affineSkew(listOfAffineParams[(size_t)cpt].first, listOfAffineParams[(size_t)cpt].second, timg, mask, Ai);
4181 
4182  if (listOfAffineI != NULL) {
4183  cv::Mat img_disp;
4184  bitwise_and(mask, timg, img_disp);
4185  vpImage<unsigned char> tI;
4186  vpImageConvert::convert(img_disp, tI);
4187  (*listOfAffineI)[(size_t)cpt] = tI;
4188  }
4189 
4190 #if 0
4191  cv::Mat img_disp;
4192  cv::bitwise_and(mask, timg, img_disp);
4193  cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE ); // Create a window for display.
4194  cv::imshow( "Skew", img_disp );
4195  cv::waitKey(0);
4196 #endif
4197 
4198  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4199  it != m_detectors.end(); ++it) {
4200  std::vector<cv::KeyPoint> kp;
4201  it->second->detect(timg, kp, mask);
4202  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4203  }
4204 
4205  double elapsedTime;
4206  extract(timg, keypoints, descriptors, elapsedTime);
4207 
4208  for (size_t i = 0; i < keypoints.size(); i++) {
4209  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4210  cv::Mat kpt_t = Ai * cv::Mat(kpt);
4211  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
4212  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
4213  }
4214 
4215  listOfKeypoints[(size_t)cpt] = keypoints;
4216  listOfDescriptors[(size_t)cpt] = descriptors;
4217  }
4218 #endif
4219 }
4220 
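/* Sketch: the ASIFT-style simulation above samples tilts t = 2^(k/2) for
   k = 1..5 and, for each tilt, in-plane rotations phi from 0 to 180 degrees
   in steps of 72/t degrees, so stronger tilts are sampled more densely in
   rotation. The per-view results can be retrieved directly:
*/
#if 0
std::vector<std::vector<cv::KeyPoint> > listOfKeypoints;
std::vector<cv::Mat> listOfDescriptors;
keypoint.detectExtractAffine(Icur, listOfKeypoints, listOfDescriptors);
// One entry per simulated view; flatten them as matchPoint() does above.
#endif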
4224 void vpKeyPoint::reset()
4225 {
4226  // vpBasicKeyPoint class
4227  referenceImagePointsList.clear();
4228  currentImagePointsList.clear();
4229  matchedReferencePoints.clear();
4230  _reference_computed = false;
4231 
4232  m_computeCovariance = false;
4233  m_covarianceMatrix = vpMatrix();
4234  m_currentImageId = 0;
4235  m_detectionMethod = detectionScore;
4236  m_detectionScore = 0.15;
4237  m_detectionThreshold = 100.0;
4238  m_detectionTime = 0.0;
4239  m_detectorNames.clear();
4240  m_detectors.clear();
4241  m_extractionTime = 0.0;
4242  m_extractorNames.clear();
4243  m_extractors.clear();
4244  m_filteredMatches.clear();
4245  m_filterType = ratioDistanceThreshold;
4246  m_imageFormat = jpgImageFormat;
4247  m_knnMatches.clear();
4248  m_mapOfImageId.clear();
4249  m_mapOfImages.clear();
4250  m_matcher = cv::Ptr<cv::DescriptorMatcher>();
4251  m_matcherName = "BruteForce-Hamming";
4252  m_matches.clear();
4253  m_matchingFactorThreshold = 2.0;
4254  m_matchingRatioThreshold = 0.85;
4255  m_matchingTime = 0.0;
4256  m_matchRansacKeyPointsToPoints.clear();
4257  m_nbRansacIterations = 200;
4258  m_nbRansacMinInlierCount = 100;
4259  m_objectFilteredPoints.clear();
4260  m_poseTime = 0.0;
4261  m_queryDescriptors = cv::Mat();
4262  m_queryFilteredKeyPoints.clear();
4263  m_queryKeyPoints.clear();
4264  m_ransacConsensusPercentage = 20.0;
4265  m_ransacFilterFlag = vpPose::NO_FILTER;
4266  m_ransacInliers.clear();
4267  m_ransacOutliers.clear();
4268  m_ransacParallel = true;
4269  m_ransacParallelNbThreads = 0;
4270  m_ransacReprojectionError = 6.0;
4271  m_ransacThreshold = 0.01;
4272  m_trainDescriptors = cv::Mat();
4273  m_trainKeyPoints.clear();
4274  m_trainPoints.clear();
4275  m_trainVpPoints.clear();
4276  m_useAffineDetection = false;
4277 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
4278  m_useBruteForceCrossCheck = true;
4279 #endif
4280  m_useConsensusPercentage = false;
4281  m_useKnn = true; // as m_filterType == ratioDistanceThreshold
4282  m_useMatchTrainToQuery = false;
4283  m_useRansacVVS = true;
4284  m_useSingleMatchFilter = true;
4285 
4286  m_detectorNames.push_back("ORB");
4287  m_extractorNames.push_back("ORB");
4288 
4289  init();
4290 }
4291 
4300 void vpKeyPoint::saveLearningData(const std::string &filename, bool binaryMode, bool saveTrainingImages)
4301 {
4302  std::string parent = vpIoTools::getParent(filename);
4303  if (!parent.empty()) {
4304  vpIoTools::makeDirectory(parent);
4305  }
4306 
4307  std::map<int, std::string> mapOfImgPath;
4308  if (saveTrainingImages) {
4309 #ifdef VISP_HAVE_MODULE_IO
4310  // Save the training image files in the same directory
4311  unsigned int cpt = 0;
4312 
4313  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
4314  ++it, cpt++) {
4315  if (cpt > 999) {
4316  throw vpException(vpException::fatalError, "The number of training images to save is too big !");
4317  }
4318 
4319  std::stringstream ss;
4320  ss << "train_image_" << std::setfill('0') << std::setw(3) << cpt;
4321 
4322  switch (m_imageFormat) {
4323  case jpgImageFormat:
4324  ss << ".jpg";
4325  break;
4326 
4327  case pngImageFormat:
4328  ss << ".png";
4329  break;
4330 
4331  case ppmImageFormat:
4332  ss << ".ppm";
4333  break;
4334 
4335  case pgmImageFormat:
4336  ss << ".pgm";
4337  break;
4338 
4339  default:
4340  ss << ".png";
4341  break;
4342  }
4343 
4344  std::string imgFilename = ss.str();
4345  mapOfImgPath[it->first] = imgFilename;
4346  vpImageIo::write(it->second, parent + (!parent.empty() ? "/" : "") + imgFilename);
4347  }
4348 #else
4349  std::cout << "Warning: in vpKeyPoint::saveLearningData() training images "
4350  "are not saved because "
4351  "visp_io module is not available !"
4352  << std::endl;
4353 #endif
4354  }
4355 
4356  bool have3DInfo = m_trainPoints.size() > 0;
4357  if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
4358  throw vpException(vpException::fatalError, "List of keypoints and list of 3D points have different size !");
4359  }
4360 
4361  if (binaryMode) {
4362  // Save the learning data into little endian binary file.
4363  std::ofstream file(filename.c_str(), std::ofstream::binary);
4364  if (!file.is_open()) {
4365  throw vpException(vpException::ioError, "Cannot create the file.");
4366  }
4367 
4368  // Write info about training images
4369  int nbImgs = (int)mapOfImgPath.size();
4370  vpIoTools::writeBinaryValueLE(file, nbImgs);
4371 
4372 #ifdef VISP_HAVE_MODULE_IO
4373  for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4374  // Write image_id
4375  int id = it->first;
4376  vpIoTools::writeBinaryValueLE(file, id);
4377 
4378  // Write image path
4379  std::string path = it->second;
4380  int length = (int)path.length();
4381  vpIoTools::writeBinaryValueLE(file, length);
4382 
4383  for (int cpt = 0; cpt < length; cpt++) {
4384  file.write((char *)(&path[(size_t)cpt]), sizeof(path[(size_t)cpt]));
4385  }
4386  }
4387 #endif
4388 
4389  // Write if we have 3D point information
4390  int have3DInfoInt = have3DInfo ? 1 : 0;
4391  vpIoTools::writeBinaryValueLE(file, have3DInfoInt);
4392 
4393  int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4394  int descriptorType = m_trainDescriptors.type();
4395 
4396  // Write the number of descriptors
4397  vpIoTools::writeBinaryValueLE(file, nRows);
4398 
4399  // Write the size of the descriptor
4400  vpIoTools::writeBinaryValueLE(file, nCols);
4401 
4402  // Write the type of the descriptor
4403  vpIoTools::writeBinaryValueLE(file, descriptorType);
4404 
4405  for (int i = 0; i < nRows; i++) {
4406  unsigned int i_ = (unsigned int)i;
4407  // Write u
4408  float u = m_trainKeyPoints[i_].pt.x;
4409  vpIoTools::writeBinaryValueLE(file, u);
4410 
4411  // Write v
4412  float v = m_trainKeyPoints[i_].pt.y;
4413  vpIoTools::writeBinaryValueLE(file, v);
4414 
4415  // Write size
4416  float size = m_trainKeyPoints[i_].size;
4417  vpIoTools::writeBinaryValueLE(file, size);
4418 
4419  // Write angle
4420  float angle = m_trainKeyPoints[i_].angle;
4421  vpIoTools::writeBinaryValueLE(file, angle);
4422 
4423  // Write response
4424  float response = m_trainKeyPoints[i_].response;
4425  vpIoTools::writeBinaryValueLE(file, response);
4426 
4427  // Write octave
4428  int octave = m_trainKeyPoints[i_].octave;
4429  vpIoTools::writeBinaryValueLE(file, octave);
4430 
4431  // Write class_id
4432  int class_id = m_trainKeyPoints[i_].class_id;
4433  vpIoTools::writeBinaryValueLE(file, class_id);
4434 
4435 // Write image_id
4436 #ifdef VISP_HAVE_MODULE_IO
4437  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4438  int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
4439  vpIoTools::writeBinaryValueLE(file, image_id);
4440 #else
4441  int image_id = -1;
4442  // file.write((char *)(&image_id), sizeof(image_id));
4443  vpIoTools::writeBinaryValueLE(file, image_id);
4444 #endif
4445 
4446  if (have3DInfo) {
4447  float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
4448  // Write oX
4449  vpIoTools::writeBinaryValueLE(file, oX);
4450 
4451  // Write oY
4452  vpIoTools::writeBinaryValueLE(file, oY);
4453 
4454  // Write oZ
4455  vpIoTools::writeBinaryValueLE(file, oZ);
4456  }
4457 
4458  for (int j = 0; j < nCols; j++) {
4459  // Write the descriptor value
4460  switch (descriptorType) {
4461  case CV_8U:
4462  file.write((char *)(&m_trainDescriptors.at<unsigned char>(i, j)),
4463  sizeof(m_trainDescriptors.at<unsigned char>(i, j)));
4464  break;
4465 
4466  case CV_8S:
4467  file.write((char *)(&m_trainDescriptors.at<char>(i, j)), sizeof(m_trainDescriptors.at<char>(i, j)));
4468  break;
4469 
4470  case CV_16U:
4471  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<unsigned short int>(i, j));
4472  break;
4473 
4474  case CV_16S:
4475  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<short int>(i, j));
4476  break;
4477 
4478  case CV_32S:
4479  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<int>(i, j));
4480  break;
4481 
4482  case CV_32F:
4483  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<float>(i, j));
4484  break;
4485 
4486  case CV_64F:
4487  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<double>(i, j));
4488  break;
4489 
4490  default:
4491  throw vpException(vpException::fatalError, "Problem with the data type of descriptors !");
4492  break;
4493  }
4494  }
4495  }
4496 
4497  file.close();
4498  } else {
4499 #ifdef VISP_HAVE_PUGIXML
4500  pugi::xml_document doc;
4501  pugi::xml_node node = doc.append_child(pugi::node_declaration);
4502  node.append_attribute("version") = "1.0";
4503  node.append_attribute("encoding") = "UTF-8";
4504 
4505  if (!doc) {
4506  throw vpException(vpException::ioError, "Error with file: %s", filename.c_str());
4507  }
4508 
4509  pugi::xml_node root_node = doc.append_child("LearningData");
4510 
4511  // Write the training images info
4512  pugi::xml_node image_node = root_node.append_child("TrainingImageInfo");
4513 
4514 #ifdef VISP_HAVE_MODULE_IO
4515  for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4516  pugi::xml_node image_info_node = image_node.append_child("trainImg");
4517  image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
4518  std::stringstream ss;
4519  ss << it->first;
4520  image_info_node.append_attribute("image_id") = ss.str().c_str();
4521  }
4522 #endif
4523 
4524  // Write information about descriptors
4525  pugi::xml_node descriptors_info_node = root_node.append_child("DescriptorsInfo");
4526 
4527  int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4528  int descriptorType = m_trainDescriptors.type();
4529 
4530  // Write the number of rows
4531  descriptors_info_node.append_child("nrows").append_child(pugi::node_pcdata).text() = nRows;
4532 
4533  // Write the number of cols
4534  descriptors_info_node.append_child("ncols").append_child(pugi::node_pcdata).text() = nCols;
4535 
4536  // Write the descriptors type
4537  descriptors_info_node.append_child("type").append_child(pugi::node_pcdata).text() = descriptorType;
4538 
4539  for (int i = 0; i < nRows; i++) {
4540  unsigned int i_ = (unsigned int)i;
4541  pugi::xml_node descriptor_node = root_node.append_child("DescriptorInfo");
4542 
4543  descriptor_node.append_child("u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
4544  descriptor_node.append_child("v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
4545  descriptor_node.append_child("size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
4546  descriptor_node.append_child("angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
4547  descriptor_node.append_child("response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
4548  descriptor_node.append_child("octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
4549  descriptor_node.append_child("class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
4550 
4551 #ifdef VISP_HAVE_MODULE_IO
4552  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4553  descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() =
4554  ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
4555 #else
4556  descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() = -1;
4557 #endif
4558 
4559  if (have3DInfo) {
4560  descriptor_node.append_child("oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
4561  descriptor_node.append_child("oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
4562  descriptor_node.append_child("oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
4563  }
4564 
4565  pugi::xml_node desc_node = descriptor_node.append_child("desc");
4566 
4567  for (int j = 0; j < nCols; j++) {
4568  switch (descriptorType) {
4569  case CV_8U: {
4570  // Promote an unsigned char to an int
4571  // val_tmp holds the numeric value that will be written
4572  // We save the value in numeric form, otherwise the xml library would
4573  // not be able to parse it. A better solution may exist
4574  int val_tmp = m_trainDescriptors.at<unsigned char>(i, j);
4575  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
4576  } break;
4577 
4578  case CV_8S: {
4579  // Promote a char to an int
4580  // val_tmp holds the numeric value that will be written
4581  // We save the value in numeric form, otherwise the xml library would
4582  // not be able to parse it. A better solution may exist
4583  int val_tmp = m_trainDescriptors.at<char>(i, j);
4584  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
4585  } break;
4586 
4587  case CV_16U:
4588  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4589  m_trainDescriptors.at<unsigned short int>(i, j);
4590  break;
4591 
4592  case CV_16S:
4593  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4594  m_trainDescriptors.at<short int>(i, j);
4595  break;
4596 
4597  case CV_32S:
4598  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4599  m_trainDescriptors.at<int>(i, j);
4600  break;
4601 
4602  case CV_32F:
4603  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4604  m_trainDescriptors.at<float>(i, j);
4605  break;
4606 
4607  case CV_64F:
4608  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4609  m_trainDescriptors.at<double>(i, j);
4610  break;
4611 
4612  default:
4613  throw vpException(vpException::fatalError, "Problem with the data type of descriptors !");
4614  break;
4615  }
4616  }
4617  }
4618 
4619  doc.save_file(filename.c_str(), PUGIXML_TEXT(" "), pugi::format_default, pugi::encoding_utf8);
4620 #else
4621  std::cerr << "Error: pugixml is not properly built!" << std::endl;
4622 #endif
4623  }
4624 }
4625 
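/* Usage sketch: persist the reference (keypoints, descriptors, optional 3D
   points and training images) and reload it later instead of rebuilding it.
*/
#if 0
keypoint.buildReference(Iref);
keypoint.saveLearningData("learning/data-learned.bin", true);  // binary, little endian
// Later, possibly in another program:
vpKeyPoint keypoint2;
keypoint2.loadLearningData("learning/data-learned.bin", true); // binaryMode must match
#endif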
4626 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
4627 #ifndef DOXYGEN_SHOULD_SKIP_THIS
4628 // From OpenCV 2.4.11 source code.
4629 struct KeypointResponseGreaterThanThreshold {
4630  KeypointResponseGreaterThanThreshold(float _value) : value(_value) {}
4631  inline bool operator()(const cv::KeyPoint &kpt) const { return kpt.response >= value; }
4632  float value;
4633 };
4634 
4635 struct KeypointResponseGreater {
4636  inline bool operator()(const cv::KeyPoint &kp1, const cv::KeyPoint &kp2) const { return kp1.response > kp2.response; }
4637 };
4638 
4639 // takes keypoints and culls them by the response
4640 void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints, int n_points)
4641 {
4642  // this is only necessary if the keypoints size is greater than the number
4643  // of desired points.
4644  if (n_points >= 0 && keypoints.size() > (size_t)n_points) {
4645  if (n_points == 0) {
4646  keypoints.clear();
4647  return;
4648  }
4649  // first use nth element to partition the keypoints into the best and
4650  // worst.
4651  std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
4652  // this is the boundary response, and in the case of FAST may be ambiguous
4653  float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
4654  // use std::partition to grab all of the keypoints with the boundary
4655  // response.
4656  std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4657  keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
4658  // resize the keypoints, given this new end point. nth_element and
4659  // partition reordered the points in place
4660  keypoints.resize((size_t)(new_end - keypoints.begin()));
4661  }
4662 }
4663 
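/* Sketch: keep only the strongest detections. std::nth_element partitions in
   O(n) around the n-th response; std::partition then also keeps the keypoints
   tied with the boundary response, so slightly more than n_points can survive
   when responses are equal (e.g. with FAST).
*/
#if 0
std::vector<cv::KeyPoint> kps; // filled by any detector
vpKeyPoint::KeyPointsFilter::retainBest(kps, 500);
#endif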
4664 struct RoiPredicate {
4665  RoiPredicate(const cv::Rect &_r) : r(_r) {}
4666 
4667  bool operator()(const cv::KeyPoint &keyPt) const { return !r.contains(keyPt.pt); }
4668 
4669  cv::Rect r;
4670 };
4671 
4672 void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4673  int borderSize)
4674 {
4675  if (borderSize > 0) {
4676  if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4677  keypoints.clear();
4678  else
4679  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4680  RoiPredicate(cv::Rect(
4681  cv::Point(borderSize, borderSize),
4682  cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4683  keypoints.end());
4684  }
4685 }
4686 
4687 struct SizePredicate {
4688  SizePredicate(float _minSize, float _maxSize) : minSize(_minSize), maxSize(_maxSize) {}
4689 
4690  bool operator()(const cv::KeyPoint &keyPt) const
4691  {
4692  float size = keyPt.size;
4693  return (size < minSize) || (size > maxSize);
4694  }
4695 
4696  float minSize, maxSize;
4697 };
4698 
4699 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize)
4700 {
4701  CV_Assert(minSize >= 0);
4702  CV_Assert(maxSize >= 0);
4703  CV_Assert(minSize <= maxSize);
4704 
4705  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4706 }
4707 
4708 class MaskPredicate
4709 {
4710 public:
4711  MaskPredicate(const cv::Mat &_mask) : mask(_mask) {}
4712  bool operator()(const cv::KeyPoint &key_pt) const
4713  {
4714  return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
4715  }
4716 
4717 private:
4718  const cv::Mat mask;
4719  MaskPredicate &operator=(const MaskPredicate &);
4720 };
4721 
4722 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask)
4723 {
4724  if (mask.empty())
4725  return;
4726 
4727  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4728 }
4729 
4730 struct KeyPoint_LessThan {
4731  KeyPoint_LessThan(const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) {}
4732  bool operator()(/*int i, int j*/ size_t i, size_t j) const
4733  {
4734  const cv::KeyPoint &kp1 = (*kp)[/*(size_t)*/ i];
4735  const cv::KeyPoint &kp2 = (*kp)[/*(size_t)*/ j];
4736  if (!vpMath::equal(kp1.pt.x, kp2.pt.x,
4737  std::numeric_limits<float>::epsilon())) { // if (kp1.pt.x !=
4738  // kp2.pt.x) {
4739  return kp1.pt.x < kp2.pt.x;
4740  }
4741 
4742  if (!vpMath::equal(kp1.pt.y, kp2.pt.y,
4743  std::numeric_limits<float>::epsilon())) { // if (kp1.pt.y !=
4744  // kp2.pt.y) {
4745  return kp1.pt.y < kp2.pt.y;
4746  }
4747 
4748  if (!vpMath::equal(kp1.size, kp2.size,
4749  std::numeric_limits<float>::epsilon())) { // if (kp1.size !=
4750  // kp2.size) {
4751  return kp1.size > kp2.size;
4752  }
4753 
4754  if (!vpMath::equal(kp1.angle, kp2.angle,
4755  std::numeric_limits<float>::epsilon())) { // if (kp1.angle !=
4756  // kp2.angle) {
4757  return kp1.angle < kp2.angle;
4758  }
4759 
4760  if (!vpMath::equal(kp1.response, kp2.response,
4761  std::numeric_limits<float>::epsilon())) { // if (kp1.response !=
4762  // kp2.response) {
4763  return kp1.response > kp2.response;
4764  }
4765 
4766  if (kp1.octave != kp2.octave) {
4767  return kp1.octave > kp2.octave;
4768  }
4769 
4770  if (kp1.class_id != kp2.class_id) {
4771  return kp1.class_id > kp2.class_id;
4772  }
4773 
4774  return i < j;
4775  }
4776  const std::vector<cv::KeyPoint> *kp;
4777 };
4778 
4779 void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4780 {
4781  size_t i, j, n = keypoints.size();
4782  std::vector<size_t> kpidx(n);
4783  std::vector<uchar> mask(n, (uchar)1);
4784 
4785  for (i = 0; i < n; i++) {
4786  kpidx[i] = i;
4787  }
4788  std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4789  for (i = 1, j = 0; i < n; i++) {
4790  cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4791  cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4792  // if (kp1.pt.x != kp2.pt.x || kp1.pt.y != kp2.pt.y || kp1.size !=
4793  // kp2.size || kp1.angle != kp2.angle) {
4794  if (!vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4795  !vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4796  !vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4797  !vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4798  j = i;
4799  } else {
4800  mask[kpidx[i]] = 0;
4801  }
4802  }
4803 
4804  for (i = j = 0; i < n; i++) {
4805  if (mask[i]) {
4806  if (i != j) {
4807  keypoints[j] = keypoints[i];
4808  }
4809  j++;
4810  }
4811  }
4812  keypoints.resize(j);
4813 }
4814 
4815 /*
4816  * PyramidAdaptedFeatureDetector
4817  */
4818 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &_detector,
4819  int _maxLevel)
4820  : detector(_detector), maxLevel(_maxLevel)
4821 {
4822 }
4823 
4824 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty() const
4825 {
4826  return detector.empty() || detector->empty();
4827 }
4828 
4829 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4830  CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4831 {
4832  detectImpl(image.getMat(), keypoints, mask.getMat());
4833 }
4834 
4835 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4836  const cv::Mat &mask) const
4837 {
4838  cv::Mat src = image;
4839  cv::Mat src_mask = mask;
4840 
4841  cv::Mat dilated_mask;
4842  if (!mask.empty()) {
4843  cv::dilate(mask, dilated_mask, cv::Mat());
4844  cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4845  mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4846  dilated_mask = mask255;
4847  }
4848 
4849  for (int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2) {
4850  // Detect on current level of the pyramid
4851  std::vector<cv::KeyPoint> new_pts;
4852  detector->detect(src, new_pts, src_mask);
4853  std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4854  for (; it != end; ++it) {
4855  it->pt.x *= multiplier;
4856  it->pt.y *= multiplier;
4857  it->size *= multiplier;
4858  it->octave = l;
4859  }
4860  keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4861 
4862  // Downsample
4863  if (l < maxLevel) {
4864  cv::Mat dst;
4865  pyrDown(src, dst);
4866  src = dst;
4867 
4868  if (!mask.empty())
4869  resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
4870  }
4871  }
4872 
4873  if (!mask.empty())
4874  vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4875 }
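/* Sketch (OpenCV >= 3.0, as guarded above): the adapter emulates the pyramid
   detector removed from OpenCV 3 by wrapping any cv::FeatureDetector;
   keypoint coordinates, sizes and octaves are rescaled back to level 0.
*/
#if 0
cv::Ptr<cv::FeatureDetector> fast = cv::FastFeatureDetector::create();
vpKeyPoint::PyramidAdaptedFeatureDetector pyramid(fast, 3); // levels 0..3
std::vector<cv::KeyPoint> kps;
pyramid.detect(img, kps, cv::noArray());
#endif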
4876 #endif
4877 #endif
4878 
4879 #elif !defined(VISP_BUILD_SHARED_LIBS)
4880 // Work around to avoid warning: libvisp_vision.a(vpKeyPoint.cpp.o) has no
4881 // symbols
4882 void dummy_vpKeyPoint() {}
4883 #endif