vpKeyPoint.cpp
/****************************************************************************
 *
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Key point functionalities.
 *
 * Authors:
 * Souriya Trinh
 *
 *****************************************************************************/

#include <iomanip>
#include <limits>

#include <visp3/core/vpIoTools.h>
#include <visp3/vision/vpKeyPoint.h>

#if (VISP_HAVE_OPENCV_VERSION >= 0x020101)

#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
#include <opencv2/calib3d/calib3d.hpp>
#endif

#include <pugixml.hpp>

namespace
{
// Specific type transformation functions
inline cv::DMatch knnToDMatch(const std::vector<cv::DMatch> &knnMatches)
{
  if (knnMatches.size() > 0) {
    return knnMatches[0];
  }

  return cv::DMatch();
}

inline vpImagePoint matchRansacToVpImage(const std::pair<cv::KeyPoint, cv::Point3f> &pair)
{
  return vpImagePoint(pair.first.pt.y, pair.first.pt.x);
}

} // namespace
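
// Typical use of this class (a minimal sketch, not taken from this file; it
// assumes grayscale images Iref and Icur are already loaded and relies on the
// matching member functions declared in vpKeyPoint.h):
//
//   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
//   keypoint.buildReference(Iref);                    // learn keypoints + descriptors
//   unsigned int nbMatch = keypoint.matchPoint(Icur); // match the current image
//
// "ORB"/"BruteForce-Hamming" is just one consistent detector/descriptor/matcher
// combination; any OpenCV names supported by init() can be used instead.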

vpKeyPoint::vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
    m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();

  m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
  m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);

  init();
}

vpKeyPoint::vpKeyPoint(const std::string &detectorName, const std::string &extractorName,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
    m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();

  m_detectorNames.push_back(detectorName);
  m_extractorNames.push_back(extractorName);

  init();
}

vpKeyPoint::vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
    m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
    m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
    m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
    m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
    m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
    m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0),
    m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(), m_ransacParallel(false),
    m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01), m_trainDescriptors(),
    m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();
  init();
}

void vpKeyPoint::affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
{
  int h = img.rows;
  int w = img.cols;

  mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));

  cv::Mat A = cv::Mat::eye(2, 3, CV_32F);

  // if (phi != 0.0) {
  if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    phi *= M_PI / 180.;
    double s = sin(phi);
    double c = cos(phi);

    A = (cv::Mat_<float>(2, 2) << c, -s, s, c);

    cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
    cv::Mat tcorners = corners * A.t();
    cv::Mat tcorners_x, tcorners_y;
    tcorners.col(0).copyTo(tcorners_x);
    tcorners.col(1).copyTo(tcorners_y);
    std::vector<cv::Mat> channels;
    channels.push_back(tcorners_x);
    channels.push_back(tcorners_y);
    cv::merge(channels, tcorners);

    cv::Rect rect = cv::boundingRect(tcorners);
    A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);

    cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
  }
  // if (tilt != 1.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
    double s = 0.8 * sqrt(tilt * tilt - 1);
    cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
    cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
    A.row(0) = A.row(0) / tilt;
  }
  // if (tilt != 1.0 || phi != 0.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
      std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    h = img.rows;
    w = img.cols;
    cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
  }
  cv::invertAffineTransform(A, Ai);
}
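
// affineSkew() implements the affine-camera simulation used by ASIFT-style
// detection (Morel & Yu): the image is first rotated by phi (in degrees),
// then tilted, i.e. subsampled by a factor t along the x-axis only. The
// Gaussian pre-filter with standard deviation 0.8 * sqrt(t^2 - 1) is the
// anti-aliasing value recommended in the ASIFT paper. Ai receives the inverse
// affine transform, used to map keypoints detected in the warped image back
// into the original image frame.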

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I) { return buildReference(I, vpRect()); }

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color) { return buildReference(I_color, vpRect()); }

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
                                        unsigned int width)
{
  return buildReference(I, vpRect(iP, width, height));
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
                                        unsigned int width)
{
  return buildReference(I_color, vpRect(iP, width, height));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpRect &rectangle)
{
  // Reset variables used when dealing with 3D models
  // Since no 3D point list is passed, we don't need these variables
  m_trainPoints.clear();
  m_mapOfImageId.clear();
  m_mapOfImages.clear();
  m_currentImageId = 1;

  if (m_useAffineDetection) {
    std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
    std::vector<cv::Mat> listOfTrainDescriptors;

    // Detect keypoints and extract descriptors on multiple images
    detectExtractAffine(I, listOfTrainKeyPoints, listOfTrainDescriptors);

    // Flatten the different train lists
    m_trainKeyPoints.clear();
    for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
         it != listOfTrainKeyPoints.end(); ++it) {
      m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
    }

    bool first = true;
    for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
         ++it) {
      if (first) {
        first = false;
        it->copyTo(m_trainDescriptors);
      } else {
        m_trainDescriptors.push_back(*it);
      }
    }
  } else {
    detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
    extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
  }

  // Save the correspondence keypoint class_id with the training image_id in a map.
  // Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(m_trainKeyPoints, referenceImagePoints);

  _reference_computed = true;

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}
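
// Sketch of reference learning restricted to a region of interest (the
// rectangle values are arbitrary, for illustration only; keypoint is a
// configured vpKeyPoint instance):
//
//   vpRect roi(10, 10, 200, 150); // left, top, width, height in pixels
//   unsigned int nbRef = keypoint.buildReference(I, roi);
//
// With the default empty vpRect(), the detection mask covers the whole image
// (see detect() further below).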

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
{
  vpImageConvert::convert(I_color, m_I);
  return (buildReference(m_I, rectangle));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
                                        std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed
    // Store the hash of a keypoint as the key and the index of the keypoint
    // as the value
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  return (buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}
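
// Note on the bookkeeping above: descriptor extractors are allowed to drop
// keypoints they cannot describe, which would desynchronize trainKeyPoints
// and points3f. Each original keypoint is therefore hashed with
// myKeypointHash() and mapped to its index; after extraction, only the 3D
// points whose keypoint survived are kept, in the same order as the
// remaining keypoints.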

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &trainKeyPoints,
                                        std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed
    // Store the hash of a keypoint as the key and the index of the keypoint
    // as the value
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  return (buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I,
                                        const std::vector<cv::KeyPoint> &trainKeyPoints,
                                        const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                        bool append, int class_id)
{
  if (!append) {
    m_trainPoints.clear();
    m_mapOfImageId.clear();
    m_mapOfImages.clear();
    m_currentImageId = 0;
    m_trainKeyPoints.clear();
  }

  m_currentImageId++;

  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
  // Set class_id if != -1
  if (class_id != -1) {
    for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
      it->class_id = class_id;
    }
  }

  // Save the correspondence keypoint class_id with the training image_id in a map.
  // Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
       ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Append reference lists
  m_trainKeyPoints.insert(m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
  if (!append) {
    trainDescriptors.copyTo(m_trainDescriptors);
  } else {
    m_trainDescriptors.push_back(trainDescriptors);
  }
  this->m_trainPoints.insert(m_trainPoints.end(), points3f.begin(), points3f.end());

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(m_trainKeyPoints, referenceImagePoints);
  vpConvert::convertFromOpenCV(m_trainPoints, m_trainVpPoints);

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  _reference_computed = true;

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const std::vector<cv::KeyPoint> &trainKeyPoints,
                                        const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                        bool append, int class_id)
{
  vpImageConvert::convert(I_color, m_I);
  return (buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

void vpKeyPoint::compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate.pt.x, candidate.pt.y, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point = cv::Point3f((float)point_obj[0], (float)point_obj[1], (float)point_obj[2]);
}
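
// Derivation behind compute3D(): the three ROI points define a plane with
// equation A*X + B*Y + C*Z + D = 0 in the camera frame. The keypoint
// back-projects to the ray (X, Y, Z) = (x*Z, y*Z, Z), where (x, y) are its
// normalized image coordinates, so substituting into the plane equation
// gives Z = -D / (A*x + B*y + C). The resulting camera-frame point is then
// expressed in the object frame through cMo^-1.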

void vpKeyPoint::compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, vpPoint &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point.setWorldCoordinates(point_obj);
}

void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<cv::KeyPoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<cv::Point3f> &points, cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpImagePoint imPt;
  cv::Point3f pt;
  cv::Mat desc;

  std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      imPt.set_ij(it2->first.pt.y, it2->first.pt.x);
      if (it1->isInside(imPt)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != NULL) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove candidate keypoint which is located on the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      } else {
        ++it2;
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<vpImagePoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<vpPoint> &points, cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpPoint pt;
  cv::Mat desc;

  std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      if (it1->isInside(it2->first)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != NULL) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove candidate keypoint which is located on the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      } else {
        ++it2;
      }
    }
  }
}

void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<cv::KeyPoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
    cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current
      // vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->pt.y, it1->pt.x)) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on
          // cylinders
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->pt.x, it1->pt.y, xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(cv::Point3f((float)pt.get_oX(), (float)pt.get_oY(), (float)pt.get_oZ()));

            if (descriptors != NULL) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}
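
// vpCylinder::computeZ(x, y) returns the depth of the intersection between
// the cylinder and the viewing ray of normalized coordinates (x, y); NaN or
// non-positive depths (the ray missing the cylinder) are rejected before the
// point is moved to the object frame. The break statement only leaves the
// bounding-box loop, so a keypoint lying in several bounding boxes of the
// same cylinder is added at most once per cylinder.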

void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<vpImagePoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
    cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current
      // vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->get_i(), it1->get_j())) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on
          // cylinders
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->get_u(), it1->get_v(), xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(pt);

            if (descriptors != NULL) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

bool vpKeyPoint::computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
                             const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
                             double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    std::cerr << "Not enough points to compute the pose (at least 4 points are needed)." << std::endl;

    return false;
  }

  cv::Mat cameraMatrix =
      (cv::Mat_<double>(3, 3) << cam.get_px(), 0, cam.get_u0(), 0, cam.get_py(), cam.get_v0(), 0, 0, 1);
  cv::Mat rvec, tvec;

  // Bug with OpenCV < 2.4.0 when zero distortion is provided by an empty
  // array: http://code.opencv.org/issues/1700 and
  // http://code.opencv.org/issues/1718 (what(): Distortion coefficients must
  // be 1x4, 4x1, 1x5, 5x1, 1x8 or 8x1 floating-point vector in function
  // cvProjectPoints2). Fixed in OpenCV 2.4.0 (r7558).
  // cv::Mat distCoeffs;
  cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);

  try {
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
    // OpenCV 3.0.0 (2014/12/12)
    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError,
                       0.99, // confidence=0.99 (default): the probability
                             // that the algorithm produces a useful result
                       inlierIndex, cv::SOLVEPNP_ITERATIVE);
    // SOLVEPNP_ITERATIVE (default): iterative method based on Levenberg-Marquardt
    // optimization. The function finds a pose that minimizes the reprojection
    // error, that is the sum of squared distances between the observed
    // projections imagePoints and the objectPoints projected with projectPoints().
    // SOLVEPNP_P3P: method based on the paper of X.S. Gao, X.-R. Hou, J. Tang and
    // H.-F. Chang, "Complete Solution Classification for the
    // Perspective-Three-Point Problem". It requires exactly four object and
    // image points.
    // SOLVEPNP_EPNP: method introduced by F. Moreno-Noguer, V. Lepetit and
    // P. Fua in the paper "EPnP: Efficient Perspective-n-Point Camera Pose
    // Estimation".
    // SOLVEPNP_DLS: method based on the paper of Joel A. Hesch and Stergios I.
    // Roumeliotis, "A Direct Least-Squares (DLS) Method for PnP".
    // SOLVEPNP_UPNP: method based on the paper of A. Penate-Sanchez,
    // J. Andrade-Cetto and F. Moreno-Noguer, "Exhaustive Linearization for
    // Robust Camera Pose and Focal Length Estimation". It also estimates the
    // parameters f_x and f_y, assuming both have the same value, and updates
    // cameraMatrix with the estimated focal length.
#else
    int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
    if (m_useConsensusPercentage) {
      nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
    }

    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
#endif
  } catch (cv::Exception &e) {
    std::cerr << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }
  vpTranslationVector translationVec(tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2));
  vpThetaUVector thetaUVector(rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2));
  cMo = vpHomogeneousMatrix(translationVec, thetaUVector);

  if (func != NULL) {
    // Check the final pose returned by solvePnPRansac to discard
    // solutions which do not respect the pose criterion.
    if (!func(cMo)) {
      elapsedTime = (vpTime::measureTimeMs() - t);
      return false;
    }
  }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return true;
}
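
// Usage sketch (not taken from this file; it assumes cam is a
// vpCameraParameters and the 2D/3D correspondences come from the matching
// stage):
//
//   std::vector<cv::Point2f> imagePoints;  // matched keypoint positions
//   std::vector<cv::Point3f> objectPoints; // corresponding model points
//   std::vector<int> inlierIndex;
//   double elapsedTime;
//   vpHomogeneousMatrix cMo;
//   bool ok = keypoint.computePose(imagePoints, objectPoints, cam, cMo,
//                                  inlierIndex, elapsedTime);
//
// The optional func callback lets the caller reject a RANSAC pose that
// violates an application-specific criterion (e.g. an implausible depth).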

bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, double &elapsedTime,
                             bool (*func)(const vpHomogeneousMatrix &))
{
  std::vector<unsigned int> inlierIndex;
  return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
}

bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex, double &elapsedTime,
                             bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (objectVpPoints.size() < 4) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    // std::cerr << "Not enough points to compute the pose (at least 4 points are needed)." << std::endl;

    return false;
  }

  vpPose pose;

  for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
    pose.addPoint(*it);
  }

  unsigned int nbInlierToReachConsensus = (unsigned int)m_nbRansacMinInlierCount;
  if (m_useConsensusPercentage) {
    nbInlierToReachConsensus =
        (unsigned int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
  }

  pose.setRansacFilterFlag(m_ransacFilterFlag);
  pose.setUseParallelRansac(m_ransacParallel);
  pose.setNbParallelRansacThreads(m_ransacParallelNbThreads);
  pose.setRansacNbInliersToReachConsensus(nbInlierToReachConsensus);
  pose.setRansacThreshold(m_ransacThreshold);
  pose.setRansacMaxTrials(m_nbRansacIterations);

  bool isRansacPoseEstimationOk = false;
  try {
    pose.setCovarianceComputation(m_computeCovariance);
    isRansacPoseEstimationOk = pose.computePose(vpPose::RANSAC, cMo, func);
    inliers = pose.getRansacInliers();
    inlierIndex = pose.getRansacInlierIndex();

    if (m_computeCovariance) {
      m_covarianceMatrix = pose.getCovarianceMatrix();
    }
  } catch (const vpException &e) {
    std::cerr << "e=" << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }

  // if (func != NULL && isRansacPoseEstimationOk) {
  //   // Check the final pose returned by the RANSAC VVS pose estimation, as in
  //   // some rare cases we can converge toward a final cMo that does not respect
  //   // the pose criterion even if the 4 minimal points picked do respect it.
  //   if (!func(&cMo)) {
  //     elapsedTime = (vpTime::measureTimeMs() - t);
  //     return false;
  //   }
  // }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return isRansacPoseEstimationOk;
}
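
// This vpPose-based branch mirrors the OpenCV branch above but runs ViSP's
// own RANSAC: the consensus size comes either from m_nbRansacMinInlierCount
// or, when m_useConsensusPercentage is set, from a percentage of the filtered
// query keypoints. Note that m_ransacThreshold is a residual threshold in the
// normalized image plane (default 0.01 here), whereas cv::solvePnPRansac uses
// a pixel reprojection error (default 6.0 here).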

double vpKeyPoint::computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
                                              const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est)
{
  if (matchKeyPoints.size() == 0) {
    // return std::numeric_limits<double>::max(); // causes an error under
    // Windows unless the max macro is #undef-ed, so use DBL_MAX instead
    return DBL_MAX;
  }

  std::vector<double> errors(matchKeyPoints.size());
  size_t cpt = 0;
  vpPoint pt;
  for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
       it != matchKeyPoints.end(); ++it, cpt++) {
    pt.set_oX(it->second.x);
    pt.set_oY(it->second.y);
    pt.set_oZ(it->second.z);
    pt.project(cMo_est);
    double u = 0.0, v = 0.0;
    vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), u, v);
    errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
  }

  return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
}
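
// The value returned above is the mean reprojection error in pixels: each
// matched 3D model point is projected through cMo_est and compared with the
// position of its detected keypoint, err_i = ||p_i - proj(cMo_est, P_i)||,
// and the errors are averaged. Small values indicate that cMo_est is
// consistent with the 2D/3D matches.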

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &IRef, vpImage<unsigned char> &ICurrent,
                                     vpImage<unsigned char> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = ((std::max))(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<unsigned char>(height, width);
}

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &IRef, vpImage<vpRGBa> &ICurrent,
                                     vpImage<vpRGBa> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = ((std::max))(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<vpRGBa>(height, width);
}

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
{
  // Nb images in the training database + the current image we want to detect
  // the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  } else {
    // Multiple training images, display them as a mosaic image
    //(unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Deals with non square mosaic grid and the total number of images
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<unsigned char>(maxH * nbHeight, maxW * nbWidth);
  }
}
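
// Mosaic sizing used above: with nbImg = number of training images + 1 (the
// current image), the grid is nbImgSqrt x nbImgSqrt cells where nbImgSqrt =
// round(sqrt(nbImg)), widened by one column whenever nbImgSqrt^2 < nbImg.
// For example, 5 training images give nbImg = 6 and nbImgSqrt = 2, hence a
// grid of 3 columns by 2 rows; each cell has the size of the largest image
// in the database (including the current one).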

void vpKeyPoint::createImageMatching(vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
{
  // Nb images in the training database + the current image we want to detect
  // the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  } else {
    // Multiple training images, display them as a mosaic image
    //(unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Deals with non square mosaic grid and the total number of images
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<vpRGBa>(maxH * nbHeight, maxW * nbWidth);
  }
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I_color, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask)
{
  double elapsedTime;
  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I, matImg, false);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
        rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  } else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I_color, matImg);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
        rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  } else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const cv::Mat &mask)
{
  double t = vpTime::measureTimeMs();
  keyPoints.clear();

  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
       it != m_detectors.end(); ++it) {
    std::vector<cv::KeyPoint> kp;

    it->second->detect(matImg, kp, mask);
    keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
  }

  elapsedTime = vpTime::measureTimeMs() - t;
}
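
// Sketch of a standalone detection with a mask-based ROI (illustrative
// values; matImg is assumed to be a CV_8U image already in memory):
//
//   cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
//   mask(cv::Rect(10, 10, 100, 100)) = 255; // detect only in this window
//   std::vector<cv::KeyPoint> kpts;
//   double dt;
//   keypoint.detect(matImg, kpts, dt, mask);
//
// Each configured detector contributes its own keypoints and the results are
// simply concatenated, so duplicate detections across detectors are possible.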

void vpKeyPoint::display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<unsigned char> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
                                 unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
                                 unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
                                 const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
                                 unsigned int lineThickness)
{
  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
    // No training images so return
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  // Nb images in the training database + the current image we want to detect
  // the object
  int nbImg = (int)(m_mapOfImages.size() + 1);

  if (nbImg == 2) {
    // Only one training image, so we display the matching result side-by-side
    displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
  } else {
    // Multiple training images, display them as a mosaic image
    int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
    int nbWidth = nbImgSqrt;
    int nbHeight = nbImgSqrt;

    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    std::map<int, int> mapOfImageIdIndex;
    int cpt = 0;
    unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it, cpt++) {
      mapOfImageIdIndex[it->first] = cpt;

      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    // Index of the current image in the grid, chosen so that the current
    // image lands preferably at the center of the mosaic
    int medianI = nbHeight / 2;
    int medianJ = nbWidth / 2;
    int medianIndex = medianI * nbWidth + medianJ;
    for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
      vpImagePoint topLeftCorner;
      int current_class_id_index = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
      } else {
        // Shift by one the index of the training images that come after
        // the current image
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
      }

      int indexI = current_class_id_index / nbWidth;
      int indexJ = current_class_id_index - (indexI * nbWidth);
      topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);

      // Display cross for keypoints in the learning database
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }

    vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
    for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
      // Display cross for keypoints detected in the current image
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }
    for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
      // Display green circle for RANSAC inliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
                               (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
    }
    for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
      // Display red circle for RANSAC outliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
                               (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
    }

    for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
      int current_class_id = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
      } else {
        // Shift by one the index of the training images that come after
        // the current image
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
      }

      int indexI = current_class_id / nbWidth;
      int indexJ = current_class_id - (indexI * nbWidth);

      vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
                       (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
      vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
                         (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);

      // Draw line for matching keypoints detected in the current image and
      // those detected in the training images
      vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
    }
  }
}

void vpKeyPoint::displayMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching,
                                 const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
                                 unsigned int lineThickness)
{
  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
    // No training images so return
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  // Nb images in the training database + the current image we want to detect
  // the object
  int nbImg = (int)(m_mapOfImages.size() + 1);

  if (nbImg == 2) {
    // Only one training image, so we display the matching result side-by-side
    displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
  } else {
    // Multiple training images, display them as a mosaic image
    int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
    int nbWidth = nbImgSqrt;
    int nbHeight = nbImgSqrt;

    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    std::map<int, int> mapOfImageIdIndex;
    int cpt = 0;
    unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it, cpt++) {
      mapOfImageIdIndex[it->first] = cpt;

      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    // Index of the current image in the grid, chosen so that the current
    // image lands preferably at the center of the mosaic
    int medianI = nbHeight / 2;
    int medianJ = nbWidth / 2;
    int medianIndex = medianI * nbWidth + medianJ;
    for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
      vpImagePoint topLeftCorner;
      int current_class_id_index = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
      } else {
        // Shift by one the index of the training images that come after
        // the current image
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
      }

      int indexI = current_class_id_index / nbWidth;
      int indexJ = current_class_id_index - (indexI * nbWidth);
      topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);

      // Display cross for keypoints in the learning database
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }

    vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
    for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
      // Display cross for keypoints detected in the current image
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }
    for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
      // Display green circle for RANSAC inliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
                               (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
    }
    for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
      // Display red circle for RANSAC outliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
                               (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
    }

    for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
      int current_class_id = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
      } else {
        // Shift by one the index of the training images that come after
        // the current image
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
      }

      int indexI = current_class_id / nbWidth;
      int indexJ = current_class_id - (indexI * nbWidth);

      vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
                       (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
      vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
                         (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);

      // Draw line for matching keypoints detected in the current image and
      // those detected in the training images
      vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
    }
  }
}
1816 
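1817 /*!
1818    Extract the descriptors for each keypoint of the list.
1819 
1820    \param I : Input grayscale image.
1821    \param keyPoints : List of keypoints; keypoints for which no descriptor
1822    can be computed are removed from the list.
1823    \param descriptors : Matrix of extracted descriptors, one row per keypoint.
1824    \param trainPoints : Optional list of 3D points kept consistent with the
1825    list of keypoints (can be NULL).
1826  */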
1827 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1828  std::vector<cv::Point3f> *trainPoints)
1829 {
1830  double elapsedTime;
1831  extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1832 }
1833 
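1834 /*!
1835    Extract the descriptors for each keypoint of the list.
1836 
1837    \param I_color : Input color image.
1838    \param keyPoints : List of keypoints; keypoints for which no descriptor
1839    can be computed are removed from the list.
1840    \param descriptors : Matrix of extracted descriptors, one row per keypoint.
1841    \param trainPoints : Optional list of 3D points kept consistent with the
1842    list of keypoints (can be NULL).
1843  */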
1844 void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1845  std::vector<cv::Point3f> *trainPoints)
1846 {
1847  double elapsedTime;
1848  extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
1849 }
1850 
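1851 /*!
1852    Extract the descriptors for each keypoint of the list.
1853 
1854    \param matImg : Input image (OpenCV matrix).
1855    \param keyPoints : List of keypoints; keypoints for which no descriptor
1856    can be computed are removed from the list.
1857    \param descriptors : Matrix of extracted descriptors, one row per keypoint.
1858    \param trainPoints : Optional list of 3D points kept consistent with the
1859    list of keypoints (can be NULL).
1860  */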
1861 void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1862  std::vector<cv::Point3f> *trainPoints)
1863 {
1864  double elapsedTime;
1865  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1866 }
1867 
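1868 /*!
1869    Extract the descriptors for each keypoint of the list.
1870 
1871    \param I : Input grayscale image.
1872    \param keyPoints : List of keypoints; keypoints for which no descriptor
1873    can be computed are removed from the list.
1874    \param descriptors : Matrix of extracted descriptors, one row per keypoint.
1875    \param elapsedTime : Extraction time in ms.
1876    \param trainPoints : Optional list of 3D points kept consistent with the
1877    list of keypoints (can be NULL).
1878  */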
1879 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1880  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1881 {
1882  cv::Mat matImg;
1883  vpImageConvert::convert(I, matImg, false);
1884  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1885 }
1886 
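1887 /*!
1888    Extract the descriptors for each keypoint of the list.
1889 
1890    \param I_color : Input color image.
1891    \param keyPoints : List of keypoints; keypoints for which no descriptor
1892    can be computed are removed from the list.
1893    \param descriptors : Matrix of extracted descriptors, one row per keypoint.
1894    \param elapsedTime : Extraction time in ms.
1895    \param trainPoints : Optional list of 3D points kept consistent with the
1896    list of keypoints (can be NULL).
1897  */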
1898 void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1899  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1900 {
1901  cv::Mat matImg;
1902  vpImageConvert::convert(I_color, matImg);
1903  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1904 }
1905 
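1906 /*!
1907    Extract the descriptors for each keypoint of the list, iterating over all
1908    the configured extractors and concatenating their descriptors.
1909    \param matImg : Input image (OpenCV matrix).
1910    \param keyPoints : List of keypoints; keypoints for which no descriptor
1911    can be computed are removed from the list.
1912    \param descriptors : Matrix of extracted descriptors, one row per keypoint.
1913    \param elapsedTime : Extraction time in ms.
1914    \param trainPoints : Optional list of 3D points kept consistent with the
1915    list of keypoints (can be NULL).
1916  */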
1917 void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1918  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1919 {
1920  double t = vpTime::measureTimeMs();
1921  bool first = true;
1922 
1923  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1924  itd != m_extractors.end(); ++itd) {
1925  if (first) {
1926  first = false;
1927  // Check if we have 3D object points information
1928  if (trainPoints != NULL && !trainPoints->empty()) {
1929  // Copy the input list of keypoints; keypoints whose descriptor cannot
1930  // be computed are removed by the compute function
1931  std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1932 
1933  // Extract descriptors for the given list of keypoints
1934  itd->second->compute(matImg, keyPoints, descriptors);
1935 
1936  if (keyPoints.size() != keyPoints_tmp.size()) {
1937  // Keypoints have been removed
1938  // Store the hash of a keypoint as the key and the index of the
1939  // keypoint as the value
1940  std::map<size_t, size_t> mapOfKeypointHashes;
1941  size_t cpt = 0;
1942  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1943  ++it, cpt++) {
1944  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1945  }
1946 
1947  std::vector<cv::Point3f> trainPoints_tmp;
1948  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1949  if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1950  trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1951  }
1952  }
1953 
1954  // Copy trainPoints_tmp back to the output trainPoints
1955  *trainPoints = trainPoints_tmp;
1956  }
1957  } else {
1958  // Extract descriptors for the given list of keypoints
1959  itd->second->compute(matImg, keyPoints, descriptors);
1960  }
1961  } else {
1962  // Copy the input list of keypoints; keypoints whose descriptor cannot
1963  // be computed are removed by the compute function
1964  std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1965 
1966  cv::Mat desc;
1967  // Extract descriptors for the given list of keypoints
1968  itd->second->compute(matImg, keyPoints, desc);
1969 
1970  if (keyPoints.size() != keyPoints_tmp.size()) {
1971  // Keypoints have been removed
1972  // Store the hash of a keypoint as the key and the index of the
1973  // keypoint as the value
1974  std::map<size_t, size_t> mapOfKeypointHashes;
1975  size_t cpt = 0;
1976  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1977  ++it, cpt++) {
1978  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1979  }
1980 
1981  std::vector<cv::Point3f> trainPoints_tmp;
1982  cv::Mat descriptors_tmp;
1983  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1984  if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1985  if (trainPoints != NULL && !trainPoints->empty()) {
1986  trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1987  }
1988 
1989  if (!descriptors.empty()) {
1990  descriptors_tmp.push_back(descriptors.row((int)mapOfKeypointHashes[myKeypointHash(*it)]));
1991  }
1992  }
1993  }
1994 
1995  if (trainPoints != NULL) {
1996  // Copy trainPoints_tmp back to the output trainPoints
1997  *trainPoints = trainPoints_tmp;
1998  }
1999  // Copy descriptors_tmp to descriptors
2000  descriptors_tmp.copyTo(descriptors);
2001  }
2002 
2003  // Merge descriptors horizontally
2004  if (descriptors.empty()) {
2005  desc.copyTo(descriptors);
2006  } else {
2007  cv::hconcat(descriptors, desc, descriptors);
2008  }
2009  }
2010  }
2011 
2012  if (keyPoints.size() != (size_t)descriptors.rows) {
2013  std::cerr << "keyPoints.size() != (size_t) descriptors.rows" << std::endl;
2014  }
2015  elapsedTime = vpTime::measureTimeMs() - t;
2016 }
2017 
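2018 /*!
2019    Filter the matches using the filtering method set in m_filterType.
2020  */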
2021 void vpKeyPoint::filterMatches()
2022 {
2023  std::vector<cv::KeyPoint> queryKpts;
2024  std::vector<cv::Point3f> trainPts;
2025  std::vector<cv::DMatch> m;
2026 
2027  if (m_useKnn) {
2028  // double max_dist = 0;
2029  // double min_dist = std::numeric_limits<double>::max(); // creates an
2030  // error under Windows. To fix it we would have to add #undef max
2031  double min_dist = DBL_MAX;
2032  double mean = 0.0;
2033  std::vector<double> distance_vec(m_knnMatches.size());
2034 
2035  if (m_filterType == stdAndRatioDistanceThreshold) {
2036  for (size_t i = 0; i < m_knnMatches.size(); i++) {
2037  double dist = m_knnMatches[i][0].distance;
2038  mean += dist;
2039  distance_vec[i] = dist;
2040 
2041  if (dist < min_dist) {
2042  min_dist = dist;
2043  }
2044  // if (dist > max_dist) {
2045  // max_dist = dist;
2046  //}
2047  }
2048  mean /= m_queryDescriptors.rows;
2049  }
2050 
2051  double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2052  double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
2053  double threshold = min_dist + stdev;
2054 
2055  for (size_t i = 0; i < m_knnMatches.size(); i++) {
2056  if (m_knnMatches[i].size() >= 2) {
2057  // Calculate ratio of the descriptor distance between the two nearest
2058  // neighbors of the keypoint
2059  float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
2060  // float ratio = std::sqrt((vecMatches[i][0].distance *
2061  // vecMatches[i][0].distance)
2062  // / (vecMatches[i][1].distance *
2063  // vecMatches[i][1].distance));
2064  double dist = m_knnMatches[i][0].distance;
2065 
2066  if (ratio < m_matchingRatioThreshold || (m_filterType == stdAndRatioDistanceThreshold && dist < threshold)) {
2067  m.push_back(cv::DMatch((int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
2068 
2069  if (!m_trainPoints.empty()) {
2070  trainPts.push_back(m_trainPoints[(size_t)m_knnMatches[i][0].trainIdx]);
2071  }
2072  queryKpts.push_back(m_queryKeyPoints[(size_t)m_knnMatches[i][0].queryIdx]);
2073  }
2074  }
2075  }
2076  } else {
2077  // double max_dist = 0;
2078  // double min_dist = std::numeric_limits<double>::max(); // creates an
2079  // error under Windows. To fix it we would have to add #undef max
2080  double min_dist = DBL_MAX;
2081  double mean = 0.0;
2082  std::vector<double> distance_vec(m_matches.size());
2083  for (size_t i = 0; i < m_matches.size(); i++) {
2084  double dist = m_matches[i].distance;
2085  mean += dist;
2086  distance_vec[i] = dist;
2087 
2088  if (dist < min_dist) {
2089  min_dist = dist;
2090  }
2091  // if (dist > max_dist) {
2092  // max_dist = dist;
2093  // }
2094  }
2095  mean /= m_queryDescriptors.rows;
2096 
2097  double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2098  double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
2099 
2100  // Define a threshold where we keep all keypoints whose descriptor
2101  // distance falls below a factor of the minimum descriptor distance (over
2102  // all the query keypoints) or below the minimum descriptor distance +
2103  // the standard deviation (computed on all the query descriptor
2104  // distances)
2105  double threshold =
2106  m_filterType == constantFactorDistanceThreshold ? m_matchingFactorThreshold * min_dist : min_dist + stdev;
2107 
2108  for (size_t i = 0; i < m_matches.size(); i++) {
2109  if (m_matches[i].distance <= threshold) {
2110  m.push_back(cv::DMatch((int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
2111 
2112  if (!m_trainPoints.empty()) {
2113  trainPts.push_back(m_trainPoints[(size_t)m_matches[i].trainIdx]);
2114  }
2115  queryKpts.push_back(m_queryKeyPoints[(size_t)m_matches[i].queryIdx]);
2116  }
2117  }
2118  }
2119 
2120  if (m_useSingleMatchFilter) {
2121  // Eliminate matches where multiple query keypoints are matched to the
2122  // same train keypoint
2123  std::vector<cv::DMatch> mTmp;
2124  std::vector<cv::Point3f> trainPtsTmp;
2125  std::vector<cv::KeyPoint> queryKptsTmp;
2126 
2127  std::map<int, int> mapOfTrainIdx;
2128  // Count the number of query points matched to the same train point
2129  for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2130  mapOfTrainIdx[it->trainIdx]++;
2131  }
2132 
2133  // Keep matches with only one correspondence
2134  for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2135  if (mapOfTrainIdx[it->trainIdx] == 1) {
2136  mTmp.push_back(cv::DMatch((int)queryKptsTmp.size(), it->trainIdx, it->distance));
2137 
2138  if (!m_trainPoints.empty()) {
2139  trainPtsTmp.push_back(m_trainPoints[(size_t)it->trainIdx]);
2140  }
2141  queryKptsTmp.push_back(queryKpts[(size_t)it->queryIdx]);
2142  }
2143  }
2144 
2145  m_filteredMatches = mTmp;
2146  m_objectFilteredPoints = trainPtsTmp;
2147  m_queryFilteredKeyPoints = queryKptsTmp;
2148  } else {
2149  m_filteredMatches = m;
2150  m_objectFilteredPoints = trainPts;
2151  m_queryFilteredKeyPoints = queryKpts;
2152  }
2153 }
2154 
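2155 /*!
2156    Get the 3D object points that have been kept after the matching filter,
2157    expressed with OpenCV types.
2158 
2159    \param objectPoints : List of 3D object points (OpenCV type).
2160  */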
2162 void vpKeyPoint::getObjectPoints(std::vector<cv::Point3f> &objectPoints) const
2163 {
2164  objectPoints = m_objectFilteredPoints;
2165 }
2166 
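2167 /*!
2168    Get the 3D object points that have been kept after the matching filter,
2169    expressed with ViSP types.
2170 
2171    \param objectPoints : List of 3D object points (ViSP type).
2172  */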
2174 void vpKeyPoint::getObjectPoints(std::vector<vpPoint> &objectPoints) const
2175 {
2176  vpConvert::convertFromOpenCV(m_objectFilteredPoints, objectPoints);
2177 }
2178 
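2179 /*!
2180    Get the query keypoints, expressed with OpenCV types.
2181 
2182    \param keyPoints : List of query keypoints.
2183    \param matches : If true, return only the keypoints kept after the
2184    matching filter, otherwise return all the detected keypoints.
2185  */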
2187 void vpKeyPoint::getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints, bool matches) const
2188 {
2189  if (matches) {
2190  keyPoints = m_queryFilteredKeyPoints;
2191  } else {
2192  keyPoints = m_queryKeyPoints;
2193  }
2194 }
2195 
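2196 /*!
2197    Get the query keypoints, expressed with ViSP types.
2198 
2199    \param keyPoints : List of query keypoints.
2200    \param matches : If true, return only the keypoints kept after the
2201    matching filter, otherwise return all the detected keypoints.
2202  */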
2204 void vpKeyPoint::getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints, bool matches) const
2205 {
2206  if (matches) {
2207  keyPoints = currentImagePointsList;
2208  } else {
2209  vpConvert::convertFromOpenCV(m_queryKeyPoints, keyPoints);
2210  }
2211 }
2212 
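2213 /*!
2214    Get the train keypoints, expressed with OpenCV types.
2215 
2216    \param keyPoints : List of train keypoints.
2217  */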
2218 void vpKeyPoint::getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const { keyPoints = m_trainKeyPoints; }
2219 
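2220 /*!
2221    Get the train keypoints, expressed with ViSP types.
2222 
2223    \param keyPoints : List of train keypoints.
2224  */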
2225 void vpKeyPoint::getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const { keyPoints = referenceImagePointsList; }
2226 
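2227 /*!
2228    Get the train 3D points, expressed with OpenCV types.
2229 
2230    \param points : List of train 3D points.
2231  */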
2233 void vpKeyPoint::getTrainPoints(std::vector<cv::Point3f> &points) const { points = m_trainPoints; }
2234 
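2235 /*!
2236    Get the train 3D points, expressed with ViSP types.
2237 
2238    \param points : List of train 3D points.
2239  */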
2241 void vpKeyPoint::getTrainPoints(std::vector<vpPoint> &points) const { points = m_trainVpPoints; }
2242 
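2243 /*!
2244    Initialize the matching filter, the detectors, the extractors and the
2245    matcher from the current configuration.
2246  */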
2247 void vpKeyPoint::init()
2248 {
2249 // Require 2.4.0 <= opencv < 3.0.0
2250 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
2251  // The following line must be called in order to use SIFT or SURF
2252  if (!cv::initModule_nonfree()) {
2253  std::cerr << "Cannot init the nonfree module; SIFT or SURF cannot be used." << std::endl;
2254  }
2255 #endif
2256 
2257  // Use k-nearest neighbors (knn) to retrieve the two best matches for a
2258  // keypoint, so this is needed only for the ratio-based filtering methods
2259  if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
2260  m_useKnn = true;
2261  }
2262 
2263  initDetectors(m_detectorNames);
2264  initExtractors(m_extractorNames);
2265  initMatcher(m_matcherName);
2266 }
2267 
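2268 /*!
2269    Initialize a keypoint detector from its name.
2270 
2271    \param detectorName : Name of the detector (e.g. "ORB", "PyramidFAST").
2272  */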
2273 void vpKeyPoint::initDetector(const std::string &detectorName)
2274 {
2275 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2276  m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
2277 
2278  if (m_detectors[detectorName] == NULL) {
2279  std::stringstream ss_msg;
2280  ss_msg << "Failed to initialize the detector: " << detectorName
2281  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2282  throw vpException(vpException::fatalError, ss_msg.str());
2283  }
2284 #else
2285  std::string detectorNameTmp = detectorName;
2286  std::string pyramid = "Pyramid";
2287  std::size_t pos = detectorName.find(pyramid);
2288  bool usePyramid = false;
2289  if (pos != std::string::npos) {
2290  detectorNameTmp = detectorName.substr(pos + pyramid.size());
2291  usePyramid = true;
2292  }
2293 
2294  if (detectorNameTmp == "SIFT") {
2295 #if (VISP_HAVE_OPENCV_VERSION >= 0x040500) // OpenCV >= 4.5.0
2296  cv::Ptr<cv::FeatureDetector> siftDetector = cv::SiftFeatureDetector::create();
2297  if (!usePyramid) {
2298  m_detectors[detectorNameTmp] = siftDetector;
2299  } else {
2300  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2301  }
2302 #else
2303 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
2304  (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2305  // SIFT is no longer patented since 09/03/2020
2306  cv::Ptr<cv::FeatureDetector> siftDetector;
2307  if (m_maxFeatures > 0) {
2308 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2309  siftDetector = cv::SIFT::create(m_maxFeatures);
2310 #else
2311  siftDetector = cv::xfeatures2d::SIFT::create(m_maxFeatures);
2312 #endif
2313  } else {
2314 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2315  siftDetector = cv::SIFT::create();
2316 #else
2317  siftDetector = cv::xfeatures2d::SIFT::create();
2318 #endif
2319  }
2320  if (!usePyramid) {
2321  m_detectors[detectorNameTmp] = siftDetector;
2322  } else {
2323  std::cerr << "You should not use SIFT with Pyramid feature detection!" << std::endl;
2324  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2325  }
2326 #else
2327  std::stringstream ss_msg;
2328  ss_msg << "Failed to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2329  << " was not built with the xFeatures2d module.";
2330  throw vpException(vpException::fatalError, ss_msg.str());
2331 #endif
2332 #endif
2333  } else if (detectorNameTmp == "SURF") {
2334 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2335  cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
2336  if (!usePyramid) {
2337  m_detectors[detectorNameTmp] = surfDetector;
2338  } else {
2339  std::cerr << "You should not use SURF with Pyramid feature detection!" << std::endl;
2340  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
2341  }
2342 #else
2343  std::stringstream ss_msg;
2344  ss_msg << "Failed to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2345  << " was not built with the xFeatures2d module.";
2346  throw vpException(vpException::fatalError, ss_msg.str());
2347 #endif
2348  } else if (detectorNameTmp == "FAST") {
2349  cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
2350  if (!usePyramid) {
2351  m_detectors[detectorNameTmp] = fastDetector;
2352  } else {
2353  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2354  }
2355  } else if (detectorNameTmp == "MSER") {
2356  cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
2357  if (!usePyramid) {
2358  m_detectors[detectorNameTmp] = fastDetector;
2359  } else {
2360  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2361  }
2362  } else if (detectorNameTmp == "ORB") {
2363  cv::Ptr<cv::FeatureDetector> orbDetector;
2364  if (m_maxFeatures > 0) {
2365  orbDetector = cv::ORB::create(m_maxFeatures);
2366  } else {
2367  orbDetector = cv::ORB::create();
2368  }
2369  if (!usePyramid) {
2370  m_detectors[detectorNameTmp] = orbDetector;
2371  } else {
2372  std::cerr << "You should not use ORB with Pyramid feature detection!" << std::endl;
2373  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
2374  }
2375  } else if (detectorNameTmp == "BRISK") {
2376  cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
2377  if (!usePyramid) {
2378  m_detectors[detectorNameTmp] = briskDetector;
2379  } else {
2380  std::cerr << "You should not use BRISK with Pyramid feature detection!" << std::endl;
2381  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
2382  }
2383  } else if (detectorNameTmp == "KAZE") {
2384  cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
2385  if (!usePyramid) {
2386  m_detectors[detectorNameTmp] = kazeDetector;
2387  } else {
2388  std::cerr << "You should not use KAZE with Pyramid feature detection!" << std::endl;
2389  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
2390  }
2391  } else if (detectorNameTmp == "AKAZE") {
2392  cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
2393  if (!usePyramid) {
2394  m_detectors[detectorNameTmp] = akazeDetector;
2395  } else {
2396  std::cerr << "You should not use AKAZE with Pyramid feature detection!" << std::endl;
2397  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
2398  }
2399  } else if (detectorNameTmp == "GFTT") {
2400  cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
2401  if (!usePyramid) {
2402  m_detectors[detectorNameTmp] = gfttDetector;
2403  } else {
2404  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
2405  }
2406  } else if (detectorNameTmp == "SimpleBlob") {
2407  cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
2408  if (!usePyramid) {
2409  m_detectors[detectorNameTmp] = simpleBlobDetector;
2410  } else {
2411  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
2412  }
2413  } else if (detectorNameTmp == "STAR") {
2414 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2415  cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
2416  if (!usePyramid) {
2417  m_detectors[detectorNameTmp] = starDetector;
2418  } else {
2419  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
2420  }
2421 #else
2422  std::stringstream ss_msg;
2423  ss_msg << "Failed to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2424  << " was not built with the xFeatures2d module.";
2425  throw vpException(vpException::fatalError, ss_msg.str());
2426 #endif
2427  } else if (detectorNameTmp == "AGAST") {
2428  cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
2429  if (!usePyramid) {
2430  m_detectors[detectorNameTmp] = agastDetector;
2431  } else {
2432  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
2433  }
2434  } else if (detectorNameTmp == "MSD") {
2435 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
2436 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2437  cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
2438  if (!usePyramid) {
2439  m_detectors[detectorNameTmp] = msdDetector;
2440  } else {
2441  std::cerr << "You should not use MSD with Pyramid feature detection!" << std::endl;
2442  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
2443  }
2444 #else
2445  std::stringstream ss_msg;
2446  ss_msg << "Failed to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2447  << " was not built with the xFeatures2d module.";
2448  throw vpException(vpException::fatalError, ss_msg.str());
2449 #endif
2450 #else
2451  // This OpenCV version is too old for MSD: throw directly
2452  throw vpException(vpException::fatalError, "Feature %s is not available in OpenCV version: %x (requires OpenCV >= 3.1).",
2453  detectorName.c_str(), VISP_HAVE_OPENCV_VERSION);
2454 #endif
2455  } else {
2456  std::cerr << "The detector: " << detectorNameTmp << " is not available." << std::endl;
2457  }
2458 
2459  bool detectorInitialized = false;
2460  if (!usePyramid) {
2461  // if not null and to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
2462  detectorInitialized = !m_detectors[detectorNameTmp].empty();
2463  } else {
2464  // if not null and to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
2465  detectorInitialized = !m_detectors[detectorName].empty();
2466  }
2467 
2468  if (!detectorInitialized) {
2469  std::stringstream ss_msg;
2470  ss_msg << "Failed to initialize the detector: " << detectorNameTmp
2471  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2472  throw vpException(vpException::fatalError, ss_msg.str());
2473  }
2474 #endif
2475 }
2476 
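2477 /*!
2478    Initialize a list of keypoint detectors from their names.
2479 
2480    \param detectorNames : List of detector names.
2481  */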
2483 void vpKeyPoint::initDetectors(const std::vector<std::string> &detectorNames)
2484 {
2485  for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
2486  initDetector(*it);
2487  }
2488 }
2489 
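2490 /*!
2491    Initialize a descriptor extractor from its name.
2492 
2493    \param extractorName : Name of the extractor (e.g. "ORB", "SIFT").
2494  */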
2495 void vpKeyPoint::initExtractor(const std::string &extractorName)
2496 {
2497 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2498  m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
2499 #else
2500  if (extractorName == "SIFT") {
2501 #if (VISP_HAVE_OPENCV_VERSION >= 0x040500) // OpenCV >= 4.5.0
2502  m_extractors[extractorName] = cv::SIFT::create();
2503 #else
2504 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
2505  (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2506  // SIFT is no longer patented since 09/03/2020
2507 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2508  m_extractors[extractorName] = cv::SIFT::create();
2509 #else
2510  m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
2511 #endif
2512 #else
2513  std::stringstream ss_msg;
2514  ss_msg << "Failed to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2515  << " was not built with the xFeatures2d module.";
2516  throw vpException(vpException::fatalError, ss_msg.str());
2517 #endif
2518 #endif
2519  } else if (extractorName == "SURF") {
2520 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2521  // Use extended set of SURF descriptors (128 instead of 64)
2522  m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3, true);
2523 #else
2524  std::stringstream ss_msg;
2525  ss_msg << "Failed to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2526  << " was not built with the xFeatures2d module.";
2527  throw vpException(vpException::fatalError, ss_msg.str());
2528 #endif
2529  } else if (extractorName == "ORB") {
2530  m_extractors[extractorName] = cv::ORB::create();
2531  } else if (extractorName == "BRISK") {
2532  m_extractors[extractorName] = cv::BRISK::create();
2533  } else if (extractorName == "FREAK") {
2534 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2535  m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2536 #else
2537  std::stringstream ss_msg;
2538  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2539  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2540  throw vpException(vpException::fatalError, ss_msg.str());
2541 #endif
2542  } else if (extractorName == "BRIEF") {
2543 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2544  m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2545 #else
2546  std::stringstream ss_msg;
2547  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2548  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2549  throw vpException(vpException::fatalError, ss_msg.str());
2550 #endif
2551  } else if (extractorName == "KAZE") {
2552  m_extractors[extractorName] = cv::KAZE::create();
2553  } else if (extractorName == "AKAZE") {
2554  m_extractors[extractorName] = cv::AKAZE::create();
2555  } else if (extractorName == "DAISY") {
2556 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2557  m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2558 #else
2559  std::stringstream ss_msg;
2560  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2561  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2562  throw vpException(vpException::fatalError, ss_msg.str());
2563 #endif
2564  } else if (extractorName == "LATCH") {
2565 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2566  m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2567 #else
2568  std::stringstream ss_msg;
2569  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2570  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2571  throw vpException(vpException::fatalError, ss_msg.str());
2572 #endif
2573  } else if (extractorName == "LUCID") {
2574 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2575  // m_extractors[extractorName] = cv::xfeatures2d::LUCID::create(1, 2);
2576  // Not possible currently, need a color image
2577  throw vpException(vpException::badValue, "Not possible currently as it needs a color image.");
2578 #else
2579  std::stringstream ss_msg;
2580  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2581  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2582  throw vpException(vpException::fatalError, ss_msg.str());
2583 #endif
2584  } else if (extractorName == "VGG") {
2585 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2586 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2587  m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2588 #else
2589  std::stringstream ss_msg;
2590  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2591  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2592  throw vpException(vpException::fatalError, ss_msg.str());
2593 #endif
2594 #else
2595  std::stringstream ss_msg;
2596  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2597  << VISP_HAVE_OPENCV_VERSION << " but at least OpenCV 3.2 is required.";
2598  throw vpException(vpException::fatalError, ss_msg.str());
2599 #endif
2600  } else if (extractorName == "BoostDesc") {
2601 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2602 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2603  m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2604 #else
2605  std::stringstream ss_msg;
2606  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2607  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2608  throw vpException(vpException::fatalError, ss_msg.str());
2609 #endif
2610 #else
2611  std::stringstream ss_msg;
2612  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2613  << VISP_HAVE_OPENCV_VERSION << " but at least OpenCV 3.2 is required.";
2614  throw vpException(vpException::fatalError, ss_msg.str());
2615 #endif
2616  } else {
2617  std::cerr << "The extractor: " << extractorName << " is not available." << std::endl;
2618  }
2619 #endif
2620 
2621  if (!m_extractors[extractorName]) { // if null
2622  std::stringstream ss_msg;
2623  ss_msg << "Failed to initialize the extractor: " << extractorName
2624  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2625  throw vpException(vpException::fatalError, ss_msg.str());
2626  }
2627 
2628 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2629  if (extractorName == "SURF") {
2630  // Use extended set of SURF descriptors (128 instead of 64)
2631  m_extractors[extractorName]->set("extended", 1);
2632  }
2633 #endif
2634 }
2635 
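2636 /*!
2637    Initialize a list of descriptor extractors from their names and check
2638    that all the extractors produce descriptors of the same type.
2639 
2640    \param extractorNames : List of extractor names.
2641  */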
2642 void vpKeyPoint::initExtractors(const std::vector<std::string> &extractorNames)
2643 {
2644  for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
2645  initExtractor(*it);
2646  }
2647 
2648  int descriptorType = CV_32F;
2649  bool firstIteration = true;
2650  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2651  it != m_extractors.end(); ++it) {
2652  if (firstIteration) {
2653  firstIteration = false;
2654  descriptorType = it->second->descriptorType();
2655  } else {
2656  if (descriptorType != it->second->descriptorType()) {
2657  throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2658  }
2659  }
2660  }
2661 }
2662 
2663 void vpKeyPoint::initFeatureNames()
2664 {
2665 // Create map enum to string
2666 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2667  m_mapOfDetectorNames[DETECTOR_FAST] = "FAST";
2668  m_mapOfDetectorNames[DETECTOR_MSER] = "MSER";
2669  m_mapOfDetectorNames[DETECTOR_ORB] = "ORB";
2670  m_mapOfDetectorNames[DETECTOR_BRISK] = "BRISK";
2671  m_mapOfDetectorNames[DETECTOR_GFTT] = "GFTT";
2672  m_mapOfDetectorNames[DETECTOR_SimpleBlob] = "SimpleBlob";
2673 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2674  m_mapOfDetectorNames[DETECTOR_STAR] = "STAR";
2675 #endif
2676 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2677  (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2678  m_mapOfDetectorNames[DETECTOR_SIFT] = "SIFT";
2679 #endif
2680 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2681  m_mapOfDetectorNames[DETECTOR_SURF] = "SURF";
2682 #endif
2683 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2684  m_mapOfDetectorNames[DETECTOR_KAZE] = "KAZE";
2685  m_mapOfDetectorNames[DETECTOR_AKAZE] = "AKAZE";
2686  m_mapOfDetectorNames[DETECTOR_AGAST] = "AGAST";
2687 #endif
2688 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2689  m_mapOfDetectorNames[DETECTOR_MSD] = "MSD";
2690 #endif
2691 #endif
2692 
2693 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2694  m_mapOfDescriptorNames[DESCRIPTOR_ORB] = "ORB";
2695  m_mapOfDescriptorNames[DESCRIPTOR_BRISK] = "BRISK";
2696 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2697  m_mapOfDescriptorNames[DESCRIPTOR_FREAK] = "FREAK";
2698  m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] = "BRIEF";
2699 #endif
2700 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2701  (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2702  m_mapOfDescriptorNames[DESCRIPTOR_SIFT] = "SIFT";
2703 #endif
2704 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2705  m_mapOfDescriptorNames[DESCRIPTOR_SURF] = "SURF";
2706 #endif
2707 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2708  m_mapOfDescriptorNames[DESCRIPTOR_KAZE] = "KAZE";
2709  m_mapOfDescriptorNames[DESCRIPTOR_AKAZE] = "AKAZE";
2710 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2711  m_mapOfDescriptorNames[DESCRIPTOR_DAISY] = "DAISY";
2712  m_mapOfDescriptorNames[DESCRIPTOR_LATCH] = "LATCH";
2713 #endif
2714 #endif
2715 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2716  m_mapOfDescriptorNames[DESCRIPTOR_VGG] = "VGG";
2717  m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] = "BoostDesc";
2718 #endif
2719 #endif
2720 }
2721 
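2722 /*!
2723    Initialize a descriptor matcher from its name.
2724 
2725    \param matcherName : Name of the matcher (e.g. "BruteForce", "FlannBased").
2726  */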
2727 void vpKeyPoint::initMatcher(const std::string &matcherName)
2728 {
2729  int descriptorType = CV_32F;
2730  bool firstIteration = true;
2731  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2732  it != m_extractors.end(); ++it) {
2733  if (firstIteration) {
2734  firstIteration = false;
2735  descriptorType = it->second->descriptorType();
2736  } else {
2737  if (descriptorType != it->second->descriptorType()) {
2738  throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2739  }
2740  }
2741  }
2742 
2743  if (matcherName == "FlannBased") {
2744  if (m_extractors.empty()) {
2745  std::cout << "Warning: No extractor initialized; using floating point "
2746  "values (CV_32F) "
2747  "for the descriptor type by default!"
2748  << std::endl;
2749  }
2750 
2751  if (descriptorType == CV_8U) {
2752 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2753  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2754 #else
2755  m_matcher = new cv::FlannBasedMatcher(new cv::flann::LshIndexParams(12, 20, 2));
2756 #endif
2757  } else {
2758 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2759  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2760 #else
2761  m_matcher = new cv::FlannBasedMatcher(new cv::flann::KDTreeIndexParams());
2762 #endif
2763  }
2764  } else {
2765  m_matcher = cv::DescriptorMatcher::create(matcherName);
2766  }
2767 
2768 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2769  if (m_matcher != NULL && !m_useKnn && matcherName == "BruteForce") {
2770  m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
2771  }
2772 #endif
2773 
2774  if (!m_matcher) { // if null
2775  std::stringstream ss_msg;
2776  ss_msg << "Failed to initialize the matcher: " << matcherName
2777  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2778  throw vpException(vpException::fatalError, ss_msg.str());
2779  }
2780 }
2781 
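2782 /*!
2783    Insert a reference image and a current image side-by-side in the matching
2784    image.
2785 
2786    \param IRef : Reference image.
2787    \param ICurrent : Current image.
2788    \param IMatching : Matching image, assumed to be already allocated to the proper size.
2789  */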
2790 void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
2791  vpImage<unsigned char> &IMatching)
2792 {
2793  vpImagePoint topLeftCorner(0, 0);
2794  IMatching.insert(IRef, topLeftCorner);
2795  topLeftCorner = vpImagePoint(0, IRef.getWidth());
2796  IMatching.insert(ICurrent, topLeftCorner);
2797 }
2798 
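2799 /*!
2800    Insert a reference image and a current image side-by-side in the matching
2801    image.
2802 
2803    \param IRef : Reference color image.
2804    \param ICurrent : Current color image.
2805    \param IMatching : Matching image, assumed to be already allocated to the proper size.
2806  */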
2807 void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent,
2808  vpImage<vpRGBa> &IMatching)
2809 {
2810  vpImagePoint topLeftCorner(0, 0);
2811  IMatching.insert(IRef, topLeftCorner);
2812  topLeftCorner = vpImagePoint(0, IRef.getWidth());
2813  IMatching.insert(ICurrent, topLeftCorner);
2814 }
2815 
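2816 /*!
2817    Insert the training images and the current image as a mosaic in the
2818    matching image.
2819 
2820    \param ICurrent : Current image.
2821    \param IMatching : Matching image, assumed to be already allocated to the proper size.
2822  */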
2823 void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
2824 {
2825  // Nb images in the training database + the current image in which we
2826  // want to detect the object
2827  int nbImg = (int)(m_mapOfImages.size() + 1);
2828 
2829  if (m_mapOfImages.empty()) {
2830  std::cerr << "There is no training image loaded!" << std::endl;
2831  return;
2832  }
2833 
2834  if (nbImg == 2) {
2835  // Only one training image, so we display them side by side
2836  insertImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
2837  } else {
2838  // Multiple training images, display them as a mosaic image
2839  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
2840  int nbWidth = nbImgSqrt;
2841  int nbHeight = nbImgSqrt;
2842 
2843  if (nbImgSqrt * nbImgSqrt < nbImg) {
2844  nbWidth++;
2845  }
2846 
2847  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2848  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2849  ++it) {
2850  if (maxW < it->second.getWidth()) {
2851  maxW = it->second.getWidth();
2852  }
2853 
2854  if (maxH < it->second.getHeight()) {
2855  maxH = it->second.getHeight();
2856  }
2857  }
2858 
2859  // Index of the current image in the grid, computed so that the current
2860  // image lies in the center square of the mosaic grid
2861  int medianI = nbHeight / 2;
2862  int medianJ = nbWidth / 2;
2863  int medianIndex = medianI * nbWidth + medianJ;
2864 
2865  int cpt = 0;
2866  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2867  ++it, cpt++) {
2868  int local_cpt = cpt;
2869  if (cpt >= medianIndex) {
2870  // Shift by one the index of the training images that come after
2871  // the current image
2872  local_cpt++;
2873  }
2874  int indexI = local_cpt / nbWidth;
2875  int indexJ = local_cpt - (indexI * nbWidth);
2876  vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2877 
2878  IMatching.insert(it->second, topLeftCorner);
2879  }
2880 
2881  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2882  IMatching.insert(ICurrent, topLeftCorner);
2883  }
2884 }
2885 
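2886 /*!
2887    Insert the training images and the current image as a mosaic in the
2888    matching image.
2889 
2890    \param ICurrent : Current color image.
2891    \param IMatching : Matching image, assumed to be already allocated to the proper size.
2892  */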
2893 void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
2894 {
2895  // Nb images in the training database + the current image in which we
2896  // want to detect the object
2897  int nbImg = (int)(m_mapOfImages.size() + 1);
2898 
2899  if (m_mapOfImages.empty()) {
2900  std::cerr << "There is no training image loaded!" << std::endl;
2901  return;
2902  }
2903 
2904  if (nbImg == 2) {
2905  // Only one training image, so we display them side by side
2906  vpImage<vpRGBa> IRef;
2907  vpImageConvert::convert(m_mapOfImages.begin()->second, IRef);
2908  insertImageMatching(IRef, ICurrent, IMatching);
2909  } else {
2910  // Multiple training images, display them as a mosaic image
2911  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
2912  int nbWidth = nbImgSqrt;
2913  int nbHeight = nbImgSqrt;
2914 
2915  if (nbImgSqrt * nbImgSqrt < nbImg) {
2916  nbWidth++;
2917  }
2918 
2919  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2920  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2921  ++it) {
2922  if (maxW < it->second.getWidth()) {
2923  maxW = it->second.getWidth();
2924  }
2925 
2926  if (maxH < it->second.getHeight()) {
2927  maxH = it->second.getHeight();
2928  }
2929  }
2930 
2931  // Index of the current image in the grid, computed so that the current
2932  // image lies in the center square of the mosaic grid
2933  int medianI = nbHeight / 2;
2934  int medianJ = nbWidth / 2;
2935  int medianIndex = medianI * nbWidth + medianJ;
2936 
2937  int cpt = 0;
2938  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2939  ++it, cpt++) {
2940  int local_cpt = cpt;
2941  if (cpt >= medianIndex) {
2942  // Shift by one the index of the training images that come after
2943  // the current image
2944  local_cpt++;
2945  }
2946  int indexI = local_cpt / nbWidth;
2947  int indexJ = local_cpt - (indexI * nbWidth);
2948  vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2949 
2950  vpImage<vpRGBa> IRef;
2951  vpImageConvert::convert(it->second, IRef);
2952  IMatching.insert(IRef, topLeftCorner);
2953  }
2954 
2955  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2956  IMatching.insert(ICurrent, topLeftCorner);
2957  }
2958 }
2959 
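2960 /*!
2961    Load configuration parameters (detector, extractor, matcher and RANSAC
2962    settings) from an XML configuration file.
2963    \param configFile : Path to the XML configuration file.
2964  */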
2965 void vpKeyPoint::loadConfigFile(const std::string &configFile)
2966 {
2967  vpXmlConfigParserKeyPoint xmlp;
2968 
2969  try {
2970  // Reset detector and extractor
2971  m_detectorNames.clear();
2972  m_extractorNames.clear();
2973  m_detectors.clear();
2974  m_extractors.clear();
2975 
2976  std::cout << " *********** Parsing XML for configuration for vpKeyPoint "
2977  "************ "
2978  << std::endl;
2979  xmlp.parse(configFile);
2980 
2981  m_detectorNames.push_back(xmlp.getDetectorName());
2982  m_extractorNames.push_back(xmlp.getExtractorName());
2983  m_matcherName = xmlp.getMatcherName();
2984 
2985  switch (xmlp.getMatchingMethod()) {
2986  case vpXmlConfigParserKeyPoint::constantFactorDistanceThreshold:
2987  m_filterType = constantFactorDistanceThreshold;
2988  break;
2989 
2990  case vpXmlConfigParserKeyPoint::stdDistanceThreshold:
2991  m_filterType = stdDistanceThreshold;
2992  break;
2993 
2994  case vpXmlConfigParserKeyPoint::ratioDistanceThreshold:
2995  m_filterType = ratioDistanceThreshold;
2996  break;
2997 
2998  case vpXmlConfigParserKeyPoint::stdAndRatioDistanceThreshold:
2999  m_filterType = stdAndRatioDistanceThreshold;
3000  break;
3001 
3002  case vpXmlConfigParserKeyPoint::noFilterMatching:
3003  m_filterType = noFilterMatching;
3004  break;
3005 
3006  default:
3007  break;
3008  }
3009 
3010  m_matchingFactorThreshold = xmlp.getMatchingFactorThreshold();
3011  m_matchingRatioThreshold = xmlp.getMatchingRatioThreshold();
3012 
3013  m_useRansacVVS = xmlp.getUseRansacVVSPoseEstimation();
3014  m_useConsensusPercentage = xmlp.getUseRansacConsensusPercentage();
3015  m_nbRansacIterations = xmlp.getNbRansacIterations();
3016  m_ransacReprojectionError = xmlp.getRansacReprojectionError();
3017  m_nbRansacMinInlierCount = xmlp.getNbRansacMinInlierCount();
3018  m_ransacThreshold = xmlp.getRansacThreshold();
3019  m_ransacConsensusPercentage = xmlp.getRansacConsensusPercentage();
3020 
3021  if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
3022  m_useKnn = true;
3023  } else {
3024  m_useKnn = false;
3025  }
3026 
3027  init();
3028  } catch (...) {
3029  throw vpException(vpException::ioError, "Can't open XML file \"%s\"\n ", configFile.c_str());
3030  }
3031 }
3032 
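3033 /*!
3034    Load learning data (training keypoints, descriptors, optional 3D points
3035    and training images) saved on disk.
3036 
3037    \param filename : Path of the learning file.
3038    \param binaryMode : If true, load a binary file, otherwise an XML file.
3039    \param append : If true, concatenate with the current learning data, otherwise reset it.
3040  */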
3041 void vpKeyPoint::loadLearningData(const std::string &filename, bool binaryMode, bool append)
3042 {
3043  int startClassId = 0;
3044  int startImageId = 0;
3045  if (!append) {
3046  m_trainKeyPoints.clear();
3047  m_trainPoints.clear();
3048  m_mapOfImageId.clear();
3049  m_mapOfImages.clear();
3050  } else {
3051  // In append case, find the max index of keypoint class Id
3052  for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
3053  if (startClassId < it->first) {
3054  startClassId = it->first;
3055  }
3056  }
3057 
3058  // In append case, find the max index of images Id
3059  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3060  ++it) {
3061  if (startImageId < it->first) {
3062  startImageId = it->first;
3063  }
3064  }
3065  }
3066 
3067  // Get parent directory
3068  std::string parent = vpIoTools::getParent(filename);
3069  if (!parent.empty()) {
3070  parent += "/";
3071  }
3072 
3073  if (binaryMode) {
3074  std::ifstream file(filename.c_str(), std::ifstream::binary);
3075  if (!file.is_open()) {
3076  throw vpException(vpException::ioError, "Cannot open the file.");
3077  }
3078 
3079  // Read info about training images
3080  int nbImgs = 0;
3081  vpIoTools::readBinaryValueLE(file, nbImgs);
3082 
3083 #if !defined(VISP_HAVE_MODULE_IO)
3084  if (nbImgs > 0) {
3085  std::cout << "Warning: The learning file contains image data that will "
3086  "not be loaded because the visp_io module "
3087  "is not available!"
3088  << std::endl;
3089  }
3090 #endif
3091 
3092  for (int i = 0; i < nbImgs; i++) {
3093  // Read image_id
3094  int id = 0;
3095  vpIoTools::readBinaryValueLE(file, id);
3096 
3097  int length = 0;
3098  vpIoTools::readBinaryValueLE(file, length);
3099  // Will contain the path to the training images
3100  char *path = new char[length + 1]; // heap allocation: variable-length arrays are not standard C++
3101 
3102  for (int cpt = 0; cpt < length; cpt++) {
3103  char c;
3104  file.read((char *)(&c), sizeof(c));
3105  path[cpt] = c;
3106  }
3107  path[length] = '\0';
3108 
3109  vpImage<unsigned char> I;
3110 #ifdef VISP_HAVE_MODULE_IO
3111  if (vpIoTools::isAbsolutePathname(std::string(path))) {
3112  vpImageIo::read(I, path);
3113  } else {
3114  vpImageIo::read(I, parent + path);
3115  }
3116 
3117  // Add the image previously loaded only if VISP_HAVE_MODULE_IO
3118  m_mapOfImages[id + startImageId] = I;
3119 #endif
3120 
3121  // Delete path
3122  delete[] path;
3123  }
3124 
3125  // Read whether the 3D point information is saved or not
3126  int have3DInfoInt = 0;
3127  vpIoTools::readBinaryValueLE(file, have3DInfoInt);
3128  bool have3DInfo = have3DInfoInt != 0;
3129 
3130  // Read the number of descriptors
3131  int nRows = 0;
3132  vpIoTools::readBinaryValueLE(file, nRows);
3133 
3134  // Read the size of the descriptor
3135  int nCols = 0;
3136  vpIoTools::readBinaryValueLE(file, nCols);
3137 
3138  // Read the type of the descriptor
3139  int descriptorType = 5; // CV_32F
3140  vpIoTools::readBinaryValueLE(file, descriptorType);
3141 
3142  cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3143  for (int i = 0; i < nRows; i++) {
3144  // Read information about keyPoint
3145  float u, v, size, angle, response;
3146  int octave, class_id, image_id;
3147  vpIoTools::readBinaryValueLE(file, u);
3148  vpIoTools::readBinaryValueLE(file, v);
3149  vpIoTools::readBinaryValueLE(file, size);
3150  vpIoTools::readBinaryValueLE(file, angle);
3151  vpIoTools::readBinaryValueLE(file, response);
3152  vpIoTools::readBinaryValueLE(file, octave);
3153  vpIoTools::readBinaryValueLE(file, class_id);
3154  vpIoTools::readBinaryValueLE(file, image_id);
3155  cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
3156  m_trainKeyPoints.push_back(keyPoint);
3157 
3158  if (image_id != -1) {
3159 #ifdef VISP_HAVE_MODULE_IO
3160  // No training images if image_id == -1
3161  m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3162 #endif
3163  }
3164 
3165  if (have3DInfo) {
3166  // Read oX, oY, oZ
3167  float oX, oY, oZ;
3168  vpIoTools::readBinaryValueLE(file, oX);
3169  vpIoTools::readBinaryValueLE(file, oY);
3170  vpIoTools::readBinaryValueLE(file, oZ);
3171  m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
3172  }
3173 
3174  for (int j = 0; j < nCols; j++) {
3175  // Read the value of the descriptor
3176  switch (descriptorType) {
3177  case CV_8U: {
3178  unsigned char value;
3179  file.read((char *)(&value), sizeof(value));
3180  trainDescriptorsTmp.at<unsigned char>(i, j) = value;
3181  } break;
3182 
3183  case CV_8S: {
3184  char value;
3185  file.read((char *)(&value), sizeof(value));
3186  trainDescriptorsTmp.at<char>(i, j) = value;
3187  } break;
3188 
3189  case CV_16U: {
3190  unsigned short int value;
3191  vpIoTools::readBinaryValueLE(file, value);
3192  trainDescriptorsTmp.at<unsigned short int>(i, j) = value;
3193  } break;
3194 
3195  case CV_16S: {
3196  short int value;
3197  vpIoTools::readBinaryValueLE(file, value);
3198  trainDescriptorsTmp.at<short int>(i, j) = value;
3199  } break;
3200 
3201  case CV_32S: {
3202  int value;
3203  vpIoTools::readBinaryValueLE(file, value);
3204  trainDescriptorsTmp.at<int>(i, j) = value;
3205  } break;
3206 
3207  case CV_32F: {
3208  float value;
3209  vpIoTools::readBinaryValueLE(file, value);
3210  trainDescriptorsTmp.at<float>(i, j) = value;
3211  } break;
3212 
3213  case CV_64F: {
3214  double value;
3215  vpIoTools::readBinaryValueLE(file, value);
3216  trainDescriptorsTmp.at<double>(i, j) = value;
3217  } break;
3218 
3219  default: {
3220  float value;
3221  vpIoTools::readBinaryValueLE(file, value);
3222  trainDescriptorsTmp.at<float>(i, j) = value;
3223  } break;
3224  }
3225  }
3226  }
3227 
3228  if (!append || m_trainDescriptors.empty()) {
3229  trainDescriptorsTmp.copyTo(m_trainDescriptors);
3230  } else {
3231  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3232  }
3233 
3234  file.close();
3235  } else {
3236  pugi::xml_document doc;
3237 
3238  /* Parse the file and get the DOM */
3239  if (!doc.load_file(filename.c_str())) {
3240  throw vpException(vpException::ioError, "Error with file: %s", filename.c_str());
3241  }
3242 
3243  pugi::xml_node root_element = doc.document_element();
3244 
3245  int descriptorType = CV_32F; // float
3246  int nRows = 0, nCols = 0;
3247  int i = 0, j = 0;
3248 
3249  cv::Mat trainDescriptorsTmp;
3250 
3251  for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node;
3252  first_level_node = first_level_node.next_sibling()) {
3253 
3254  std::string name(first_level_node.name());
3255  if (first_level_node.type() == pugi::node_element && name == "TrainingImageInfo") {
3256 
3257  for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node;
3258  image_info_node = image_info_node.next_sibling()) {
3259  name = std::string(image_info_node.name());
3260 
3261  if (name == "trainImg") {
3262  // Read image_id
3263  int id = image_info_node.attribute("image_id").as_int();
3264 
3264 
3265  vpImage<unsigned char> I;
3266 #ifdef VISP_HAVE_MODULE_IO
3267  std::string path(image_info_node.text().as_string());
3268  // Read path to the training images
3269  if (vpIoTools::isAbsolutePathname(std::string(path))) {
3270  vpImageIo::read(I, path);
3271  } else {
3272  vpImageIo::read(I, parent + path);
3273  }
3274 
3275  // Add the image previously loaded only if VISP_HAVE_MODULE_IO
3276  m_mapOfImages[id + startImageId] = I;
3277 #endif
3278  }
3279  }
3280  } else if (first_level_node.type() == pugi::node_element && name == "DescriptorsInfo") {
3281  for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
3282  descriptors_info_node = descriptors_info_node.next_sibling()) {
3283  if (descriptors_info_node.type() == pugi::node_element) {
3284  name = std::string(descriptors_info_node.name());
3285 
3286  if (name == "nrows") {
3287  nRows = descriptors_info_node.text().as_int();
3288  } else if (name == "ncols") {
3289  nCols = descriptors_info_node.text().as_int();
3290  } else if (name == "type") {
3291  descriptorType = descriptors_info_node.text().as_int();
3292  }
3293  }
3294  }
3295 
3296  trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3297  } else if (first_level_node.type() == pugi::node_element && name == "DescriptorInfo") {
3298  double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
3299  int octave = 0, class_id = 0, image_id = 0;
3300  double oX = 0.0, oY = 0.0, oZ = 0.0;
3301 
3302  std::stringstream ss;
3303 
3304  for (pugi::xml_node point_node = first_level_node.first_child(); point_node;
3305  point_node = point_node.next_sibling()) {
3306  if (point_node.type() == pugi::node_element) {
3307  name = std::string(point_node.name());
3308 
3309  // Read information about keypoints
3310  if (name == "u") {
3311  u = point_node.text().as_double();
3312  } else if (name == "v") {
3313  v = point_node.text().as_double();
3314  } else if (name == "size") {
3315  size = point_node.text().as_double();
3316  } else if (name == "angle") {
3317  angle = point_node.text().as_double();
3318  } else if (name == "response") {
3319  response = point_node.text().as_double();
3320  } else if (name == "octave") {
3321  octave = point_node.text().as_int();
3322  } else if (name == "class_id") {
3323  class_id = point_node.text().as_int();
3324  cv::KeyPoint keyPoint(cv::Point2f((float)u, (float)v), (float)size, (float)angle, (float)response, octave,
3325  (class_id + startClassId));
3326  m_trainKeyPoints.push_back(keyPoint);
3327  } else if (name == "image_id") {
3328  image_id = point_node.text().as_int();
3329  if (image_id != -1) {
3330 #ifdef VISP_HAVE_MODULE_IO
3331  // No training images if image_id == -1
3332  m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3333 #endif
3334  }
3335  } else if (name == "oX") {
3336  oX = point_node.text().as_double();
3337  } else if (name == "oY") {
3338  oY = point_node.text().as_double();
3339  } else if (name == "oZ") {
3340  oZ = point_node.text().as_double();
3341  m_trainPoints.push_back(cv::Point3f((float)oX, (float)oY, (float)oZ));
3342  } else if (name == "desc") {
3343  j = 0;
3344 
3345  for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
3346  descriptor_value_node = descriptor_value_node.next_sibling()) {
3347 
3348  if (descriptor_value_node.type() == pugi::node_element) {
3349  // Read descriptors values
3350  std::string parseStr(descriptor_value_node.text().as_string());
3351  ss.clear();
3352  ss.str(parseStr);
3353 
3354  if (!ss.fail()) {
3355  switch (descriptorType) {
3356  case CV_8U: {
3357  // Parse the numeric value [0 ; 255] to an int
3358  int parseValue;
3359  ss >> parseValue;
3360  trainDescriptorsTmp.at<unsigned char>(i, j) = (unsigned char)parseValue;
3361  } break;
3362 
3363  case CV_8S: {
3364  // Parse the numeric value [-128 ; 127] to an int
3365  int parseValue;
3366  ss >> parseValue;
3367  trainDescriptorsTmp.at<char>(i, j) = (char)parseValue;
3368  } break;
3369 
3370  case CV_16U:
3371  ss >> trainDescriptorsTmp.at<unsigned short int>(i, j);
3372  break;
3373 
3374  case CV_16S:
3375  ss >> trainDescriptorsTmp.at<short int>(i, j);
3376  break;
3377 
3378  case CV_32S:
3379  ss >> trainDescriptorsTmp.at<int>(i, j);
3380  break;
3381 
3382  case CV_32F:
3383  ss >> trainDescriptorsTmp.at<float>(i, j);
3384  break;
3385 
3386  case CV_64F:
3387  ss >> trainDescriptorsTmp.at<double>(i, j);
3388  break;
3389 
3390  default:
3391  ss >> trainDescriptorsTmp.at<float>(i, j);
3392  break;
3393  }
3394  } else {
3395  std::cerr << "Error when converting: " << ss.str() << std::endl;
3396  }
3397 
3398  j++;
3399  }
3400  }
3401  }
3402  }
3403  }
3404  i++;
3405  }
3406  }
3407 
3408  if (!append || m_trainDescriptors.empty()) {
3409  trainDescriptorsTmp.copyTo(m_trainDescriptors);
3410  } else {
3411  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3412  }
3413  }
3414 
3415  // Convert OpenCV type to ViSP type for compatibility
3416  vpConvert::convertFromOpenCV(this->m_trainKeyPoints, referenceImagePointsList);
3417  vpConvert::convertFromOpenCV(this->m_trainPoints, m_trainVpPoints);
3418 
3419  // Add train descriptors to the matcher object
3420  m_matcher->clear();
3421  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
3422 
3423  // Set _reference_computed to true as we load a learning file
3424  _reference_computed = true;
3425 
3426  // Set m_currentImageId
3427  m_currentImageId = (int)m_mapOfImages.size();
3428 }
3429 
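3430 /*!
3431    Match the query descriptors against the train descriptors, using either a
3432    simple match or a knn match depending on the filtering method.
3433    \param trainDescriptors : Train descriptors.
3434    \param queryDescriptors : Query descriptors.
3435    \param matches : Resulting list of matches.
3436    \param elapsedTime : Matching time in ms.
3437  */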
3438 void vpKeyPoint::match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors,
3439  std::vector<cv::DMatch> &matches, double &elapsedTime)
3440 {
3441  double t = vpTime::measureTimeMs();
3442 
3443  if (m_useKnn) {
3444  m_knnMatches.clear();
3445 
3446  if (m_useMatchTrainToQuery) {
3447  std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
3448 
3449  // Match train descriptors to query descriptors
3450  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
3451  matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
3452 
3453  for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
3454  it1 != knnMatchesTmp.end(); ++it1) {
3455  std::vector<cv::DMatch> tmp;
3456  for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
3457  tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
3458  }
3459  m_knnMatches.push_back(tmp);
3460  }
3461 
3462  matches.resize(m_knnMatches.size());
3463  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3464  } else {
3465  // Match query descriptors to train descriptors
3466  m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
3467  matches.resize(m_knnMatches.size());
3468  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3469  }
3470  } else {
3471  matches.clear();
3472 
3473  if (m_useMatchTrainToQuery) {
3474  std::vector<cv::DMatch> matchesTmp;
3475  // Match train descriptors to query descriptors
3476  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
3477  matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
3478 
3479  for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
3480  matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
3481  }
3482  } else {
3483  // Match query descriptors to train descriptors
3484  m_matcher->match(queryDescriptors, matches);
3485  }
3486  }
3487  elapsedTime = vpTime::measureTimeMs() - t;
3488 }
3489 
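// Sketch of the classical ratio test that the knn path above enables
// (illustration only; vpKeyPoint applies its own criteria in filterMatches()):
// a match is kept only when its best distance is clearly smaller than the
// second-best one, which discards ambiguous correspondences.
#if 0
static void ratioTestSketch(const std::vector<std::vector<cv::DMatch> > &knnMatches,
                            std::vector<cv::DMatch> &goodMatches, double ratio = 0.85)
{
  goodMatches.clear();
  for (size_t i = 0; i < knnMatches.size(); i++) {
    // knnMatches[i][0] is the best match, knnMatches[i][1] the second best
    if (knnMatches[i].size() >= 2 && knnMatches[i][0].distance < ratio * knnMatches[i][1].distance) {
      goodMatches.push_back(knnMatches[i][0]);
    }
  }
}
#endif
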
3497 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I) { return matchPoint(I, vpRect()); }
3498 
3506 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color) { return matchPoint(I_color, vpRect()); }
3507 
3518 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
3519  unsigned int width)
3520 {
3521  return matchPoint(I, vpRect(iP, width, height));
3522 }
3523 
3534 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
3535  unsigned int width)
3536 {
3537  return matchPoint(I_color, vpRect(iP, width, height));
3538 }
3539 
3548 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpRect &rectangle)
3549 {
3550  if (m_trainDescriptors.empty()) {
3551  std::cerr << "Reference is empty." << std::endl;
3552  if (!_reference_computed) {
3553  std::cerr << "Reference is not computed." << std::endl;
3554  }
3555  std::cerr << "Matching is not possible." << std::endl;
3556 
3557  return 0;
3558  }
3559 
3560  if (m_useAffineDetection) {
3561  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3562  std::vector<cv::Mat> listOfQueryDescriptors;
3563 
3564  // Detect keypoints and extract descriptors on multiple images
3565  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3566 
 3567  // Flatten the per-view lists of query keypoints into a single list
3568  m_queryKeyPoints.clear();
3569  for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3570  it != listOfQueryKeyPoints.end(); ++it) {
3571  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3572  }
3573 
3574  bool first = true;
3575  for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3576  ++it) {
3577  if (first) {
3578  first = false;
3579  it->copyTo(m_queryDescriptors);
3580  } else {
3581  m_queryDescriptors.push_back(*it);
3582  }
3583  }
3584  } else {
3585  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3586  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3587  }
3588 
3589  return matchPoint(m_queryKeyPoints, m_queryDescriptors);
3590 }
3591 
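// Usage sketch (illustration only; image file names are hypothetical):
// the typical detect + extract + match pipeline wrapped by matchPoint().
#if 0
static void example_vpKeyPoint_matching()
{
  vpImage<unsigned char> Iref, Icur;
  vpImageIo::read(Iref, "reference.png");
  vpImageIo::read(Icur, "current.png");

  vpKeyPoint keypoint(vpKeyPoint::DETECTOR_ORB, vpKeyPoint::DESCRIPTOR_ORB, "BruteForce-Hamming");
  keypoint.buildReference(Iref);                      // detect + extract on the reference image
  unsigned int nbMatches = keypoint.matchPoint(Icur); // detect + extract + match on the current image
  std::cout << nbMatches << " matches found" << std::endl;
}
#endif
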
3600 unsigned int vpKeyPoint::matchPoint(const std::vector<cv::KeyPoint> &queryKeyPoints, const cv::Mat &queryDescriptors)
3601 {
3602  m_queryKeyPoints = queryKeyPoints;
3603  m_queryDescriptors = queryDescriptors;
3604 
3605  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3606 
3607  if (m_filterType != noFilterMatching) {
3608  m_queryFilteredKeyPoints.clear();
3609  m_objectFilteredPoints.clear();
3610  m_filteredMatches.clear();
3611 
3612  filterMatches();
3613  } else {
3614  if (m_useMatchTrainToQuery) {
 3615  // Keep only the query keypoints matched with a train keypoint
3616  m_queryFilteredKeyPoints.clear();
3617  m_filteredMatches.clear();
3618  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3619  m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3620  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3621  }
3622  } else {
3623  m_queryFilteredKeyPoints = m_queryKeyPoints;
3624  m_filteredMatches = m_matches;
3625  }
3626 
3627  if (!m_trainPoints.empty()) {
3628  m_objectFilteredPoints.clear();
 3629  // Add the 3D object points so that a given index refers to the same
 3630  // train object point in both m_queryFilteredKeyPoints and
 3631  // m_objectFilteredPoints
3632  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3633  // m_matches is normally ordered following the queryDescriptor index
3634  m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3635  }
3636  }
3637  }
3638 
3639  // Convert OpenCV type to ViSP type for compatibility
3640  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
 3641  vpConvert::convertFromOpenCV(m_filteredMatches, matchedReferencePoints);
 3642 
3643  return static_cast<unsigned int>(m_filteredMatches.size());
3644 }
3645 
3654 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
3655 {
3656  vpImageConvert::convert(I_color, m_I);
3657  return matchPoint(m_I, rectangle);
3658 }
3659 
3672 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
 3673  bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3674 {
3675  double error, elapsedTime;
3676  return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3677 }
3678 
3691 bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
 3692  bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3693 {
3694  double error, elapsedTime;
3695  return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
3696 }
3697 
3713 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
 3714  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3715  const vpRect &rectangle)
3716 {
3717  // Check if we have training descriptors
3718  if (m_trainDescriptors.empty()) {
3719  std::cerr << "Reference is empty." << std::endl;
3720  if (!_reference_computed) {
3721  std::cerr << "Reference is not computed." << std::endl;
3722  }
3723  std::cerr << "Matching is not possible." << std::endl;
3724 
3725  return false;
3726  }
3727 
3728  if (m_useAffineDetection) {
3729  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3730  std::vector<cv::Mat> listOfQueryDescriptors;
3731 
3732  // Detect keypoints and extract descriptors on multiple images
3733  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3734 
 3735  // Flatten the per-view lists of query keypoints into a single list
3736  m_queryKeyPoints.clear();
3737  for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3738  it != listOfQueryKeyPoints.end(); ++it) {
3739  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3740  }
3741 
3742  bool first = true;
3743  for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3744  ++it) {
3745  if (first) {
3746  first = false;
3747  it->copyTo(m_queryDescriptors);
3748  } else {
3749  m_queryDescriptors.push_back(*it);
3750  }
3751  }
3752  } else {
3753  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3754  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3755  }
3756 
3757  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3758 
3759  elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3760 
3761  if (m_filterType != noFilterMatching) {
3762  m_queryFilteredKeyPoints.clear();
3763  m_objectFilteredPoints.clear();
3764  m_filteredMatches.clear();
3765 
3766  filterMatches();
3767  } else {
3768  if (m_useMatchTrainToQuery) {
 3769  // Keep only the query keypoints matched with a train keypoint
3770  m_queryFilteredKeyPoints.clear();
3771  m_filteredMatches.clear();
3772  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3773  m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3774  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3775  }
3776  } else {
3777  m_queryFilteredKeyPoints = m_queryKeyPoints;
3778  m_filteredMatches = m_matches;
3779  }
3780 
3781  if (!m_trainPoints.empty()) {
3782  m_objectFilteredPoints.clear();
 3783  // Add the 3D object points so that a given index refers to the same
 3784  // train object point in both m_queryFilteredKeyPoints and
 3785  // m_objectFilteredPoints
3786  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3787  // m_matches is normally ordered following the queryDescriptor index
3788  m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3789  }
3790  }
3791  }
3792 
3793  // Convert OpenCV type to ViSP type for compatibility
3794  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
 3795  vpConvert::convertFromOpenCV(m_filteredMatches, matchedReferencePoints);
 3796 
 3797  // error = std::numeric_limits<double>::max(); // creates a compilation
 3798  // error under Windows (max macro) unless #undef max is added beforehand
 3799  error = DBL_MAX;
3800  m_ransacInliers.clear();
3801  m_ransacOutliers.clear();
3802 
3803  if (m_useRansacVVS) {
3804  std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3805  size_t cpt = 0;
3806  // Create a list of vpPoint with 2D coordinates (current keypoint
3807  // location) + 3D coordinates (world/object coordinates)
3808  for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3809  it != m_objectFilteredPoints.end(); ++it, cpt++) {
3810  vpPoint pt;
3811  pt.setWorldCoordinates(it->x, it->y, it->z);
3812 
3813  vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3814 
3815  double x = 0.0, y = 0.0;
3816  vpPixelMeterConversion::convertPoint(cam, imP, x, y);
3817  pt.set_x(x);
3818  pt.set_y(y);
3819 
3820  objectVpPoints[cpt] = pt;
3821  }
3822 
3823  std::vector<vpPoint> inliers;
3824  std::vector<unsigned int> inlierIndex;
3825 
3826  bool res = computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3827 
3828  std::map<unsigned int, bool> mapOfInlierIndex;
3829  m_matchRansacKeyPointsToPoints.clear();
3830 
3831  for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3832  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3833  m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3834  mapOfInlierIndex[*it] = true;
3835  }
3836 
3837  for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3838  if (mapOfInlierIndex.find((unsigned int)i) == mapOfInlierIndex.end()) {
3839  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3840  }
3841  }
3842 
3843  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3844 
3845  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3846  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3847  m_ransacInliers.begin(), matchRansacToVpImage);
3848 
3849  elapsedTime += m_poseTime;
3850 
3851  return res;
3852  } else {
3853  std::vector<cv::Point2f> imageFilteredPoints;
3854  cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3855  std::vector<int> inlierIndex;
3856  bool res = computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3857 
3858  std::map<int, bool> mapOfInlierIndex;
3859  m_matchRansacKeyPointsToPoints.clear();
3860 
3861  for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3862  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3863  m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3864  mapOfInlierIndex[*it] = true;
3865  }
3866 
3867  for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3868  if (mapOfInlierIndex.find((int)i) == mapOfInlierIndex.end()) {
3869  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3870  }
3871  }
3872 
3873  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3874 
3875  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3876  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3877  m_ransacInliers.begin(), matchRansacToVpImage);
3878 
3879  elapsedTime += m_poseTime;
3880 
3881  return res;
3882  }
3883 }
3884 
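// Usage sketch (illustration only; the learning file name is hypothetical):
// matching followed by RANSAC pose estimation. This requires a reference
// learned with 3D information, e.g. a learning file saved with 3D points.
#if 0
static void example_vpKeyPoint_pose(const vpImage<unsigned char> &I, const vpCameraParameters &cam)
{
  vpKeyPoint keypoint(vpKeyPoint::DETECTOR_ORB, vpKeyPoint::DESCRIPTOR_ORB, "BruteForce-Hamming");
  keypoint.loadLearningData("learning_data.bin", true); // binary learning file with 3D points

  vpHomogeneousMatrix cMo;
  double error = 0., elapsedTime = 0.;
  if (keypoint.matchPoint(I, cam, cMo, error, elapsedTime)) {
    std::cout << "cMo:\n" << cMo << "\nresidual: " << error << std::endl;
  }
}
#endif
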
3900 bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
 3901  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3902  const vpRect &rectangle)
3903 {
3904  vpImageConvert::convert(I_color, m_I);
3905  return (matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
3906 }
3907 
3928 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox,
 3929  vpImagePoint &centerOfGravity, const bool isPlanarObject,
3930  std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3931  double *meanDescriptorDistance, double *detection_score, const vpRect &rectangle)
3932 {
3933  if (imPts1 != NULL && imPts2 != NULL) {
3934  imPts1->clear();
3935  imPts2->clear();
3936  }
3937 
3938  matchPoint(I, rectangle);
3939 
3940  double meanDescriptorDistanceTmp = 0.0;
3941  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3942  meanDescriptorDistanceTmp += (double)it->distance;
3943  }
3944 
 3945  meanDescriptorDistanceTmp /= m_filteredMatches.empty() ? 1.0 : (double)m_filteredMatches.size(); // guard: no match
 3946  double score = m_filteredMatches.empty() ? 0.0 : (double)m_filteredMatches.size() / meanDescriptorDistanceTmp; // avoid 0/0 = NaN
3947 
3948  if (meanDescriptorDistance != NULL) {
3949  *meanDescriptorDistance = meanDescriptorDistanceTmp;
3950  }
3951  if (detection_score != NULL) {
3952  *detection_score = score;
3953  }
3954 
3955  if (m_filteredMatches.size() >= 4) {
3956  // Training / Reference 2D points
3957  std::vector<cv::Point2f> points1(m_filteredMatches.size());
3958  // Query / Current 2D points
3959  std::vector<cv::Point2f> points2(m_filteredMatches.size());
3960 
3961  for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3962  points1[i] = cv::Point2f(m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx].pt);
3963  points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx].pt);
3964  }
3965 
3966  std::vector<vpImagePoint> inliers;
3967  if (isPlanarObject) {
3968 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3969  cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3970 #else
3971  cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3972 #endif
3973 
3974  for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3975  // Compute reprojection error
3976  cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3977  realPoint.at<double>(0, 0) = points1[i].x;
3978  realPoint.at<double>(1, 0) = points1[i].y;
 3979  realPoint.at<double>(2, 0) = 1.0;
3980 
3981  cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3982  double err_x = (reprojectedPoint.at<double>(0, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].x;
3983  double err_y = (reprojectedPoint.at<double>(1, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].y;
3984  double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3985 
3986  if (reprojectionError < 6.0) {
3987  inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3988  if (imPts1 != NULL) {
3989  imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
3990  }
3991 
3992  if (imPts2 != NULL) {
3993  imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3994  }
3995  }
3996  }
3997  } else if (m_filteredMatches.size() >= 8) {
3998  cv::Mat fundamentalInliers;
3999  cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
4000 
4001  for (size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
4002  if (fundamentalInliers.at<uchar>((int)i, 0)) {
4003  inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
4004 
4005  if (imPts1 != NULL) {
4006  imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
4007  }
4008 
4009  if (imPts2 != NULL) {
4010  imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
4011  }
4012  }
4013  }
4014  }
4015 
4016  if (!inliers.empty()) {
4017  // Build a polygon with the list of inlier keypoints detected in the
4018  // current image to get the bounding box
4019  vpPolygon polygon(inliers);
4020  boundingBox = polygon.getBoundingBox();
4021 
4022  // Compute the center of gravity
4023  double meanU = 0.0, meanV = 0.0;
4024  for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
4025  meanU += it->get_u();
4026  meanV += it->get_v();
4027  }
4028 
4029  meanU /= (double)inliers.size();
4030  meanV /= (double)inliers.size();
4031 
4032  centerOfGravity.set_u(meanU);
4033  centerOfGravity.set_v(meanV);
4034  }
4035  } else {
4036  // Too few matches
4037  return false;
4038  }
4039 
4040  if (m_detectionMethod == detectionThreshold) {
4041  return meanDescriptorDistanceTmp < m_detectionThreshold;
4042  } else {
4043  return score > m_detectionScore;
4044  }
4045 }
4046 
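// Usage sketch (illustration only; the learning file name is hypothetical):
// detection of a planar object without pose computation. The bounding box
// and the center of gravity are expressed in the current image.
#if 0
static void example_vpKeyPoint_detection(const vpImage<unsigned char> &I)
{
  vpKeyPoint keypoint(vpKeyPoint::DETECTOR_ORB, vpKeyPoint::DESCRIPTOR_ORB, "BruteForce-Hamming");
  keypoint.loadLearningData("learning_data.xml");

  vpRect boundingBox;
  vpImagePoint centerOfGravity;
  if (keypoint.matchPointAndDetect(I, boundingBox, centerOfGravity)) {
    std::cout << "Object detected, cog: " << centerOfGravity << std::endl;
  }
}
#endif
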
4066 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam,
 4067  vpHomogeneousMatrix &cMo, double &error, double &elapsedTime, vpRect &boundingBox,
4068  vpImagePoint &centerOfGravity, bool (*func)(const vpHomogeneousMatrix &),
4069  const vpRect &rectangle)
4070 {
4071  bool isMatchOk = matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
4072  if (isMatchOk) {
4073  // Use the pose estimated to project the model points in the image
4074  vpPoint pt;
4075  vpImagePoint imPt;
4076  std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
4077  size_t cpt = 0;
4078  for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
4079  pt = *it;
4080  pt.project(cMo);
4081  vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), imPt);
4082  modelImagePoints[cpt] = imPt;
4083  }
4084 
4085  // Build a polygon with the list of model image points to get the bounding
4086  // box
4087  vpPolygon polygon(modelImagePoints);
4088  boundingBox = polygon.getBoundingBox();
4089 
4090  // Compute the center of gravity of the current inlier keypoints
4091  double meanU = 0.0, meanV = 0.0;
4092  for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
4093  meanU += it->get_u();
4094  meanV += it->get_v();
4095  }
4096 
4097  meanU /= (double)m_ransacInliers.size();
4098  meanV /= (double)m_ransacInliers.size();
4099 
4100  centerOfGravity.set_u(meanU);
4101  centerOfGravity.set_v(meanV);
4102  }
4103 
4104  return isMatchOk;
4105 }
4106 
4121 void vpKeyPoint::detectExtractAffine(const vpImage<unsigned char> &I,
 4122  std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
4123  std::vector<cv::Mat> &listOfDescriptors,
4124  std::vector<vpImage<unsigned char> > *listOfAffineI)
4125 {
4126 #if 0
4127  cv::Mat img;
4128  vpImageConvert::convert(I, img);
4129  listOfKeypoints.clear();
4130  listOfDescriptors.clear();
4131 
4132  for (int tl = 1; tl < 6; tl++) {
4133  double t = pow(2, 0.5 * tl);
4134  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4135  std::vector<cv::KeyPoint> keypoints;
4136  cv::Mat descriptors;
4137 
4138  cv::Mat timg, mask, Ai;
4139  img.copyTo(timg);
4140 
4141  affineSkew(t, phi, timg, mask, Ai);
4142 
4143 
4144  if(listOfAffineI != NULL) {
4145  cv::Mat img_disp;
4146  bitwise_and(mask, timg, img_disp);
 4147  vpImage<unsigned char> tI;
 4148  vpImageConvert::convert(img_disp, tI);
4149  listOfAffineI->push_back(tI);
4150  }
4151 #if 0
4152  cv::Mat img_disp;
4153  cv::bitwise_and(mask, timg, img_disp);
4154  cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE ); // Create a window for display.
4155  cv::imshow( "Skew", img_disp );
4156  cv::waitKey(0);
4157 #endif
4158 
4159  for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4160  it != m_detectors.end(); ++it) {
4161  std::vector<cv::KeyPoint> kp;
4162  it->second->detect(timg, kp, mask);
4163  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4164  }
4165 
4166  double elapsedTime;
4167  extract(timg, keypoints, descriptors, elapsedTime);
4168 
4169  for(unsigned int i = 0; i < keypoints.size(); i++) {
4170  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4171  cv::Mat kpt_t = Ai * cv::Mat(kpt);
4172  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
4173  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
4174  }
4175 
4176  listOfKeypoints.push_back(keypoints);
4177  listOfDescriptors.push_back(descriptors);
4178  }
4179  }
4180 
4181 #else
4182  cv::Mat img;
4183  vpImageConvert::convert(I, img);
4184 
4185  // Create a vector for storing the affine skew parameters
4186  std::vector<std::pair<double, int> > listOfAffineParams;
4187  for (int tl = 1; tl < 6; tl++) {
4188  double t = pow(2, 0.5 * tl);
4189  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4190  listOfAffineParams.push_back(std::pair<double, int>(t, phi));
4191  }
4192  }
4193 
4194  listOfKeypoints.resize(listOfAffineParams.size());
4195  listOfDescriptors.resize(listOfAffineParams.size());
4196 
4197  if (listOfAffineI != NULL) {
4198  listOfAffineI->resize(listOfAffineParams.size());
4199  }
4200 
4201 #ifdef VISP_HAVE_OPENMP
4202 #pragma omp parallel for
4203 #endif
4204  for (int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
4205  std::vector<cv::KeyPoint> keypoints;
4206  cv::Mat descriptors;
4207 
4208  cv::Mat timg, mask, Ai;
4209  img.copyTo(timg);
4210 
4211  affineSkew(listOfAffineParams[(size_t)cpt].first, listOfAffineParams[(size_t)cpt].second, timg, mask, Ai);
4212 
4213  if (listOfAffineI != NULL) {
4214  cv::Mat img_disp;
4215  bitwise_and(mask, timg, img_disp);
 4216  vpImage<unsigned char> tI;
 4217  vpImageConvert::convert(img_disp, tI);
4218  (*listOfAffineI)[(size_t)cpt] = tI;
4219  }
4220 
4221 #if 0
4222  cv::Mat img_disp;
4223  cv::bitwise_and(mask, timg, img_disp);
4224  cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE ); // Create a window for display.
4225  cv::imshow( "Skew", img_disp );
4226  cv::waitKey(0);
4227 #endif
4228 
4229  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4230  it != m_detectors.end(); ++it) {
4231  std::vector<cv::KeyPoint> kp;
4232  it->second->detect(timg, kp, mask);
4233  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4234  }
4235 
4236  double elapsedTime;
4237  extract(timg, keypoints, descriptors, elapsedTime);
4238 
4239  for (size_t i = 0; i < keypoints.size(); i++) {
4240  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4241  cv::Mat kpt_t = Ai * cv::Mat(kpt);
4242  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
4243  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
4244  }
4245 
4246  listOfKeypoints[(size_t)cpt] = keypoints;
4247  listOfDescriptors[(size_t)cpt] = descriptors;
4248  }
4249 #endif
4250 }
4251 
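// Note on the affine simulation grid used above (ASIFT-style): for
// tl = 1..5 the tilt is t = 2^(tl/2), i.e. roughly 1.41, 2, 2.83, 4 and
// 5.66, and for each tilt the in-plane rotation phi is sampled every
// (int)(72 / t) degrees in [0, 180). Larger tilts are therefore sampled
// more densely in rotation: 4, 5, 8, 10 and 15 views respectively, which
// gives 42 simulated views per input image with these hard-coded bounds.
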
4255 void vpKeyPoint::reset()
4256 {
4257  // vpBasicKeyPoint class
4258  referenceImagePointsList.clear();
4259  currentImagePointsList.clear();
4260  matchedReferencePoints.clear();
4261  _reference_computed = false;
4262 
4263  m_computeCovariance = false;
4264  m_covarianceMatrix = vpMatrix();
4265  m_currentImageId = 0;
4266  m_detectionMethod = detectionScore;
4267  m_detectionScore = 0.15;
4268  m_detectionThreshold = 100.0;
4269  m_detectionTime = 0.0;
4270  m_detectorNames.clear();
4271  m_detectors.clear();
4272  m_extractionTime = 0.0;
4273  m_extractorNames.clear();
4274  m_extractors.clear();
4275  m_filteredMatches.clear();
4276  m_filterType = ratioDistanceThreshold;
4277  m_imageFormat = jpgImageFormat;
4278  m_knnMatches.clear();
4279  m_mapOfImageId.clear();
4280  m_mapOfImages.clear();
4281  m_matcher = cv::Ptr<cv::DescriptorMatcher>();
4282  m_matcherName = "BruteForce-Hamming";
4283  m_matches.clear();
4284  m_matchingFactorThreshold = 2.0;
4285  m_matchingRatioThreshold = 0.85;
4286  m_matchingTime = 0.0;
4287  m_matchRansacKeyPointsToPoints.clear();
4288  m_nbRansacIterations = 200;
4289  m_nbRansacMinInlierCount = 100;
4290  m_objectFilteredPoints.clear();
4291  m_poseTime = 0.0;
4292  m_queryDescriptors = cv::Mat();
4293  m_queryFilteredKeyPoints.clear();
4294  m_queryKeyPoints.clear();
4295  m_ransacConsensusPercentage = 20.0;
4296  m_ransacFilterFlag = vpPose::NO_FILTER;
4297  m_ransacInliers.clear();
4298  m_ransacOutliers.clear();
4299  m_ransacParallel = true;
4300  m_ransacParallelNbThreads = 0;
4301  m_ransacReprojectionError = 6.0;
4302  m_ransacThreshold = 0.01;
4303  m_trainDescriptors = cv::Mat();
4304  m_trainKeyPoints.clear();
4305  m_trainPoints.clear();
4306  m_trainVpPoints.clear();
4307  m_useAffineDetection = false;
4308 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
4309  m_useBruteForceCrossCheck = true;
4310 #endif
4311  m_useConsensusPercentage = false;
4312  m_useKnn = true; // as m_filterType == ratioDistanceThreshold
4313  m_useMatchTrainToQuery = false;
4314  m_useRansacVVS = true;
4315  m_useSingleMatchFilter = true;
4316 
4317  m_detectorNames.push_back("ORB");
4318  m_extractorNames.push_back("ORB");
4319 
4320  init();
4321 }
4322 
4331 void vpKeyPoint::saveLearningData(const std::string &filename, bool binaryMode, bool saveTrainingImages)
4332 {
4333  std::string parent = vpIoTools::getParent(filename);
4334  if (!parent.empty()) {
4335  vpIoTools::makeDirectory(parent);
4336  }
4337 
4338  std::map<int, std::string> mapOfImgPath;
4339  if (saveTrainingImages) {
4340 #ifdef VISP_HAVE_MODULE_IO
4341  // Save the training image files in the same directory
4342  unsigned int cpt = 0;
4343 
4344  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
4345  ++it, cpt++) {
4346  if (cpt > 999) {
 4347  throw vpException(vpException::fatalError, "The number of training images to save is too big!");
4348  }
4349 
4350  std::stringstream ss;
4351  ss << "train_image_" << std::setfill('0') << std::setw(3) << cpt;
4352 
4353  switch (m_imageFormat) {
4354  case jpgImageFormat:
4355  ss << ".jpg";
4356  break;
4357 
4358  case pngImageFormat:
4359  ss << ".png";
4360  break;
4361 
4362  case ppmImageFormat:
4363  ss << ".ppm";
4364  break;
4365 
4366  case pgmImageFormat:
4367  ss << ".pgm";
4368  break;
4369 
4370  default:
4371  ss << ".png";
4372  break;
4373  }
4374 
4375  std::string imgFilename = ss.str();
4376  mapOfImgPath[it->first] = imgFilename;
4377  vpImageIo::write(it->second, parent + (!parent.empty() ? "/" : "") + imgFilename);
4378  }
4379 #else
4380  std::cout << "Warning: in vpKeyPoint::saveLearningData() training images "
4381  "are not saved because "
 4382  "the visp_io module is not available!"
4383  << std::endl;
4384 #endif
4385  }
4386 
4387  bool have3DInfo = m_trainPoints.size() > 0;
4388  if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
 4389  throw vpException(vpException::fatalError, "List of keypoints and list of 3D points have different sizes!");
4390  }
4391 
4392  if (binaryMode) {
4393  // Save the learning data into little endian binary file.
4394  std::ofstream file(filename.c_str(), std::ofstream::binary);
4395  if (!file.is_open()) {
4396  throw vpException(vpException::ioError, "Cannot create the file.");
4397  }
4398 
4399  // Write info about training images
4400  int nbImgs = (int)mapOfImgPath.size();
4401  vpIoTools::writeBinaryValueLE(file, nbImgs);
4402 
4403 #ifdef VISP_HAVE_MODULE_IO
4404  for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4405  // Write image_id
4406  int id = it->first;
 4407  vpIoTools::writeBinaryValueLE(file, id);
 4408 
4409  // Write image path
4410  std::string path = it->second;
4411  int length = (int)path.length();
4412  vpIoTools::writeBinaryValueLE(file, length);
4413 
4414  for (int cpt = 0; cpt < length; cpt++) {
4415  file.write((char *)(&path[(size_t)cpt]), sizeof(path[(size_t)cpt]));
4416  }
4417  }
4418 #endif
4419 
4420  // Write if we have 3D point information
4421  int have3DInfoInt = have3DInfo ? 1 : 0;
4422  vpIoTools::writeBinaryValueLE(file, have3DInfoInt);
4423 
4424  int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4425  int descriptorType = m_trainDescriptors.type();
4426 
4427  // Write the number of descriptors
4428  vpIoTools::writeBinaryValueLE(file, nRows);
4429 
4430  // Write the size of the descriptor
4431  vpIoTools::writeBinaryValueLE(file, nCols);
4432 
4433  // Write the type of the descriptor
4434  vpIoTools::writeBinaryValueLE(file, descriptorType);
4435 
4436  for (int i = 0; i < nRows; i++) {
4437  unsigned int i_ = (unsigned int)i;
4438  // Write u
4439  float u = m_trainKeyPoints[i_].pt.x;
 4440  vpIoTools::writeBinaryValueLE(file, u);
 4441 
4442  // Write v
4443  float v = m_trainKeyPoints[i_].pt.y;
 4444  vpIoTools::writeBinaryValueLE(file, v);
 4445 
4446  // Write size
4447  float size = m_trainKeyPoints[i_].size;
4448  vpIoTools::writeBinaryValueLE(file, size);
4449 
4450  // Write angle
4451  float angle = m_trainKeyPoints[i_].angle;
4452  vpIoTools::writeBinaryValueLE(file, angle);
4453 
4454  // Write response
4455  float response = m_trainKeyPoints[i_].response;
4456  vpIoTools::writeBinaryValueLE(file, response);
4457 
4458  // Write octave
4459  int octave = m_trainKeyPoints[i_].octave;
4460  vpIoTools::writeBinaryValueLE(file, octave);
4461 
4462  // Write class_id
4463  int class_id = m_trainKeyPoints[i_].class_id;
4464  vpIoTools::writeBinaryValueLE(file, class_id);
4465 
4466 // Write image_id
4467 #ifdef VISP_HAVE_MODULE_IO
4468  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4469  int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
4470  vpIoTools::writeBinaryValueLE(file, image_id);
4471 #else
4472  int image_id = -1;
4473  // file.write((char *)(&image_id), sizeof(image_id));
4474  vpIoTools::writeBinaryValueLE(file, image_id);
4475 #endif
4476 
4477  if (have3DInfo) {
4478  float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
 4479  // Write oX
 4480  vpIoTools::writeBinaryValueLE(file, oX);
 4481 
 4482  // Write oY
 4483  vpIoTools::writeBinaryValueLE(file, oY);
 4484 
 4485  // Write oZ
 4486  vpIoTools::writeBinaryValueLE(file, oZ);
 4487  }
4488 
4489  for (int j = 0; j < nCols; j++) {
4490  // Write the descriptor value
4491  switch (descriptorType) {
4492  case CV_8U:
4493  file.write((char *)(&m_trainDescriptors.at<unsigned char>(i, j)),
4494  sizeof(m_trainDescriptors.at<unsigned char>(i, j)));
4495  break;
4496 
4497  case CV_8S:
4498  file.write((char *)(&m_trainDescriptors.at<char>(i, j)), sizeof(m_trainDescriptors.at<char>(i, j)));
4499  break;
4500 
4501  case CV_16U:
4502  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<unsigned short int>(i, j));
4503  break;
4504 
4505  case CV_16S:
4506  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<short int>(i, j));
4507  break;
4508 
4509  case CV_32S:
4510  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<int>(i, j));
4511  break;
4512 
4513  case CV_32F:
4514  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<float>(i, j));
4515  break;
4516 
4517  case CV_64F:
4518  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<double>(i, j));
4519  break;
4520 
4521  default:
 4522  throw vpException(vpException::fatalError, "Problem with the data type of descriptors!");
4523  break;
4524  }
4525  }
4526  }
4527 
4528  file.close();
4529  } else {
4530  pugi::xml_document doc;
4531  pugi::xml_node node = doc.append_child(pugi::node_declaration);
4532  node.append_attribute("version") = "1.0";
4533  node.append_attribute("encoding") = "UTF-8";
4534 
4535  if (!doc) {
4536  throw vpException(vpException::ioError, "Error with file: ", filename.c_str());
4537  }
4538 
4539  pugi::xml_node root_node = doc.append_child("LearningData");
4540 
4541  // Write the training images info
4542  pugi::xml_node image_node = root_node.append_child("TrainingImageInfo");
4543 
4544 #ifdef VISP_HAVE_MODULE_IO
4545  for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4546  pugi::xml_node image_info_node = image_node.append_child("trainImg");
4547  image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
4548  std::stringstream ss;
4549  ss << it->first;
4550  image_info_node.append_attribute("image_id") = ss.str().c_str();
4551  }
4552 #endif
4553 
4554  // Write information about descriptors
4555  pugi::xml_node descriptors_info_node = root_node.append_child("DescriptorsInfo");
4556 
4557  int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4558  int descriptorType = m_trainDescriptors.type();
4559 
4560  // Write the number of rows
4561  descriptors_info_node.append_child("nrows").append_child(pugi::node_pcdata).text() = nRows;
4562 
4563  // Write the number of cols
4564  descriptors_info_node.append_child("ncols").append_child(pugi::node_pcdata).text() = nCols;
4565 
4566  // Write the descriptors type
4567  descriptors_info_node.append_child("type").append_child(pugi::node_pcdata).text() = descriptorType;
4568 
4569  for (int i = 0; i < nRows; i++) {
4570  unsigned int i_ = (unsigned int)i;
4571  pugi::xml_node descriptor_node = root_node.append_child("DescriptorInfo");
4572 
4573  descriptor_node.append_child("u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
4574  descriptor_node.append_child("v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
4575  descriptor_node.append_child("size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
4576  descriptor_node.append_child("angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
4577  descriptor_node.append_child("response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
4578  descriptor_node.append_child("octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
4579  descriptor_node.append_child("class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
4580 
4581 #ifdef VISP_HAVE_MODULE_IO
4582  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4583  descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() =
4584  ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
4585 #else
4586  descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() = -1;
4587 #endif
4588 
4589  if (have3DInfo) {
4590  descriptor_node.append_child("oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
4591  descriptor_node.append_child("oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
4592  descriptor_node.append_child("oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
4593  }
4594 
4595  pugi::xml_node desc_node = descriptor_node.append_child("desc");
4596 
4597  for (int j = 0; j < nCols; j++) {
4598  switch (descriptorType) {
4599  case CV_8U: {
4600  // Promote an unsigned char to an int
4601  // val_tmp holds the numeric value that will be written
 4602  // We save the value in numeric form, otherwise the XML library is not
 4603  // able to parse it. A better solution may be possible.
4604  int val_tmp = m_trainDescriptors.at<unsigned char>(i, j);
4605  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
4606  } break;
4607 
4608  case CV_8S: {
4609  // Promote a char to an int
4610  // val_tmp holds the numeric value that will be written
 4611  // We save the value in numeric form, otherwise the XML library is not
 4612  // able to parse it. A better solution may be possible.
4613  int val_tmp = m_trainDescriptors.at<char>(i, j);
4614  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
4615  } break;
4616 
4617  case CV_16U:
4618  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4619  m_trainDescriptors.at<unsigned short int>(i, j);
4620  break;
4621 
4622  case CV_16S:
4623  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<short int>(i, j);
4624  break;
4625 
4626  case CV_32S:
4627  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<int>(i, j);
4628  break;
4629 
4630  case CV_32F:
4631  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<float>(i, j);
4632  break;
4633 
4634  case CV_64F:
4635  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<double>(i, j);
4636  break;
4637 
4638  default:
 4639  throw vpException(vpException::fatalError, "Problem with the data type of descriptors!");
4640  break;
4641  }
4642  }
4643  }
4644 
4645  doc.save_file(filename.c_str(), PUGIXML_TEXT(" "), pugi::format_default, pugi::encoding_utf8);
4646  }
4647 }
4648 
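// Usage sketch (illustration only; file names are hypothetical): learn a
// reference once, save it, then reload it in a later session. Binary files
// are written little endian (vpIoTools::writeBinaryValueLE), so they stay
// portable across architectures.
#if 0
static void example_vpKeyPoint_learnSaveReload(const vpImage<unsigned char> &Iref,
                                               const vpImage<unsigned char> &Icur)
{
  vpKeyPoint learner(vpKeyPoint::DETECTOR_ORB, vpKeyPoint::DESCRIPTOR_ORB, "BruteForce-Hamming");
  learner.buildReference(Iref);
  learner.saveLearningData("learning_data.bin", true, true); // binaryMode, saveTrainingImages

  vpKeyPoint matcher(vpKeyPoint::DETECTOR_ORB, vpKeyPoint::DESCRIPTOR_ORB, "BruteForce-Hamming");
  matcher.loadLearningData("learning_data.bin", true);
  unsigned int nbMatches = matcher.matchPoint(Icur);
  std::cout << nbMatches << " matches after reload" << std::endl;
}
#endif
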
4649 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
4650 #ifndef DOXYGEN_SHOULD_SKIP_THIS
4651 // From OpenCV 2.4.11 source code.
4652 struct KeypointResponseGreaterThanThreshold {
4653  KeypointResponseGreaterThanThreshold(float _value) : value(_value) {}
4654  inline bool operator()(const cv::KeyPoint &kpt) const { return kpt.response >= value; }
4655  float value;
4656 };
4657 
4658 struct KeypointResponseGreater {
4659  inline bool operator()(const cv::KeyPoint &kp1, const cv::KeyPoint &kp2) const { return kp1.response > kp2.response; }
4660 };
4661 
4662 // takes keypoints and culls them by the response
4663 void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints, int n_points)
4664 {
4665  // this is only necessary if the keypoints size is greater than the number
4666  // of desired points.
4667  if (n_points >= 0 && keypoints.size() > (size_t)n_points) {
4668  if (n_points == 0) {
4669  keypoints.clear();
4670  return;
4671  }
4672  // first use nth element to partition the keypoints into the best and
4673  // worst.
4674  std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
4675  // this is the boundary response, and in the case of FAST may be ambiguous
4676  float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
4677  // use std::partition to grab all of the keypoints with the boundary
4678  // response.
4679  std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4680  keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
4681  // resize the keypoints, given this new end point. nth_element and
4682  // partition reordered the points inplace
4683  keypoints.resize((size_t)(new_end - keypoints.begin()));
4684  }
4685 }
4686 
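// Usage sketch (illustration only): keep only the n strongest detections.
// nth_element gives an O(N) partial ordering, and the extra std::partition
// pass above also keeps keypoints tied with the n-th response, so slightly
// more than n points may survive.
#if 0
static void example_retainBest(std::vector<cv::KeyPoint> &keypoints)
{
  vpKeyPoint::KeyPointsFilter::retainBest(keypoints, 500); // keep ~500 best responses
}
#endif
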
4687 struct RoiPredicate {
4688  RoiPredicate(const cv::Rect &_r) : r(_r) {}
4689 
4690  bool operator()(const cv::KeyPoint &keyPt) const { return !r.contains(keyPt.pt); }
4691 
4692  cv::Rect r;
4693 };
4694 
4695 void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4696  int borderSize)
4697 {
4698  if (borderSize > 0) {
4699  if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4700  keypoints.clear();
4701  else
4702  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4703  RoiPredicate(cv::Rect(
4704  cv::Point(borderSize, borderSize),
4705  cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4706  keypoints.end());
4707  }
4708 }
4709 
4710 struct SizePredicate {
4711  SizePredicate(float _minSize, float _maxSize) : minSize(_minSize), maxSize(_maxSize) {}
4712 
4713  bool operator()(const cv::KeyPoint &keyPt) const
4714  {
4715  float size = keyPt.size;
4716  return (size < minSize) || (size > maxSize);
4717  }
4718 
4719  float minSize, maxSize;
4720 };
4721 
4722 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize)
4723 {
4724  CV_Assert(minSize >= 0);
4725  CV_Assert(maxSize >= 0);
4726  CV_Assert(minSize <= maxSize);
4727 
4728  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4729 }
4730 
4731 class MaskPredicate
4732 {
4733 public:
4734  MaskPredicate(const cv::Mat &_mask) : mask(_mask) {}
4735  bool operator()(const cv::KeyPoint &key_pt) const
4736  {
4737  return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
4738  }
4739 
4740 private:
4741  const cv::Mat mask;
4742 };
4743 
4744 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask)
4745 {
4746  if (mask.empty())
4747  return;
4748 
4749  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4750 }
4751 
4752 struct KeyPoint_LessThan {
4753  KeyPoint_LessThan(const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) {}
4754  bool operator()(/*int i, int j*/ size_t i, size_t j) const
4755  {
4756  const cv::KeyPoint &kp1 = (*kp)[/*(size_t)*/ i];
4757  const cv::KeyPoint &kp2 = (*kp)[/*(size_t)*/ j];
4758  if (!vpMath::equal(kp1.pt.x, kp2.pt.x,
4759  std::numeric_limits<float>::epsilon())) { // if (kp1.pt.x !=
4760  // kp2.pt.x) {
4761  return kp1.pt.x < kp2.pt.x;
4762  }
4763 
4764  if (!vpMath::equal(kp1.pt.y, kp2.pt.y,
4765  std::numeric_limits<float>::epsilon())) { // if (kp1.pt.y !=
4766  // kp2.pt.y) {
4767  return kp1.pt.y < kp2.pt.y;
4768  }
4769 
4770  if (!vpMath::equal(kp1.size, kp2.size,
4771  std::numeric_limits<float>::epsilon())) { // if (kp1.size !=
4772  // kp2.size) {
4773  return kp1.size > kp2.size;
4774  }
4775 
4776  if (!vpMath::equal(kp1.angle, kp2.angle,
4777  std::numeric_limits<float>::epsilon())) { // if (kp1.angle !=
4778  // kp2.angle) {
4779  return kp1.angle < kp2.angle;
4780  }
4781 
4782  if (!vpMath::equal(kp1.response, kp2.response,
4783  std::numeric_limits<float>::epsilon())) { // if (kp1.response !=
4784  // kp2.response) {
4785  return kp1.response > kp2.response;
4786  }
4787 
4788  if (kp1.octave != kp2.octave) {
4789  return kp1.octave > kp2.octave;
4790  }
4791 
4792  if (kp1.class_id != kp2.class_id) {
4793  return kp1.class_id > kp2.class_id;
4794  }
4795 
4796  return i < j;
4797  }
4798  const std::vector<cv::KeyPoint> *kp;
4799 };
4800 
4801 void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4802 {
4803  size_t i, j, n = keypoints.size();
4804  std::vector<size_t> kpidx(n);
4805  std::vector<uchar> mask(n, (uchar)1);
4806 
4807  for (i = 0; i < n; i++) {
4808  kpidx[i] = i;
4809  }
4810  std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4811  for (i = 1, j = 0; i < n; i++) {
4812  cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4813  cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4814  // if (kp1.pt.x != kp2.pt.x || kp1.pt.y != kp2.pt.y || kp1.size !=
4815  // kp2.size || kp1.angle != kp2.angle) {
4816  if (!vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4817  !vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4818  !vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4819  !vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4820  j = i;
4821  } else {
4822  mask[kpidx[i]] = 0;
4823  }
4824  }
4825 
4826  for (i = j = 0; i < n; i++) {
4827  if (mask[i]) {
4828  if (i != j) {
4829  keypoints[j] = keypoints[i];
4830  }
4831  j++;
4832  }
4833  }
4834  keypoints.resize(j);
4835 }
4836 
4837 /*
4838  * PyramidAdaptedFeatureDetector
4839  */
4840 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &_detector,
4841  int _maxLevel)
4842  : detector(_detector), maxLevel(_maxLevel)
4843 {
4844 }
4845 
4846 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty() const
4847 {
 4848  return detector.empty() || detector->empty();
4849 }
4850 
4851 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4852  CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4853 {
4854  detectImpl(image.getMat(), keypoints, mask.getMat());
4855 }
4856 
4857 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4858  const cv::Mat &mask) const
4859 {
4860  cv::Mat src = image;
4861  cv::Mat src_mask = mask;
4862 
4863  cv::Mat dilated_mask;
4864  if (!mask.empty()) {
4865  cv::dilate(mask, dilated_mask, cv::Mat());
4866  cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4867  mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4868  dilated_mask = mask255;
4869  }
4870 
4871  for (int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2) {
4872  // Detect on current level of the pyramid
4873  std::vector<cv::KeyPoint> new_pts;
4874  detector->detect(src, new_pts, src_mask);
4875  std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4876  for (; it != end; ++it) {
4877  it->pt.x *= multiplier;
4878  it->pt.y *= multiplier;
4879  it->size *= multiplier;
4880  it->octave = l;
4881  }
4882  keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4883 
4884  // Downsample
4885  if (l < maxLevel) {
4886  cv::Mat dst;
4887  pyrDown(src, dst);
4888  src = dst;
4889 
4890  if (!mask.empty())
 4891  resize(dilated_mask, src_mask, src.size(), 0, 0, cv::INTER_AREA);
4892  }
4893  }
4894 
4895  if (!mask.empty())
4896  vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4897 }
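
// Usage sketch (illustration only, assuming OpenCV >= 3.0.0 and that the
// nested PyramidAdaptedFeatureDetector class is publicly accessible): run a
// single-scale detector such as FAST over an image pyramid; detected
// coordinates and sizes are multiplied back by 2^level so that every
// keypoint is expressed in the full-resolution image frame, with the level
// stored in the octave field.
#if 0
static void example_pyramid_detection(const cv::Mat &image)
{
  cv::Ptr<cv::FeatureDetector> fast = cv::FastFeatureDetector::create();
  cv::Ptr<vpKeyPoint::PyramidAdaptedFeatureDetector> pyramid =
      cv::makePtr<vpKeyPoint::PyramidAdaptedFeatureDetector>(fast, 3); // levels 0..3
  std::vector<cv::KeyPoint> keypoints;
  pyramid->detect(image, keypoints, cv::noArray());
}
#endif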
4898 #endif
4899 #endif
4900 
4901 #elif !defined(VISP_BUILD_SHARED_LIBS)
4902 // Work around to avoid warning: libvisp_vision.a(vpKeyPoint.cpp.o) has no
4903 // symbols
 4904 void dummy_vpKeyPoint() {}
4905 #endif