/*
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See https://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Key point functionalities.
 */

#include <iomanip>
#include <limits>

#include <visp3/core/vpIoTools.h>
#include <visp3/vision/vpKeyPoint.h>

#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D)

#if defined(VISP_HAVE_PUGIXML)
#include <pugixml.hpp>
#endif

BEGIN_VISP_NAMESPACE

#ifndef DOXYGEN_SHOULD_SKIP_THIS
namespace
{
// Specific type transformation functions
inline cv::DMatch knnToDMatch(const std::vector<cv::DMatch> &knnMatches)
{
  if (knnMatches.size() > 0) {
    return knnMatches[0];
  }

  return cv::DMatch();
}

inline vpImagePoint matchRansacToVpImage(const std::pair<cv::KeyPoint, cv::Point3f> &pair)
{
  return vpImagePoint(pair.first.pt.y, pair.first.pt.x);
}

} // namespace

#endif // DOXYGEN_SHOULD_SKIP_THIS

vpKeyPoint::vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
  m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
  m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
  m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
  m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
  m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
  m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
  m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
  m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
  m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  m_useBruteForceCrossCheck(true),
#endif
  m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
  m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();

  m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
  m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);

  init();
}

vpKeyPoint::vpKeyPoint(const std::string &detectorName, const std::string &extractorName,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
  m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
  m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
  m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
  m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
  m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
  m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
  m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
  m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
  m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  m_useBruteForceCrossCheck(true),
#endif
  m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
  m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();

  m_detectorNames.push_back(detectorName);
  m_extractorNames.push_back(extractorName);

  init();
}

vpKeyPoint::vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
  m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
  m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
  m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
  m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
  m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
  m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
  m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0),
  m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(), m_ransacParallel(false),
  m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01), m_trainDescriptors(),
  m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  m_useBruteForceCrossCheck(true),
#endif
  m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
  m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();
  init();
}
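
// Usage sketch (illustrative, not part of the library): constructing a key
// point matcher from explicit detector/extractor names; the image name `I`
// below is an assumption:
//
//   vpImage<unsigned char> I;  // reference image acquired beforehand
//   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming",
//                       vpKeyPoint::ratioDistanceThreshold);
//   keypoint.buildReference(I);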

void vpKeyPoint::affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
{
  int h = img.rows;
  int w = img.cols;

  mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));

  cv::Mat A = cv::Mat::eye(2, 3, CV_32F);

  // if (phi != 0.0) {
  if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    phi *= M_PI / 180.;
    double s = sin(phi);
    double c = cos(phi);

    A = (cv::Mat_<float>(2, 2) << c, -s, s, c);

    cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
    cv::Mat tcorners = corners * A.t();
    cv::Mat tcorners_x, tcorners_y;
    tcorners.col(0).copyTo(tcorners_x);
    tcorners.col(1).copyTo(tcorners_y);
    std::vector<cv::Mat> channels;
    channels.push_back(tcorners_x);
    channels.push_back(tcorners_y);
    cv::merge(channels, tcorners);

    cv::Rect rect = cv::boundingRect(tcorners);
    A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);

    cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
  }
  // if (tilt != 1.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
    double s = 0.8 * sqrt(tilt * tilt - 1);
    cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
    cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
    A.row(0) = A.row(0) / tilt;
  }
  // if (tilt != 1.0 || phi != 0.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
      std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    h = img.rows;
    w = img.cols;
    cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
  }
  cv::invertAffineTransform(A, Ai);
}
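
// affineSkew() simulates a viewpoint change: a rotation of phi (in degrees)
// followed by a tilt (anisotropic subsampling along x). A sketch of the
// ASIFT-style sampling loop this function is meant for; the tilt/phi series
// below is the one proposed in the ASIFT paper, given here for illustration:
//
//   for (int tl = 1; tl < 6; tl++) {
//     double tilt = std::pow(2.0, 0.5 * tl);
//     for (double phi = 0.; phi < 180.; phi += 72.0 / tilt) {
//       cv::Mat timg = img.clone(), mask, Ai;
//       affineSkew(tilt, phi, timg, mask, Ai);  // warp the image and its mask
//       // detect/extract on timg, then map each keypoint back through Ai
//     }
//   }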

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I) { return buildReference(I, vpRect()); }

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color) { return buildReference(I_color, vpRect()); }

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
                                        unsigned int width)
{
  return buildReference(I, vpRect(iP, width, height));
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
                                        unsigned int width)
{
  return buildReference(I_color, vpRect(iP, width, height));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpRect &rectangle)
{
  // Reset variables used when dealing with 3D models
  // Since no 3D point list is passed, we don't need these variables
  m_trainPoints.clear();
  m_mapOfImageId.clear();
  m_mapOfImages.clear();
  m_currentImageId = 1;

  if (m_useAffineDetection) {
    std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
    std::vector<cv::Mat> listOfTrainDescriptors;

    // Detect keypoints and extract descriptors on multiple images
    detectExtractAffine(I, listOfTrainKeyPoints, listOfTrainDescriptors);

    // Flatten the different train lists
    m_trainKeyPoints.clear();
    for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
         it != listOfTrainKeyPoints.end(); ++it) {
      m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
    }

    bool first = true;
    for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
         ++it) {
      if (first) {
        first = false;
        it->copyTo(m_trainDescriptors);
      }
      else {
        m_trainDescriptors.push_back(*it);
      }
    }
  }
  else {
    detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
    extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
  }

  // Save the correspondence keypoint class_id with the training image_id in a map.
  // Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(m_trainKeyPoints, m_referenceImagePointsList);

  m_reference_computed = true;

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}
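
// Usage sketch (illustrative): plain 2D reference building and matching,
// assuming the usual default ORB detector/descriptor pipeline and two
// previously acquired images I_ref and I_cur:
//
//   vpKeyPoint keypoint;
//   unsigned int nb_ref = keypoint.buildReference(I_ref);
//   unsigned int nb_matches = keypoint.matchPoint(I_cur);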

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
{
  vpImageConvert::convert(I_color, m_I);
  return (buildReference(m_I, rectangle));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
                                        std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed
    // Store the hash of a keypoint as the key and the index of the keypoint
    // as the value
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  return (buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &trainKeyPoints,
                                        std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed
    // Store the hash of a keypoint as the key and the index of the keypoint
    // as the value
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  return (buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I,
                                        const std::vector<cv::KeyPoint> &trainKeyPoints,
                                        const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                        bool append, int class_id)
{
  if (!append) {
    m_trainPoints.clear();
    m_mapOfImageId.clear();
    m_mapOfImages.clear();
    m_currentImageId = 0;
    m_trainKeyPoints.clear();
  }

  m_currentImageId++;

  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
  // Set class_id if != -1
  if (class_id != -1) {
    for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
      it->class_id = class_id;
    }
  }

  // Save the correspondence keypoint class_id with the training image_id in a map.
  // Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
       ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Append reference lists
  m_trainKeyPoints.insert(m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
  if (!append) {
    trainDescriptors.copyTo(m_trainDescriptors);
  }
  else {
    m_trainDescriptors.push_back(trainDescriptors);
  }
  this->m_trainPoints.insert(m_trainPoints.end(), points3f.begin(), points3f.end());

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(m_trainKeyPoints, m_referenceImagePointsList);
  vpConvert::convertFromOpenCV(m_trainPoints, m_trainVpPoints);

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  m_reference_computed = true;

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const std::vector<cv::KeyPoint> &trainKeyPoints,
                                        const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                        bool append, int class_id)
{
  vpImageConvert::convert(I_color, m_I);
  return (buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

void vpKeyPoint::compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate.pt.x, candidate.pt.y, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point = cv::Point3f((float)point_obj[0], (float)point_obj[1], (float)point_obj[2]);
}
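
// The Z computation above is the intersection of the viewing ray with the
// plane of the ROI: combining the plane A*X + B*Y + C*Z + D = 0 with the ray
// X = x*Z, Y = y*Z (where (x, y) are the normalized coordinates returned by
// vpPixelMeterConversion) gives
//   (A*x + B*y + C) * Z + D = 0  =>  Z = -D / (A*x + B*y + C),
// and the camera-frame point is then brought back into the object frame with
// cMo^-1.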

void vpKeyPoint::compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, vpPoint &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point.setWorldCoordinates(point_obj);
}

void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<cv::KeyPoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<cv::Point3f> &points, cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpImagePoint imPt;
  cv::Point3f pt;
  cv::Mat desc;

  std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      imPt.set_ij(it2->first.pt.y, it2->first.pt.x);
      if (it1->isInside(imPt)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != nullptr) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove candidate keypoint which is located on the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      }
      else {
        ++it2;
      }
    }
  }

  if (descriptors != nullptr) {
    desc.copyTo(*descriptors);
  }
}

void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<vpImagePoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<vpPoint> &points, cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpPoint pt;
  cv::Mat desc;

  std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      if (it1->isInside(it2->first)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != nullptr) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove candidate keypoint which is located on the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      }
      else {
        ++it2;
      }
    }
  }
}
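
// Usage sketch (illustrative): keeping only the keypoints lying on known model
// faces and back-projecting them to 3D; the containers below are assumptions
// (they typically come from a model-based tracker):
//
//   vpHomogeneousMatrix cMo;                    // current object pose
//   vpCameraParameters cam;                     // camera intrinsics
//   std::vector<vpPolygon> polygons;            // image-plane face contours
//   std::vector<std::vector<vpPoint> > roisPt;  // corresponding 3D faces
//   std::vector<cv::KeyPoint> candidates;       // detected keypoints
//   std::vector<cv::Point3f> points;            // output 3D coordinates
//   vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, candidates, polygons,
//                                            roisPt, points, nullptr);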

void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<cv::KeyPoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
    cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current
      // vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->pt.y, it1->pt.x)) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on
          // cylinders
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->pt.x, it1->pt.y, xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(cv::Point3f((float)pt.get_oX(), (float)pt.get_oY(), (float)pt.get_oZ()));

            if (descriptors != nullptr) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != nullptr) {
    desc.copyTo(*descriptors);
  }
}

void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<vpImagePoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
    cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current
      // vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->get_i(), it1->get_j())) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on
          // cylinders
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->get_u(), it1->get_v(), xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(pt);

            if (descriptors != nullptr) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != nullptr) {
    desc.copyTo(*descriptors);
  }
}

bool vpKeyPoint::computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
                             const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
                             double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    std::cerr << "Not enough points to compute the pose (at least 4 points "
                 "are needed)."
              << std::endl;

    return false;
  }

  cv::Mat cameraMatrix =
    (cv::Mat_<double>(3, 3) << cam.get_px(), 0, cam.get_u0(), 0, cam.get_py(), cam.get_v0(), 0, 0, 1);
  cv::Mat rvec, tvec;

  // Bug with OpenCV < 2.4.0 when zero distortion is provided by an empty
  // array: http://code.opencv.org/issues/1700 ;
  // http://code.opencv.org/issues/1718 what(): Distortion coefficients must
  // be 1x4, 4x1, 1x5, 5x1, 1x8 or 8x1 floating-point vector in function
  // cvProjectPoints2. Fixed in OpenCV 2.4.0 (r7558).
  // cv::Mat distCoeffs;
  cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);

  try {
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
    // OpenCV 3.0.0 (2014/12/12)
    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError,
                       0.99, // confidence=0.99 (default) – The probability
                             // that the algorithm produces a useful result.
                       inlierIndex, cv::SOLVEPNP_ITERATIVE);
// SOLVEPNP_ITERATIVE (default): iterative method based on Levenberg-Marquardt
// optimization. In this case the function finds a pose that minimizes the
// reprojection error, that is the sum of squared distances between the
// observed projections imagePoints and the projected (using projectPoints())
// objectPoints.
// SOLVEPNP_P3P: method based on the paper of X.S. Gao, X.-R. Hou, J. Tang,
// H.-F. Chang “Complete Solution Classification for the
// Perspective-Three-Point Problem”. In this case the function requires exactly
// four object and image points.
// SOLVEPNP_EPNP: method introduced by F. Moreno-Noguer, V. Lepetit and P. Fua
// in the paper “EPnP: Efficient Perspective-n-Point Camera Pose Estimation”.
// SOLVEPNP_DLS: method based on the paper of Joel A. Hesch and Stergios I.
// Roumeliotis “A Direct Least-Squares (DLS) Method for PnP”.
// SOLVEPNP_UPNP: method based on the paper of A. Penate-Sanchez,
// J. Andrade-Cetto, F. Moreno-Noguer “Exhaustive Linearization for Robust
// Camera Pose and Focal Length Estimation”. In this case the function also
// estimates the parameters f_x and f_y assuming that both have the same
// value; the cameraMatrix is then updated with the estimated focal length.
#else
    int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
    if (m_useConsensusPercentage) {
      nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
    }

    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
#endif
  }
  catch (cv::Exception &e) {
    std::cerr << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }
  vpTranslationVector translationVec(tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2));
  vpThetaUVector thetaUVector(rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2));
  cMo = vpHomogeneousMatrix(translationVec, thetaUVector);

  if (func != nullptr) {
    // Check the final pose returned by solvePnPRansac to discard
    // solutions which do not respect the pose criterion.
    if (!func(cMo)) {
      elapsedTime = (vpTime::measureTimeMs() - t);
      return false;
    }
  }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return true;
}
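
// Usage sketch (illustrative): estimating a pose from already matched 2D/3D
// correspondences with the OpenCV solvePnPRansac() backend; the point lists
// and camera parameters below are assumptions:
//
//   std::vector<cv::Point2f> imagePoints;   // matched 2D image points
//   std::vector<cv::Point3f> objectPoints;  // corresponding 3D model points
//   vpCameraParameters cam;
//   vpHomogeneousMatrix cMo;
//   std::vector<int> inlierIndex;
//   double elapsedTime;
//   bool ok = keypoint.computePose(imagePoints, objectPoints, cam, cMo,
//                                  inlierIndex, elapsedTime);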

bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, double &elapsedTime,
                             bool (*func)(const vpHomogeneousMatrix &))
{
  std::vector<unsigned int> inlierIndex;
  return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
}

bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex, double &elapsedTime,
                             bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (objectVpPoints.size() < 4) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    // std::cerr << "Not enough points to compute the pose (at least 4
    // points are needed)." << std::endl;

    return false;
  }

  vpPose pose;

  for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
    pose.addPoint(*it);
  }

  unsigned int nbInlierToReachConsensus = (unsigned int)m_nbRansacMinInlierCount;
  if (m_useConsensusPercentage) {
    nbInlierToReachConsensus =
      (unsigned int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
  }

  pose.setRansacFilterFlag(m_ransacFilterFlag);
  pose.setUseParallelRansac(m_ransacParallel);
  pose.setNbParallelRansacThreads(m_ransacParallelNbThreads);
  pose.setRansacNbInliersToReachConsensus(nbInlierToReachConsensus);
  pose.setRansacThreshold(m_ransacThreshold);
  pose.setRansacMaxTrials(m_nbRansacIterations);

  bool isRansacPoseEstimationOk = false;
  try {
    pose.setCovarianceComputation(m_computeCovariance);
    isRansacPoseEstimationOk = pose.computePose(vpPose::RANSAC, cMo, func);
    inliers = pose.getRansacInliers();
    inlierIndex = pose.getRansacInlierIndex();

    if (m_computeCovariance) {
      m_covarianceMatrix = pose.getCovarianceMatrix();
    }
  }
  catch (const vpException &e) {
    std::cerr << "e=" << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }

  // if (func != nullptr && isRansacPoseEstimationOk) {
  //   // Check the final pose returned by the RANSAC VVS pose estimation as
  //   // in some rare cases we can converge toward a final cMo that does not
  //   // respect the pose criterion even if the 4 minimal points picked
  //   // respect the pose criterion.
  //   if (!func(&cMo)) {
  //     elapsedTime = (vpTime::measureTimeMs() - t);
  //     return false;
  //   }
  // }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return isRansacPoseEstimationOk;
}

double vpKeyPoint::computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
                                              const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est)
{
  if (matchKeyPoints.size() == 0) {
    // return std::numeric_limits<double>::max(); // creates an error under
    // Windows. To fix it we have to add #undef max
    return DBL_MAX;
  }

  std::vector<double> errors(matchKeyPoints.size());
  size_t cpt = 0;
  vpPoint pt;
  for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
       it != matchKeyPoints.end(); ++it, cpt++) {
    pt.set_oX(it->second.x);
    pt.set_oY(it->second.y);
    pt.set_oZ(it->second.z);
    pt.project(cMo_est);
    double u = 0.0, v = 0.0;
    vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), u, v);
    errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
  }

  return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
}
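
// The value returned above is the mean Euclidean reprojection error in pixels:
//   e = (1/N) * sum_i sqrt((u_i - u'_i)^2 + (v_i - v'_i)^2)
// where (u_i, v_i) is the projection through cMo_est of the i-th matched 3D
// point and (u'_i, v'_i) is the corresponding detected keypoint.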

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &IRef, vpImage<unsigned char> &ICurrent,
                                     vpImage<unsigned char> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = std::max<unsigned int>(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<unsigned char>(height, width);
}

void vpKeyPoint::createImageMatching(vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &ICurrent,
                                     vpImage<vpRGBa> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = std::max<unsigned int>(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<vpRGBa>(height, width);
}

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
{
  // Number of images in the training database + the current image in which
  // we want to detect the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  }
  else {
    // Multiple training images, display them as a mosaic image
    // (unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Deal with a non-square mosaic grid given the total number of images
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<unsigned char>(maxH * nbHeight, maxW * nbWidth);
  }
}
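
// Worked example of the grid sizing above: with 5 training images plus the
// current one, nbImg = 6 and nbImgSqrt = round(sqrt(6)) = 2; since 2 * 2 < 6
// the grid is widened to 3 columns x 2 rows, and each cell of the mosaic is
// maxW x maxH pixels.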

void vpKeyPoint::createImageMatching(vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
{
  // Number of images in the training database + the current image in which
  // we want to detect the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  }
  else {
    // Multiple training images, display them as a mosaic image
    // (unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Deal with a non-square mosaic grid given the total number of images
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<vpRGBa>(maxH * nbHeight, maxW * nbWidth);
  }
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I_color, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask)
{
  double elapsedTime;
  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I, matImg, false);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
      rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  }
  else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I_color, matImg);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
      rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  }
  else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const cv::Mat &mask)
{
  double t = vpTime::measureTimeMs();
  keyPoints.clear();

  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
       it != m_detectors.end(); ++it) {
    std::vector<cv::KeyPoint> kp;

    it->second->detect(matImg, kp, mask);
    keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
  }

  elapsedTime = vpTime::measureTimeMs() - t;
}
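
// Usage sketch (illustrative): restricting the detection to a region of
// interest; the rectangle coordinates are arbitrary example values:
//
//   std::vector<cv::KeyPoint> keyPoints;
//   vpRect roi(10, 10, 200, 150);  // left, top, width, height
//   keypoint.detect(I, keyPoints, roi);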

void vpKeyPoint::display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<unsigned char> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(nullptr));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
                                 unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(nullptr));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
                                 unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(nullptr));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
                                 const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
                                 unsigned int lineThickness)
{
  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
    // No training images so return
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  // Number of images in the training database + the current image in which
  // we want to detect the object
  int nbImg = (int)(m_mapOfImages.size() + 1);

  if (nbImg == 2) {
    // Only one training image, so we display the matching result side-by-side
    displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
  }
  else {
    // Multiple training images, display them as a mosaic image
    int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
    int nbWidth = nbImgSqrt;
    int nbHeight = nbImgSqrt;

    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    std::map<int, int> mapOfImageIdIndex;
    int cpt = 0;
    unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it, cpt++) {
      mapOfImageIdIndex[it->first] = cpt;

      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    // Index of the current image in the grid, computed so that the current
    // image is preferably at the center of the mosaic grid
    int medianI = nbHeight / 2;
    int medianJ = nbWidth / 2;
    int medianIndex = medianI * nbWidth + medianJ;
    for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
      vpImagePoint topLeftCorner;
      int current_class_id_index = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
      }
      else {
        // Shift by one the index of the training images that come after
        // the current image
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
      }

      int indexI = current_class_id_index / nbWidth;
      int indexJ = current_class_id_index - (indexI * nbWidth);
      topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);

      // Display cross for keypoints in the learning database
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }

    vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
    for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
      // Display cross for keypoints detected in the current image
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }
    for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
      // Display green circle for RANSAC inliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
                               (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
    }
    for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
      // Display red circle for RANSAC outliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
                               (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
    }

    for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
      int current_class_id = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
      }
      else {
        // Shift by one the index of the training images that come after
        // the current image
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
      }

      int indexI = current_class_id / nbWidth;
      int indexJ = current_class_id - (indexI * nbWidth);

      vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
                       (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
      vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
                         (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);

      // Draw a line between matching keypoints detected in the current image
      // and those detected in the training images
      vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
    }
  }
}
1288 
1290  const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
1291  unsigned int lineThickness)
1292 {
1293  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1294  // No training images so return
1295  std::cerr << "There is no training image loaded !" << std::endl;
1296  return;
1297  }
1298 
1299  // Nb images in the training database + the current image we want to detect
1300  // the object
1301  int nbImg = (int)(m_mapOfImages.size() + 1);
1302 
1303  if (nbImg == 2) {
1304  // Only one training image, so we display the matching result side-by-side
1305  displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
1306  }
1307  else {
1308  // Multiple training images, display them as a mosaic image
1309  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
1310  int nbWidth = nbImgSqrt;
1311  int nbHeight = nbImgSqrt;
1312 
1313  if (nbImgSqrt * nbImgSqrt < nbImg) {
1314  nbWidth++;
1315  }
1316 
1317  std::map<int, int> mapOfImageIdIndex;
1318  int cpt = 0;
1319  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
1320  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1321  ++it, cpt++) {
1322  mapOfImageIdIndex[it->first] = cpt;
1323 
1324  if (maxW < it->second.getWidth()) {
1325  maxW = it->second.getWidth();
1326  }
1327 
1328  if (maxH < it->second.getHeight()) {
1329  maxH = it->second.getHeight();
1330  }
1331  }
1332 
1333  // Indexes of the current image in the grid computed to put preferably the
1334  // image in the center of the mosaic grid
1335  int medianI = nbHeight / 2;
1336  int medianJ = nbWidth / 2;
1337  int medianIndex = medianI * nbWidth + medianJ;
1338  for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1339  vpImagePoint topLeftCorner;
1340  int current_class_id_index = 0;
1341  if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1342  current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1343  }
1344  else {
1345  // Shift of one unity the index of the training images which are after
1346  // the current image
1347  current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1348  }
1349 
1350  int indexI = current_class_id_index / nbWidth;
1351  int indexJ = current_class_id_index - (indexI * nbWidth);
1352  topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);
1353 
1354  // Display cross for keypoints in the learning database
1355  vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
1356  (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
1357  }
1358 
1359  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
1360  for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1361  // Display cross for keypoints detected in the current image
1362  vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
1363  (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
1364  }
1365  for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1366  // Display green circle for RANSAC inliers
1367  vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
1368  (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
1369  }
1370  for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1371  // Display red circle for RANSAC outliers
1372  vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
1373  (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
1374  }
1375 
1376  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1377  int current_class_id = 0;
1378  if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
1379  current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1380  }
1381  else {
1382  // Shift of one unity the index of the training images which are after
1383  // the current image
1384  current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1385  }
1386 
1387  int indexI = current_class_id / nbWidth;
1388  int indexJ = current_class_id - (indexI * nbWidth);
1389 
1390  vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
1391  (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
1392  vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
1393  (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);
1394 
1395  // Draw line for matching keypoints detected in the current image and
1396  // those detected in the training images
1397  vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
1398  }
1399  }
1400 }
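
/*
 * Usage sketch for the mosaic display above. A minimal, hedged example: it
 * assumes a vpKeyPoint instance named keypoint with a trained database, and
 * that createImageMatching() allocates IMatching to the mosaic size before
 * insertImageMatching() pastes the images into it:
 *
 *   vpImage<unsigned char> IMatching;
 *   keypoint.createImageMatching(ICurrent, IMatching);
 *   keypoint.insertImageMatching(ICurrent, IMatching);
 *   vpDisplay::display(IMatching);
 *   keypoint.displayMatching(ICurrent, IMatching); // crosses, circles and match lines
 *   vpDisplay::flush(IMatching);
 */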
1401 
1402 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1403  std::vector<cv::Point3f> *trainPoints)
1404 {
1405  double elapsedTime;
1406  extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1407 }
1408 
1409 void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1410  std::vector<cv::Point3f> *trainPoints)
1411 {
1412  double elapsedTime;
1413  extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
1414 }
1415 
1416 void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1417  std::vector<cv::Point3f> *trainPoints)
1418 {
1419  double elapsedTime;
1420  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1421 }
1422 
1423 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1424  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1425 {
1426  cv::Mat matImg;
1427  vpImageConvert::convert(I, matImg, false);
1428  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1429 }
1430 
1431 void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1432  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1433 {
1434  cv::Mat matImg;
1435  vpImageConvert::convert(I_color, matImg);
1436  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1437 }
1438 
1439 void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1440  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1441 {
1442  double t = vpTime::measureTimeMs();
1443  bool first = true;
1444 
1445  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1446  itd != m_extractors.end(); ++itd) {
1447  if (first) {
1448  first = false;
1449  // Check if we have 3D object points information
1450  if (trainPoints != nullptr && !trainPoints->empty()) {
1451  // Keep a copy of the input keypoints: compute() removes the keypoints
1452  // for which a descriptor cannot be computed
1453  std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1454 
1455  // Extract descriptors for the given list of keypoints
1456  itd->second->compute(matImg, keyPoints, descriptors);
1457 
1458  if (keyPoints.size() != keyPoints_tmp.size()) {
1459  // Keypoints have been removed
1460  // Store the hash of a keypoint as the key and the index of the
1461  // keypoint as the value
1462  std::map<size_t, size_t> mapOfKeypointHashes;
1463  size_t cpt = 0;
1464  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1465  ++it, cpt++) {
1466  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1467  }
1468 
1469  std::vector<cv::Point3f> trainPoints_tmp;
1470  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1471  if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1472  trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1473  }
1474  }
1475 
1476  // Copy trainPoints_tmp back to *trainPoints
1477  *trainPoints = trainPoints_tmp;
1478  }
1479  }
1480  else {
1481  // Extract descriptors for the given list of keypoints
1482  itd->second->compute(matImg, keyPoints, descriptors);
1483  }
1484  }
1485  else {
1486  // Keep a copy of the input keypoints: compute() removes the keypoints
1487  // for which a descriptor cannot be computed
1488  std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1489 
1490  cv::Mat desc;
1491  // Extract descriptors for the given list of keypoints
1492  itd->second->compute(matImg, keyPoints, desc);
1493 
1494  if (keyPoints.size() != keyPoints_tmp.size()) {
1495  // Keypoints have been removed
1496  // Store the hash of a keypoint as the key and the index of the
1497  // keypoint as the value
1498  std::map<size_t, size_t> mapOfKeypointHashes;
1499  size_t cpt = 0;
1500  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1501  ++it, cpt++) {
1502  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1503  }
1504 
1505  std::vector<cv::Point3f> trainPoints_tmp;
1506  cv::Mat descriptors_tmp;
1507  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1508  if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1509  if (trainPoints != nullptr && !trainPoints->empty()) {
1510  trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1511  }
1512 
1513  if (!descriptors.empty()) {
1514  descriptors_tmp.push_back(descriptors.row((int)mapOfKeypointHashes[myKeypointHash(*it)]));
1515  }
1516  }
1517  }
1518 
1519  if (trainPoints != nullptr) {
1520  // Copy trainPoints_tmp back to *trainPoints
1521  *trainPoints = trainPoints_tmp;
1522  }
1523  // Copy descriptors_tmp to descriptors
1524  descriptors_tmp.copyTo(descriptors);
1525  }
1526 
1527  // Merge descriptors horizontally
1528  if (descriptors.empty()) {
1529  desc.copyTo(descriptors);
1530  }
1531  else {
1532  cv::hconcat(descriptors, desc, descriptors);
1533  }
1534  }
1535  }
1536 
1537  if (keyPoints.size() != (size_t)descriptors.rows) {
1538  std::cerr << "Error: the number of keypoints differs from the number of descriptor rows!" << std::endl;
1539  }
1540  elapsedTime = vpTime::measureTimeMs() - t;
1541 }
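
/*
 * Usage sketch for the extraction pipeline above (a minimal sketch, assuming
 * an initialized vpKeyPoint named keypoint and a grayscale image I):
 *
 *   std::vector<cv::KeyPoint> kpts;
 *   cv::Mat desc;
 *   double t_detect, t_extract;
 *   keypoint.detect(I, kpts, t_detect);
 *   keypoint.extract(I, kpts, desc, t_extract);
 *   // kpts.size() == (size_t)desc.rows is expected afterwards: keypoints
 *   // for which no descriptor could be computed have been dropped.
 */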
1542 
1543 void vpKeyPoint::filterMatches()
1544 {
1545  std::vector<cv::KeyPoint> queryKpts;
1546  std::vector<cv::Point3f> trainPts;
1547  std::vector<cv::DMatch> m;
1548 
1549  if (m_useKnn) {
1550  // Note: std::numeric_limits<double>::max() conflicts with the max macro
1551  // under Windows (it would require #undef max), so DBL_MAX is used
1552  // instead
1553  double min_dist = DBL_MAX;
1554  double mean = 0.0;
1555  std::vector<double> distance_vec(m_knnMatches.size());
1556 
1557  if (m_filterType == stdAndRatioDistanceThreshold) {
1558  for (size_t i = 0; i < m_knnMatches.size(); i++) {
1559  double dist = m_knnMatches[i][0].distance;
1560  mean += dist;
1561  distance_vec[i] = dist;
1562 
1563  if (dist < min_dist) {
1564  min_dist = dist;
1565  }
1569  }
1570  mean /= m_queryDescriptors.rows;
1571  }
1572 
1573  double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1574  double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1575  double threshold = min_dist + stdev;
1576 
1577  for (size_t i = 0; i < m_knnMatches.size(); i++) {
1578  if (m_knnMatches[i].size() >= 2) {
1579  // Calculate ratio of the descriptor distance between the two nearest
1580  // neighbors of the keypoint
1581  float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
1586  double dist = m_knnMatches[i][0].distance;
1587 
1588  if (ratio < m_matchingRatioThreshold || (m_filterType == stdAndRatioDistanceThreshold && dist < threshold)) {
1589  m.push_back(cv::DMatch((int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
1590 
1591  if (!m_trainPoints.empty()) {
1592  trainPts.push_back(m_trainPoints[(size_t)m_knnMatches[i][0].trainIdx]);
1593  }
1594  queryKpts.push_back(m_queryKeyPoints[(size_t)m_knnMatches[i][0].queryIdx]);
1595  }
1596  }
1597  }
1598  }
1599  else {
1600  // Note: std::numeric_limits<double>::max() conflicts with the max macro
1601  // under Windows (it would require #undef max), so DBL_MAX is used
1602  // instead
1603  double min_dist = DBL_MAX;
1604  double mean = 0.0;
1605  std::vector<double> distance_vec(m_matches.size());
1606  for (size_t i = 0; i < m_matches.size(); i++) {
1607  double dist = m_matches[i].distance;
1608  mean += dist;
1609  distance_vec[i] = dist;
1610 
1611  if (dist < min_dist) {
1612  min_dist = dist;
1613  }
1617  }
1618  mean /= m_queryDescriptors.rows;
1619 
1620  double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1621  double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1622 
1623  // Define a threshold such that we keep all the matches whose descriptor
1624  // distance falls below a factor of the minimum descriptor distance (over
1625  // all the query keypoints), or below the minimum descriptor distance plus
1626  // the standard deviation (computed over all the query descriptor
1627  // distances)
1628  double threshold =
1629  m_filterType == constantFactorDistanceThreshold ? m_matchingFactorThreshold * min_dist : min_dist + stdev;
1630 
1631  for (size_t i = 0; i < m_matches.size(); i++) {
1632  if (m_matches[i].distance <= threshold) {
1633  m.push_back(cv::DMatch((int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
1634 
1635  if (!m_trainPoints.empty()) {
1636  trainPts.push_back(m_trainPoints[(size_t)m_matches[i].trainIdx]);
1637  }
1638  queryKpts.push_back(m_queryKeyPoints[(size_t)m_matches[i].queryIdx]);
1639  }
1640  }
1641  }
1642 
1643  if (m_useSingleMatchFilter) {
1644  // Eliminate matches where multiple query keypoints are matched to the
1645  // same train keypoint
1646  std::vector<cv::DMatch> mTmp;
1647  std::vector<cv::Point3f> trainPtsTmp;
1648  std::vector<cv::KeyPoint> queryKptsTmp;
1649 
1650  std::map<int, int> mapOfTrainIdx;
1651  // Count the number of query points matched to the same train point
1652  for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1653  mapOfTrainIdx[it->trainIdx]++;
1654  }
1655 
1656  // Keep matches with only one correspondence
1657  for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1658  if (mapOfTrainIdx[it->trainIdx] == 1) {
1659  mTmp.push_back(cv::DMatch((int)queryKptsTmp.size(), it->trainIdx, it->distance));
1660 
1661  if (!m_trainPoints.empty()) {
1662  trainPtsTmp.push_back(m_trainPoints[(size_t)it->trainIdx]);
1663  }
1664  queryKptsTmp.push_back(queryKpts[(size_t)it->queryIdx]);
1665  }
1666  }
1667 
1668  m_filteredMatches = mTmp;
1669  m_objectFilteredPoints = trainPtsTmp;
1670  m_queryFilteredKeyPoints = queryKptsTmp;
1671  }
1672  else {
1673  m_filteredMatches = m;
1674  m_objectFilteredPoints = trainPts;
1675  m_queryFilteredKeyPoints = queryKpts;
1676  }
1677 }
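
// Numeric illustration of the ratio test used in filterMatches(): with
// nearest-neighbor distances d1 = 0.20 and d2 = 0.50 the ratio is 0.40,
// below the default m_matchingRatioThreshold of 0.85, so the match is kept;
// with d1 = 0.45 and d2 = 0.50 the ratio is 0.90 and the match is rejected
// as ambiguous, the two nearest neighbors being almost equally good.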
1678 
1679 void vpKeyPoint::getObjectPoints(std::vector<cv::Point3f> &objectPoints) const
1680 {
1681  objectPoints = m_objectFilteredPoints;
1682 }
1683 
1684 void vpKeyPoint::getObjectPoints(std::vector<vpPoint> &objectPoints) const
1685 {
1686  vpConvert::convertFromOpenCV(m_objectFilteredPoints, objectPoints);
1687 }
1688 
1689 void vpKeyPoint::getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints, bool matches) const
1690 {
1691  if (matches) {
1692  keyPoints = m_queryFilteredKeyPoints;
1693  }
1694  else {
1695  keyPoints = m_queryKeyPoints;
1696  }
1697 }
1698 
1699 void vpKeyPoint::getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints, bool matches) const
1700 {
1701  if (matches) {
1702  keyPoints = m_currentImagePointsList;
1703  }
1704  else {
1705  vpConvert::convertFromOpenCV(m_queryKeyPoints, keyPoints);
1706  }
1707 }
1708 
1709 void vpKeyPoint::getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const { keyPoints = m_trainKeyPoints; }
1710 
1711 void vpKeyPoint::getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const { keyPoints = m_referenceImagePointsList; }
1712 
1713 void vpKeyPoint::getTrainPoints(std::vector<cv::Point3f> &points) const { points = m_trainPoints; }
1714 
1715 void vpKeyPoint::getTrainPoints(std::vector<vpPoint> &points) const { points = m_trainVpPoints; }
1716 
1717 void vpKeyPoint::init()
1718 {
1719 // Requires 2.4.0 <= OpenCV < 3.0.0
1720 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
1721  // The following line must be called in order to use SIFT or SURF
1722  if (!cv::initModule_nonfree()) {
1723  std::cerr << "Cannot init the nonfree module; SIFT and SURF cannot be used." << std::endl;
1724  }
1725 #endif
1726 
1727  // Use k-nearest neighbors (knn) to retrieve the two best matches for a
1728  // keypoint; this is needed only for the filtering methods based on the ratio test
1729  if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
1730  m_useKnn = true;
1731  }
1732 
1733  initDetectors(m_detectorNames);
1734  initExtractors(m_extractorNames);
1735  initMatcher(m_matcherName);
1736 }
1737 
1738 void vpKeyPoint::initDetector(const std::string &detectorName)
1739 {
1740 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
1741  m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
1742 
1743  if (m_detectors[detectorName] == nullptr) {
1744  std::stringstream ss_msg;
1745  ss_msg << "Failed to initialize the detector: " << detectorName
1746  << ", or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
1747  throw vpException(vpException::fatalError, ss_msg.str());
1748  }
1749 #else
1750  std::string detectorNameTmp = detectorName;
1751  std::string pyramid = "Pyramid";
1752  std::size_t pos = detectorName.find(pyramid);
1753  bool usePyramid = false;
1754  if (pos != std::string::npos) {
1755  detectorNameTmp = detectorName.substr(pos + pyramid.size());
1756  usePyramid = true;
1757  }
1758 
1759  if (detectorNameTmp == "SIFT") {
1760 #if (VISP_HAVE_OPENCV_VERSION >= 0x040500) // OpenCV >= 4.5.0
1761  cv::Ptr<cv::FeatureDetector> siftDetector = cv::SiftFeatureDetector::create();
1762  if (!usePyramid) {
1763  m_detectors[detectorNameTmp] = siftDetector;
1764  }
1765  else {
1766  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1767  }
1768 #else
1769 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
1770  (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1771  // SIFT is no longer patented since March 2020
1772  cv::Ptr<cv::FeatureDetector> siftDetector;
1773  if (m_maxFeatures > 0) {
1774 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1775  siftDetector = cv::SIFT::create(m_maxFeatures);
1776 #else
1777  siftDetector = cv::xfeatures2d::SIFT::create(m_maxFeatures);
1778 #endif
1779  }
1780  else {
1781 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1782  siftDetector = cv::SIFT::create();
1783 #else
1784  siftDetector = cv::xfeatures2d::SIFT::create();
1785 #endif
1786  }
1787  if (!usePyramid) {
1788  m_detectors[detectorNameTmp] = siftDetector;
1789  }
1790  else {
1791  std::cerr << "You should not use SIFT with Pyramid feature detection!" << std::endl;
1792  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1793  }
1794 #else
1795  std::stringstream ss_msg;
1796  ss_msg << "Failed to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1797  << " was not built with the xFeatures2d module.";
1798  throw vpException(vpException::fatalError, ss_msg.str());
1799 #endif
1800 #endif
1801  }
1802  else if (detectorNameTmp == "SURF") {
1803 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1804  cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
1805  if (!usePyramid) {
1806  m_detectors[detectorNameTmp] = surfDetector;
1807  }
1808  else {
1809  std::cerr << "You should not use SURF with Pyramid feature detection!" << std::endl;
1810  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
1811  }
1812 #else
1813  std::stringstream ss_msg;
1814  ss_msg << "Failed to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1815  << " was not built with the xFeatures2d module.";
1816  throw vpException(vpException::fatalError, ss_msg.str());
1817 #endif
1818  }
1819  else if (detectorNameTmp == "FAST") {
1820  cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
1821  if (!usePyramid) {
1822  m_detectors[detectorNameTmp] = fastDetector;
1823  }
1824  else {
1825  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1826  }
1827  }
1828  else if (detectorNameTmp == "MSER") {
1829  cv::Ptr<cv::FeatureDetector> mserDetector = cv::MSER::create();
1830  if (!usePyramid) {
1831  m_detectors[detectorNameTmp] = mserDetector;
1832  }
1833  else {
1834  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(mserDetector);
1835  }
1836  }
1837  else if (detectorNameTmp == "ORB") {
1838  cv::Ptr<cv::FeatureDetector> orbDetector;
1839  if (m_maxFeatures > 0) {
1840  orbDetector = cv::ORB::create(m_maxFeatures);
1841  }
1842  else {
1843  orbDetector = cv::ORB::create();
1844  }
1845  if (!usePyramid) {
1846  m_detectors[detectorNameTmp] = orbDetector;
1847  }
1848  else {
1849  std::cerr << "You should not use ORB with Pyramid feature detection!" << std::endl;
1850  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
1851  }
1852  }
1853  else if (detectorNameTmp == "BRISK") {
1854  cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
1855  if (!usePyramid) {
1856  m_detectors[detectorNameTmp] = briskDetector;
1857  }
1858  else {
1859  std::cerr << "You should not use BRISK with Pyramid feature detection!" << std::endl;
1860  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
1861  }
1862  }
1863  else if (detectorNameTmp == "KAZE") {
1864  cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
1865  if (!usePyramid) {
1866  m_detectors[detectorNameTmp] = kazeDetector;
1867  }
1868  else {
1869  std::cerr << "You should not use KAZE with Pyramid feature detection!" << std::endl;
1870  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
1871  }
1872  }
1873  else if (detectorNameTmp == "AKAZE") {
1874  cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
1875  if (!usePyramid) {
1876  m_detectors[detectorNameTmp] = akazeDetector;
1877  }
1878  else {
1879  std::cerr << "You should not use AKAZE with Pyramid feature detection!" << std::endl;
1880  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
1881  }
1882  }
1883  else if (detectorNameTmp == "GFTT") {
1884  cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
1885  if (!usePyramid) {
1886  m_detectors[detectorNameTmp] = gfttDetector;
1887  }
1888  else {
1889  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
1890  }
1891  }
1892  else if (detectorNameTmp == "SimpleBlob") {
1893  cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
1894  if (!usePyramid) {
1895  m_detectors[detectorNameTmp] = simpleBlobDetector;
1896  }
1897  else {
1898  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
1899  }
1900  }
1901  else if (detectorNameTmp == "STAR") {
1902 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1903  cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
1904  if (!usePyramid) {
1905  m_detectors[detectorNameTmp] = starDetector;
1906  }
1907  else {
1908  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
1909  }
1910 #else
1911  std::stringstream ss_msg;
1912  ss_msg << "Failed to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1913  << " was not built with the xFeatures2d module.";
1914  throw vpException(vpException::fatalError, ss_msg.str());
1915 #endif
1916  }
1917  else if (detectorNameTmp == "AGAST") {
1918  cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
1919  if (!usePyramid) {
1920  m_detectors[detectorNameTmp] = agastDetector;
1921  }
1922  else {
1923  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
1924  }
1925  }
1926  else if (detectorNameTmp == "MSD") {
1927 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
1928 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
1929  cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
1930  if (!usePyramid) {
1931  m_detectors[detectorNameTmp] = msdDetector;
1932  }
1933  else {
1934  std::cerr << "You should not use MSD with Pyramid feature detection!" << std::endl;
1935  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
1936  }
1937 #else
1938  std::stringstream ss_msg;
1939  ss_msg << "Failed to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1940  << " was not built with the xFeatures2d module.";
1941  throw vpException(vpException::fatalError, ss_msg.str());
1942 #endif
1943 #else
1944  std::stringstream ss_msg;
1945  ss_msg << "Feature " << detectorName << " is not available in OpenCV version: " << std::hex
1946  << VISP_HAVE_OPENCV_VERSION << " (requires OpenCV >= 3.1).";
 throw vpException(vpException::fatalError, ss_msg.str());
1947 #endif
1948  }
1949  else {
1950  std::cerr << "The detector: " << detectorNameTmp << " is not available." << std::endl;
1951  }
1952 
1953  bool detectorInitialized = false;
1954  if (!usePyramid) {
1955  // Use !empty() instead of an implicit cast to bool to avoid MSVC warning C4800
1956  detectorInitialized = !m_detectors[detectorNameTmp].empty();
1957  }
1958  else {
1959  // Use !empty() instead of an implicit cast to bool to avoid MSVC warning C4800
1960  detectorInitialized = !m_detectors[detectorName].empty();
1961  }
1962 
1963  if (!detectorInitialized) {
1964  std::stringstream ss_msg;
1965  ss_msg << "Failed to initialize the detector: " << detectorNameTmp
1966  << ", or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
1967  throw vpException(vpException::fatalError, ss_msg.str());
1968  }
1969 #endif
1970 }
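
/*
 * Usage sketch for the detector initialization above (a minimal sketch;
 * setDetector() is assumed to forward to initDetector()). A "Pyramid"
 * prefix wraps the detector in a PyramidAdaptedFeatureDetector that runs
 * it on several pyramid levels:
 *
 *   vpKeyPoint keypoint;
 *   keypoint.setDetector("FAST");        // plain FAST detector
 *   keypoint.setDetector("PyramidFAST"); // FAST run on an image pyramid
 */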
1971 
1972 void vpKeyPoint::initDetectors(const std::vector<std::string> &detectorNames)
1973 {
1974  for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
1975  initDetector(*it);
1976  }
1977 }
1978 
1979 void vpKeyPoint::initExtractor(const std::string &extractorName)
1980 {
1981 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
1982  m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
1983 #else
1984  if (extractorName == "SIFT") {
1985 #if (VISP_HAVE_OPENCV_VERSION >= 0x040500) // OpenCV >= 4.5.0
1986  m_extractors[extractorName] = cv::SIFT::create();
1987 #else
1988 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
1989  (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1990  // SIFT is no longer patented since March 2020
1991 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1992  m_extractors[extractorName] = cv::SIFT::create();
1993 #else
1994  m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
1995 #endif
1996 #else
1997  std::stringstream ss_msg;
1998  ss_msg << "Failed to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1999  << " was not built with the xFeatures2d module.";
2000  throw vpException(vpException::fatalError, ss_msg.str());
2001 #endif
2002 #endif
2003  }
2004  else if (extractorName == "SURF") {
2005 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2006  // Use extended set of SURF descriptors (128 instead of 64)
2007  m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3, true);
2008 #else
2009  std::stringstream ss_msg;
2010  ss_msg << "Failed to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2011  << " was not built with the xFeatures2d module.";
2012  throw vpException(vpException::fatalError, ss_msg.str());
2013 #endif
2014  }
2015  else if (extractorName == "ORB") {
2016  m_extractors[extractorName] = cv::ORB::create();
2017  }
2018  else if (extractorName == "BRISK") {
2019  m_extractors[extractorName] = cv::BRISK::create();
2020  }
2021  else if (extractorName == "FREAK") {
2022 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2023  m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2024 #else
2025  std::stringstream ss_msg;
2026  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2027  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2028  throw vpException(vpException::fatalError, ss_msg.str());
2029 #endif
2030  }
2031  else if (extractorName == "BRIEF") {
2032 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2033  m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2034 #else
2035  std::stringstream ss_msg;
2036  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2037  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2038  throw vpException(vpException::fatalError, ss_msg.str());
2039 #endif
2040  }
2041  else if (extractorName == "KAZE") {
2042  m_extractors[extractorName] = cv::KAZE::create();
2043  }
2044  else if (extractorName == "AKAZE") {
2045  m_extractors[extractorName] = cv::AKAZE::create();
2046  }
2047  else if (extractorName == "DAISY") {
2048 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2049  m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2050 #else
2051  std::stringstream ss_msg;
2052  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2053  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2054  throw vpException(vpException::fatalError, ss_msg.str());
2055 #endif
2056  }
2057  else if (extractorName == "LATCH") {
2058 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2059  m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2060 #else
2061  std::stringstream ss_msg;
2062  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2063  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2064  throw vpException(vpException::fatalError, ss_msg.str());
2065 #endif
2066  }
2067  else if (extractorName == "LUCID") {
2068 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2069  // m_extractors[extractorName] = cv::xfeatures2d::LUCID::create(1, 2);
2070  // Not possible currently, need a color image
2071  throw vpException(vpException::badValue, "Not possible currently as it needs a color image.");
2072 #else
2073  std::stringstream ss_msg;
2074  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2075  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2076  throw vpException(vpException::fatalError, ss_msg.str());
2077 #endif
2078  }
2079  else if (extractorName == "VGG") {
2080 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2081 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2082  m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2083 #else
2084  std::stringstream ss_msg;
2085  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2086  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2087  throw vpException(vpException::fatalError, ss_msg.str());
2088 #endif
2089 #else
2090  std::stringstream ss_msg;
2091  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version is " << std::hex
2092  << VISP_HAVE_OPENCV_VERSION << " but at least OpenCV 3.2 is required.";
2093  throw vpException(vpException::fatalError, ss_msg.str());
2094 #endif
2095  }
2096  else if (extractorName == "BoostDesc") {
2097 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2098 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2099  m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2100 #else
2101  std::stringstream ss_msg;
2102  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2103  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2104  throw vpException(vpException::fatalError, ss_msg.str());
2105 #endif
2106 #else
2107  std::stringstream ss_msg;
2108  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version is " << std::hex
2109  << VISP_HAVE_OPENCV_VERSION << " but at least OpenCV 3.2 is required.";
2110  throw vpException(vpException::fatalError, ss_msg.str());
2111 #endif
2112  }
2113  else {
2114  std::cerr << "The extractor: " << extractorName << " is not available." << std::endl;
2115  }
2116 #endif
2117 
2118  if (!m_extractors[extractorName]) { // if null
2119  std::stringstream ss_msg;
2120  ss_msg << "Failed to initialize the extractor: " << extractorName
2121  << ", or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2122  throw vpException(vpException::fatalError, ss_msg.str());
2123  }
2124 
2125 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2126  if (extractorName == "SURF") {
2127  // Use extended set of SURF descriptors (128 instead of 64)
2128  m_extractors[extractorName]->set("extended", 1);
2129  }
2130 #endif
2131 }
2132 
2133 void vpKeyPoint::initExtractors(const std::vector<std::string> &extractorNames)
2134 {
2135  for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
2136  initExtractor(*it);
2137  }
2138 
2139  int descriptorType = CV_32F;
2140  bool firstIteration = true;
2141  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2142  it != m_extractors.end(); ++it) {
2143  if (firstIteration) {
2144  firstIteration = false;
2145  descriptorType = it->second->descriptorType();
2146  }
2147  else {
2148  if (descriptorType != it->second->descriptorType()) {
2149  throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2150  }
2151  }
2152  }
2153 }
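
// Note on the type check above: binary descriptors (ORB, BRISK, BRIEF,
// FREAK, ...) are stored as CV_8U whereas SIFT, SURF, KAZE or DAISY produce
// CV_32F, so combining, e.g., ORB with BRIEF is accepted while combining
// ORB with SIFT throws: rows of different types could not be concatenated
// into the single descriptor cv::Mat built in extract().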
2154 
2155 void vpKeyPoint::initFeatureNames()
2156 {
2157 // Create the maps from enum values to detector/descriptor names
2158 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2159  m_mapOfDetectorNames[DETECTOR_FAST] = "FAST";
2160  m_mapOfDetectorNames[DETECTOR_MSER] = "MSER";
2161  m_mapOfDetectorNames[DETECTOR_ORB] = "ORB";
2162  m_mapOfDetectorNames[DETECTOR_BRISK] = "BRISK";
2163  m_mapOfDetectorNames[DETECTOR_GFTT] = "GFTT";
2164  m_mapOfDetectorNames[DETECTOR_SimpleBlob] = "SimpleBlob";
2165 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2166  m_mapOfDetectorNames[DETECTOR_STAR] = "STAR";
2167 #endif
2168 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2169  (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2170  m_mapOfDetectorNames[DETECTOR_SIFT] = "SIFT";
2171 #endif
2172 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2173  m_mapOfDetectorNames[DETECTOR_SURF] = "SURF";
2174 #endif
2175 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2176  m_mapOfDetectorNames[DETECTOR_KAZE] = "KAZE";
2177  m_mapOfDetectorNames[DETECTOR_AKAZE] = "AKAZE";
2178  m_mapOfDetectorNames[DETECTOR_AGAST] = "AGAST";
2179 #endif
2180 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2181  m_mapOfDetectorNames[DETECTOR_MSD] = "MSD";
2182 #endif
2183 #endif
2184 
2185 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2186  m_mapOfDescriptorNames[DESCRIPTOR_ORB] = "ORB";
2187  m_mapOfDescriptorNames[DESCRIPTOR_BRISK] = "BRISK";
2188 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2189  m_mapOfDescriptorNames[DESCRIPTOR_FREAK] = "FREAK";
2190  m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] = "BRIEF";
2191 #endif
2192 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2193  (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2194  m_mapOfDescriptorNames[DESCRIPTOR_SIFT] = "SIFT";
2195 #endif
2196 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2197  m_mapOfDescriptorNames[DESCRIPTOR_SURF] = "SURF";
2198 #endif
2199 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2200  m_mapOfDescriptorNames[DESCRIPTOR_KAZE] = "KAZE";
2201  m_mapOfDescriptorNames[DESCRIPTOR_AKAZE] = "AKAZE";
2202 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2203  m_mapOfDescriptorNames[DESCRIPTOR_DAISY] = "DAISY";
2204  m_mapOfDescriptorNames[DESCRIPTOR_LATCH] = "LATCH";
2205 #endif
2206 #endif
2207 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2208  m_mapOfDescriptorNames[DESCRIPTOR_VGG] = "VGG";
2209  m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] = "BoostDesc";
2210 #endif
2211 #endif
2212 }
2213 
2214 void vpKeyPoint::initMatcher(const std::string &matcherName)
2215 {
2216  int descriptorType = CV_32F;
2217  bool firstIteration = true;
2218  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2219  it != m_extractors.end(); ++it) {
2220  if (firstIteration) {
2221  firstIteration = false;
2222  descriptorType = it->second->descriptorType();
2223  }
2224  else {
2225  if (descriptorType != it->second->descriptorType()) {
2226  throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2227  }
2228  }
2229  }
2230 
2231  if (matcherName == "FlannBased") {
2232  if (m_extractors.empty()) {
2233  std::cout << "Warning: no extractor initialized; the descriptor type "
2234  "defaults to floating-point "
2235  "values (CV_32F)!"
2236  << std::endl;
2237  }
2238 
2239  if (descriptorType == CV_8U) {
2240 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2241  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2242 #else
2243  m_matcher = new cv::FlannBasedMatcher(new cv::flann::LshIndexParams(12, 20, 2));
2244 #endif
2245  }
2246  else {
2247 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2248  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2249 #else
2250  m_matcher = new cv::FlannBasedMatcher(new cv::flann::KDTreeIndexParams());
2251 #endif
2252  }
2253  }
2254  else {
2255  m_matcher = cv::DescriptorMatcher::create(matcherName);
2256  }
2257 
2258 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2259  if (m_matcher != nullptr && !m_useKnn && matcherName == "BruteForce") {
2260  m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
2261  }
2262 #endif
2263 
2264  if (!m_matcher) { // if null
2265  std::stringstream ss_msg;
2266  ss_msg << "Failed to initialize the matcher: " << matcherName
2267  << ", or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2268  throw vpException(vpException::fatalError, ss_msg.str());
2269  }
2270 }
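
/*
 * Note on the FlannBased branch above: the LSH index is the FLANN index
 * suited to binary (CV_8U) descriptors, while the KD-tree index requires
 * floating-point ones. A minimal sketch (assuming the constructor's default
 * filter type) where "FlannBased" therefore selects LSH, since ORB
 * descriptors are CV_8U:
 *
 *   vpKeyPoint keypoint(vpKeyPoint::DETECTOR_ORB, vpKeyPoint::DESCRIPTOR_ORB, "FlannBased");
 */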
2271 
2272 void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
2273  vpImage<unsigned char> &IMatching)
2274 {
2275  vpImagePoint topLeftCorner(0, 0);
2276  IMatching.insert(IRef, topLeftCorner);
2277  topLeftCorner = vpImagePoint(0, IRef.getWidth());
2278  IMatching.insert(ICurrent, topLeftCorner);
2279 }
2280 
2281 void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent,
2282  vpImage<vpRGBa> &IMatching)
2283 {
2284  vpImagePoint topLeftCorner(0, 0);
2285  IMatching.insert(IRef, topLeftCorner);
2286  topLeftCorner = vpImagePoint(0, IRef.getWidth());
2287  IMatching.insert(ICurrent, topLeftCorner);
2288 }
2289 
2290 void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
2291 {
2292  // Number of images in the training database + the current image in which
2293  // we want to detect the object
2294  int nbImg = (int)(m_mapOfImages.size() + 1);
2295 
2296  if (m_mapOfImages.empty()) {
2297  std::cerr << "There is no training image loaded!" << std::endl;
2298  return;
2299  }
2300 
2301  if (nbImg == 2) {
2302  // Only one training image, so we display them side by side
2303  insertImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
2304  }
2305  else {
2306  // Multiple training images, display them as a mosaic image
2307  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg));
2308  int nbWidth = nbImgSqrt;
2309  int nbHeight = nbImgSqrt;
2310 
2311  if (nbImgSqrt * nbImgSqrt < nbImg) {
2312  nbWidth++;
2313  }
2314 
2315  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2316  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2317  ++it) {
2318  if (maxW < it->second.getWidth()) {
2319  maxW = it->second.getWidth();
2320  }
2321 
2322  if (maxH < it->second.getHeight()) {
2323  maxH = it->second.getHeight();
2324  }
2325  }
2326 
2327  // Index of the current image in the grid, computed so that the current
2328  // image falls in the center cell of the mosaic grid
2329  int medianI = nbHeight / 2;
2330  int medianJ = nbWidth / 2;
2331  int medianIndex = medianI * nbWidth + medianJ;
2332 
2333  int cpt = 0;
2334  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2335  ++it, cpt++) {
2336  int local_cpt = cpt;
2337  if (cpt >= medianIndex) {
2338  // Shift by one the index of the training images that come after
2339  // the current image
2340  local_cpt++;
2341  }
2342  int indexI = local_cpt / nbWidth;
2343  int indexJ = local_cpt - (indexI * nbWidth);
2344  vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2345 
2346  IMatching.insert(it->second, topLeftCorner);
2347  }
2348 
2349  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2350  IMatching.insert(ICurrent, topLeftCorner);
2351  }
2352 }
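
// Worked example of the mosaic layout above: with 5 training images,
// nbImg = 6 and round(sqrt(6)) = 2; since 2 * 2 < 6, nbWidth becomes 3,
// giving a 3 x 2 grid. Then medianI = 1, medianJ = 1 and medianIndex = 4,
// so the current image occupies cell (1, 1) and every training image with
// index >= 4 is shifted by one cell to leave that slot free.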
2353 
2354 void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
2355 {
2356  // Number of images in the training database + the current image in which
2357  // we want to detect the object
2358  int nbImg = (int)(m_mapOfImages.size() + 1);
2359 
2360  if (m_mapOfImages.empty()) {
2361  std::cerr << "There is no training image loaded!" << std::endl;
2362  return;
2363  }
2364 
2365  if (nbImg == 2) {
2366  // Only one training image, so we display them side by side
2367  vpImage<vpRGBa> IRef;
2368  vpImageConvert::convert(m_mapOfImages.begin()->second, IRef);
2369  insertImageMatching(IRef, ICurrent, IMatching);
2370  }
2371  else {
2372  // Multiple training images, display them as a mosaic image
2373  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg));
2374  int nbWidth = nbImgSqrt;
2375  int nbHeight = nbImgSqrt;
2376 
2377  if (nbImgSqrt * nbImgSqrt < nbImg) {
2378  nbWidth++;
2379  }
2380 
2381  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2382  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2383  ++it) {
2384  if (maxW < it->second.getWidth()) {
2385  maxW = it->second.getWidth();
2386  }
2387 
2388  if (maxH < it->second.getHeight()) {
2389  maxH = it->second.getHeight();
2390  }
2391  }
2392 
2393  // Index of the current image in the grid, computed so that the current
2394  // image falls in the center cell of the mosaic grid
2395  int medianI = nbHeight / 2;
2396  int medianJ = nbWidth / 2;
2397  int medianIndex = medianI * nbWidth + medianJ;
2398 
2399  int cpt = 0;
2400  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2401  ++it, cpt++) {
2402  int local_cpt = cpt;
2403  if (cpt >= medianIndex) {
2404  // Shift by one the index of the training images that come after
2405  // the current image
2406  local_cpt++;
2407  }
2408  int indexI = local_cpt / nbWidth;
2409  int indexJ = local_cpt - (indexI * nbWidth);
2410  vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2411 
2412  vpImage<vpRGBa> IRef;
2413  vpImageConvert::convert(it->second, IRef);
2414  IMatching.insert(IRef, topLeftCorner);
2415  }
2416 
2417  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2418  IMatching.insert(ICurrent, topLeftCorner);
2419  }
2420 }
2421 
2422 void vpKeyPoint::loadConfigFile(const std::string &configFile)
2423 {
2424 #if defined(VISP_HAVE_PUGIXML)
2425  vpXmlConfigParserKeyPoint xmlp;
2426 
2427  try {
2428  // Reset detector and extractor
2429  m_detectorNames.clear();
2430  m_extractorNames.clear();
2431  m_detectors.clear();
2432  m_extractors.clear();
2433 
2434  std::cout << " *********** Parsing the XML configuration file for vpKeyPoint "
2435  "************ "
2436  << std::endl;
2437  xmlp.parse(configFile);
2438 
2439  m_detectorNames.push_back(xmlp.getDetectorName());
2440  m_extractorNames.push_back(xmlp.getExtractorName());
2441  m_matcherName = xmlp.getMatcherName();
2442 
2443  switch (xmlp.getMatchingMethod()) {
2444  case vpXmlConfigParserKeyPoint::constantFactorDistanceThreshold:
2445  m_filterType = constantFactorDistanceThreshold;
2446  break;
2447 
2448  case vpXmlConfigParserKeyPoint::stdDistanceThreshold:
2449  m_filterType = stdDistanceThreshold;
2450  break;
2451 
2452  case vpXmlConfigParserKeyPoint::ratioDistanceThreshold:
2453  m_filterType = ratioDistanceThreshold;
2454  break;
2455 
2456  case vpXmlConfigParserKeyPoint::stdAndRatioDistanceThreshold:
2457  m_filterType = stdAndRatioDistanceThreshold;
2458  break;
2459 
2460  case vpXmlConfigParserKeyPoint::noFilterMatching:
2461  m_filterType = noFilterMatching;
2462  break;
2463 
2464  default:
2465  break;
2466  }
2467 
2468  m_matchingFactorThreshold = xmlp.getMatchingFactorThreshold();
2469  m_matchingRatioThreshold = xmlp.getMatchingRatioThreshold();
2470 
2471  m_useRansacVVS = xmlp.getUseRansacVVSPoseEstimation();
2472  m_useConsensusPercentage = xmlp.getUseRansacConsensusPercentage();
2473  m_nbRansacIterations = xmlp.getNbRansacIterations();
2474  m_ransacReprojectionError = xmlp.getRansacReprojectionError();
2475  m_nbRansacMinInlierCount = xmlp.getNbRansacMinInlierCount();
2476  m_ransacThreshold = xmlp.getRansacThreshold();
2477  m_ransacConsensusPercentage = xmlp.getRansacConsensusPercentage();
2478 
2479  if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
2480  m_useKnn = true;
2481  }
2482  else {
2483  m_useKnn = false;
2484  }
2485 
2486  init();
2487  }
2488  catch (...) {
2489  throw vpException(vpException::ioError, "Cannot open the XML file \"%s\"", configFile.c_str());
2490  }
2491 #else
2492  (void)configFile;
2493  throw vpException(vpException::ioError, "vpKeyPoint::loadConfigFile() needs the built-in pugixml 3rd-party library that is not available");
2494 #endif
2495 }
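
/*
 * Usage sketch for loadConfigFile() (a minimal sketch; the file name is an
 * assumption, and the XML schema is the one handled by
 * vpXmlConfigParserKeyPoint):
 *
 *   vpKeyPoint keypoint;
 *   keypoint.loadConfigFile("detection-config.xml"); // resets detectors, extractors and matcher
 */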
2496 
2497 void vpKeyPoint::loadLearningData(const std::string &filename, bool binaryMode, bool append)
2498 {
2499  int startClassId = 0;
2500  int startImageId = 0;
2501  if (!append) {
2502  m_trainKeyPoints.clear();
2503  m_trainPoints.clear();
2504  m_mapOfImageId.clear();
2505  m_mapOfImages.clear();
2506  }
2507  else {
2508  // In append case, find the max index of keypoint class Id
2509  for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
2510  if (startClassId < it->first) {
2511  startClassId = it->first;
2512  }
2513  }
2514 
2515  // In append case, find the max index of image Id
2516  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2517  ++it) {
2518  if (startImageId < it->first) {
2519  startImageId = it->first;
2520  }
2521  }
2522  }
2523 
2524  // Get parent directory
2525  std::string parent = vpIoTools::getParent(filename);
2526  if (!parent.empty()) {
2527  parent += "/";
2528  }
2529 
2530  if (binaryMode) {
2531  std::ifstream file(filename.c_str(), std::ifstream::binary);
2532  if (!file.is_open()) {
2533  throw vpException(vpException::ioError, "Cannot open the file %s", filename.c_str());
2534  }
2535 
2536  // Read info about training images
2537  int nbImgs = 0;
2538  vpIoTools::readBinaryValueLE(file, nbImgs);
2539 
2540 #if !defined(VISP_HAVE_MODULE_IO)
2541  if (nbImgs > 0) {
2542  std::cout << "Warning: The learning file contains image data that will "
2543  "not be loaded as visp_io module "
2544  "is not available !"
2545  << std::endl;
2546  }
2547 #endif
2548 
2549  for (int i = 0; i < nbImgs; i++) {
2550  // Read image_id
2551  int id = 0;
2552  vpIoTools::readBinaryValueLE(file, id);
2553 
2554  int length = 0;
2555  vpIoTools::readBinaryValueLE(file, length);
2556  // Will contain the path to the training images
2557  char *path = new char[length + 1]; // heap allocation, since VLAs are not standard C++
2558 
2559  for (int cpt = 0; cpt < length; cpt++) {
2560  char c;
2561  file.read((char *)(&c), sizeof(c));
2562  path[cpt] = c;
2563  }
2564  path[length] = '\0';
2565 
2566  vpImage<unsigned char> I;
2567 #ifdef VISP_HAVE_MODULE_IO
2568  if (vpIoTools::isAbsolutePathname(std::string(path))) {
2569  vpImageIo::read(I, path);
2570  }
2571  else {
2572  vpImageIo::read(I, parent + path);
2573  }
2574 
2575  // Add the image previously loaded only if VISP_HAVE_MODULE_IO
2576  m_mapOfImages[id + startImageId] = I;
2577 #endif
2578 
2579  // Delete path
2580  delete[] path;
2581  }
2582 
2583  // Read whether 3D point information is saved or not
2584  int have3DInfoInt = 0;
2585  vpIoTools::readBinaryValueLE(file, have3DInfoInt);
2586  bool have3DInfo = have3DInfoInt != 0;
2587 
2588  // Read the number of descriptors
2589  int nRows = 0;
2590  vpIoTools::readBinaryValueLE(file, nRows);
2591 
2592  // Read the size of the descriptor
2593  int nCols = 0;
2594  vpIoTools::readBinaryValueLE(file, nCols);
2595 
2596  // Read the type of the descriptor
2597  int descriptorType = 5; // CV_32F
2598  vpIoTools::readBinaryValueLE(file, descriptorType);
2599 
2600  cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2601  for (int i = 0; i < nRows; i++) {
2602  // Read information about the keypoint
2603  float u, v, size, angle, response;
2604  int octave, class_id, image_id;
2605  vpIoTools::readBinaryValueLE(file, u);
2606  vpIoTools::readBinaryValueLE(file, v);
2607  vpIoTools::readBinaryValueLE(file, size);
2608  vpIoTools::readBinaryValueLE(file, angle);
2609  vpIoTools::readBinaryValueLE(file, response);
2610  vpIoTools::readBinaryValueLE(file, octave);
2611  vpIoTools::readBinaryValueLE(file, class_id);
2612  vpIoTools::readBinaryValueLE(file, image_id);
2613  cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
2614  m_trainKeyPoints.push_back(keyPoint);
2615 
2616  if (image_id != -1) {
2617 #ifdef VISP_HAVE_MODULE_IO
2618  // No training images if image_id == -1
2619  m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2620 #endif
2621  }
2622 
2623  if (have3DInfo) {
2624  // Read oX, oY, oZ
2625  float oX, oY, oZ;
2626  vpIoTools::readBinaryValueLE(file, oX);
2627  vpIoTools::readBinaryValueLE(file, oY);
2628  vpIoTools::readBinaryValueLE(file, oZ);
2629  m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
2630  }
2631 
2632  for (int j = 0; j < nCols; j++) {
2633  // Read the value of the descriptor
2634  switch (descriptorType) {
2635  case CV_8U: {
2636  unsigned char value;
2637  file.read((char *)(&value), sizeof(value));
2638  trainDescriptorsTmp.at<unsigned char>(i, j) = value;
2639  } break;
2640 
2641  case CV_8S: {
2642  char value;
2643  file.read((char *)(&value), sizeof(value));
2644  trainDescriptorsTmp.at<char>(i, j) = value;
2645  } break;
2646 
2647  case CV_16U: {
2648  unsigned short int value;
2649  vpIoTools::readBinaryValueLE(file, value);
2650  trainDescriptorsTmp.at<unsigned short int>(i, j) = value;
2651  } break;
2652 
2653  case CV_16S: {
2654  short int value;
2655  vpIoTools::readBinaryValueLE(file, value);
2656  trainDescriptorsTmp.at<short int>(i, j) = value;
2657  } break;
2658 
2659  case CV_32S: {
2660  int value;
2661  vpIoTools::readBinaryValueLE(file, value);
2662  trainDescriptorsTmp.at<int>(i, j) = value;
2663  } break;
2664 
2665  case CV_32F: {
2666  float value;
2667  vpIoTools::readBinaryValueLE(file, value);
2668  trainDescriptorsTmp.at<float>(i, j) = value;
2669  } break;
2670 
2671  case CV_64F: {
2672  double value;
2673  vpIoTools::readBinaryValueLE(file, value);
2674  trainDescriptorsTmp.at<double>(i, j) = value;
2675  } break;
2676 
2677  default: {
2678  float value;
2679  vpIoTools::readBinaryValueLE(file, value);
2680  trainDescriptorsTmp.at<float>(i, j) = value;
2681  } break;
2682  }
2683  }
2684  }
2685 
2686  if (!append || m_trainDescriptors.empty()) {
2687  trainDescriptorsTmp.copyTo(m_trainDescriptors);
2688  }
2689  else {
2690  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2691  }
2692 
2693  file.close();
2694  }
2695  else {
2696 #if defined(VISP_HAVE_PUGIXML)
2697  pugi::xml_document doc;
2698 
2699  /* Parse the file and get the DOM */
2700  if (!doc.load_file(filename.c_str())) {
2701  throw vpException(vpException::ioError, "Error with file: %s", filename.c_str());
2702  }
2703 
2704  pugi::xml_node root_element = doc.document_element();
2705 
2706  int descriptorType = CV_32F; // float
2707  int nRows = 0, nCols = 0;
2708  int i = 0, j = 0;
2709 
2710  cv::Mat trainDescriptorsTmp;
2711 
2712  for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node;
2713  first_level_node = first_level_node.next_sibling()) {
2714 
2715  std::string name(first_level_node.name());
2716  if (first_level_node.type() == pugi::node_element && name == "TrainingImageInfo") {
2717 
2718  for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node;
2719  image_info_node = image_info_node.next_sibling()) {
2720  name = std::string(image_info_node.name());
2721 
2722  if (name == "trainImg") {
2723  // Read image_id
2724  int id = image_info_node.attribute("image_id").as_int();
2725 
2726  vpImage<unsigned char> I;
2727 #ifdef VISP_HAVE_MODULE_IO
2728  std::string path(image_info_node.text().as_string());
2729  // Read path to the training images
2730  if (vpIoTools::isAbsolutePathname(std::string(path))) {
2731  vpImageIo::read(I, path);
2732  }
2733  else {
2734  vpImageIo::read(I, parent + path);
2735  }
2736 
2737  // Add the image previously loaded only if VISP_HAVE_MODULE_IO
2738  m_mapOfImages[id + startImageId] = I;
2739 #endif
2740  }
2741  }
2742  }
2743  else if (first_level_node.type() == pugi::node_element && name == "DescriptorsInfo") {
2744  for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
2745  descriptors_info_node = descriptors_info_node.next_sibling()) {
2746  if (descriptors_info_node.type() == pugi::node_element) {
2747  name = std::string(descriptors_info_node.name());
2748 
2749  if (name == "nrows") {
2750  nRows = descriptors_info_node.text().as_int();
2751  }
2752  else if (name == "ncols") {
2753  nCols = descriptors_info_node.text().as_int();
2754  }
2755  else if (name == "type") {
2756  descriptorType = descriptors_info_node.text().as_int();
2757  }
2758  }
2759  }
2760 
2761  trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2762  }
2763  else if (first_level_node.type() == pugi::node_element && name == "DescriptorInfo") {
2764  double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
2765  int octave = 0, class_id = 0, image_id = 0;
2766  double oX = 0.0, oY = 0.0, oZ = 0.0;
2767 
2768  std::stringstream ss;
2769 
2770  for (pugi::xml_node point_node = first_level_node.first_child(); point_node;
2771  point_node = point_node.next_sibling()) {
2772  if (point_node.type() == pugi::node_element) {
2773  name = std::string(point_node.name());
2774 
2775  // Read information about keypoints
2776  if (name == "u") {
2777  u = point_node.text().as_double();
2778  }
2779  else if (name == "v") {
2780  v = point_node.text().as_double();
2781  }
2782  else if (name == "size") {
2783  size = point_node.text().as_double();
2784  }
2785  else if (name == "angle") {
2786  angle = point_node.text().as_double();
2787  }
2788  else if (name == "response") {
2789  response = point_node.text().as_double();
2790  }
2791  else if (name == "octave") {
2792  octave = point_node.text().as_int();
2793  }
2794  else if (name == "class_id") {
2795  class_id = point_node.text().as_int();
2796  cv::KeyPoint keyPoint(cv::Point2f((float)u, (float)v), (float)size, (float)angle, (float)response, octave,
2797  (class_id + startClassId));
2798  m_trainKeyPoints.push_back(keyPoint);
2799  }
2800  else if (name == "image_id") {
2801  image_id = point_node.text().as_int();
2802  if (image_id != -1) {
2803 #ifdef VISP_HAVE_MODULE_IO
2804  // No training images if image_id == -1
2805  m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2806 #endif
2807  }
2808  }
2809  else if (name == "oX") {
2810  oX = point_node.text().as_double();
2811  }
2812  else if (name == "oY") {
2813  oY = point_node.text().as_double();
2814  }
2815  else if (name == "oZ") {
2816  oZ = point_node.text().as_double();
2817  m_trainPoints.push_back(cv::Point3f((float)oX, (float)oY, (float)oZ));
2818  }
2819  else if (name == "desc") {
2820  j = 0;
2821 
2822  for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
2823  descriptor_value_node = descriptor_value_node.next_sibling()) {
2824 
2825  if (descriptor_value_node.type() == pugi::node_element) {
2826  // Read descriptors values
2827  std::string parseStr(descriptor_value_node.text().as_string());
2828  ss.clear();
2829  ss.str(parseStr);
2830 
2831  if (!ss.fail()) {
2832  switch (descriptorType) {
2833  case CV_8U: {
2834  // Parse the numeric value [0 ; 255] to an int
2835  int parseValue;
2836  ss >> parseValue;
2837  trainDescriptorsTmp.at<unsigned char>(i, j) = (unsigned char)parseValue;
2838  } break;
2839 
2840  case CV_8S: {
2841  // Parse the numeric value [-128 ; 127] to an int
2842  int parseValue;
2843  ss >> parseValue;
2844  trainDescriptorsTmp.at<char>(i, j) = (char)parseValue;
2845  } break;
2846 
2847  case CV_16U:
2848  ss >> trainDescriptorsTmp.at<unsigned short int>(i, j);
2849  break;
2850 
2851  case CV_16S:
2852  ss >> trainDescriptorsTmp.at<short int>(i, j);
2853  break;
2854 
2855  case CV_32S:
2856  ss >> trainDescriptorsTmp.at<int>(i, j);
2857  break;
2858 
2859  case CV_32F:
2860  ss >> trainDescriptorsTmp.at<float>(i, j);
2861  break;
2862 
2863  case CV_64F:
2864  ss >> trainDescriptorsTmp.at<double>(i, j);
2865  break;
2866 
2867  default:
2868  ss >> trainDescriptorsTmp.at<float>(i, j);
2869  break;
2870  }
2871  }
2872  else {
2873  std::cerr << "Error when converting: " << ss.str() << std::endl;
2874  }
2875 
2876  j++;
2877  }
2878  }
2879  }
2880  }
2881  }
2882  i++;
2883  }
2884  }
2885 
2886  if (!append || m_trainDescriptors.empty()) {
2887  trainDescriptorsTmp.copyTo(m_trainDescriptors);
2888  }
2889  else {
2890  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2891  }
2892 #else
2893  throw vpException(vpException::ioError, "vpKeyPoint::loadLearningData() in xml needs built-in pugixml 3rdparty that is not available");
2894 #endif
2895  }
2896 
2897  // Convert OpenCV types to ViSP types for compatibility
2898  vpConvert::convertFromOpenCV(this->m_trainKeyPoints, m_referenceImagePointsList);
2899  vpConvert::convertFromOpenCV(this->m_trainPoints, m_trainVpPoints);
2900 
2901  // Add train descriptors in matcher object
2902  m_matcher->clear();
2903  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
2904 
2905  // Set m_reference_computed to true as we load a learning file
2906  m_reference_computed = true;
2907 
2908  // Set m_currentImageId
2909  m_currentImageId = (int)m_mapOfImages.size();
2910 }
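
/*
 * Usage sketch for a train-once / reuse-later workflow (a minimal sketch;
 * saveLearningData() is assumed to be the writer counterpart of the reader
 * above, and Iref / Icur are assumed images):
 *
 *   vpKeyPoint trainer;
 *   trainer.buildReference(Iref);
 *   trainer.saveLearningData("learning_data.bin", true);  // binary mode
 *
 *   vpKeyPoint detector;
 *   detector.loadLearningData("learning_data.bin", true); // binary mode
 *   detector.matchPoint(Icur);
 */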
2911 
2912 void vpKeyPoint::match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors,
2913  std::vector<cv::DMatch> &matches, double &elapsedTime)
2914 {
2915  double t = vpTime::measureTimeMs();
2916 
2917  if (m_useKnn) {
2918  m_knnMatches.clear();
2919 
2920  if (m_useMatchTrainToQuery) {
2921  std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
2922 
2923  // Match train descriptors to query descriptors
2924  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
2925  matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
2926 
2927  for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
2928  it1 != knnMatchesTmp.end(); ++it1) {
2929  std::vector<cv::DMatch> tmp;
2930  for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
2931  tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
2932  }
2933  m_knnMatches.push_back(tmp);
2934  }
2935 
2936  matches.resize(m_knnMatches.size());
2937  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
2938  }
2939  else {
2940  // Match query descriptors to train descriptors
2941  m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
2942  matches.resize(m_knnMatches.size());
2943  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
2944  }
2945  }
2946  else {
2947  matches.clear();
2948 
2949  if (m_useMatchTrainToQuery) {
2950  std::vector<cv::DMatch> matchesTmp;
2951  // Match train descriptors to query descriptors
2952  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
2953  matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
2954 
2955  for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
2956  matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
2957  }
2958  }
2959  else {
2960  // Match query descriptors to train descriptors
2961  m_matcher->match(queryDescriptors, matches);
2962  }
2963  }
2964  elapsedTime = vpTime::measureTimeMs() - t;
2965 }
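// The knn branch above keeps, for each query descriptor, its two nearest train
// descriptors; knnToDMatch() then reduces each pair to the single best match
// (the ratio filtering itself is applied later, in filterMatches()). A sketch
// of what the default query-to-train path computes, written directly against
// the OpenCV API and reusing the descriptor matrices named as in the function
// above (assuming binary descriptors matched with Hamming distance):
#if 0
  cv::BFMatcher bf(cv::NORM_HAMMING);
  std::vector<std::vector<cv::DMatch> > knn;
  bf.knnMatch(queryDescriptors, trainDescriptors, knn, 2); // 2 best candidates per query
  std::vector<cv::DMatch> best(knn.size());
  std::transform(knn.begin(), knn.end(), best.begin(), knnToDMatch);
#endif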
2966 
2967 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I) { return matchPoint(I, vpRect()); }
2968 
2969 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color) { return matchPoint(I_color, vpRect()); }
2970 
2971 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
2972  unsigned int width)
2973 {
2974  return matchPoint(I, vpRect(iP, width, height));
2975 }
2976 
2977 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
2978  unsigned int width)
2979 {
2980  return matchPoint(I_color, vpRect(iP, width, height));
2981 }
2982 
2983 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpRect &rectangle)
2984 {
2985  if (m_trainDescriptors.empty()) {
2986  std::cerr << "Reference is empty." << std::endl;
2987  if (!m_reference_computed) {
2988  std::cerr << "Reference is not computed." << std::endl;
2989  }
2990  std::cerr << "Matching is not possible." << std::endl;
2991 
2992  return 0;
2993  }
2994 
2995  if (m_useAffineDetection) {
2996  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
2997  std::vector<cv::Mat> listOfQueryDescriptors;
2998 
2999  // Detect keypoints and extract descriptors on multiple images
3000  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3001 
3002  // Flatten the lists of query keypoints detected on the skewed views
3003  m_queryKeyPoints.clear();
3004  for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3005  it != listOfQueryKeyPoints.end(); ++it) {
3006  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3007  }
3008 
3009  bool first = true;
3010  for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3011  ++it) {
3012  if (first) {
3013  first = false;
3014  it->copyTo(m_queryDescriptors);
3015  }
3016  else {
3017  m_queryDescriptors.push_back(*it);
3018  }
3019  }
3020  }
3021  else {
3022  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3023  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3024  }
3025 
3026  return matchPoint(m_queryKeyPoints, m_queryDescriptors);
3027 }
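// Typical calling sequence for the matching methods above (sketch; file names
// are hypothetical):
#if 0
  vpImage<unsigned char> Iref, Icur;
  vpImageIo::read(Iref, "reference.png");
  vpImageIo::read(Icur, "current.png");
  vpKeyPoint keypoint(vpKeyPoint::DETECTOR_ORB, vpKeyPoint::DESCRIPTOR_ORB, "BruteForce-Hamming");
  keypoint.buildReference(Iref);                      // detect + extract on the reference image
  unsigned int nbMatches = keypoint.matchPoint(Icur); // whole image, no ROI restriction
#endif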
3028 
3029 unsigned int vpKeyPoint::matchPoint(const std::vector<cv::KeyPoint> &queryKeyPoints, const cv::Mat &queryDescriptors)
3030 {
3031  m_queryKeyPoints = queryKeyPoints;
3032  m_queryDescriptors = queryDescriptors;
3033 
3034  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3035 
3036  if (m_filterType != noFilterMatching) {
3037  m_queryFilteredKeyPoints.clear();
3038  m_objectFilteredPoints.clear();
3039  m_filteredMatches.clear();
3040 
3041  filterMatches();
3042  }
3043  else {
3044  if (m_useMatchTrainToQuery) {
3045  // Keep only the query keypoints that have been matched with a train keypoint
3046  m_queryFilteredKeyPoints.clear();
3047  m_filteredMatches.clear();
3048  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3049  m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3050  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3051  }
3052  }
3053  else {
3054  m_queryFilteredKeyPoints = m_queryKeyPoints;
3055  m_filteredMatches = m_matches;
3056  }
3057 
3058  if (!m_trainPoints.empty()) {
3059  m_objectFilteredPoints.clear();
3060  // Add the 3D object points so that a given index in
3061  // m_queryFilteredKeyPoints and in m_objectFilteredPoints
3062  // refers to the same train object point
3063  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3064  // m_matches is normally ordered following the queryDescriptor index
3065  m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3066  }
3067  }
3068  }
3069 
3070  // Convert OpenCV type to ViSP type for compatibility
3071  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, m_currentImagePointsList);
3072  vpConvert::convertFromOpenCV(m_filteredMatches, m_matchedReferencePoints);
3073 
3074  return static_cast<unsigned int>(m_filteredMatches.size());
3075 }
3076 
3077 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
3078 {
3079  vpImageConvert::convert(I_color, m_I);
3080  return matchPoint(m_I, rectangle);
3081 }
3082 
3083 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3084  bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3085 {
3086  double error, elapsedTime;
3087  return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3088 }
3089 
3090 bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3091  bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3092 {
3093  double error, elapsedTime;
3094  return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
3095 }
3096 
3097 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3098  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3099  const vpRect &rectangle)
3100 {
3101  // Check if we have training descriptors
3102  if (m_trainDescriptors.empty()) {
3103  std::cerr << "Reference is empty." << std::endl;
3104  if (!m_reference_computed) {
3105  std::cerr << "Reference is not computed." << std::endl;
3106  }
3107  std::cerr << "Matching is not possible." << std::endl;
3108 
3109  return false;
3110  }
3111 
3112  if (m_useAffineDetection) {
3113  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3114  std::vector<cv::Mat> listOfQueryDescriptors;
3115 
3116  // Detect keypoints and extract descriptors on multiple images
3117  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3118 
3119  // Flatten the lists of query keypoints detected on the skewed views
3120  m_queryKeyPoints.clear();
3121  for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3122  it != listOfQueryKeyPoints.end(); ++it) {
3123  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3124  }
3125 
3126  bool first = true;
3127  for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3128  ++it) {
3129  if (first) {
3130  first = false;
3131  it->copyTo(m_queryDescriptors);
3132  }
3133  else {
3134  m_queryDescriptors.push_back(*it);
3135  }
3136  }
3137  }
3138  else {
3139  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3140  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3141  }
3142 
3143  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3144 
3145  elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3146 
3147  if (m_filterType != noFilterMatching) {
3148  m_queryFilteredKeyPoints.clear();
3149  m_objectFilteredPoints.clear();
3150  m_filteredMatches.clear();
3151 
3152  filterMatches();
3153  }
3154  else {
3155  if (m_useMatchTrainToQuery) {
3156  // Keep only the query keypoints that have been matched with a train keypoint
3157  m_queryFilteredKeyPoints.clear();
3158  m_filteredMatches.clear();
3159  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3160  m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3161  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3162  }
3163  }
3164  else {
3165  m_queryFilteredKeyPoints = m_queryKeyPoints;
3166  m_filteredMatches = m_matches;
3167  }
3168 
3169  if (!m_trainPoints.empty()) {
3170  m_objectFilteredPoints.clear();
3171  // Add the 3D object points so that a given index in
3172  // m_queryFilteredKeyPoints and in m_objectFilteredPoints
3173  // refers to the same train object point
3174  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3175  // m_matches is normally ordered following the queryDescriptor index
3176  m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3177  }
3178  }
3179  }
3180 
3181  // Convert OpenCV type to ViSP type for compatibility
3182  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, m_currentImagePointsList);
3183  vpConvert::convertFromOpenCV(m_filteredMatches, m_matchedReferencePoints);
3184 
3185  // std::numeric_limits<double>::max() is not used here: <windows.h> defines
3186  // a max() macro that breaks it unless #undef max is added
3187  error = DBL_MAX;
3188  m_ransacInliers.clear();
3189  m_ransacOutliers.clear();
3190 
3191  if (m_useRansacVVS) {
3192  std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3193  size_t cpt = 0;
3194  // Create a list of vpPoint with 2D coordinates (current keypoint
3195  // location) + 3D coordinates (world/object coordinates)
3196  for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3197  it != m_objectFilteredPoints.end(); ++it, cpt++) {
3198  vpPoint pt;
3199  pt.setWorldCoordinates(it->x, it->y, it->z);
3200 
3201  vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3202 
3203  double x = 0.0, y = 0.0;
3204  vpPixelMeterConversion::convertPoint(cam, imP, x, y);
3205  pt.set_x(x);
3206  pt.set_y(y);
3207 
3208  objectVpPoints[cpt] = pt;
3209  }
3210 
3211  std::vector<vpPoint> inliers;
3212  std::vector<unsigned int> inlierIndex;
3213 
3214  bool res = computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3215 
3216  std::map<unsigned int, bool> mapOfInlierIndex;
3217  m_matchRansacKeyPointsToPoints.clear();
3218 
3219  for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3220  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3221  m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3222  mapOfInlierIndex[*it] = true;
3223  }
3224 
3225  for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3226  if (mapOfInlierIndex.find((unsigned int)i) == mapOfInlierIndex.end()) {
3227  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3228  }
3229  }
3230 
3231  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3232 
3233  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3234  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3235  m_ransacInliers.begin(), matchRansacToVpImage);
3236 
3237  elapsedTime += m_poseTime;
3238 
3239  return res;
3240  }
3241  else {
3242  std::vector<cv::Point2f> imageFilteredPoints;
3243  cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3244  std::vector<int> inlierIndex;
3245  bool res = computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3246 
3247  std::map<int, bool> mapOfInlierIndex;
3248  m_matchRansacKeyPointsToPoints.clear();
3249 
3250  for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3251  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3252  m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3253  mapOfInlierIndex[*it] = true;
3254  }
3255 
3256  for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3257  if (mapOfInlierIndex.find((int)i) == mapOfInlierIndex.end()) {
3258  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3259  }
3260  }
3261 
3262  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3263 
3264  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3265  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3266  m_ransacInliers.begin(), matchRansacToVpImage);
3267 
3268  elapsedTime += m_poseTime;
3269 
3270  return res;
3271  }
3272 }
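// Pose estimation sketch (illustrative; reuses keypoint and Icur from the
// previous sketch, and assumes the reference was learned with associated 3D
// object points, e.g. via loadLearningData()):
#if 0
  vpCameraParameters cam; // camera intrinsics, to be set by the user
  vpHomogeneousMatrix cMo;
  double err = 0., elapsed = 0.;
  if (keypoint.matchPoint(Icur, cam, cMo, err, elapsed)) {
    std::cout << "Estimated pose cMo:\n" << cMo << "\nreprojection error: " << err
              << " (computed in " << elapsed << " ms)" << std::endl;
  }
#endif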
3273 
3274 bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3275  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3276  const vpRect &rectangle)
3277 {
3278  vpImageConvert::convert(I_color, m_I);
3279  return (matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
3280 }
3281 
3282 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox,
3283  vpImagePoint &centerOfGravity, const bool isPlanarObject,
3284  std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3285  double *meanDescriptorDistance, double *detectionScore, const vpRect &rectangle)
3286 {
3287  if (imPts1 != nullptr && imPts2 != nullptr) {
3288  imPts1->clear();
3289  imPts2->clear();
3290  }
3291 
3292  matchPoint(I, rectangle);
3293 
3294  double meanDescriptorDistanceTmp = 0.0;
3295  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3296  meanDescriptorDistanceTmp += (double)it->distance;
3297  }
3298 
3299  meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3300  double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3301 
3302  if (meanDescriptorDistance != nullptr) {
3303  *meanDescriptorDistance = meanDescriptorDistanceTmp;
3304  }
3305  if (detectionScore != nullptr) {
3306  *detectionScore = score;
3307  }
3308 
3309  if (m_filteredMatches.size() >= 4) {
3310  // Training / Reference 2D points
3311  std::vector<cv::Point2f> points1(m_filteredMatches.size());
3312  // Query / Current 2D points
3313  std::vector<cv::Point2f> points2(m_filteredMatches.size());
3314 
3315  for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3316  points1[i] = cv::Point2f(m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx].pt);
3317  points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx].pt);
3318  }
3319 
3320  std::vector<vpImagePoint> inliers;
3321  if (isPlanarObject) {
3322 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3323  cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3324 #else
3325  cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3326 #endif
3327 
3328  for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3329  // Compute reprojection error
3330  cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3331  realPoint.at<double>(0, 0) = points1[i].x;
3332  realPoint.at<double>(1, 0) = points1[i].y;
3333  realPoint.at<double>(2, 0) = 1.0;
3334 
3335  cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3336  double err_x = (reprojectedPoint.at<double>(0, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].x;
3337  double err_y = (reprojectedPoint.at<double>(1, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].y;
3338  double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3339 
3340  if (reprojectionError < 6.0) {
3341  inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3342  if (imPts1 != nullptr) {
3343  imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
3344  }
3345 
3346  if (imPts2 != nullptr) {
3347  imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3348  }
3349  }
3350  }
3351  }
3352  else if (m_filteredMatches.size() >= 8) {
3353  cv::Mat fundamentalInliers;
3354  cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3355 
3356  for (size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
3357  if (fundamentalInliers.at<uchar>((int)i, 0)) {
3358  inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3359 
3360  if (imPts1 != nullptr) {
3361  imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
3362  }
3363 
3364  if (imPts2 != nullptr) {
3365  imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3366  }
3367  }
3368  }
3369  }
3370 
3371  if (!inliers.empty()) {
3372  // Build a polygon with the list of inlier keypoints detected in the
3373  // current image to get the bounding box
3374  vpPolygon polygon(inliers);
3375  boundingBox = polygon.getBoundingBox();
3376 
3377  // Compute the center of gravity
3378  double meanU = 0.0, meanV = 0.0;
3379  for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
3380  meanU += it->get_u();
3381  meanV += it->get_v();
3382  }
3383 
3384  meanU /= (double)inliers.size();
3385  meanV /= (double)inliers.size();
3386 
3387  centerOfGravity.set_u(meanU);
3388  centerOfGravity.set_v(meanV);
3389  }
3390  }
3391  else {
3392  // Too few matches
3393  return false;
3394  }
3395 
3396  if (m_detectionMethod == detectionThreshold) {
3397  return meanDescriptorDistanceTmp < m_detectionThreshold;
3398  }
3399  else {
3400  return score > m_detectionScore;
3401  }
3402 }
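// Detection criterion used above: score = nbFilteredMatches / meanDescriptorDistance.
// For instance, 50 filtered matches with a mean descriptor distance of 80 give a
// score of 0.625. In detectionThreshold mode the object is declared detected when
// the mean distance is below m_detectionThreshold (default 100.0); in detectionScore
// mode, when the score exceeds m_detectionScore (default 0.15).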
3403 
3404 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam,
3405  vpHomogeneousMatrix &cMo, double &error, double &elapsedTime, vpRect &boundingBox,
3406  vpImagePoint &centerOfGravity, bool (*func)(const vpHomogeneousMatrix &),
3407  const vpRect &rectangle)
3408 {
3409  bool isMatchOk = matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3410  if (isMatchOk) {
3411  // Use the estimated pose to project the model points into the image
3412  vpPoint pt;
3413  vpImagePoint imPt;
3414  std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
3415  size_t cpt = 0;
3416  for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
3417  pt = *it;
3418  pt.project(cMo);
3419  vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), imPt);
3420  modelImagePoints[cpt] = imPt;
3421  }
3422 
3423  // Build a polygon with the list of model image points to get the bounding
3424  // box
3425  vpPolygon polygon(modelImagePoints);
3426  boundingBox = polygon.getBoundingBox();
3427 
3428  // Compute the center of gravity of the current inlier keypoints
3429  double meanU = 0.0, meanV = 0.0;
3430  for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
3431  meanU += it->get_u();
3432  meanV += it->get_v();
3433  }
3434 
3435  meanU /= (double)m_ransacInliers.size();
3436  meanV /= (double)m_ransacInliers.size();
3437 
3438  centerOfGravity.set_u(meanU);
3439  centerOfGravity.set_v(meanV);
3440  }
3441 
3442  return isMatchOk;
3443 }
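// Unlike the homography / fundamental-matrix variant above, here the bounding
// box is obtained by projecting all the learned 3D points with the estimated
// pose cMo, and the center of gravity is computed from the RANSAC inliers
// rather than from reprojection-error inliers.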
3444 
3445 void vpKeyPoint::detectExtractAffine(const vpImage<unsigned char> &I,
3446  std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
3447  std::vector<cv::Mat> &listOfDescriptors,
3448  std::vector<vpImage<unsigned char> > *listOfAffineI)
3449 {
3450 #if 0
3451  cv::Mat img;
3452  vpImageConvert::convert(I, img);
3453  listOfKeypoints.clear();
3454  listOfDescriptors.clear();
3455 
3456  for (int tl = 1; tl < 6; tl++) {
3457  double t = pow(2, 0.5 * tl);
3458  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3459  std::vector<cv::KeyPoint> keypoints;
3460  cv::Mat descriptors;
3461 
3462  cv::Mat timg, mask, Ai;
3463  img.copyTo(timg);
3464 
3465  affineSkew(t, phi, timg, mask, Ai);
3466 
3467 
3468  if (listOfAffineI != nullptr) {
3469  cv::Mat img_disp;
3470  bitwise_and(mask, timg, img_disp);
3471  vpImage<unsigned char> tI;
3472  vpImageConvert::convert(img_disp, tI);
3473  listOfAffineI->push_back(tI);
3474  }
3475 #if 0
3476  cv::Mat img_disp;
3477  cv::bitwise_and(mask, timg, img_disp);
3478  cv::namedWindow("Skew", cv::WINDOW_AUTOSIZE); // Create a window for display.
3479  cv::imshow("Skew", img_disp);
3480  cv::waitKey(0);
3481 #endif
3482 
3483  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3484  it != m_detectors.end(); ++it) {
3485  std::vector<cv::KeyPoint> kp;
3486  it->second->detect(timg, kp, mask);
3487  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3488  }
3489 
3490  double elapsedTime;
3491  extract(timg, keypoints, descriptors, elapsedTime);
3492 
3493  for (unsigned int i = 0; i < keypoints.size(); i++) {
3494  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3495  cv::Mat kpt_t = Ai * cv::Mat(kpt);
3496  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
3497  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
3498  }
3499 
3500  listOfKeypoints.push_back(keypoints);
3501  listOfDescriptors.push_back(descriptors);
3502  }
3503  }
3504 
3505 #else
3506  cv::Mat img;
3507  vpImageConvert::convert(I, img);
3508 
3509  // Create a vector for storing the affine skew parameters
3510  std::vector<std::pair<double, int> > listOfAffineParams;
3511  for (int tl = 1; tl < 6; tl++) {
3512  double t = pow(2, 0.5 * tl);
3513  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3514  listOfAffineParams.push_back(std::pair<double, int>(t, phi));
3515  }
3516  }
3517 
3518  listOfKeypoints.resize(listOfAffineParams.size());
3519  listOfDescriptors.resize(listOfAffineParams.size());
3520 
3521  if (listOfAffineI != nullptr) {
3522  listOfAffineI->resize(listOfAffineParams.size());
3523  }
3524 
3525 #ifdef VISP_HAVE_OPENMP
3526 #pragma omp parallel for
3527 #endif
3528  for (int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
3529  std::vector<cv::KeyPoint> keypoints;
3530  cv::Mat descriptors;
3531 
3532  cv::Mat timg, mask, Ai;
3533  img.copyTo(timg);
3534 
3535  affineSkew(listOfAffineParams[(size_t)cpt].first, listOfAffineParams[(size_t)cpt].second, timg, mask, Ai);
3536 
3537  if (listOfAffineI != nullptr) {
3538  cv::Mat img_disp;
3539  bitwise_and(mask, timg, img_disp);
3540  vpImage<unsigned char> tI;
3541  vpImageConvert::convert(img_disp, tI);
3542  (*listOfAffineI)[(size_t)cpt] = tI;
3543  }
3544 
3545 #if 0
3546  cv::Mat img_disp;
3547  cv::bitwise_and(mask, timg, img_disp);
3548  cv::namedWindow("Skew", cv::WINDOW_AUTOSIZE); // Create a window for display.
3549  cv::imshow("Skew", img_disp);
3550  cv::waitKey(0);
3551 #endif
3552 
3553  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3554  it != m_detectors.end(); ++it) {
3555  std::vector<cv::KeyPoint> kp;
3556  it->second->detect(timg, kp, mask);
3557  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3558  }
3559 
3560  double elapsedTime;
3561  extract(timg, keypoints, descriptors, elapsedTime);
3562 
3563  for (size_t i = 0; i < keypoints.size(); i++) {
3564  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3565  cv::Mat kpt_t = Ai * cv::Mat(kpt);
3566  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
3567  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
3568  }
3569 
3570  listOfKeypoints[(size_t)cpt] = keypoints;
3571  listOfDescriptors[(size_t)cpt] = descriptors;
3572  }
3573 #endif
3574 }
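// The affine simulation above follows the ASIFT sampling scheme: tilts
// t = 2^(tl/2) for tl = 1..5 (about 1.41, 2, 2.83, 4 and 5.66) combined with
// in-plane rotations phi stepped by (int)(72/t) degrees in [0, 180), which
// yields 4 + 5 + 8 + 10 + 15 = 42 skewed views. Each view is detected and
// described independently (in parallel when OpenMP is available) and the
// keypoint coordinates are mapped back to the original image through Ai.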
3575 
3576 void vpKeyPoint::reset()
3577 {
3578  // vpBasicKeyPoint class
3579  m_referenceImagePointsList.clear();
3580  m_currentImagePointsList.clear();
3581  m_matchedReferencePoints.clear();
3582  m_reference_computed = false;
3583 
3584  m_computeCovariance = false;
3585  m_covarianceMatrix = vpMatrix();
3586  m_currentImageId = 0;
3587  m_detectionMethod = detectionScore;
3588  m_detectionScore = 0.15;
3589  m_detectionThreshold = 100.0;
3590  m_detectionTime = 0.0;
3591  m_detectorNames.clear();
3592  m_detectors.clear();
3593  m_extractionTime = 0.0;
3594  m_extractorNames.clear();
3595  m_extractors.clear();
3596  m_filteredMatches.clear();
3597  m_filterType = ratioDistanceThreshold;
3598  m_imageFormat = jpgImageFormat;
3599  m_knnMatches.clear();
3600  m_mapOfImageId.clear();
3601  m_mapOfImages.clear();
3602  m_matcher = cv::Ptr<cv::DescriptorMatcher>();
3603  m_matcherName = "BruteForce-Hamming";
3604  m_matches.clear();
3605  m_matchingFactorThreshold = 2.0;
3606  m_matchingRatioThreshold = 0.85;
3607  m_matchingTime = 0.0;
3608  m_matchRansacKeyPointsToPoints.clear();
3609  m_nbRansacIterations = 200;
3610  m_nbRansacMinInlierCount = 100;
3611  m_objectFilteredPoints.clear();
3612  m_poseTime = 0.0;
3613  m_queryDescriptors = cv::Mat();
3614  m_queryFilteredKeyPoints.clear();
3615  m_queryKeyPoints.clear();
3616  m_ransacConsensusPercentage = 20.0;
3617  m_ransacFilterFlag = vpPose::NO_FILTER;
3618  m_ransacInliers.clear();
3619  m_ransacOutliers.clear();
3620  m_ransacParallel = true;
3621  m_ransacParallelNbThreads = 0;
3622  m_ransacReprojectionError = 6.0;
3623  m_ransacThreshold = 0.01;
3624  m_trainDescriptors = cv::Mat();
3625  m_trainKeyPoints.clear();
3626  m_trainPoints.clear();
3627  m_trainVpPoints.clear();
3628  m_useAffineDetection = false;
3629 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
3630  m_useBruteForceCrossCheck = true;
3631 #endif
3632  m_useConsensusPercentage = false;
3633  m_useKnn = true; // as m_filterType == ratioDistanceThreshold
3634  m_useMatchTrainToQuery = false;
3635  m_useRansacVVS = true;
3636  m_useSingleMatchFilter = true;
3637 
3638  m_detectorNames.push_back("ORB");
3639  m_extractorNames.push_back("ORB");
3640 
3641  init();
3642 }
3643 
3644 void vpKeyPoint::saveLearningData(const std::string &filename, bool binaryMode, bool saveTrainingImages)
3645 {
3646  std::string parent = vpIoTools::getParent(filename);
3647  if (!parent.empty()) {
3648  vpIoTools::makeDirectory(parent);
3649  }
3650 
3651  std::map<int, std::string> mapOfImgPath;
3652  if (saveTrainingImages) {
3653 #ifdef VISP_HAVE_MODULE_IO
3654  // Save the training image files in the same directory
3655  unsigned int cpt = 0;
3656 
3657  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3658  ++it, cpt++) {
3659  if (cpt > 999) {
3660  throw vpException(vpException::fatalError, "The number of training images to save is too big!");
3661  }
3662 
3663  std::stringstream ss;
3664  ss << "train_image_" << std::setfill('0') << std::setw(3) << cpt;
3665 
3666  switch (m_imageFormat) {
3667  case jpgImageFormat:
3668  ss << ".jpg";
3669  break;
3670 
3671  case pngImageFormat:
3672  ss << ".png";
3673  break;
3674 
3675  case ppmImageFormat:
3676  ss << ".ppm";
3677  break;
3678 
3679  case pgmImageFormat:
3680  ss << ".pgm";
3681  break;
3682 
3683  default:
3684  ss << ".png";
3685  break;
3686  }
3687 
3688  std::string imgFilename = ss.str();
3689  mapOfImgPath[it->first] = imgFilename;
3690  vpImageIo::write(it->second, parent + (!parent.empty() ? "/" : "") + imgFilename);
3691  }
3692 #else
3693  std::cout << "Warning: in vpKeyPoint::saveLearningData() training images "
3694  "are not saved because "
3695  "visp_io module is not available !"
3696  << std::endl;
3697 #endif
3698  }
3699 
3700  bool have3DInfo = m_trainPoints.size() > 0;
3701  if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
3702  throw vpException(vpException::fatalError, "List of keypoints and list of 3D points have different sizes!");
3703  }
3704 
3705  if (binaryMode) {
3706  // Save the learning data into little endian binary file.
3707  std::ofstream file(filename.c_str(), std::ofstream::binary);
3708  if (!file.is_open()) {
3709  throw vpException(vpException::ioError, "Cannot create the file.");
3710  }
3711 
3712  // Write info about training images
3713  int nbImgs = (int)mapOfImgPath.size();
3714  vpIoTools::writeBinaryValueLE(file, nbImgs);
3715 
3716 #ifdef VISP_HAVE_MODULE_IO
3717  for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3718  // Write image_id
3719  int id = it->first;
3720  vpIoTools::writeBinaryValueLE(file, id);
3721 
3722  // Write image path
3723  std::string path = it->second;
3724  int length = (int)path.length();
3725  vpIoTools::writeBinaryValueLE(file, length);
3726 
3727  for (int cpt = 0; cpt < length; cpt++) {
3728  file.write((char *)(&path[(size_t)cpt]), sizeof(path[(size_t)cpt]));
3729  }
3730  }
3731 #endif
3732 
3733  // Write if we have 3D point information
3734  int have3DInfoInt = have3DInfo ? 1 : 0;
3735  vpIoTools::writeBinaryValueLE(file, have3DInfoInt);
3736 
3737  int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3738  int descriptorType = m_trainDescriptors.type();
3739 
3740  // Write the number of descriptors
3741  vpIoTools::writeBinaryValueLE(file, nRows);
3742 
3743  // Write the size of the descriptor
3744  vpIoTools::writeBinaryValueLE(file, nCols);
3745 
3746  // Write the type of the descriptor
3747  vpIoTools::writeBinaryValueLE(file, descriptorType);
3748 
3749  for (int i = 0; i < nRows; i++) {
3750  unsigned int i_ = (unsigned int)i;
3751  // Write u
3752  float u = m_trainKeyPoints[i_].pt.x;
3753  vpIoTools::writeBinaryValueLE(file, u);
3754 
3755  // Write v
3756  float v = m_trainKeyPoints[i_].pt.y;
3757  vpIoTools::writeBinaryValueLE(file, v);
3758 
3759  // Write size
3760  float size = m_trainKeyPoints[i_].size;
3761  vpIoTools::writeBinaryValueLE(file, size);
3762 
3763  // Write angle
3764  float angle = m_trainKeyPoints[i_].angle;
3765  vpIoTools::writeBinaryValueLE(file, angle);
3766 
3767  // Write response
3768  float response = m_trainKeyPoints[i_].response;
3769  vpIoTools::writeBinaryValueLE(file, response);
3770 
3771  // Write octave
3772  int octave = m_trainKeyPoints[i_].octave;
3773  vpIoTools::writeBinaryValueLE(file, octave);
3774 
3775  // Write class_id
3776  int class_id = m_trainKeyPoints[i_].class_id;
3777  vpIoTools::writeBinaryValueLE(file, class_id);
3778 
3779 // Write image_id
3780 #ifdef VISP_HAVE_MODULE_IO
3781  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3782  int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
3783  vpIoTools::writeBinaryValueLE(file, image_id);
3784 #else
3785  int image_id = -1;
3786  // file.write((char *)(&image_id), sizeof(image_id));
3787  vpIoTools::writeBinaryValueLE(file, image_id);
3788 #endif
3789 
3790  if (have3DInfo) {
3791  float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
3792  // Write oX
3793  vpIoTools::writeBinaryValueLE(file, oX);
3794 
3795  // Write oY
3796  vpIoTools::writeBinaryValueLE(file, oY);
3797 
3798  // Write oZ
3799  vpIoTools::writeBinaryValueLE(file, oZ);
3800  }
3801 
3802  for (int j = 0; j < nCols; j++) {
3803  // Write the descriptor value
3804  switch (descriptorType) {
3805  case CV_8U:
3806  file.write((char *)(&m_trainDescriptors.at<unsigned char>(i, j)),
3807  sizeof(m_trainDescriptors.at<unsigned char>(i, j)));
3808  break;
3809 
3810  case CV_8S:
3811  file.write((char *)(&m_trainDescriptors.at<char>(i, j)), sizeof(m_trainDescriptors.at<char>(i, j)));
3812  break;
3813 
3814  case CV_16U:
3815  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<unsigned short int>(i, j));
3816  break;
3817 
3818  case CV_16S:
3819  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<short int>(i, j));
3820  break;
3821 
3822  case CV_32S:
3823  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<int>(i, j));
3824  break;
3825 
3826  case CV_32F:
3827  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<float>(i, j));
3828  break;
3829 
3830  case CV_64F:
3831  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<double>(i, j));
3832  break;
3833 
3834  default:
3835  throw vpException(vpException::fatalError, "Problem with the data type of descriptors!");
3836  break;
3837  }
3838  }
3839  }
3840 
3841  file.close();
3842  }
3843  else {
3844 #if defined(VISP_HAVE_PUGIXML)
3845  pugi::xml_document doc;
3846  pugi::xml_node node = doc.append_child(pugi::node_declaration);
3847  node.append_attribute("version") = "1.0";
3848  node.append_attribute("encoding") = "UTF-8";
3849 
3850  if (!doc) {
3851  throw vpException(vpException::ioError, "Error with file: ", filename.c_str());
3852  }
3853 
3854  pugi::xml_node root_node = doc.append_child("LearningData");
3855 
3856  // Write the training images info
3857  pugi::xml_node image_node = root_node.append_child("TrainingImageInfo");
3858 
3859 #ifdef VISP_HAVE_MODULE_IO
3860  for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3861  pugi::xml_node image_info_node = image_node.append_child("trainImg");
3862  image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
3863  std::stringstream ss;
3864  ss << it->first;
3865  image_info_node.append_attribute("image_id") = ss.str().c_str();
3866  }
3867 #endif
3868 
3869  // Write information about descriptors
3870  pugi::xml_node descriptors_info_node = root_node.append_child("DescriptorsInfo");
3871 
3872  int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3873  int descriptorType = m_trainDescriptors.type();
3874 
3875  // Write the number of rows
3876  descriptors_info_node.append_child("nrows").append_child(pugi::node_pcdata).text() = nRows;
3877 
3878  // Write the number of cols
3879  descriptors_info_node.append_child("ncols").append_child(pugi::node_pcdata).text() = nCols;
3880 
3881  // Write the descriptors type
3882  descriptors_info_node.append_child("type").append_child(pugi::node_pcdata).text() = descriptorType;
3883 
3884  for (int i = 0; i < nRows; i++) {
3885  unsigned int i_ = (unsigned int)i;
3886  pugi::xml_node descriptor_node = root_node.append_child("DescriptorInfo");
3887 
3888  descriptor_node.append_child("u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
3889  descriptor_node.append_child("v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
3890  descriptor_node.append_child("size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
3891  descriptor_node.append_child("angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
3892  descriptor_node.append_child("response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
3893  descriptor_node.append_child("octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
3894  descriptor_node.append_child("class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
3895 
3896 #ifdef VISP_HAVE_MODULE_IO
3897  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3898  descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() =
3899  ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
3900 #else
3901  descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() = -1;
3902 #endif
3903 
3904  if (have3DInfo) {
3905  descriptor_node.append_child("oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
3906  descriptor_node.append_child("oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
3907  descriptor_node.append_child("oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
3908  }
3909 
3910  pugi::xml_node desc_node = descriptor_node.append_child("desc");
3911 
3912  for (int j = 0; j < nCols; j++) {
3913  switch (descriptorType) {
3914  case CV_8U: {
3915  // Promote an unsigned char to an int
3916  // val_tmp holds the numeric value that will be written.
3917  // The value is saved in numeric form, otherwise the xml library
3918  // is not able to parse it. A better solution may be possible
3919  int val_tmp = m_trainDescriptors.at<unsigned char>(i, j);
3920  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
3921  } break;
3922 
3923  case CV_8S: {
3924  // Promote a char to an int
3925  // val_tmp holds the numeric value that will be written.
3926  // The value is saved in numeric form, otherwise the xml library
3927  // is not able to parse it. A better solution may be possible
3928  int val_tmp = m_trainDescriptors.at<char>(i, j);
3929  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
3930  } break;
3931 
3932  case CV_16U:
3933  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
3934  m_trainDescriptors.at<unsigned short int>(i, j);
3935  break;
3936 
3937  case CV_16S:
3938  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<short int>(i, j);
3939  break;
3940 
3941  case CV_32S:
3942  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<int>(i, j);
3943  break;
3944 
3945  case CV_32F:
3946  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<float>(i, j);
3947  break;
3948 
3949  case CV_64F:
3950  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<double>(i, j);
3951  break;
3952 
3953  default:
3954  throw vpException(vpException::fatalError, "Problem with the data type of descriptors!");
3955  break;
3956  }
3957  }
3958  }
3959 
3960  doc.save_file(filename.c_str(), PUGIXML_TEXT(" "), pugi::format_default, pugi::encoding_utf8);
3961 #else
3962  throw vpException(vpException::ioError, "vpKeyPoint::saveLearningData() in xml format requires the built-in pugixml 3rdparty, which is not available");
3963 #endif
3964  }
3965 }
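// Layout of the little-endian binary learning file written above:
//   int32 nbImages; per image: int32 image_id, int32 pathLength, path bytes;
//   int32 have3DInfo; int32 nRows; int32 nCols; int32 descriptorType;
//   then per keypoint: float u, v, size, angle, response; int32 octave,
//   class_id, image_id; float oX, oY, oZ when have3DInfo is set; and one
//   descriptor row whose element type matches descriptorType.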
3966 
3967 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
3968 #ifndef DOXYGEN_SHOULD_SKIP_THIS
3969 // From OpenCV 2.4.11 source code.
3970 struct KeypointResponseGreaterThanThreshold
3971 {
3972  KeypointResponseGreaterThanThreshold(float _value) : value(_value) { }
3973  inline bool operator()(const cv::KeyPoint &kpt) const { return kpt.response >= value; }
3974  float value;
3975 };
3976 
3977 struct KeypointResponseGreater
3978 {
3979  inline bool operator()(const cv::KeyPoint &kp1, const cv::KeyPoint &kp2) const { return kp1.response > kp2.response; }
3980 };
3981 
3982 // takes keypoints and culls them by the response
3983 void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints, int n_points)
3984 {
3985  // this is only necessary if the keypoints size is greater than the number
3986  // of desired points.
3987  if (n_points >= 0 && keypoints.size() > (size_t)n_points) {
3988  if (n_points == 0) {
3989  keypoints.clear();
3990  return;
3991  }
3992  // first use nth element to partition the keypoints into the best and
3993  // worst.
3994  std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
3995  // this is the boundary response, and in the case of FAST may be ambiguous
3996  float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
3997  // use std::partition to grab all of the keypoints with the boundary
3998  // response.
3999  std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4000  keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
4001  // resize the keypoints, given this new end point. nth_element and
4002  // partition reordered the points in place
4003  keypoints.resize((size_t)(new_end - keypoints.begin()));
4004  }
4005 }
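// Example (sketch): keep only the 500 keypoints with the strongest responses.
//   vpKeyPoint::KeyPointsFilter::retainBest(keypoints, 500);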
4006 
4007 struct RoiPredicate
4008 {
4009  RoiPredicate(const cv::Rect &_r) : r(_r) { }
4010 
4011  bool operator()(const cv::KeyPoint &keyPt) const { return !r.contains(keyPt.pt); }
4012 
4013  cv::Rect r;
4014 };
4015 
4016 void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4017  int borderSize)
4018 {
4019  if (borderSize > 0) {
4020  if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4021  keypoints.clear();
4022  else
4023  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4024  RoiPredicate(cv::Rect(
4025  cv::Point(borderSize, borderSize),
4026  cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4027  keypoints.end());
4028  }
4029 }
4030 
4031 struct SizePredicate
4032 {
4033  SizePredicate(float _minSize, float _maxSize) : minSize(_minSize), maxSize(_maxSize) { }
4034 
4035  bool operator()(const cv::KeyPoint &keyPt) const
4036  {
4037  float size = keyPt.size;
4038  return (size < minSize) || (size > maxSize);
4039  }
4040 
4041  float minSize, maxSize;
4042 };
4043 
4044 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize)
4045 {
4046  CV_Assert(minSize >= 0);
4047  CV_Assert(maxSize >= 0);
4048  CV_Assert(minSize <= maxSize);
4049 
4050  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4051 }
4052 
4053 class MaskPredicate
4054 {
4055 public:
4056  MaskPredicate(const cv::Mat &_mask) : mask(_mask) { }
4057  bool operator()(const cv::KeyPoint &key_pt) const
4058  {
4059  return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
4060  }
4061 
4062 private:
4063  const cv::Mat mask;
4064 };
4065 
4066 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask)
4067 {
4068  if (mask.empty())
4069  return;
4070 
4071  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4072 }
4073 
4074 struct KeyPoint_LessThan
4075 {
4076  KeyPoint_LessThan(const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) { }
4077  bool operator()(/*int i, int j*/ size_t i, size_t j) const
4078  {
4079  const cv::KeyPoint &kp1 = (*kp)[/*(size_t)*/ i];
4080  const cv::KeyPoint &kp2 = (*kp)[/*(size_t)*/ j];
4081  if (!vpMath::equal(kp1.pt.x, kp2.pt.x,
4082  std::numeric_limits<float>::epsilon())) { // if (kp1.pt.x !=
4083  // kp2.pt.x) {
4084  return kp1.pt.x < kp2.pt.x;
4085  }
4086 
4087  if (!vpMath::equal(kp1.pt.y, kp2.pt.y,
4088  std::numeric_limits<float>::epsilon())) { // if (kp1.pt.y !=
4089  // kp2.pt.y) {
4090  return kp1.pt.y < kp2.pt.y;
4091  }
4092 
4093  if (!vpMath::equal(kp1.size, kp2.size,
4094  std::numeric_limits<float>::epsilon())) { // if (kp1.size !=
4095  // kp2.size) {
4096  return kp1.size > kp2.size;
4097  }
4098 
4099  if (!vpMath::equal(kp1.angle, kp2.angle,
4100  std::numeric_limits<float>::epsilon())) { // if (kp1.angle !=
4101  // kp2.angle) {
4102  return kp1.angle < kp2.angle;
4103  }
4104 
4105  if (!vpMath::equal(kp1.response, kp2.response,
4106  std::numeric_limits<float>::epsilon())) { // if (kp1.response !=
4107  // kp2.response) {
4108  return kp1.response > kp2.response;
4109  }
4110 
4111  if (kp1.octave != kp2.octave) {
4112  return kp1.octave > kp2.octave;
4113  }
4114 
4115  if (kp1.class_id != kp2.class_id) {
4116  return kp1.class_id > kp2.class_id;
4117  }
4118 
4119  return i < j;
4120  }
4121  const std::vector<cv::KeyPoint> *kp;
4122 };
4123 
4124 void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4125 {
4126  size_t i, j, n = keypoints.size();
4127  std::vector<size_t> kpidx(n);
4128  std::vector<uchar> mask(n, (uchar)1);
4129 
4130  for (i = 0; i < n; i++) {
4131  kpidx[i] = i;
4132  }
4133  std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4134  for (i = 1, j = 0; i < n; i++) {
4135  cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4136  cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4137  // if (kp1.pt.x != kp2.pt.x || kp1.pt.y != kp2.pt.y || kp1.size !=
4138  // kp2.size || kp1.angle != kp2.angle) {
4139  if (!vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4140  !vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4141  !vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4142  !vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4143  j = i;
4144  }
4145  else {
4146  mask[kpidx[i]] = 0;
4147  }
4148  }
4149 
4150  for (i = j = 0; i < n; i++) {
4151  if (mask[i]) {
4152  if (i != j) {
4153  keypoints[j] = keypoints[i];
4154  }
4155  j++;
4156  }
4157  }
4158  keypoints.resize(j);
4159 }
4160 
4161 /*
4162  * PyramidAdaptedFeatureDetector
4163  */
4164 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &detector,
4165  int maxLevel)
4166  : m_detector(detector), m_maxLevel(maxLevel)
4167 { }
4168 
4169 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty() const
4170 {
4171  return m_detector.empty() || m_detector->empty();
4172 }
4173 
4174 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4175  CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4176 {
4177  detectImpl(image.getMat(), keypoints, mask.getMat());
4178 }
4179 
4180 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4181  const cv::Mat &mask) const
4182 {
4183  cv::Mat src = image;
4184  cv::Mat src_mask = mask;
4185 
4186  cv::Mat dilated_mask;
4187  if (!mask.empty()) {
4188  cv::dilate(mask, dilated_mask, cv::Mat());
4189  cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4190  mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4191  dilated_mask = mask255;
4192  }
4193 
4194  for (int l = 0, multiplier = 1; l <= m_maxLevel; ++l, multiplier *= 2) {
4195  // Detect on current level of the pyramid
4196  std::vector<cv::KeyPoint> new_pts;
4197  m_detector->detect(src, new_pts, src_mask);
4198  std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4199  for (; it != end; ++it) {
4200  it->pt.x *= multiplier;
4201  it->pt.y *= multiplier;
4202  it->size *= multiplier;
4203  it->octave = l;
4204  }
4205  keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4206 
4207  // Downsample
4208  if (l < m_maxLevel) {
4209  cv::Mat dst;
4210  pyrDown(src, dst);
4211  src = dst;
4212 
4213  if (!mask.empty())
4214  resize(dilated_mask, src_mask, src.size(), 0, 0, cv::INTER_AREA);
4215  }
4216  }
4217 
4218  if (!mask.empty())
4219  vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4220 }
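// Each pyramid level l is detected at reduced resolution; the keypoint
// position and size are then scaled back by 2^l and the octave field is set
// to l, so the returned coordinates always refer to the full-resolution image.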
4221 #endif
4222 #endif
4223 
4224 END_VISP_NAMESPACE
4225 
4226 #elif !defined(VISP_BUILD_SHARED_LIBS)
4227 // Work around to avoid warning: libvisp_vision.a(vpKeyPoint.cpp.o) has no symbols
4228 void dummy_vpKeyPoint() { };
4229 #endif