vpKeyPoint.cpp
/*
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See https://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Key point functionalities.
 */

#include <iomanip>
#include <limits>

#include <visp3/core/vpIoTools.h>
#include <visp3/vision/vpKeyPoint.h>

#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D)

#if defined(VISP_HAVE_PUGIXML)
#include <pugixml.hpp>
#endif

namespace
{
// Specific type transformation functions
inline cv::DMatch knnToDMatch(const std::vector<cv::DMatch> &knnMatches)
{
  if (knnMatches.size() > 0) {
    return knnMatches[0];
  }

  return cv::DMatch();
}

inline vpImagePoint matchRansacToVpImage(const std::pair<cv::KeyPoint, cv::Point3f> &pair)
{
  return vpImagePoint(pair.first.pt.y, pair.first.pt.x);
}

} // namespace
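
// Illustrative note (sketch, not part of the original sources): these helpers
// are shaped as unary functors for std::transform-style algorithms, e.g. to
// flatten a k-NN match list down to the best match per query descriptor:
//
//   std::vector<std::vector<cv::DMatch> > knnMatches; // filled by knnMatch()
//   std::vector<cv::DMatch> bestMatches(knnMatches.size());
//   std::transform(knnMatches.begin(), knnMatches.end(), bestMatches.begin(), knnToDMatch);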

vpKeyPoint::vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
    m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();

  m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
  m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);

  init();
}

vpKeyPoint::vpKeyPoint(const std::string &detectorName, const std::string &extractorName,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
    m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();

  m_detectorNames.push_back(detectorName);
  m_extractorNames.push_back(extractorName);

  init();
}

vpKeyPoint::vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
    m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
    m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
    m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
    m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
    m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
    m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0),
    m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(), m_ransacParallel(false),
    m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01), m_trainDescriptors(),
    m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();
  init();
}
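
// Construction sketch (illustrative, assuming the named detector/extractor
// are available in the OpenCV build): the three constructors above only
// differ in how the detector and extractor are designated, e.g.
//
//   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming", vpKeyPoint::ratioDistanceThreshold);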

void vpKeyPoint::affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
{
  int h = img.rows;
  int w = img.cols;

  mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));

  cv::Mat A = cv::Mat::eye(2, 3, CV_32F);

  // if (phi != 0.0) {
  if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    phi *= M_PI / 180.;
    double s = sin(phi);
    double c = cos(phi);

    A = (cv::Mat_<float>(2, 2) << c, -s, s, c);

    cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
    cv::Mat tcorners = corners * A.t();
    cv::Mat tcorners_x, tcorners_y;
    tcorners.col(0).copyTo(tcorners_x);
    tcorners.col(1).copyTo(tcorners_y);
    std::vector<cv::Mat> channels;
    channels.push_back(tcorners_x);
    channels.push_back(tcorners_y);
    cv::merge(channels, tcorners);

    cv::Rect rect = cv::boundingRect(tcorners);
    A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);

    cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
  }
  // if (tilt != 1.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
    double s = 0.8 * sqrt(tilt * tilt - 1);
    cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
    cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
    A.row(0) = A.row(0) / tilt;
  }
  // if (tilt != 1.0 || phi != 0.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
      std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    h = img.rows;
    w = img.cols;
    cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
  }
  cv::invertAffineTransform(A, Ai);
}
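
// Rationale sketch for affineSkew() (illustrative, not part of the original
// sources): a tilt t simulates an out-of-plane view by subsampling the image
// along x by a factor 1/t; the Gaussian blur with sigma = 0.8 * sqrt(t^2 - 1)
// is the anti-aliasing filter recommended by the ASIFT authors before that
// subsampling. detectExtractAffine() is expected to sweep a (tilt, phi) grid
// in that spirit, e.g. t = 2^(k/2) with the phi step shrinking as 1/t, each
// pair yielding a warped image, its validity mask, and the inverse affine
// transform Ai used to map the detected keypoints back to the original image.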

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I) { return buildReference(I, vpRect()); }

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color) { return buildReference(I_color, vpRect()); }

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
                                        unsigned int width)
{
  return buildReference(I, vpRect(iP, width, height));
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
                                        unsigned int width)
{
  return buildReference(I_color, vpRect(iP, width, height));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpRect &rectangle)
{
  // Reset variables used when dealing with 3D models.
  // Since no 3D point list is passed here, we don't need these variables.
  m_trainPoints.clear();
  m_mapOfImageId.clear();
  m_mapOfImages.clear();
  m_currentImageId = 1;

  if (m_useAffineDetection) {
    std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
    std::vector<cv::Mat> listOfTrainDescriptors;

    // Detect keypoints and extract descriptors on multiple images
    detectExtractAffine(I, listOfTrainKeyPoints, listOfTrainDescriptors);

    // Flatten the different train lists
    m_trainKeyPoints.clear();
    for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
         it != listOfTrainKeyPoints.end(); ++it) {
      m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
    }

    bool first = true;
    for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
         ++it) {
      if (first) {
        first = false;
        it->copyTo(m_trainDescriptors);
      }
      else {
        m_trainDescriptors.push_back(*it);
      }
    }
  }
  else {
    detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
    extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
  }

  // Save the correspondence between the keypoint class_id and the training image_id in a map.
  // Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(m_trainKeyPoints, m_referenceImagePointsList);

  m_reference_computed = true;

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}
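
// Minimal end-to-end sketch (illustrative): build the reference database on a
// training image, then match a query image against it.
//
//   vpImage<unsigned char> Iref, Icur;   // assumed already acquired
//   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
//   unsigned int nbRef = keypoint.buildReference(Iref);
//   unsigned int nbMatches = keypoint.matchPoint(Icur);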

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
{
  vpImageConvert::convert(I_color, m_I);
  return (buildReference(m_I, rectangle));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
                                        std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed.
    // Store the hash of a keypoint as the key and the index of the keypoint
    // as the value.
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  return (buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &trainKeyPoints,
                                        std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed.
    // Store the hash of a keypoint as the key and the index of the keypoint
    // as the value.
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  return (buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I,
                                        const std::vector<cv::KeyPoint> &trainKeyPoints,
                                        const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                        bool append, int class_id)
{
  if (!append) {
    m_trainPoints.clear();
    m_mapOfImageId.clear();
    m_mapOfImages.clear();
    m_currentImageId = 0;
    m_trainKeyPoints.clear();
  }

  m_currentImageId++;

  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
  // Set class_id if != -1
  if (class_id != -1) {
    for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
      it->class_id = class_id;
    }
  }

  // Save the correspondence between the keypoint class_id and the training image_id in a map.
  // Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
       ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Append reference lists
  m_trainKeyPoints.insert(m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
  if (!append) {
    trainDescriptors.copyTo(m_trainDescriptors);
  }
  else {
    m_trainDescriptors.push_back(trainDescriptors);
  }
  this->m_trainPoints.insert(m_trainPoints.end(), points3f.begin(), points3f.end());

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(m_trainKeyPoints, m_referenceImagePointsList);
  vpConvert::convertFromOpenCV(m_trainPoints, m_trainVpPoints);

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  m_reference_computed = true;

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const std::vector<cv::KeyPoint> &trainKeyPoints,
                                        const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                        bool append, int class_id)
{
  vpImageConvert::convert(I_color, m_I);
  return (buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

void vpKeyPoint::compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate.pt.x, candidate.pt.y, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point = cv::Point3f((float)point_obj[0], (float)point_obj[1], (float)point_obj[2]);
}

void vpKeyPoint::compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, vpPoint &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point.setWorldCoordinates(point_obj);
}
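
// Geometry behind both compute3D() overloads: the ROI plane satisfies
// A*X + B*Y + C*Z + D = 0 in the camera frame, and a pixel back-projects to
// the ray (X, Y, Z) = (x*Z, y*Z, Z), with (x, y) the normalized coordinates
// returned by vpPixelMeterConversion. Substituting the ray into the plane
// equation gives Z = -D / (A*x + B*y + C); the resulting camera-frame point
// is then transferred to the object frame with cMo.inverse().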

void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<cv::KeyPoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<cv::Point3f> &points, cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpImagePoint imPt;
  cv::Point3f pt;
  cv::Mat desc;

  std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      imPt.set_ij(it2->first.pt.y, it2->first.pt.x);
      if (it1->isInside(imPt)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != nullptr) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove the candidate keypoint, which lies on the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      }
      else {
        ++it2;
      }
    }
  }

  if (descriptors != nullptr) {
    desc.copyTo(*descriptors);
  }
}

void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<vpImagePoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<vpPoint> &points, cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpPoint pt;
  cv::Mat desc;

  std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      if (it1->isInside(it2->first)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != nullptr) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove the candidate keypoint, which lies on the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      }
      else {
        ++it2;
      }
    }
  }
}

void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<cv::KeyPoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
    cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->pt.y, it1->pt.x)) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on a cylinder
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->pt.x, it1->pt.y, xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(cv::Point3f((float)pt.get_oX(), (float)pt.get_oY(), (float)pt.get_oZ()));

            if (descriptors != nullptr) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != nullptr) {
    desc.copyTo(*descriptors);
  }
}

void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<vpImagePoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
    cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->get_i(), it1->get_j())) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on a cylinder
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->get_u(), it1->get_v(), xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(pt);

            if (descriptors != nullptr) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != nullptr) {
    desc.copyTo(*descriptors);
  }
}

bool vpKeyPoint::computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
                             const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
                             double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    std::cerr << "Not enough points to compute the pose (at least 4 points are needed)." << std::endl;

    return false;
  }

  cv::Mat cameraMatrix =
      (cv::Mat_<double>(3, 3) << cam.get_px(), 0, cam.get_u0(), 0, cam.get_py(), cam.get_v0(), 0, 0, 1);
  cv::Mat rvec, tvec;

  // Bug with OpenCV < 2.4.0 when zero distortion is provided by an empty
  // array (http://code.opencv.org/issues/1700 and
  // http://code.opencv.org/issues/1718):
  // what(): Distortion coefficients must be 1x4, 4x1, 1x5, 5x1, 1x8 or 8x1
  // floating-point vector in function cvProjectPoints2.
  // Fixed in OpenCV 2.4.0 (r7558).
  // cv::Mat distCoeffs;
  cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);

  try {
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
    // OpenCV 3.0.0 (2014/12/12)
    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError,
                       0.99, // confidence = 0.99 (default): the probability
                             // that the algorithm produces a useful result
                       inlierIndex, cv::SOLVEPNP_ITERATIVE);
    // Pose estimation methods available in cv::solvePnPRansac():
    // - SOLVEPNP_ITERATIVE (default): iterative method based on a
    //   Levenberg-Marquardt optimization. The function finds a pose that
    //   minimizes the reprojection error, that is the sum of squared distances
    //   between the observed projections imagePoints and the objectPoints
    //   projected with projectPoints().
    // - SOLVEPNP_P3P: method based on the paper of X.S. Gao, X.-R. Hou, J. Tang,
    //   H.-F. Chang, "Complete Solution Classification for the
    //   Perspective-Three-Point Problem". It requires exactly four object and
    //   image points.
    // - SOLVEPNP_EPNP: method introduced by F. Moreno-Noguer, V. Lepetit and
    //   P. Fua in the paper "EPnP: Efficient Perspective-n-Point Camera Pose
    //   Estimation".
    // - SOLVEPNP_DLS: method based on the paper of Joel A. Hesch and Stergios I.
    //   Roumeliotis, "A Direct Least-Squares (DLS) Method for PnP".
    // - SOLVEPNP_UPNP: method based on the paper of A. Penate-Sanchez,
    //   J. Andrade-Cetto and F. Moreno-Noguer, "Exhaustive Linearization for
    //   Robust Camera Pose and Focal Length Estimation". It also estimates the
    //   parameters f_x and f_y, assuming both have the same value; cameraMatrix
    //   is then updated with the estimated focal length.
#else
    int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
    if (m_useConsensusPercentage) {
      nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
    }

    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
#endif
  }
  catch (cv::Exception &e) {
    std::cerr << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }
  vpTranslationVector translationVec(tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2));
  vpThetaUVector thetaUVector(rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2));
  cMo = vpHomogeneousMatrix(translationVec, thetaUVector);

  if (func != nullptr) {
    // Check the final pose returned by solvePnPRansac to discard
    // solutions which do not respect the pose criterion.
    if (!func(cMo)) {
      elapsedTime = (vpTime::measureTimeMs() - t);
      return false;
    }
  }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return true;
}
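
// Note on the intrinsics used above: the camera matrix is assembled from the
// ViSP perspective parameters as
//   K = [ px  0  u0 ]
//       [  0 py  v0 ]
//       [  0  0   1 ]
// and the distortion coefficients are forced to zero, i.e. the pose is
// estimated under ViSP's perspective-without-distortion camera model.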

bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, double &elapsedTime,
                             bool (*func)(const vpHomogeneousMatrix &))
{
  std::vector<unsigned int> inlierIndex;
  return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
}

bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex, double &elapsedTime,
                             bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (objectVpPoints.size() < 4) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    // std::cerr << "Not enough points to compute the pose (at least 4 points are needed)." << std::endl;

    return false;
  }

  vpPose pose;

  for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
    pose.addPoint(*it);
  }

  unsigned int nbInlierToReachConsensus = (unsigned int)m_nbRansacMinInlierCount;
  if (m_useConsensusPercentage) {
    nbInlierToReachConsensus =
        (unsigned int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
  }

  pose.setRansacFilterFlag(m_ransacFilterFlag);
  pose.setUseParallelRansac(m_ransacParallel);
  pose.setNbParallelRansacThreads(m_ransacParallelNbThreads);
  pose.setRansacNbInliersToReachConsensus(nbInlierToReachConsensus);
  pose.setRansacThreshold(m_ransacThreshold);
  pose.setRansacMaxTrials(m_nbRansacIterations);

  bool isRansacPoseEstimationOk = false;
  try {
    pose.setCovarianceComputation(m_computeCovariance);
    isRansacPoseEstimationOk = pose.computePose(vpPose::RANSAC, cMo, func);
    inliers = pose.getRansacInliers();
    inlierIndex = pose.getRansacInlierIndex();

    if (m_computeCovariance) {
      m_covarianceMatrix = pose.getCovarianceMatrix();
    }
  }
  catch (const vpException &e) {
    std::cerr << "e=" << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }

  // if (func != nullptr && isRansacPoseEstimationOk) {
  //   // Check the final pose returned by the RANSAC VVS pose estimation, as in
  //   // some rare cases we can converge toward a final cMo that does not respect
  //   // the pose criterion even if the 4 minimal points picked respect it.
  //   if (!func(&cMo)) {
  //     elapsedTime = (vpTime::measureTimeMs() - t);
  //     return false;
  //   }
  // }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return isRansacPoseEstimationOk;
}

double vpKeyPoint::computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
                                              const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est)
{
  if (matchKeyPoints.size() == 0) {
    // return std::numeric_limits<double>::max(); // creates an error under
    // Windows unless max is #undef'ed, so use DBL_MAX instead
    return DBL_MAX;
  }

  std::vector<double> errors(matchKeyPoints.size());
  size_t cpt = 0;
  vpPoint pt;
  for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
       it != matchKeyPoints.end(); ++it, cpt++) {
    pt.set_oX(it->second.x);
    pt.set_oY(it->second.y);
    pt.set_oZ(it->second.z);
    pt.project(cMo_est);
    double u = 0.0, v = 0.0;
    vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), u, v);
    errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
  }

  return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
}

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &IRef, vpImage<unsigned char> &ICurrent,
                                     vpImage<unsigned char> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = std::max<unsigned int>(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<unsigned char>(height, width);
}

void vpKeyPoint::createImageMatching(vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &ICurrent,
                                     vpImage<vpRGBa> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = std::max<unsigned int>(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<vpRGBa>(height, width);
}

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
{
  // Number of images in the training database plus the current image in
  // which we want to detect the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  }
  else {
    // Multiple training images, display them as a mosaic image
    // (unsigned int)std::floor(std::sqrt((double)nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Widen the grid when the square layout cannot hold all the images
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<unsigned char>(maxH * nbHeight, maxW * nbWidth);
  }
}
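
// Worked sizing example (illustrative): with 5 training images plus the
// current one, nbImg = 6, so nbImgSqrt = round(sqrt(6)) = 2; since 2 * 2 < 6
// the grid is widened to nbWidth = 3 columns by nbHeight = 2 rows, and
// IMatching is allocated as (2 * maxH) rows x (3 * maxW) columns.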

void vpKeyPoint::createImageMatching(vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
{
  // Number of images in the training database plus the current image in
  // which we want to detect the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  }
  else {
    // Multiple training images, display them as a mosaic image
    // (unsigned int)std::floor(std::sqrt((double)nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Widen the grid when the square layout cannot hold all the images
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<vpRGBa>(maxH * nbHeight, maxW * nbWidth);
  }
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I_color, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask)
{
  double elapsedTime;
  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I, matImg, false);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
        rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  }
  else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I_color, matImg);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
        rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  }
  else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const cv::Mat &mask)
{
  double t = vpTime::measureTimeMs();
  keyPoints.clear();

  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
       it != m_detectors.end(); ++it) {
    std::vector<cv::KeyPoint> kp;

    it->second->detect(matImg, kp, mask);
    keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
  }

  elapsedTime = vpTime::measureTimeMs() - t;
}
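
// Note (illustrative): when several detectors are configured (all entries of
// m_detectors), their keypoint lists are simply concatenated above, so
// near-duplicate detections of the same image feature are possible and are
// left to the matching/filtering stages.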

void vpKeyPoint::display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<unsigned char> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(nullptr));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
                                 unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(nullptr));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
                                 unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(nullptr));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
                                 const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
                                 unsigned int lineThickness)
{
  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
    // No training images so return
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  // Number of images in the training database plus the current image in
  // which we want to detect the object
  int nbImg = (int)(m_mapOfImages.size() + 1);

  if (nbImg == 2) {
    // Only one training image, so we display the matching result side-by-side
    displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
  }
  else {
    // Multiple training images, display them as a mosaic image
    int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); // (int)std::floor(std::sqrt((double)nbImg) + 0.5);
    int nbWidth = nbImgSqrt;
    int nbHeight = nbImgSqrt;

    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    std::map<int, int> mapOfImageIdIndex;
    int cpt = 0;
    unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it, cpt++) {
      mapOfImageIdIndex[it->first] = cpt;

      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    // Index of the current image in the grid, chosen so that the current
    // image sits preferably at the center of the mosaic
    int medianI = nbHeight / 2;
    int medianJ = nbWidth / 2;
    int medianIndex = medianI * nbWidth + medianJ;
    for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
      vpImagePoint topLeftCorner;
      int current_class_id_index = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
      }
      else {
        // Shift the index by one for the training images that come after
        // the current image
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
      }

      int indexI = current_class_id_index / nbWidth;
      int indexJ = current_class_id_index - (indexI * nbWidth);
      topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);

      // Display cross for keypoints in the learning database
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }

    vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
    for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
      // Display cross for keypoints detected in the current image
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }
    for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
      // Display green circle for RANSAC inliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
                               (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
    }
    for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
      // Display red circle for RANSAC outliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
                               (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
    }

    for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
      int current_class_id = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
      }
      else {
        // Shift the index by one for the training images that come after
        // the current image
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
      }

      int indexI = current_class_id / nbWidth;
      int indexJ = current_class_id - (indexI * nbWidth);

      vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
                       (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
      vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
                         (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);

      // Draw a line between matching keypoints detected in the current image
      // and those detected in the training images
      vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
    }
  }
}
1283 
1285  const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
1286  unsigned int lineThickness)
1287 {
1288  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1289  // No training images so return
1290  std::cerr << "There is no training image loaded !" << std::endl;
1291  return;
1292  }
1293 
1294  // Nb images in the training database + the current image we want to detect
1295  // the object
1296  int nbImg = (int)(m_mapOfImages.size() + 1);
1297 
1298  if (nbImg == 2) {
1299  // Only one training image, so we display the matching result side-by-side
1300  displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
1301  }
1302  else {
1303  // Multiple training images, display them as a mosaic image
1304  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
1305  int nbWidth = nbImgSqrt;
1306  int nbHeight = nbImgSqrt;
1307 
1308  if (nbImgSqrt * nbImgSqrt < nbImg) {
1309  nbWidth++;
1310  }
1311 
1312  std::map<int, int> mapOfImageIdIndex;
1313  int cpt = 0;
1314  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
1315  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1316  ++it, cpt++) {
1317  mapOfImageIdIndex[it->first] = cpt;
1318 
1319  if (maxW < it->second.getWidth()) {
1320  maxW = it->second.getWidth();
1321  }
1322 
1323  if (maxH < it->second.getHeight()) {
1324  maxH = it->second.getHeight();
1325  }
1326  }
1327 
1328  // Indexes of the current image in the grid computed to put preferably the
1329  // image in the center of the mosaic grid
1330  int medianI = nbHeight / 2;
1331  int medianJ = nbWidth / 2;
1332  int medianIndex = medianI * nbWidth + medianJ;
1333  for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1334  vpImagePoint topLeftCorner;
1335  int current_class_id_index = 0;
1336  if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1337  current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1338  }
1339  else {
1340  // Shift of one unity the index of the training images which are after
1341  // the current image
1342  current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1343  }
1344 
1345  int indexI = current_class_id_index / nbWidth;
1346  int indexJ = current_class_id_index - (indexI * nbWidth);
1347  topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);
1348 
1349  // Display cross for keypoints in the learning database
1350  vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
1351  (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
1352  }
1353 
1354  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
1355  for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1356  // Display cross for keypoints detected in the current image
1357  vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
1358  (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
1359  }
1360  for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1361  // Display green circle for RANSAC inliers
1362  vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
1363  (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
1364  }
1365  for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1366  // Display red circle for RANSAC outliers
1367  vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
1368  (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
1369  }
1370 
1371  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1372  int current_class_id = 0;
1373  if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
1374  current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1375  }
1376  else {
1377  // Shift of one unity the index of the training images which are after
1378  // the current image
1379  current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1380  }
1381 
1382  int indexI = current_class_id / nbWidth;
1383  int indexJ = current_class_id - (indexI * nbWidth);
1384 
1385  vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
1386  (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
1387  vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
1388  (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);
1389 
1390  // Draw line for matching keypoints detected in the current image and
1391  // those detected in the training images
1392  vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
1393  }
1394  }
1395 }
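// Editorial usage sketch (illustration only, not part of the sources): a
// typical pipeline around displayMatching(). Image names and file paths are
// assumptions, and a vpDisplay must be attached to Imatch beforehand.
//
//   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
//   vpImage<unsigned char> Iref, Icur, Imatch;
//   vpImageIo::read(Iref, "reference.png"); // requires the visp_io module
//   vpImageIo::read(Icur, "current.png");
//   keypoint.buildReference(Iref);
//   keypoint.createImageMatching(Icur, Imatch); // allocate the side-by-side / mosaic image
//   keypoint.insertImageMatching(Icur, Imatch); // paste the images into it
//   keypoint.matchPoint(Icur);
//   keypoint.displayMatching(Icur, Imatch);     // draw keypoint crosses and match lines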
1396 
1397 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1398  std::vector<cv::Point3f> *trainPoints)
1399 {
1400  double elapsedTime;
1401  extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1402 }
1403 
1404 void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1405  std::vector<cv::Point3f> *trainPoints)
1406 {
1407  double elapsedTime;
1408  extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
1409 }
1410 
1411 void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1412  std::vector<cv::Point3f> *trainPoints)
1413 {
1414  double elapsedTime;
1415  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1416 }
1417 
1418 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1419  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1420 {
1421  cv::Mat matImg;
1422  vpImageConvert::convert(I, matImg, false);
1423  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1424 }
1425 
1426 void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1427  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1428 {
1429  cv::Mat matImg;
1430  vpImageConvert::convert(I_color, matImg);
1431  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1432 }
1433 
1434 void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1435  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1436 {
1437  double t = vpTime::measureTimeMs();
1438  bool first = true;
1439 
1440  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1441  itd != m_extractors.end(); ++itd) {
1442  if (first) {
1443  first = false;
1444  // Check if we have 3D object points information
1445  if (trainPoints != nullptr && !trainPoints->empty()) {
1446  // Copy the input list of keypoints; keypoints for which a descriptor
1447  // cannot be computed are removed by the compute() function
1448  std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1449 
1450  // Extract descriptors for the given list of keypoints
1451  itd->second->compute(matImg, keyPoints, descriptors);
1452 
1453  if (keyPoints.size() != keyPoints_tmp.size()) {
1454  // Keypoints have been removed
1455  // Store the hash of a keypoint as the key and the index of the
1456  // keypoint as the value
1457  std::map<size_t, size_t> mapOfKeypointHashes;
1458  size_t cpt = 0;
1459  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1460  ++it, cpt++) {
1461  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1462  }
1463 
1464  std::vector<cv::Point3f> trainPoints_tmp;
1465  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1466  if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1467  trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1468  }
1469  }
1470 
1471  // Copy trainPoints_tmp back to trainPoints
1472  *trainPoints = trainPoints_tmp;
1473  }
1474  }
1475  else {
1476  // Extract descriptors for the given list of keypoints
1477  itd->second->compute(matImg, keyPoints, descriptors);
1478  }
1479  }
1480  else {
1481  // Copy the input list of keypoints; keypoints for which a descriptor
1482  // cannot be computed are removed by the compute() function
1483  std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1484 
1485  cv::Mat desc;
1486  // Extract descriptors for the given list of keypoints
1487  itd->second->compute(matImg, keyPoints, desc);
1488 
1489  if (keyPoints.size() != keyPoints_tmp.size()) {
1490  // Keypoints have been removed
1491  // Store the hash of a keypoint as the key and the index of the
1492  // keypoint as the value
1493  std::map<size_t, size_t> mapOfKeypointHashes;
1494  size_t cpt = 0;
1495  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1496  ++it, cpt++) {
1497  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1498  }
1499 
1500  std::vector<cv::Point3f> trainPoints_tmp;
1501  cv::Mat descriptors_tmp;
1502  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1503  if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1504  if (trainPoints != nullptr && !trainPoints->empty()) {
1505  trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1506  }
1507 
1508  if (!descriptors.empty()) {
1509  descriptors_tmp.push_back(descriptors.row((int)mapOfKeypointHashes[myKeypointHash(*it)]));
1510  }
1511  }
1512  }
1513 
1514  if (trainPoints != nullptr) {
1515  // Copy trainPoints_tmp back to trainPoints
1516  *trainPoints = trainPoints_tmp;
1517  }
1518  // Copy descriptors_tmp to descriptors
1519  descriptors_tmp.copyTo(descriptors);
1520  }
1521 
1522  // Merge descriptors horizontally
1523  if (descriptors.empty()) {
1524  desc.copyTo(descriptors);
1525  }
1526  else {
1527  cv::hconcat(descriptors, desc, descriptors);
1528  }
1529  }
1530  }
1531 
1532  if (keyPoints.size() != (size_t)descriptors.rows) {
1533  std::cerr << "keyPoints.size() != (size_t) descriptors.rows" << std::endl;
1534  }
1535  elapsedTime = vpTime::measureTimeMs() - t;
1536 }
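// Editorial usage sketch (illustration only): detect() and extract() can also
// be called directly instead of going through matchPoint(), e.g. to inspect
// the descriptors. The image I is an assumed input.
//
//   std::vector<cv::KeyPoint> kpts;
//   cv::Mat descriptors;
//   double t_detect, t_extract;
//   keypoint.detect(I, kpts, t_detect);
//   keypoint.extract(I, kpts, descriptors, t_extract);
//   // On return kpts.size() == (size_t)descriptors.rows: keypoints for which
//   // no descriptor could be computed have been removed, as coded above.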
1537 
1538 void vpKeyPoint::filterMatches()
1539 {
1540  std::vector<cv::KeyPoint> queryKpts;
1541  std::vector<cv::Point3f> trainPts;
1542  std::vector<cv::DMatch> m;
1543 
1544  if (m_useKnn) {
1545  // double max_dist = 0;
1546  // std::numeric_limits<double>::max() creates an error under Windows
1547  // unless #undef max is added, so DBL_MAX is used instead:
1548  double min_dist = DBL_MAX;
1549  double mean = 0.0;
1550  std::vector<double> distance_vec(m_knnMatches.size());
1551 
1552  if (m_filterType == stdAndRatioDistanceThreshold) {
1553  for (size_t i = 0; i < m_knnMatches.size(); i++) {
1554  double dist = m_knnMatches[i][0].distance;
1555  mean += dist;
1556  distance_vec[i] = dist;
1557 
1558  if (dist < min_dist) {
1559  min_dist = dist;
1560  }
1561  // if (dist > max_dist) {
1562  // max_dist = dist;
1563  //}
1564  }
1565  mean /= m_queryDescriptors.rows;
1566  }
1567 
1568  double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1569  double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1570  double threshold = min_dist + stdev;
1571 
1572  for (size_t i = 0; i < m_knnMatches.size(); i++) {
1573  if (m_knnMatches[i].size() >= 2) {
1574  // Calculate ratio of the descriptor distance between the two nearest
1575  // neighbors of the keypoint
1576  float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
1577  // float ratio = std::sqrt((vecMatches[i][0].distance *
1578  // vecMatches[i][0].distance)
1579  // / (vecMatches[i][1].distance *
1580  // vecMatches[i][1].distance));
1581  double dist = m_knnMatches[i][0].distance;
1582 
1583  if (ratio < m_matchingRatioThreshold || (m_filterType == stdAndRatioDistanceThreshold && dist < threshold)) {
1584  m.push_back(cv::DMatch((int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
1585 
1586  if (!m_trainPoints.empty()) {
1587  trainPts.push_back(m_trainPoints[(size_t)m_knnMatches[i][0].trainIdx]);
1588  }
1589  queryKpts.push_back(m_queryKeyPoints[(size_t)m_knnMatches[i][0].queryIdx]);
1590  }
1591  }
1592  }
1593  }
1594  else {
1595  // double max_dist = 0;
1596  // std::numeric_limits<double>::max() creates an error under Windows
1597  // unless #undef max is added, so DBL_MAX is used instead:
1598  double min_dist = DBL_MAX;
1599  double mean = 0.0;
1600  std::vector<double> distance_vec(m_matches.size());
1601  for (size_t i = 0; i < m_matches.size(); i++) {
1602  double dist = m_matches[i].distance;
1603  mean += dist;
1604  distance_vec[i] = dist;
1605 
1606  if (dist < min_dist) {
1607  min_dist = dist;
1608  }
1609  // if (dist > max_dist) {
1610  // max_dist = dist;
1611  // }
1612  }
1613  mean /= m_queryDescriptors.rows;
1614 
1615  double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1616  double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1617 
1618  // Define a threshold where we keep all keypoints whose descriptor
1619  // distance falls below a factor of the minimum descriptor distance (over
1620  // all the query keypoints) or below the minimum descriptor distance +
1621  // the standard deviation (computed over all the query descriptor
1622  // distances)
1623  double threshold =
1624  m_filterType == constantFactorDistanceThreshold ? m_matchingFactorThreshold * min_dist : min_dist + stdev;
1625 
1626  for (size_t i = 0; i < m_matches.size(); i++) {
1627  if (m_matches[i].distance <= threshold) {
1628  m.push_back(cv::DMatch((int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
1629 
1630  if (!m_trainPoints.empty()) {
1631  trainPts.push_back(m_trainPoints[(size_t)m_matches[i].trainIdx]);
1632  }
1633  queryKpts.push_back(m_queryKeyPoints[(size_t)m_matches[i].queryIdx]);
1634  }
1635  }
1636  }
1637 
1638  if (m_useSingleMatchFilter) {
1639  // Eliminate matches where multiple query keypoints are matched to the
1640  // same train keypoint
1641  std::vector<cv::DMatch> mTmp;
1642  std::vector<cv::Point3f> trainPtsTmp;
1643  std::vector<cv::KeyPoint> queryKptsTmp;
1644 
1645  std::map<int, int> mapOfTrainIdx;
1646  // Count the number of query points matched to the same train point
1647  for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1648  mapOfTrainIdx[it->trainIdx]++;
1649  }
1650 
1651  // Keep matches with only one correspondence
1652  for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1653  if (mapOfTrainIdx[it->trainIdx] == 1) {
1654  mTmp.push_back(cv::DMatch((int)queryKptsTmp.size(), it->trainIdx, it->distance));
1655 
1656  if (!m_trainPoints.empty()) {
1657  trainPtsTmp.push_back(m_trainPoints[(size_t)it->trainIdx]);
1658  }
1659  queryKptsTmp.push_back(queryKpts[(size_t)it->queryIdx]);
1660  }
1661  }
1662 
1663  m_filteredMatches = mTmp;
1664  m_objectFilteredPoints = trainPtsTmp;
1665  m_queryFilteredKeyPoints = queryKptsTmp;
1666  }
1667  else {
1668  m_filteredMatches = m;
1669  m_objectFilteredPoints = trainPts;
1670  m_queryFilteredKeyPoints = queryKpts;
1671  }
1672 }
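// Editorial usage sketch (illustration only): the filtering strategy applied
// by filterMatches() is selected with the setters below; the threshold values
// are arbitrary examples.
//
//   keypoint.setFilterMatchingType(vpKeyPoint::ratioDistanceThreshold);
//   keypoint.setMatchingRatioThreshold(0.8); // Lowe-style nearest/second-nearest ratio test
//   // or keep matches whose distance is below a factor of the minimum distance:
//   keypoint.setFilterMatchingType(vpKeyPoint::constantFactorDistanceThreshold);
//   keypoint.setMatchingFactorThreshold(2.0);
//   keypoint.setUseSingleMatchFilter(true); // reject train points matched more than once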
1673 
1674 void vpKeyPoint::getObjectPoints(std::vector<cv::Point3f> &objectPoints) const
1675 {
1676  objectPoints = m_objectFilteredPoints;
1677 }
1678 
1679 void vpKeyPoint::getObjectPoints(std::vector<vpPoint> &objectPoints) const
1680 {
1681  vpConvert::convertFromOpenCV(m_objectFilteredPoints, objectPoints);
1682 }
1683 
1684 void vpKeyPoint::getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints, bool matches) const
1685 {
1686  if (matches) {
1687  keyPoints = m_queryFilteredKeyPoints;
1688  }
1689  else {
1690  keyPoints = m_queryKeyPoints;
1691  }
1692 }
1693 
1694 void vpKeyPoint::getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints, bool matches) const
1695 {
1696  if (matches) {
1697  keyPoints = m_currentImagePointsList;
1698  }
1699  else {
1700  vpConvert::convertFromOpenCV(m_queryKeyPoints, keyPoints);
1701  }
1702 }
1703 
1704 void vpKeyPoint::getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const { keyPoints = m_trainKeyPoints; }
1705 
1706 void vpKeyPoint::getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const { keyPoints = m_referenceImagePointsList; }
1707 
1708 void vpKeyPoint::getTrainPoints(std::vector<cv::Point3f> &points) const { points = m_trainPoints; }
1709 
1710 void vpKeyPoint::getTrainPoints(std::vector<vpPoint> &points) const { points = m_trainVpPoints; }
1711 
1712 void vpKeyPoint::init()
1713 {
1714 // Requires 2.4.0 <= OpenCV < 3.0.0
1715 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
1716  // The following line must be called in order to use SIFT or SURF
1717  if (!cv::initModule_nonfree()) {
1718  std::cerr << "Cannot init the nonfree module; SIFT and SURF cannot be used." << std::endl;
1719  }
1720 #endif
1721 
1722  // Use k-nearest neighbors (knn) to retrieve the two best matches for a
1723  // keypoint, so this is useful only for the ratio-based filtering methods
1724  if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
1725  m_useKnn = true;
1726  }
1727 
1728  initDetectors(m_detectorNames);
1729  initExtractors(m_extractorNames);
1730  initMatcher(m_matcherName);
1731 }
1732 
1733 void vpKeyPoint::initDetector(const std::string &detectorName)
1734 {
1735 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
1736  m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
1737 
1738  if (m_detectors[detectorName] == nullptr) {
1739  std::stringstream ss_msg;
1740  ss_msg << "Failed to initialize the detector: " << detectorName
1741  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
1742  throw vpException(vpException::fatalError, ss_msg.str());
1743  }
1744 #else
1745  std::string detectorNameTmp = detectorName;
1746  std::string pyramid = "Pyramid";
1747  std::size_t pos = detectorName.find(pyramid);
1748  bool usePyramid = false;
1749  if (pos != std::string::npos) {
1750  detectorNameTmp = detectorName.substr(pos + pyramid.size());
1751  usePyramid = true;
1752  }
1753 
1754  if (detectorNameTmp == "SIFT") {
1755 #if (VISP_HAVE_OPENCV_VERSION >= 0x040500) // OpenCV >= 4.5.0
1756  cv::Ptr<cv::FeatureDetector> siftDetector = cv::SiftFeatureDetector::create();
1757  if (!usePyramid) {
1758  m_detectors[detectorNameTmp] = siftDetector;
1759  }
1760  else {
1761  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1762  }
1763 #else
1764 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
1765  (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1766  // SIFT is no longer patented since 09/03/2020
1767  cv::Ptr<cv::FeatureDetector> siftDetector;
1768  if (m_maxFeatures > 0) {
1769 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1770  siftDetector = cv::SIFT::create(m_maxFeatures);
1771 #else
1772  siftDetector = cv::xfeatures2d::SIFT::create(m_maxFeatures);
1773 #endif
1774  }
1775  else {
1776 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1777  siftDetector = cv::SIFT::create();
1778 #else
1779  siftDetector = cv::xfeatures2d::SIFT::create();
1780 #endif
1781  }
1782  if (!usePyramid) {
1783  m_detectors[detectorNameTmp] = siftDetector;
1784  }
1785  else {
1786  std::cerr << "You should not use SIFT with Pyramid feature detection!" << std::endl;
1787  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1788  }
1789 #else
1790  std::stringstream ss_msg;
1791  ss_msg << "Failed to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1792  << " was not built with the xFeatures2d module.";
1793  throw vpException(vpException::fatalError, ss_msg.str());
1794 #endif
1795 #endif
1796  }
1797  else if (detectorNameTmp == "SURF") {
1798 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1799  cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
1800  if (!usePyramid) {
1801  m_detectors[detectorNameTmp] = surfDetector;
1802  }
1803  else {
1804  std::cerr << "You should not use SURF with Pyramid feature detection!" << std::endl;
1805  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
1806  }
1807 #else
1808  std::stringstream ss_msg;
1809  ss_msg << "Failed to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1810  << " was not built with the xFeatures2d module.";
1811  throw vpException(vpException::fatalError, ss_msg.str());
1812 #endif
1813  }
1814  else if (detectorNameTmp == "FAST") {
1815  cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
1816  if (!usePyramid) {
1817  m_detectors[detectorNameTmp] = fastDetector;
1818  }
1819  else {
1820  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1821  }
1822  }
1823  else if (detectorNameTmp == "MSER") {
1824  cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
1825  if (!usePyramid) {
1826  m_detectors[detectorNameTmp] = fastDetector;
1827  }
1828  else {
1829  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1830  }
1831  }
1832  else if (detectorNameTmp == "ORB") {
1833  cv::Ptr<cv::FeatureDetector> orbDetector;
1834  if (m_maxFeatures > 0) {
1835  orbDetector = cv::ORB::create(m_maxFeatures);
1836  }
1837  else {
1838  orbDetector = cv::ORB::create();
1839  }
1840  if (!usePyramid) {
1841  m_detectors[detectorNameTmp] = orbDetector;
1842  }
1843  else {
1844  std::cerr << "You should not use ORB with Pyramid feature detection!" << std::endl;
1845  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
1846  }
1847  }
1848  else if (detectorNameTmp == "BRISK") {
1849  cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
1850  if (!usePyramid) {
1851  m_detectors[detectorNameTmp] = briskDetector;
1852  }
1853  else {
1854  std::cerr << "You should not use BRISK with Pyramid feature detection!" << std::endl;
1855  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
1856  }
1857  }
1858  else if (detectorNameTmp == "KAZE") {
1859  cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
1860  if (!usePyramid) {
1861  m_detectors[detectorNameTmp] = kazeDetector;
1862  }
1863  else {
1864  std::cerr << "You should not use KAZE with Pyramid feature detection!" << std::endl;
1865  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
1866  }
1867  }
1868  else if (detectorNameTmp == "AKAZE") {
1869  cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
1870  if (!usePyramid) {
1871  m_detectors[detectorNameTmp] = akazeDetector;
1872  }
1873  else {
1874  std::cerr << "You should not use AKAZE with Pyramid feature detection!" << std::endl;
1875  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
1876  }
1877  }
1878  else if (detectorNameTmp == "GFTT") {
1879  cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
1880  if (!usePyramid) {
1881  m_detectors[detectorNameTmp] = gfttDetector;
1882  }
1883  else {
1884  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
1885  }
1886  }
1887  else if (detectorNameTmp == "SimpleBlob") {
1888  cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
1889  if (!usePyramid) {
1890  m_detectors[detectorNameTmp] = simpleBlobDetector;
1891  }
1892  else {
1893  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
1894  }
1895  }
1896  else if (detectorNameTmp == "STAR") {
1897 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1898  cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
1899  if (!usePyramid) {
1900  m_detectors[detectorNameTmp] = starDetector;
1901  }
1902  else {
1903  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
1904  }
1905 #else
1906  std::stringstream ss_msg;
1907  ss_msg << "Failed to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1908  << " was not built with the xFeatures2d module.";
1909  throw vpException(vpException::fatalError, ss_msg.str());
1910 #endif
1911  }
1912  else if (detectorNameTmp == "AGAST") {
1913  cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
1914  if (!usePyramid) {
1915  m_detectors[detectorNameTmp] = agastDetector;
1916  }
1917  else {
1918  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
1919  }
1920  }
1921  else if (detectorNameTmp == "MSD") {
1922 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
1923 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
1924  cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
1925  if (!usePyramid) {
1926  m_detectors[detectorNameTmp] = msdDetector;
1927  }
1928  else {
1929  std::cerr << "You should not use MSD with Pyramid feature detection!" << std::endl;
1930  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
1931  }
1932 #else
1933  std::stringstream ss_msg;
1934  ss_msg << "Failed to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1935  << " was not built with the xFeatures2d module.";
1936  throw vpException(vpException::fatalError, ss_msg.str());
1937 #endif
1938 #else
1939  std::stringstream ss_msg;
1940  ss_msg << "Feature " << detectorName << " is not available in OpenCV version: " << std::hex
1941  << VISP_HAVE_OPENCV_VERSION << " (requires OpenCV >= 3.1).";
  throw vpException(vpException::fatalError, ss_msg.str()); // throw so that the failure is actually reported
1942 #endif
1943  }
1944  else {
1945  std::cerr << "The detector: " << detectorNameTmp << " is not available." << std::endl;
1946  }
1947 
1948  bool detectorInitialized = false;
1949  if (!usePyramid) {
1950  // if not null and to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
1951  detectorInitialized = !m_detectors[detectorNameTmp].empty();
1952  }
1953  else {
1954  // if not null and to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
1955  detectorInitialized = !m_detectors[detectorName].empty();
1956  }
1957 
1958  if (!detectorInitialized) {
1959  std::stringstream ss_msg;
1960  ss_msg << "Failed to initialize the detector: " << detectorNameTmp
1961  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
1962  throw vpException(vpException::fatalError, ss_msg.str());
1963  }
1964 #endif
1965 }
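// Editorial note (illustration only): detector names map to the OpenCV classes
// handled above, and prefixing a name with "Pyramid" wraps the detector in the
// pyramid adapter:
//
//   keypoint.setDetector("FAST");        // plain FAST detector
//   keypoint.setDetector("PyramidFAST"); // FAST applied on an image pyramid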
1966 
1967 void vpKeyPoint::initDetectors(const std::vector<std::string> &detectorNames)
1968 {
1969  for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
1970  initDetector(*it);
1971  }
1972 }
1973 
1974 void vpKeyPoint::initExtractor(const std::string &extractorName)
1975 {
1976 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
1977  m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
1978 #else
1979  if (extractorName == "SIFT") {
1980 #if (VISP_HAVE_OPENCV_VERSION >= 0x040500) // OpenCV >= 4.5.0
1981  m_extractors[extractorName] = cv::SIFT::create();
1982 #else
1983 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
1984  (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1985  // SIFT is no longer patented since 09/03/2020
1986 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1987  m_extractors[extractorName] = cv::SIFT::create();
1988 #else
1989  m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
1990 #endif
1991 #else
1992  std::stringstream ss_msg;
1993  ss_msg << "Failed to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1994  << " was not built with the xFeatures2d module.";
1995  throw vpException(vpException::fatalError, ss_msg.str());
1996 #endif
1997 #endif
1998  }
1999  else if (extractorName == "SURF") {
2000 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2001  // Use extended set of SURF descriptors (128 instead of 64)
2002  m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3, true);
2003 #else
2004  std::stringstream ss_msg;
2005  ss_msg << "Failed to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2006  << " was not built with the xFeatures2d module.";
2007  throw vpException(vpException::fatalError, ss_msg.str());
2008 #endif
2009  }
2010  else if (extractorName == "ORB") {
2011  m_extractors[extractorName] = cv::ORB::create();
2012  }
2013  else if (extractorName == "BRISK") {
2014  m_extractors[extractorName] = cv::BRISK::create();
2015  }
2016  else if (extractorName == "FREAK") {
2017 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2018  m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2019 #else
2020  std::stringstream ss_msg;
2021  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2022  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2023  throw vpException(vpException::fatalError, ss_msg.str());
2024 #endif
2025  }
2026  else if (extractorName == "BRIEF") {
2027 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2028  m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2029 #else
2030  std::stringstream ss_msg;
2031  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2032  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2033  throw vpException(vpException::fatalError, ss_msg.str());
2034 #endif
2035  }
2036  else if (extractorName == "KAZE") {
2037  m_extractors[extractorName] = cv::KAZE::create();
2038  }
2039  else if (extractorName == "AKAZE") {
2040  m_extractors[extractorName] = cv::AKAZE::create();
2041  }
2042  else if (extractorName == "DAISY") {
2043 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2044  m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2045 #else
2046  std::stringstream ss_msg;
2047  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2048  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2049  throw vpException(vpException::fatalError, ss_msg.str());
2050 #endif
2051  }
2052  else if (extractorName == "LATCH") {
2053 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2054  m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2055 #else
2056  std::stringstream ss_msg;
2057  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2058  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2059  throw vpException(vpException::fatalError, ss_msg.str());
2060 #endif
2061  }
2062  else if (extractorName == "LUCID") {
2063 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2064  // m_extractors[extractorName] = cv::xfeatures2d::LUCID::create(1, 2);
2065  // Not possible currently, need a color image
2066  throw vpException(vpException::badValue, "Not possible currently as it needs a color image.");
2067 #else
2068  std::stringstream ss_msg;
2069  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2070  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2071  throw vpException(vpException::fatalError, ss_msg.str());
2072 #endif
2073  }
2074  else if (extractorName == "VGG") {
2075 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2076 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2077  m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2078 #else
2079  std::stringstream ss_msg;
2080  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2081  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2082  throw vpException(vpException::fatalError, ss_msg.str());
2083 #endif
2084 #else
2085  std::stringstream ss_msg;
2086  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2087  << VISP_HAVE_OPENCV_VERSION << " is detected but at least OpenCV 3.2 is required.";
2088  throw vpException(vpException::fatalError, ss_msg.str());
2089 #endif
2090  }
2091  else if (extractorName == "BoostDesc") {
2092 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2093 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2094  m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2095 #else
2096  std::stringstream ss_msg;
2097  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2098  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2099  throw vpException(vpException::fatalError, ss_msg.str());
2100 #endif
2101 #else
2102  std::stringstream ss_msg;
2103  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2104  << VISP_HAVE_OPENCV_VERSION << " is detected but at least OpenCV 3.2 is required.";
2105  throw vpException(vpException::fatalError, ss_msg.str());
2106 #endif
2107  }
2108  else {
2109  std::cerr << "The extractor: " << extractorName << " is not available." << std::endl;
2110  }
2111 #endif
2112 
2113  if (!m_extractors[extractorName]) { // if null
2114  std::stringstream ss_msg;
2115  ss_msg << "Failed to initialize the extractor: " << extractorName
2116  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2117  throw vpException(vpException::fatalError, ss_msg.str());
2118  }
2119 
2120 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2121  if (extractorName == "SURF") {
2122  // Use extended set of SURF descriptors (128 instead of 64)
2123  m_extractors[extractorName]->set("extended", 1);
2124  }
2125 #endif
2126 }
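// Editorial note (illustration only): extractors are selected by name in the
// same way; whether SIFT, SURF, FREAK, ... are usable depends on the OpenCV
// build, as handled above.
//
//   keypoint.setExtractor("ORB");  // binary descriptors (CV_8U)
//   keypoint.setExtractor("SIFT"); // floating-point descriptors (CV_32F)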
2127 
2128 void vpKeyPoint::initExtractors(const std::vector<std::string> &extractorNames)
2129 {
2130  for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
2131  initExtractor(*it);
2132  }
2133 
2134  int descriptorType = CV_32F;
2135  bool firstIteration = true;
2136  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2137  it != m_extractors.end(); ++it) {
2138  if (firstIteration) {
2139  firstIteration = false;
2140  descriptorType = it->second->descriptorType();
2141  }
2142  else {
2143  if (descriptorType != it->second->descriptorType()) {
2144  throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2145  }
2146  }
2147  }
2148 }
2149 
2150 void vpKeyPoint::initFeatureNames()
2151 {
2152 // Create map enum to string
2153 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2154  m_mapOfDetectorNames[DETECTOR_FAST] = "FAST";
2155  m_mapOfDetectorNames[DETECTOR_MSER] = "MSER";
2156  m_mapOfDetectorNames[DETECTOR_ORB] = "ORB";
2157  m_mapOfDetectorNames[DETECTOR_BRISK] = "BRISK";
2158  m_mapOfDetectorNames[DETECTOR_GFTT] = "GFTT";
2159  m_mapOfDetectorNames[DETECTOR_SimpleBlob] = "SimpleBlob";
2160 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2161  m_mapOfDetectorNames[DETECTOR_STAR] = "STAR";
2162 #endif
2163 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2164  (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2165  m_mapOfDetectorNames[DETECTOR_SIFT] = "SIFT";
2166 #endif
2167 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2168  m_mapOfDetectorNames[DETECTOR_SURF] = "SURF";
2169 #endif
2170 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2171  m_mapOfDetectorNames[DETECTOR_KAZE] = "KAZE";
2172  m_mapOfDetectorNames[DETECTOR_AKAZE] = "AKAZE";
2173  m_mapOfDetectorNames[DETECTOR_AGAST] = "AGAST";
2174 #endif
2175 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2176  m_mapOfDetectorNames[DETECTOR_MSD] = "MSD";
2177 #endif
2178 #endif
2179 
2180 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2181  m_mapOfDescriptorNames[DESCRIPTOR_ORB] = "ORB";
2182  m_mapOfDescriptorNames[DESCRIPTOR_BRISK] = "BRISK";
2183 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2184  m_mapOfDescriptorNames[DESCRIPTOR_FREAK] = "FREAK";
2185  m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] = "BRIEF";
2186 #endif
2187 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2188  (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2189  m_mapOfDescriptorNames[DESCRIPTOR_SIFT] = "SIFT";
2190 #endif
2191 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2192  m_mapOfDescriptorNames[DESCRIPTOR_SURF] = "SURF";
2193 #endif
2194 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2195  m_mapOfDescriptorNames[DESCRIPTOR_KAZE] = "KAZE";
2196  m_mapOfDescriptorNames[DESCRIPTOR_AKAZE] = "AKAZE";
2197 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2198  m_mapOfDescriptorNames[DESCRIPTOR_DAISY] = "DAISY";
2199  m_mapOfDescriptorNames[DESCRIPTOR_LATCH] = "LATCH";
2200 #endif
2201 #endif
2202 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2203  m_mapOfDescriptorNames[DESCRIPTOR_VGG] = "VGG";
2204  m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] = "BoostDesc";
2205 #endif
2206 #endif
2207 }
2208 
2209 void vpKeyPoint::initMatcher(const std::string &matcherName)
2210 {
2211  int descriptorType = CV_32F;
2212  bool firstIteration = true;
2213  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2214  it != m_extractors.end(); ++it) {
2215  if (firstIteration) {
2216  firstIteration = false;
2217  descriptorType = it->second->descriptorType();
2218  }
2219  else {
2220  if (descriptorType != it->second->descriptorType()) {
2221  throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2222  }
2223  }
2224  }
2225 
2226  if (matcherName == "FlannBased") {
2227  if (m_extractors.empty()) {
2228  std::cout << "Warning: No extractor initialized; using floating-point "
2229  "values (CV_32F) "
2230  "for the descriptor type by default!"
2231  << std::endl;
2232  }
2233 
2234  if (descriptorType == CV_8U) {
2235 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2236  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2237 #else
2238  m_matcher = new cv::FlannBasedMatcher(new cv::flann::LshIndexParams(12, 20, 2));
2239 #endif
2240  }
2241  else {
2242 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2243  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2244 #else
2245  m_matcher = new cv::FlannBasedMatcher(new cv::flann::KDTreeIndexParams());
2246 #endif
2247  }
2248  }
2249  else {
2250  m_matcher = cv::DescriptorMatcher::create(matcherName);
2251  }
2252 
2253 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2254  if (m_matcher != nullptr && !m_useKnn && matcherName == "BruteForce") {
2255  m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
2256  }
2257 #endif
2258 
2259  if (!m_matcher) { // if null
2260  std::stringstream ss_msg;
2261  ss_msg << "Failed to initialize the matcher: " << matcherName
2262  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2263  throw vpException(vpException::fatalError, ss_msg.str());
2264  }
2265 }
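// Editorial note (illustration only): the matcher is selected by name too. As
// coded above, "FlannBased" automatically uses an LSH index for binary (CV_8U)
// descriptors and a KD-tree otherwise.
//
//   keypoint.setMatcher("BruteForce-Hamming"); // exhaustive search, binary descriptors
//   keypoint.setMatcher("FlannBased");         // approximate search, faster on large databases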
2266 
2267 void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
2268  vpImage<unsigned char> &IMatching)
2269 {
2270  vpImagePoint topLeftCorner(0, 0);
2271  IMatching.insert(IRef, topLeftCorner);
2272  topLeftCorner = vpImagePoint(0, IRef.getWidth());
2273  IMatching.insert(ICurrent, topLeftCorner);
2274 }
2275 
2276 void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent,
2277  vpImage<vpRGBa> &IMatching)
2278 {
2279  vpImagePoint topLeftCorner(0, 0);
2280  IMatching.insert(IRef, topLeftCorner);
2281  topLeftCorner = vpImagePoint(0, IRef.getWidth());
2282  IMatching.insert(ICurrent, topLeftCorner);
2283 }
2284 
2285 void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
2286 {
2287  // Number of images in the training database + the current image in which
2288  // we want to detect the object
2289  int nbImg = (int)(m_mapOfImages.size() + 1);
2290 
2291  if (m_mapOfImages.empty()) {
2292  std::cerr << "There is no training image loaded!" << std::endl;
2293  return;
2294  }
2295 
2296  if (nbImg == 2) {
2297  // Only one training image, so we display them side by side
2298  insertImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
2299  }
2300  else {
2301  // Multiple training images, display them as a mosaic image
2302  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
2303  int nbWidth = nbImgSqrt;
2304  int nbHeight = nbImgSqrt;
2305 
2306  if (nbImgSqrt * nbImgSqrt < nbImg) {
2307  nbWidth++;
2308  }
2309 
2310  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2311  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2312  ++it) {
2313  if (maxW < it->second.getWidth()) {
2314  maxW = it->second.getWidth();
2315  }
2316 
2317  if (maxH < it->second.getHeight()) {
2318  maxH = it->second.getHeight();
2319  }
2320  }
2321 
2322  // Index of the current image in the grid, computed so that the current
2323  // image lies in the center square of the mosaic grid
2324  int medianI = nbHeight / 2;
2325  int medianJ = nbWidth / 2;
2326  int medianIndex = medianI * nbWidth + medianJ;
2327 
2328  int cpt = 0;
2329  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2330  ++it, cpt++) {
2331  int local_cpt = cpt;
2332  if (cpt >= medianIndex) {
2333  // Shift by one the index of the training images that come after
2334  // the current image
2335  local_cpt++;
2336  }
2337  int indexI = local_cpt / nbWidth;
2338  int indexJ = local_cpt - (indexI * nbWidth);
2339  vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2340 
2341  IMatching.insert(it->second, topLeftCorner);
2342  }
2343 
2344  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2345  IMatching.insert(ICurrent, topLeftCorner);
2346  }
2347 }
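// Worked example of the grid layout above (editorial illustration): with 5
// training images, nbImg = 6 and nbImgSqrt = round(sqrt(6)) = 2; since
// 2 * 2 < 6, nbWidth becomes 3, giving a 2 x 3 grid. Then medianI = 1,
// medianJ = 1 and medianIndex = 1 * 3 + 1 = 4, so the current image occupies
// cell 4 and the training images with cpt >= 4 are shifted by one cell.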
2348 
2349 void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
2350 {
2351  // Number of images in the training database + the current image in which
2352  // we want to detect the object
2353  int nbImg = (int)(m_mapOfImages.size() + 1);
2354 
2355  if (m_mapOfImages.empty()) {
2356  std::cerr << "There is no training image loaded!" << std::endl;
2357  return;
2358  }
2359 
2360  if (nbImg == 2) {
2361  // Only one training image, so we display them side by side
2362  vpImage<vpRGBa> IRef;
2363  vpImageConvert::convert(m_mapOfImages.begin()->second, IRef);
2364  insertImageMatching(IRef, ICurrent, IMatching);
2365  }
2366  else {
2367  // Multiple training images, display them as a mosaic image
2368  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
2369  int nbWidth = nbImgSqrt;
2370  int nbHeight = nbImgSqrt;
2371 
2372  if (nbImgSqrt * nbImgSqrt < nbImg) {
2373  nbWidth++;
2374  }
2375 
2376  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2377  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2378  ++it) {
2379  if (maxW < it->second.getWidth()) {
2380  maxW = it->second.getWidth();
2381  }
2382 
2383  if (maxH < it->second.getHeight()) {
2384  maxH = it->second.getHeight();
2385  }
2386  }
2387 
2388  // Index of the current image in the grid, computed so that the current
2389  // image lies in the center square of the mosaic grid
2390  int medianI = nbHeight / 2;
2391  int medianJ = nbWidth / 2;
2392  int medianIndex = medianI * nbWidth + medianJ;
2393 
2394  int cpt = 0;
2395  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2396  ++it, cpt++) {
2397  int local_cpt = cpt;
2398  if (cpt >= medianIndex) {
2399  // Shift by one the index of the training images that come after
2400  // the current image
2401  local_cpt++;
2402  }
2403  int indexI = local_cpt / nbWidth;
2404  int indexJ = local_cpt - (indexI * nbWidth);
2405  vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2406 
2407  vpImage<vpRGBa> IRef;
2408  vpImageConvert::convert(it->second, IRef);
2409  IMatching.insert(IRef, topLeftCorner);
2410  }
2411 
2412  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2413  IMatching.insert(ICurrent, topLeftCorner);
2414  }
2415 }
2416 
2417 void vpKeyPoint::loadConfigFile(const std::string &configFile)
2418 {
2419 #if defined(VISP_HAVE_PUGIXML)
2420  vpXmlConfigParserKeyPoint xmlp;
2421 
2422  try {
2423  // Reset detector and extractor
2424  m_detectorNames.clear();
2425  m_extractorNames.clear();
2426  m_detectors.clear();
2427  m_extractors.clear();
2428 
2429  std::cout << " *********** Parsing XML configuration for vpKeyPoint "
2430  "************ "
2431  << std::endl;
2432  xmlp.parse(configFile);
2433 
2434  m_detectorNames.push_back(xmlp.getDetectorName());
2435  m_extractorNames.push_back(xmlp.getExtractorName());
2436  m_matcherName = xmlp.getMatcherName();
2437 
2438  switch (xmlp.getMatchingMethod()) {
2439  case vpXmlConfigParserKeyPoint::constantFactorDistanceThreshold:
2440  m_filterType = constantFactorDistanceThreshold;
2441  break;
2442 
2443  case vpXmlConfigParserKeyPoint::stdDistanceThreshold:
2444  m_filterType = stdDistanceThreshold;
2445  break;
2446 
2447  case vpXmlConfigParserKeyPoint::ratioDistanceThreshold:
2448  m_filterType = ratioDistanceThreshold;
2449  break;
2450 
2451  case vpXmlConfigParserKeyPoint::stdAndRatioDistanceThreshold:
2452  m_filterType = stdAndRatioDistanceThreshold;
2453  break;
2454 
2455  case vpXmlConfigParserKeyPoint::noFilterMatching:
2456  m_filterType = noFilterMatching;
2457  break;
2458 
2459  default:
2460  break;
2461  }
2462 
2463  m_matchingFactorThreshold = xmlp.getMatchingFactorThreshold();
2464  m_matchingRatioThreshold = xmlp.getMatchingRatioThreshold();
2465 
2466  m_useRansacVVS = xmlp.getUseRansacVVSPoseEstimation();
2467  m_useConsensusPercentage = xmlp.getUseRansacConsensusPercentage();
2468  m_nbRansacIterations = xmlp.getNbRansacIterations();
2469  m_ransacReprojectionError = xmlp.getRansacReprojectionError();
2470  m_nbRansacMinInlierCount = xmlp.getNbRansacMinInlierCount();
2471  m_ransacThreshold = xmlp.getRansacThreshold();
2472  m_ransacConsensusPercentage = xmlp.getRansacConsensusPercentage();
2473 
2474  if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
2475  m_useKnn = true;
2476  }
2477  else {
2478  m_useKnn = false;
2479  }
2480 
2481  init();
2482  }
2483  catch (...) {
2484  throw vpException(vpException::ioError, "Cannot open or parse the XML file \"%s\"", configFile.c_str());
2485  }
2486 #else
2487  (void)configFile;
2488  throw vpException(vpException::ioError, "vpKeyPoint::loadConfigFile() in xml needs built-in pugixml 3rdparty that is not available");
2489 #endif
2490 }
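// Editorial usage sketch (illustration only, file name assumed): loading the
// detector/extractor/matcher and RANSAC settings from an XML file, which
// requires ViSP to be built with pugixml.
//
//   vpKeyPoint keypoint;
//   keypoint.loadConfigFile("detection-config.xml");
//   keypoint.buildReference(Iref);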
2491 
2492 void vpKeyPoint::loadLearningData(const std::string &filename, bool binaryMode, bool append)
2493 {
2494  int startClassId = 0;
2495  int startImageId = 0;
2496  if (!append) {
2497  m_trainKeyPoints.clear();
2498  m_trainPoints.clear();
2499  m_mapOfImageId.clear();
2500  m_mapOfImages.clear();
2501  }
2502  else {
2503  // In append mode, find the highest keypoint class id already used
2504  for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
2505  if (startClassId < it->first) {
2506  startClassId = it->first;
2507  }
2508  }
2509 
2510  // In append mode, find the highest image id already used
2511  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2512  ++it) {
2513  if (startImageId < it->first) {
2514  startImageId = it->first;
2515  }
2516  }
2517  }
2518 
2519  // Get parent directory
2520  std::string parent = vpIoTools::getParent(filename);
2521  if (!parent.empty()) {
2522  parent += "/";
2523  }
2524 
2525  if (binaryMode) {
2526  std::ifstream file(filename.c_str(), std::ifstream::binary);
2527  if (!file.is_open()) {
2528  throw vpException(vpException::ioError, "Cannot open the file.");
2529  }
2530 
2531  // Read info about training images
2532  int nbImgs = 0;
2533  vpIoTools::readBinaryValueLE(file, nbImgs);
2534 
2535 #if !defined(VISP_HAVE_MODULE_IO)
2536  if (nbImgs > 0) {
2537  std::cout << "Warning: The learning file contains image data that will "
2538  "not be loaded as the visp_io module "
2539  "is not available!"
2540  << std::endl;
2541  }
2542 #endif
2543 
2544  for (int i = 0; i < nbImgs; i++) {
2545  // Read image_id
2546  int id = 0;
2547  vpIoTools::readBinaryValueLE(file, id);
2548 
2549  int length = 0;
2550  vpIoTools::readBinaryValueLE(file, length);
2551  // Will contain the path to the training images
2552  char *path = new char[length + 1]; // heap allocation since variable-length arrays are not standard C++
2553 
2554  for (int cpt = 0; cpt < length; cpt++) {
2555  char c;
2556  file.read((char *)(&c), sizeof(c));
2557  path[cpt] = c;
2558  }
2559  path[length] = '\0';
2560 
2561  vpImage<unsigned char> I;
2562 #ifdef VISP_HAVE_MODULE_IO
2563  if (vpIoTools::isAbsolutePathname(std::string(path))) {
2564  vpImageIo::read(I, path);
2565  }
2566  else {
2567  vpImageIo::read(I, parent + path);
2568  }
2569 
2570  // Add the image previously loaded only if VISP_HAVE_MODULE_IO
2571  m_mapOfImages[id + startImageId] = I;
2572 #endif
2573 
2574  // Delete path
2575  delete[] path;
2576  }
2577 
2578  // Read whether 3D point information is saved or not
2579  int have3DInfoInt = 0;
2580  vpIoTools::readBinaryValueLE(file, have3DInfoInt);
2581  bool have3DInfo = have3DInfoInt != 0;
2582 
2583  // Read the number of descriptors
2584  int nRows = 0;
2585  vpIoTools::readBinaryValueLE(file, nRows);
2586 
2587  // Read the size of the descriptor
2588  int nCols = 0;
2589  vpIoTools::readBinaryValueLE(file, nCols);
2590 
2591  // Read the type of the descriptor
2592  int descriptorType = 5; // CV_32F
2593  vpIoTools::readBinaryValueLE(file, descriptorType);
2594 
2595  cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2596  for (int i = 0; i < nRows; i++) {
2597  // Read the keypoint information
2598  float u, v, size, angle, response;
2599  int octave, class_id, image_id;
2600  vpIoTools::readBinaryValueLE(file, u);
2601  vpIoTools::readBinaryValueLE(file, v);
2602  vpIoTools::readBinaryValueLE(file, size);
2603  vpIoTools::readBinaryValueLE(file, angle);
2604  vpIoTools::readBinaryValueLE(file, response);
2605  vpIoTools::readBinaryValueLE(file, octave);
2606  vpIoTools::readBinaryValueLE(file, class_id);
2607  vpIoTools::readBinaryValueLE(file, image_id);
2608  cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
2609  m_trainKeyPoints.push_back(keyPoint);
2610 
2611  if (image_id != -1) {
2612 #ifdef VISP_HAVE_MODULE_IO
2613  // No training images if image_id == -1
2614  m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2615 #endif
2616  }
2617 
2618  if (have3DInfo) {
2619  // Read oX, oY, oZ
2620  float oX, oY, oZ;
2621  vpIoTools::readBinaryValueLE(file, oX);
2622  vpIoTools::readBinaryValueLE(file, oY);
2623  vpIoTools::readBinaryValueLE(file, oZ);
2624  m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
2625  }
2626 
2627  for (int j = 0; j < nCols; j++) {
2628  // Read the value of the descriptor
2629  switch (descriptorType) {
2630  case CV_8U: {
2631  unsigned char value;
2632  file.read((char *)(&value), sizeof(value));
2633  trainDescriptorsTmp.at<unsigned char>(i, j) = value;
2634  } break;
2635 
2636  case CV_8S: {
2637  char value;
2638  file.read((char *)(&value), sizeof(value));
2639  trainDescriptorsTmp.at<char>(i, j) = value;
2640  } break;
2641 
2642  case CV_16U: {
2643  unsigned short int value;
2644  vpIoTools::readBinaryValueLE(file, value);
2645  trainDescriptorsTmp.at<unsigned short int>(i, j) = value;
2646  } break;
2647 
2648  case CV_16S: {
2649  short int value;
2650  vpIoTools::readBinaryValueLE(file, value);
2651  trainDescriptorsTmp.at<short int>(i, j) = value;
2652  } break;
2653 
2654  case CV_32S: {
2655  int value;
2656  vpIoTools::readBinaryValueLE(file, value);
2657  trainDescriptorsTmp.at<int>(i, j) = value;
2658  } break;
2659 
2660  case CV_32F: {
2661  float value;
2662  vpIoTools::readBinaryValueLE(file, value);
2663  trainDescriptorsTmp.at<float>(i, j) = value;
2664  } break;
2665 
2666  case CV_64F: {
2667  double value;
2668  vpIoTools::readBinaryValueLE(file, value);
2669  trainDescriptorsTmp.at<double>(i, j) = value;
2670  } break;
2671 
2672  default: {
2673  float value;
2674  vpIoTools::readBinaryValueLE(file, value);
2675  trainDescriptorsTmp.at<float>(i, j) = value;
2676  } break;
2677  }
2678  }
2679  }
2680 
2681  if (!append || m_trainDescriptors.empty()) {
2682  trainDescriptorsTmp.copyTo(m_trainDescriptors);
2683  }
2684  else {
2685  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2686  }
2687 
2688  file.close();
2689  }
2690  else {
2691 #if defined(VISP_HAVE_PUGIXML)
2692  pugi::xml_document doc;
2693 
2694  /* Parse the file and get the DOM */
2695  if (!doc.load_file(filename.c_str())) {
2696  throw vpException(vpException::ioError, "Error with file: %s", filename.c_str());
2697  }
2698 
2699  pugi::xml_node root_element = doc.document_element();
2700 
2701  int descriptorType = CV_32F; // float
2702  int nRows = 0, nCols = 0;
2703  int i = 0, j = 0;
2704 
2705  cv::Mat trainDescriptorsTmp;
2706 
2707  for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node;
2708  first_level_node = first_level_node.next_sibling()) {
2709 
2710  std::string name(first_level_node.name());
2711  if (first_level_node.type() == pugi::node_element && name == "TrainingImageInfo") {
2712 
2713  for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node;
2714  image_info_node = image_info_node.next_sibling()) {
2715  name = std::string(image_info_node.name());
2716 
2717  if (name == "trainImg") {
2718  // Read image_id
2719  int id = image_info_node.attribute("image_id").as_int();
2720 
2721  vpImage<unsigned char> I;
2722 #ifdef VISP_HAVE_MODULE_IO
2723  std::string path(image_info_node.text().as_string());
2724  // Read path to the training images
2725  if (vpIoTools::isAbsolutePathname(std::string(path))) {
2726  vpImageIo::read(I, path);
2727  }
2728  else {
2729  vpImageIo::read(I, parent + path);
2730  }
2731 
2732  // Add the image previously loaded only if VISP_HAVE_MODULE_IO
2733  m_mapOfImages[id + startImageId] = I;
2734 #endif
2735  }
2736  }
2737  }
2738  else if (first_level_node.type() == pugi::node_element && name == "DescriptorsInfo") {
2739  for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
2740  descriptors_info_node = descriptors_info_node.next_sibling()) {
2741  if (descriptors_info_node.type() == pugi::node_element) {
2742  name = std::string(descriptors_info_node.name());
2743 
2744  if (name == "nrows") {
2745  nRows = descriptors_info_node.text().as_int();
2746  }
2747  else if (name == "ncols") {
2748  nCols = descriptors_info_node.text().as_int();
2749  }
2750  else if (name == "type") {
2751  descriptorType = descriptors_info_node.text().as_int();
2752  }
2753  }
2754  }
2755 
2756  trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2757  }
2758  else if (first_level_node.type() == pugi::node_element && name == "DescriptorInfo") {
2759  double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
2760  int octave = 0, class_id = 0, image_id = 0;
2761  double oX = 0.0, oY = 0.0, oZ = 0.0;
2762 
2763  std::stringstream ss;
2764 
2765  for (pugi::xml_node point_node = first_level_node.first_child(); point_node;
2766  point_node = point_node.next_sibling()) {
2767  if (point_node.type() == pugi::node_element) {
2768  name = std::string(point_node.name());
2769 
2770  // Read information about keypoints
2771  if (name == "u") {
2772  u = point_node.text().as_double();
2773  }
2774  else if (name == "v") {
2775  v = point_node.text().as_double();
2776  }
2777  else if (name == "size") {
2778  size = point_node.text().as_double();
2779  }
2780  else if (name == "angle") {
2781  angle = point_node.text().as_double();
2782  }
2783  else if (name == "response") {
2784  response = point_node.text().as_double();
2785  }
2786  else if (name == "octave") {
2787  octave = point_node.text().as_int();
2788  }
2789  else if (name == "class_id") {
2790  class_id = point_node.text().as_int();
2791  cv::KeyPoint keyPoint(cv::Point2f((float)u, (float)v), (float)size, (float)angle, (float)response, octave,
2792  (class_id + startClassId));
2793  m_trainKeyPoints.push_back(keyPoint);
2794  }
2795  else if (name == "image_id") {
2796  image_id = point_node.text().as_int();
2797  if (image_id != -1) {
2798 #ifdef VISP_HAVE_MODULE_IO
2799  // No training images if image_id == -1
2800  m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2801 #endif
2802  }
2803  }
2804  else if (name == "oX") {
2805  oX = point_node.text().as_double();
2806  }
2807  else if (name == "oY") {
2808  oY = point_node.text().as_double();
2809  }
2810  else if (name == "oZ") {
2811  oZ = point_node.text().as_double();
2812  m_trainPoints.push_back(cv::Point3f((float)oX, (float)oY, (float)oZ));
2813  }
2814  else if (name == "desc") {
2815  j = 0;
2816 
2817  for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
2818  descriptor_value_node = descriptor_value_node.next_sibling()) {
2819 
2820  if (descriptor_value_node.type() == pugi::node_element) {
2821  // Read descriptor values
2822  std::string parseStr(descriptor_value_node.text().as_string());
2823  ss.clear();
2824  ss.str(parseStr);
2825 
2826  if (!ss.fail()) {
2827  switch (descriptorType) {
2828  case CV_8U: {
2829  // Parse the numeric value in [0, 255] to an int
2830  int parseValue;
2831  ss >> parseValue;
2832  trainDescriptorsTmp.at<unsigned char>(i, j) = (unsigned char)parseValue;
2833  } break;
2834 
2835  case CV_8S: {
2836  // Parse the numeric value in [-128, 127] to an int
2837  int parseValue;
2838  ss >> parseValue;
2839  trainDescriptorsTmp.at<char>(i, j) = (char)parseValue;
2840  } break;
2841 
2842  case CV_16U:
2843  ss >> trainDescriptorsTmp.at<unsigned short int>(i, j);
2844  break;
2845 
2846  case CV_16S:
2847  ss >> trainDescriptorsTmp.at<short int>(i, j);
2848  break;
2849 
2850  case CV_32S:
2851  ss >> trainDescriptorsTmp.at<int>(i, j);
2852  break;
2853 
2854  case CV_32F:
2855  ss >> trainDescriptorsTmp.at<float>(i, j);
2856  break;
2857 
2858  case CV_64F:
2859  ss >> trainDescriptorsTmp.at<double>(i, j);
2860  break;
2861 
2862  default:
2863  ss >> trainDescriptorsTmp.at<float>(i, j);
2864  break;
2865  }
2866  }
2867  else {
2868  std::cerr << "Error when converting: " << ss.str() << std::endl;
2869  }
2870 
2871  j++;
2872  }
2873  }
2874  }
2875  }
2876  }
2877  i++;
2878  }
2879  }
2880 
2881  if (!append || m_trainDescriptors.empty()) {
2882  trainDescriptorsTmp.copyTo(m_trainDescriptors);
2883  }
2884  else {
2885  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2886  }
2887 #else
2888  throw vpException(vpException::ioError, "vpKeyPoint::loadLearningData() in xml needs built-in pugixml 3rdparty that is not available");
2889 #endif
2890  }
2891 
2892  // Convert OpenCV types to ViSP types for compatibility
2893  vpConvert::convertFromOpenCV(this->m_trainKeyPoints, m_referenceImagePointsList);
2894  vpConvert::convertFromOpenCV(this->m_trainPoints, m_trainVpPoints);
2895 
2896  // Add the train descriptors to the matcher object
2897  m_matcher->clear();
2898  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
2899 
2900  // Set m_reference_computed to true since we loaded a learning file
2901  m_reference_computed = true;
2902 
2903  // Set m_currentImageId
2904  m_currentImageId = (int)m_mapOfImages.size();
2905 }
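// Editorial usage sketch (illustration only, file names assumed): a typical
// offline/online split around the learning file; the binary format is more
// compact than the XML one.
//
//   // Offline: learn the reference object once and save it
//   keypoint.buildReference(Iref);
//   keypoint.saveLearningData("learning.bin", true); // true: binary mode
//
//   // Online: reload it in a later run instead of rebuilding the reference
//   keypoint.loadLearningData("learning.bin", true); // true: binary mode
//   keypoint.matchPoint(Icur);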
2906 
2907 void vpKeyPoint::match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors,
2908  std::vector<cv::DMatch> &matches, double &elapsedTime)
2909 {
2910  double t = vpTime::measureTimeMs();
2911 
2912  if (m_useKnn) {
2913  m_knnMatches.clear();
2914 
2915  if (m_useMatchTrainToQuery) {
2916  std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
2917 
2918  // Match train descriptors to query descriptors
2919  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
2920  matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
2921 
2922  for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
2923  it1 != knnMatchesTmp.end(); ++it1) {
2924  std::vector<cv::DMatch> tmp;
2925  for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
2926  tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
2927  }
2928  m_knnMatches.push_back(tmp);
2929  }
2930 
2931  matches.resize(m_knnMatches.size());
2932  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
2933  }
2934  else {
2935  // Match query descriptors to train descriptors
2936  m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
2937  matches.resize(m_knnMatches.size());
2938  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
2939  }
2940  }
2941  else {
2942  matches.clear();
2943 
2944  if (m_useMatchTrainToQuery) {
2945  std::vector<cv::DMatch> matchesTmp;
2946  // Match train descriptors to query descriptors
2947  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
2948  matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
2949 
2950  for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
2951  matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
2952  }
2953  }
2954  else {
2955  // Match query descriptors to train descriptors
2956  m_matcher->match(queryDescriptors, matches);
2957  }
2958  }
2959  elapsedTime = vpTime::measureTimeMs() - t;
2960 }
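// The k = 2 nearest-neighbour matches gathered by match() feed the ratio-based
// filters applied later in filterMatches(). A standalone sketch of that ratio
// test (the cv::BFMatcher, the Hamming norm and the 0.85 ratio mirror the
// defaults of this class but are assumptions here, not the original code):
#if 0
static std::vector<cv::DMatch> exampleRatioTest(const cv::Mat &queryDescriptors, const cv::Mat &trainDescriptors)
{
  cv::BFMatcher matcher(cv::NORM_HAMMING);
  std::vector<std::vector<cv::DMatch> > knnMatches;
  matcher.knnMatch(queryDescriptors, trainDescriptors, knnMatches, 2);

  std::vector<cv::DMatch> goodMatches;
  for (size_t i = 0; i < knnMatches.size(); i++) {
    // Keep a match only when the best distance clearly beats the second-best one
    if (knnMatches[i].size() == 2 && knnMatches[i][0].distance < 0.85f * knnMatches[i][1].distance) {
      goodMatches.push_back(knnMatches[i][0]);
    }
  }
  return goodMatches;
}
#endif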
2961 
2962 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I) { return matchPoint(I, vpRect()); }
2963 
2964 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color) { return matchPoint(I_color, vpRect()); }
2965 
2966 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
2967  unsigned int width)
2968 {
2969  return matchPoint(I, vpRect(iP, width, height));
2970 }
2971 
2972 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
2973  unsigned int width)
2974 {
2975  return matchPoint(I_color, vpRect(iP, width, height));
2976 }
2977 
2978 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpRect &rectangle)
2979 {
2980  if (m_trainDescriptors.empty()) {
2981  std::cerr << "Reference is empty." << std::endl;
2982  if (!m_reference_computed) {
2983  std::cerr << "Reference is not computed." << std::endl;
2984  }
2985  std::cerr << "Matching is not possible." << std::endl;
2986 
2987  return 0;
2988  }
2989 
2990  if (m_useAffineDetection) {
2991  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
2992  std::vector<cv::Mat> listOfQueryDescriptors;
2993 
2994  // Detect keypoints and extract descriptors on multiple images
2995  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
2996 
2997  // Flatten the different query keypoint lists
2998  m_queryKeyPoints.clear();
2999  for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3000  it != listOfQueryKeyPoints.end(); ++it) {
3001  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3002  }
3003 
3004  bool first = true;
3005  for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3006  ++it) {
3007  if (first) {
3008  first = false;
3009  it->copyTo(m_queryDescriptors);
3010  }
3011  else {
3012  m_queryDescriptors.push_back(*it);
3013  }
3014  }
3015  }
3016  else {
3017  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3018  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3019  }
3020 
3021  return matchPoint(m_queryKeyPoints, m_queryDescriptors);
3022 }
3023 
3024 unsigned int vpKeyPoint::matchPoint(const std::vector<cv::KeyPoint> &queryKeyPoints, const cv::Mat &queryDescriptors)
3025 {
3026  m_queryKeyPoints = queryKeyPoints;
3027  m_queryDescriptors = queryDescriptors;
3028 
3029  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3030 
3031  if (m_filterType != noFilterMatching) {
3032  m_queryFilteredKeyPoints.clear();
3033  m_objectFilteredPoints.clear();
3034  m_filteredMatches.clear();
3035 
3036  filterMatches();
3037  }
3038  else {
3039  if (m_useMatchTrainToQuery) {
3040  // Keep only the query keypoints matched with a train keypoint
3041  m_queryFilteredKeyPoints.clear();
3042  m_filteredMatches.clear();
3043  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3044  m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3045  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3046  }
3047  }
3048  else {
3049  m_queryFilteredKeyPoints = m_queryKeyPoints;
3050  m_filteredMatches = m_matches;
3051  }
3052 
3053  if (!m_trainPoints.empty()) {
3054  m_objectFilteredPoints.clear();
3055  // Add the 3D object points so that a given index refers to the same
3056  // match in m_queryFilteredKeyPoints and in m_objectFilteredPoints,
3057  // i.e. both entries correspond to the same train object point
3058  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3059  // m_matches is normally ordered following the queryDescriptor index
3060  m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3061  }
3062  }
3063  }
3064 
3065  // Convert OpenCV type to ViSP type for compatibility
3066  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, m_currentImagePointsList);
3067  vpConvert::convertFromOpenCV(m_filteredMatches, m_matchedReferencePoints);
3068 
3069  return static_cast<unsigned int>(m_filteredMatches.size());
3070 }
3071 
3072 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
3073 {
3074  vpImageConvert::convert(I_color, m_I);
3075  return matchPoint(m_I, rectangle);
3076 }
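// A minimal usage sketch for the 2D matching entry points above: build the
// reference database once, then match each new frame against it. Iref and
// Icur are assumed to be loaded by the caller; the detector, descriptor and
// matcher choice is only illustrative. matchPoint() returns the number of
// filtered matches.
#if 0
static unsigned int exampleMatch2D(const vpImage<unsigned char> &Iref, const vpImage<unsigned char> &Icur)
{
  vpKeyPoint keypoint(vpKeyPoint::DETECTOR_ORB, vpKeyPoint::DESCRIPTOR_ORB, "BruteForce-Hamming");
  keypoint.buildReference(Iref);    // detect keypoints and extract descriptors on the reference image
  return keypoint.matchPoint(Icur); // detect, extract and match on the current image
}
#endif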
3077 
3078  bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3079  bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3080 {
3081  double error, elapsedTime;
3082  return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3083 }
3084 
3085  bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3086  bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3087 {
3088  double error, elapsedTime;
3089  return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
3090 }
3091 
3092  bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3093  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3094  const vpRect &rectangle)
3095 {
3096  // Check if we have training descriptors
3097  if (m_trainDescriptors.empty()) {
3098  std::cerr << "Reference is empty." << std::endl;
3099  if (!m_reference_computed) {
3100  std::cerr << "Reference is not computed." << std::endl;
3101  }
3102  std::cerr << "Matching is not possible." << std::endl;
3103 
3104  return false;
3105  }
3106 
3107  if (m_useAffineDetection) {
3108  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3109  std::vector<cv::Mat> listOfQueryDescriptors;
3110 
3111  // Detect keypoints and extract descriptors on multiple images
3112  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3113 
3114  // Flatten the different query keypoint lists
3115  m_queryKeyPoints.clear();
3116  for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3117  it != listOfQueryKeyPoints.end(); ++it) {
3118  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3119  }
3120 
3121  bool first = true;
3122  for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3123  ++it) {
3124  if (first) {
3125  first = false;
3126  it->copyTo(m_queryDescriptors);
3127  }
3128  else {
3129  m_queryDescriptors.push_back(*it);
3130  }
3131  }
3132  }
3133  else {
3134  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3135  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3136  }
3137 
3138  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3139 
3140  elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3141 
3142  if (m_filterType != noFilterMatching) {
3143  m_queryFilteredKeyPoints.clear();
3144  m_objectFilteredPoints.clear();
3145  m_filteredMatches.clear();
3146 
3147  filterMatches();
3148  }
3149  else {
3150  if (m_useMatchTrainToQuery) {
3151  // Keep only the query keypoints matched with a train keypoint
3152  m_queryFilteredKeyPoints.clear();
3153  m_filteredMatches.clear();
3154  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3155  m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3156  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3157  }
3158  }
3159  else {
3160  m_queryFilteredKeyPoints = m_queryKeyPoints;
3161  m_filteredMatches = m_matches;
3162  }
3163 
3164  if (!m_trainPoints.empty()) {
3165  m_objectFilteredPoints.clear();
3166  // Add the 3D object points so that a given index refers to the same
3167  // match in m_queryFilteredKeyPoints and in m_objectFilteredPoints,
3168  // i.e. both entries correspond to the same train object point
3169  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3170  // m_matches is normally ordered following the queryDescriptor index
3171  m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3172  }
3173  }
3174  }
3175 
3176  // Convert OpenCV type to ViSP type for compatibility
3177  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, m_currentImagePointsList);
3178  vpConvert::convertFromOpenCV(m_filteredMatches, m_matchedReferencePoints);
3179 
3180  // std::numeric_limits<double>::max() creates a compilation error under
3181  // Windows, where max is defined as a macro; fixing it would require #undef max
3182  error = DBL_MAX;
3183  m_ransacInliers.clear();
3184  m_ransacOutliers.clear();
3185 
3186  if (m_useRansacVVS) {
3187  std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3188  size_t cpt = 0;
3189  // Create a list of vpPoint with 2D coordinates (current keypoint
3190  // location) + 3D coordinates (world/object coordinates)
3191  for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3192  it != m_objectFilteredPoints.end(); ++it, cpt++) {
3193  vpPoint pt;
3194  pt.setWorldCoordinates(it->x, it->y, it->z);
3195 
3196  vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3197 
3198  double x = 0.0, y = 0.0;
3199  vpPixelMeterConversion::convertPoint(cam, imP, x, y);
3200  pt.set_x(x);
3201  pt.set_y(y);
3202 
3203  objectVpPoints[cpt] = pt;
3204  }
3205 
3206  std::vector<vpPoint> inliers;
3207  std::vector<unsigned int> inlierIndex;
3208 
3209  bool res = computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3210 
3211  std::map<unsigned int, bool> mapOfInlierIndex;
3212  m_matchRansacKeyPointsToPoints.clear();
3213 
3214  for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3215  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3216  m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3217  mapOfInlierIndex[*it] = true;
3218  }
3219 
3220  for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3221  if (mapOfInlierIndex.find((unsigned int)i) == mapOfInlierIndex.end()) {
3222  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3223  }
3224  }
3225 
3226  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3227 
3228  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3229  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3230  m_ransacInliers.begin(), matchRansacToVpImage);
3231 
3232  elapsedTime += m_poseTime;
3233 
3234  return res;
3235  }
3236  else {
3237  std::vector<cv::Point2f> imageFilteredPoints;
3238  cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3239  std::vector<int> inlierIndex;
3240  bool res = computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3241 
3242  std::map<int, bool> mapOfInlierIndex;
3243  m_matchRansacKeyPointsToPoints.clear();
3244 
3245  for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3246  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3247  m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3248  mapOfInlierIndex[*it] = true;
3249  }
3250 
3251  for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3252  if (mapOfInlierIndex.find((int)i) == mapOfInlierIndex.end()) {
3253  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3254  }
3255  }
3256 
3257  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3258 
3259  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3260  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3261  m_ransacInliers.begin(), matchRansacToVpImage);
3262 
3263  elapsedTime += m_poseTime;
3264 
3265  return res;
3266  }
3267 }
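// A sketch of the pose-estimation overload defined above, assuming the
// keypoint object holds learned 3D train points (e.g. from loadLearningData())
// and that the camera intrinsics below are placeholders to be replaced by a
// real calibration:
#if 0
static bool examplePoseEstimation(vpKeyPoint &keypoint, const vpImage<unsigned char> &Icur)
{
  vpCameraParameters cam(600.0, 600.0, 320.0, 240.0); // placeholder px, py, u0, v0
  vpHomogeneousMatrix cMo;
  double error = 0.0, elapsedTime = 0.0;
  return keypoint.matchPoint(Icur, cam, cMo, error, elapsedTime, nullptr, vpRect());
}
#endif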
3268 
3269  bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3270  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3271  const vpRect &rectangle)
3272 {
3273  vpImageConvert::convert(I_color, m_I);
3274  return (matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
3275 }
3276 
3277  bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox,
3278  vpImagePoint &centerOfGravity, const bool isPlanarObject,
3279  std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3280  double *meanDescriptorDistance, double *detectionScore, const vpRect &rectangle)
3281 {
3282  if (imPts1 != nullptr && imPts2 != nullptr) {
3283  imPts1->clear();
3284  imPts2->clear();
3285  }
3286 
3287  matchPoint(I, rectangle);
3288 
3289  double meanDescriptorDistanceTmp = 0.0;
3290  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3291  meanDescriptorDistanceTmp += (double)it->distance;
3292  }
3293 
3294  meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3295  double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3296 
3297  if (meanDescriptorDistance != nullptr) {
3298  *meanDescriptorDistance = meanDescriptorDistanceTmp;
3299  }
3300  if (detectionScore != nullptr) {
3301  *detectionScore = score;
3302  }
3303 
3304  if (m_filteredMatches.size() >= 4) {
3305  // Training / Reference 2D points
3306  std::vector<cv::Point2f> points1(m_filteredMatches.size());
3307  // Query / Current 2D points
3308  std::vector<cv::Point2f> points2(m_filteredMatches.size());
3309 
3310  for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3311  points1[i] = cv::Point2f(m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx].pt);
3312  points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx].pt);
3313  }
3314 
3315  std::vector<vpImagePoint> inliers;
3316  if (isPlanarObject) {
3317 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3318  cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3319 #else
3320  cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3321 #endif
3322 
3323  for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3324  // Compute reprojection error
3325  cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3326  realPoint.at<double>(0, 0) = points1[i].x;
3327  realPoint.at<double>(1, 0) = points1[i].y;
3328  realPoint.at<double>(2, 0) = 1.0;
3329 
3330  cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3331  double err_x = (reprojectedPoint.at<double>(0, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].x;
3332  double err_y = (reprojectedPoint.at<double>(1, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].y;
3333  double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3334 
3335  if (reprojectionError < 6.0) {
3336  inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3337  if (imPts1 != nullptr) {
3338  imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
3339  }
3340 
3341  if (imPts2 != nullptr) {
3342  imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3343  }
3344  }
3345  }
3346  }
3347  else if (m_filteredMatches.size() >= 8) {
3348  cv::Mat fundamentalInliers;
3349  cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3350 
3351  for (size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
3352  if (fundamentalInliers.at<uchar>((int)i, 0)) {
3353  inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3354 
3355  if (imPts1 != nullptr) {
3356  imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
3357  }
3358 
3359  if (imPts2 != nullptr) {
3360  imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3361  }
3362  }
3363  }
3364  }
3365 
3366  if (!inliers.empty()) {
3367  // Build a polygon with the list of inlier keypoints detected in the
3368  // current image to get the bounding box
3369  vpPolygon polygon(inliers);
3370  boundingBox = polygon.getBoundingBox();
3371 
3372  // Compute the center of gravity
3373  double meanU = 0.0, meanV = 0.0;
3374  for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
3375  meanU += it->get_u();
3376  meanV += it->get_v();
3377  }
3378 
3379  meanU /= (double)inliers.size();
3380  meanV /= (double)inliers.size();
3381 
3382  centerOfGravity.set_u(meanU);
3383  centerOfGravity.set_v(meanV);
3384  }
3385  }
3386  else {
3387  // Too few matches
3388  return false;
3389  }
3390 
3391  if (m_detectionMethod == detectionThreshold) {
3392  return meanDescriptorDistanceTmp < m_detectionThreshold;
3393  }
3394  else {
3395  return score > m_detectionScore;
3396  }
3397 }
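// A sketch of the detection entry point above, using its default arguments
// (planar object hypothesis, whole image searched): on success the homography
// inliers define the bounding box and the center of gravity of the object.
#if 0
static bool exampleDetect(vpKeyPoint &keypoint, const vpImage<unsigned char> &Icur)
{
  vpRect boundingBox;
  vpImagePoint centerOfGravity;
  return keypoint.matchPointAndDetect(Icur, boundingBox, centerOfGravity);
}
#endif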
3398 
3399  bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam,
3400  vpHomogeneousMatrix &cMo, double &error, double &elapsedTime, vpRect &boundingBox,
3401  vpImagePoint &centerOfGravity, bool (*func)(const vpHomogeneousMatrix &),
3402  const vpRect &rectangle)
3403 {
3404  bool isMatchOk = matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3405  if (isMatchOk) {
3406  // Use the pose estimated to project the model points in the image
3407  vpPoint pt;
3408  vpImagePoint imPt;
3409  std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
3410  size_t cpt = 0;
3411  for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
3412  pt = *it;
3413  pt.project(cMo);
3414  vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), imPt);
3415  modelImagePoints[cpt] = imPt;
3416  }
3417 
3418  // Build a polygon with the list of model image points to get the bounding
3419  // box
3420  vpPolygon polygon(modelImagePoints);
3421  boundingBox = polygon.getBoundingBox();
3422 
3423  // Compute the center of gravity of the current inlier keypoints
3424  double meanU = 0.0, meanV = 0.0;
3425  for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
3426  meanU += it->get_u();
3427  meanV += it->get_v();
3428  }
3429 
3430  meanU /= (double)m_ransacInliers.size();
3431  meanV /= (double)m_ransacInliers.size();
3432 
3433  centerOfGravity.set_u(meanU);
3434  centerOfGravity.set_v(meanV);
3435  }
3436 
3437  return isMatchOk;
3438 }
3439 
3440  void vpKeyPoint::detectExtractAffine(const vpImage<unsigned char> &I,
3441  std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
3442  std::vector<cv::Mat> &listOfDescriptors,
3443  std::vector<vpImage<unsigned char> > *listOfAffineI)
3444 {
3445 #if 0
3446  cv::Mat img;
3447  vpImageConvert::convert(I, img);
3448  listOfKeypoints.clear();
3449  listOfDescriptors.clear();
3450 
3451  for (int tl = 1; tl < 6; tl++) {
3452  double t = pow(2, 0.5 * tl);
3453  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3454  std::vector<cv::KeyPoint> keypoints;
3455  cv::Mat descriptors;
3456 
3457  cv::Mat timg, mask, Ai;
3458  img.copyTo(timg);
3459 
3460  affineSkew(t, phi, timg, mask, Ai);
3461 
3462 
3463  if (listOfAffineI != nullptr) {
3464  cv::Mat img_disp;
3465  bitwise_and(mask, timg, img_disp);
3466  vpImage<unsigned char> tI;
3467  vpImageConvert::convert(img_disp, tI);
3468  listOfAffineI->push_back(tI);
3469  }
3470 #if 0
3471  cv::Mat img_disp;
3472  cv::bitwise_and(mask, timg, img_disp);
3473  cv::namedWindow("Skew", cv::WINDOW_AUTOSIZE); // Create a window for display.
3474  cv::imshow("Skew", img_disp);
3475  cv::waitKey(0);
3476 #endif
3477 
3478  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3479  it != m_detectors.end(); ++it) {
3480  std::vector<cv::KeyPoint> kp;
3481  it->second->detect(timg, kp, mask);
3482  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3483  }
3484 
3485  double elapsedTime;
3486  extract(timg, keypoints, descriptors, elapsedTime);
3487 
3488  for (unsigned int i = 0; i < keypoints.size(); i++) {
3489  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3490  cv::Mat kpt_t = Ai * cv::Mat(kpt);
3491  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
3492  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
3493  }
3494 
3495  listOfKeypoints.push_back(keypoints);
3496  listOfDescriptors.push_back(descriptors);
3497  }
3498  }
3499 
3500 #else
3501  cv::Mat img;
3502  vpImageConvert::convert(I, img);
3503 
3504  // Create a vector for storing the affine skew parameters
3505  std::vector<std::pair<double, int> > listOfAffineParams;
3506  for (int tl = 1; tl < 6; tl++) {
3507  double t = pow(2, 0.5 * tl);
3508  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3509  listOfAffineParams.push_back(std::pair<double, int>(t, phi));
3510  }
3511  }
3512 
3513  listOfKeypoints.resize(listOfAffineParams.size());
3514  listOfDescriptors.resize(listOfAffineParams.size());
3515 
3516  if (listOfAffineI != nullptr) {
3517  listOfAffineI->resize(listOfAffineParams.size());
3518  }
3519 
3520 #ifdef VISP_HAVE_OPENMP
3521 #pragma omp parallel for
3522 #endif
3523  for (int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
3524  std::vector<cv::KeyPoint> keypoints;
3525  cv::Mat descriptors;
3526 
3527  cv::Mat timg, mask, Ai;
3528  img.copyTo(timg);
3529 
3530  affineSkew(listOfAffineParams[(size_t)cpt].first, listOfAffineParams[(size_t)cpt].second, timg, mask, Ai);
3531 
3532  if (listOfAffineI != nullptr) {
3533  cv::Mat img_disp;
3534  bitwise_and(mask, timg, img_disp);
3535  vpImage<unsigned char> tI;
3536  vpImageConvert::convert(img_disp, tI);
3537  (*listOfAffineI)[(size_t)cpt] = tI;
3538  }
3539 
3540 #if 0
3541  cv::Mat img_disp;
3542  cv::bitwise_and(mask, timg, img_disp);
3543  cv::namedWindow("Skew", cv::WINDOW_AUTOSIZE); // Create a window for display.
3544  cv::imshow("Skew", img_disp);
3545  cv::waitKey(0);
3546 #endif
3547 
3548  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3549  it != m_detectors.end(); ++it) {
3550  std::vector<cv::KeyPoint> kp;
3551  it->second->detect(timg, kp, mask);
3552  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3553  }
3554 
3555  double elapsedTime;
3556  extract(timg, keypoints, descriptors, elapsedTime);
3557 
3558  for (size_t i = 0; i < keypoints.size(); i++) {
3559  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3560  cv::Mat kpt_t = Ai * cv::Mat(kpt);
3561  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
3562  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
3563  }
3564 
3565  listOfKeypoints[(size_t)cpt] = keypoints;
3566  listOfDescriptors[(size_t)cpt] = descriptors;
3567  }
3568 #endif
3569 }
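// The affine simulation above is reached from the matchPoint() entry points
// through m_useAffineDetection; a sketch, assuming the setUseAffineDetection()
// setter declared in vpKeyPoint.h:
#if 0
static void exampleAffineMatching(vpKeyPoint &keypoint, const vpImage<unsigned char> &Icur)
{
  keypoint.setUseAffineDetection(true); // matchPoint() now detects and extracts over all simulated views
  keypoint.matchPoint(Icur);
}
#endif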
3570 
3571  void vpKeyPoint::reset()
3572 {
3573  // vpBasicKeyPoint class
3574  m_referenceImagePointsList.clear();
3575  m_currentImagePointsList.clear();
3576  m_matchedReferencePoints.clear();
3577  m_reference_computed = false;
3578 
3579  m_computeCovariance = false;
3580  m_covarianceMatrix = vpMatrix();
3581  m_currentImageId = 0;
3582  m_detectionMethod = detectionScore;
3583  m_detectionScore = 0.15;
3584  m_detectionThreshold = 100.0;
3585  m_detectionTime = 0.0;
3586  m_detectorNames.clear();
3587  m_detectors.clear();
3588  m_extractionTime = 0.0;
3589  m_extractorNames.clear();
3590  m_extractors.clear();
3591  m_filteredMatches.clear();
3592  m_filterType = ratioDistanceThreshold;
3593  m_imageFormat = jpgImageFormat;
3594  m_knnMatches.clear();
3595  m_mapOfImageId.clear();
3596  m_mapOfImages.clear();
3597  m_matcher = cv::Ptr<cv::DescriptorMatcher>();
3598  m_matcherName = "BruteForce-Hamming";
3599  m_matches.clear();
3600  m_matchingFactorThreshold = 2.0;
3601  m_matchingRatioThreshold = 0.85;
3602  m_matchingTime = 0.0;
3603  m_matchRansacKeyPointsToPoints.clear();
3604  m_nbRansacIterations = 200;
3605  m_nbRansacMinInlierCount = 100;
3606  m_objectFilteredPoints.clear();
3607  m_poseTime = 0.0;
3608  m_queryDescriptors = cv::Mat();
3609  m_queryFilteredKeyPoints.clear();
3610  m_queryKeyPoints.clear();
3611  m_ransacConsensusPercentage = 20.0;
3612  m_ransacFilterFlag = vpPose::NO_FILTER;
3613  m_ransacInliers.clear();
3614  m_ransacOutliers.clear();
3615  m_ransacParallel = true;
3616  m_ransacParallelNbThreads = 0;
3617  m_ransacReprojectionError = 6.0;
3618  m_ransacThreshold = 0.01;
3619  m_trainDescriptors = cv::Mat();
3620  m_trainKeyPoints.clear();
3621  m_trainPoints.clear();
3622  m_trainVpPoints.clear();
3623  m_useAffineDetection = false;
3624 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
3625  m_useBruteForceCrossCheck = true;
3626 #endif
3627  m_useConsensusPercentage = false;
3628  m_useKnn = true; // as m_filterType == ratioDistanceThreshold
3629  m_useMatchTrainToQuery = false;
3630  m_useRansacVVS = true;
3631  m_useSingleMatchFilter = true;
3632 
3633  m_detectorNames.push_back("ORB");
3634  m_extractorNames.push_back("ORB");
3635 
3636  init();
3637 }
3638 
3639 void vpKeyPoint::saveLearningData(const std::string &filename, bool binaryMode, bool saveTrainingImages)
3640 {
3641  std::string parent = vpIoTools::getParent(filename);
3642  if (!parent.empty()) {
3643  vpIoTools::makeDirectory(parent);
3644  }
3645 
3646  std::map<int, std::string> mapOfImgPath;
3647  if (saveTrainingImages) {
3648 #ifdef VISP_HAVE_MODULE_IO
3649  // Save the training image files in the same directory
3650  unsigned int cpt = 0;
3651 
3652  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3653  ++it, cpt++) {
3654  if (cpt > 999) {
3655  throw vpException(vpException::fatalError, "The number of training images to save is too big !");
3656  }
3657 
3658  std::stringstream ss;
3659  ss << "train_image_" << std::setfill('0') << std::setw(3) << cpt;
3660 
3661  switch (m_imageFormat) {
3662  case jpgImageFormat:
3663  ss << ".jpg";
3664  break;
3665 
3666  case pngImageFormat:
3667  ss << ".png";
3668  break;
3669 
3670  case ppmImageFormat:
3671  ss << ".ppm";
3672  break;
3673 
3674  case pgmImageFormat:
3675  ss << ".pgm";
3676  break;
3677 
3678  default:
3679  ss << ".png";
3680  break;
3681  }
3682 
3683  std::string imgFilename = ss.str();
3684  mapOfImgPath[it->first] = imgFilename;
3685  vpImageIo::write(it->second, parent + (!parent.empty() ? "/" : "") + imgFilename);
3686  }
3687 #else
3688  std::cout << "Warning: in vpKeyPoint::saveLearningData() training images "
3689  "are not saved because "
3690  "visp_io module is not available !"
3691  << std::endl;
3692 #endif
3693  }
3694 
3695  bool have3DInfo = m_trainPoints.size() > 0;
3696  if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
3697  throw vpException(vpException::fatalError, "List of keypoints and list of 3D points have different size !");
3698  }
3699 
3700  if (binaryMode) {
3701  // Save the learning data into a little-endian binary file.
3702  std::ofstream file(filename.c_str(), std::ofstream::binary);
3703  if (!file.is_open()) {
3704  throw vpException(vpException::ioError, "Cannot create the file.");
3705  }
3706 
3707  // Write info about training images
3708  int nbImgs = (int)mapOfImgPath.size();
3709  vpIoTools::writeBinaryValueLE(file, nbImgs);
3710 
3711 #ifdef VISP_HAVE_MODULE_IO
3712  for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3713  // Write image_id
3714  int id = it->first;
3715  vpIoTools::writeBinaryValueLE(file, id);
3716 
3717  // Write image path
3718  std::string path = it->second;
3719  int length = (int)path.length();
3720  vpIoTools::writeBinaryValueLE(file, length);
3721 
3722  for (int cpt = 0; cpt < length; cpt++) {
3723  file.write((char *)(&path[(size_t)cpt]), sizeof(path[(size_t)cpt]));
3724  }
3725  }
3726 #endif
3727 
3728  // Write if we have 3D point information
3729  int have3DInfoInt = have3DInfo ? 1 : 0;
3730  vpIoTools::writeBinaryValueLE(file, have3DInfoInt);
3731 
3732  int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3733  int descriptorType = m_trainDescriptors.type();
3734 
3735  // Write the number of descriptors
3736  vpIoTools::writeBinaryValueLE(file, nRows);
3737 
3738  // Write the size of the descriptor
3739  vpIoTools::writeBinaryValueLE(file, nCols);
3740 
3741  // Write the type of the descriptor
3742  vpIoTools::writeBinaryValueLE(file, descriptorType);
3743 
3744  for (int i = 0; i < nRows; i++) {
3745  unsigned int i_ = (unsigned int)i;
3746  // Write u
3747  float u = m_trainKeyPoints[i_].pt.x;
3748  vpIoTools::writeBinaryValueLE(file, u);
3749 
3750  // Write v
3751  float v = m_trainKeyPoints[i_].pt.y;
3752  vpIoTools::writeBinaryValueLE(file, v);
3753 
3754  // Write size
3755  float size = m_trainKeyPoints[i_].size;
3756  vpIoTools::writeBinaryValueLE(file, size);
3757 
3758  // Write angle
3759  float angle = m_trainKeyPoints[i_].angle;
3760  vpIoTools::writeBinaryValueLE(file, angle);
3761 
3762  // Write response
3763  float response = m_trainKeyPoints[i_].response;
3764  vpIoTools::writeBinaryValueLE(file, response);
3765 
3766  // Write octave
3767  int octave = m_trainKeyPoints[i_].octave;
3768  vpIoTools::writeBinaryValueLE(file, octave);
3769 
3770  // Write class_id
3771  int class_id = m_trainKeyPoints[i_].class_id;
3772  vpIoTools::writeBinaryValueLE(file, class_id);
3773 
3774 // Write image_id
3775 #ifdef VISP_HAVE_MODULE_IO
3776  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3777  int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
3778  vpIoTools::writeBinaryValueLE(file, image_id);
3779 #else
3780  int image_id = -1;
3781  // file.write((char *)(&image_id), sizeof(image_id));
3782  vpIoTools::writeBinaryValueLE(file, image_id);
3783 #endif
3784 
3785  if (have3DInfo) {
3786  float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
3787  // Write oX
3788  vpIoTools::writeBinaryValueLE(file, oX);
3789 
3790  // Write oY
3791  vpIoTools::writeBinaryValueLE(file, oY);
3792 
3793  // Write oZ
3794  vpIoTools::writeBinaryValueLE(file, oZ);
3795  }
3796 
3797  for (int j = 0; j < nCols; j++) {
3798  // Write the descriptor value
3799  switch (descriptorType) {
3800  case CV_8U:
3801  file.write((char *)(&m_trainDescriptors.at<unsigned char>(i, j)),
3802  sizeof(m_trainDescriptors.at<unsigned char>(i, j)));
3803  break;
3804 
3805  case CV_8S:
3806  file.write((char *)(&m_trainDescriptors.at<char>(i, j)), sizeof(m_trainDescriptors.at<char>(i, j)));
3807  break;
3808 
3809  case CV_16U:
3810  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<unsigned short int>(i, j));
3811  break;
3812 
3813  case CV_16S:
3814  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<short int>(i, j));
3815  break;
3816 
3817  case CV_32S:
3818  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<int>(i, j));
3819  break;
3820 
3821  case CV_32F:
3822  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<float>(i, j));
3823  break;
3824 
3825  case CV_64F:
3826  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<double>(i, j));
3827  break;
3828 
3829  default:
3830  throw vpException(vpException::fatalError, "Problem with the data type of descriptors !");
3831  break;
3832  }
3833  }
3834  }
3835 
3836  file.close();
3837  }
3838  else {
3839 #if defined(VISP_HAVE_PUGIXML)
3840  pugi::xml_document doc;
3841  pugi::xml_node node = doc.append_child(pugi::node_declaration);
3842  node.append_attribute("version") = "1.0";
3843  node.append_attribute("encoding") = "UTF-8";
3844 
3845  if (!doc) {
3846  throw vpException(vpException::ioError, "Error with file: ", filename.c_str());
3847  }
3848 
3849  pugi::xml_node root_node = doc.append_child("LearningData");
3850 
3851  // Write the training images info
3852  pugi::xml_node image_node = root_node.append_child("TrainingImageInfo");
3853 
3854 #ifdef VISP_HAVE_MODULE_IO
3855  for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3856  pugi::xml_node image_info_node = image_node.append_child("trainImg");
3857  image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
3858  std::stringstream ss;
3859  ss << it->first;
3860  image_info_node.append_attribute("image_id") = ss.str().c_str();
3861  }
3862 #endif
3863 
3864  // Write information about descriptors
3865  pugi::xml_node descriptors_info_node = root_node.append_child("DescriptorsInfo");
3866 
3867  int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3868  int descriptorType = m_trainDescriptors.type();
3869 
3870  // Write the number of rows
3871  descriptors_info_node.append_child("nrows").append_child(pugi::node_pcdata).text() = nRows;
3872 
3873  // Write the number of cols
3874  descriptors_info_node.append_child("ncols").append_child(pugi::node_pcdata).text() = nCols;
3875 
3876  // Write the descriptors type
3877  descriptors_info_node.append_child("type").append_child(pugi::node_pcdata).text() = descriptorType;
3878 
3879  for (int i = 0; i < nRows; i++) {
3880  unsigned int i_ = (unsigned int)i;
3881  pugi::xml_node descriptor_node = root_node.append_child("DescriptorInfo");
3882 
3883  descriptor_node.append_child("u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
3884  descriptor_node.append_child("v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
3885  descriptor_node.append_child("size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
3886  descriptor_node.append_child("angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
3887  descriptor_node.append_child("response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
3888  descriptor_node.append_child("octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
3889  descriptor_node.append_child("class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
3890 
3891 #ifdef VISP_HAVE_MODULE_IO
3892  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3893  descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() =
3894  ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
3895 #else
3896  descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() = -1;
3897 #endif
3898 
3899  if (have3DInfo) {
3900  descriptor_node.append_child("oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
3901  descriptor_node.append_child("oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
3902  descriptor_node.append_child("oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
3903  }
3904 
3905  pugi::xml_node desc_node = descriptor_node.append_child("desc");
3906 
3907  for (int j = 0; j < nCols; j++) {
3908  switch (descriptorType) {
3909  case CV_8U: {
3910  // Promote the unsigned char to an int:
3911  // val_tmp holds the numeric value that will be written.
3912  // We save the value in numeric form, otherwise the XML library will
3913  // not be able to parse it. A better solution could be possible.
3914  int val_tmp = m_trainDescriptors.at<unsigned char>(i, j);
3915  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
3916  } break;
3917 
3918  case CV_8S: {
3919  // Promote the char to an int:
3920  // val_tmp holds the numeric value that will be written.
3921  // We save the value in numeric form, otherwise the XML library will
3922  // not be able to parse it. A better solution could be possible.
3923  int val_tmp = m_trainDescriptors.at<char>(i, j);
3924  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
3925  } break;
3926 
3927  case CV_16U:
3928  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
3929  m_trainDescriptors.at<unsigned short int>(i, j);
3930  break;
3931 
3932  case CV_16S:
3933  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<short int>(i, j);
3934  break;
3935 
3936  case CV_32S:
3937  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<int>(i, j);
3938  break;
3939 
3940  case CV_32F:
3941  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<float>(i, j);
3942  break;
3943 
3944  case CV_64F:
3945  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<double>(i, j);
3946  break;
3947 
3948  default:
3949  throw vpException(vpException::fatalError, "Problem with the data type of descriptors !");
3950  break;
3951  }
3952  }
3953  }
3954 
3955  doc.save_file(filename.c_str(), PUGIXML_TEXT(" "), pugi::format_default, pugi::encoding_utf8);
3956 #else
3957  throw vpException(vpException::ioError, "vpKeyPoint::saveLearningData() in XML format requires the built-in pugixml 3rdparty, which is not available");
3958 #endif
3959  }
3960 }
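// A round-trip sketch for the learning data: save after the reference has been
// built, reload (possibly in another process) before matching. The filename is
// an assumption; binaryMode = true selects the little-endian binary layout
// written above, false the pugixml-based XML layout.
#if 0
static void exampleSaveAndReload(vpKeyPoint &keypoint)
{
  keypoint.saveLearningData("learning_data.bin", true, true);
  keypoint.loadLearningData("learning_data.bin", true);
}
#endif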
3961 
3962 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
3963 #ifndef DOXYGEN_SHOULD_SKIP_THIS
3964 // From OpenCV 2.4.11 source code.
3965 struct KeypointResponseGreaterThanThreshold
3966 {
3967  KeypointResponseGreaterThanThreshold(float _value) : value(_value) { }
3968  inline bool operator()(const cv::KeyPoint &kpt) const { return kpt.response >= value; }
3969  float value;
3970 };
3971 
3972 struct KeypointResponseGreater
3973 {
3974  inline bool operator()(const cv::KeyPoint &kp1, const cv::KeyPoint &kp2) const { return kp1.response > kp2.response; }
3975 };
3976 
3977 // takes keypoints and culls them by the response
3978 void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints, int n_points)
3979 {
3980  // this is only necessary if the keypoints size is greater than the number
3981  // of desired points.
3982  if (n_points >= 0 && keypoints.size() > (size_t)n_points) {
3983  if (n_points == 0) {
3984  keypoints.clear();
3985  return;
3986  }
3987  // first use nth element to partition the keypoints into the best and
3988  // worst.
3989  std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
3990  // this is the boundary response, and in the case of FAST may be ambiguous
3991  float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
3992  // use std::partition to grab all of the keypoints with the boundary
3993  // response.
3994  std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
3995  keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
3996  // resize the keypoints, given this new end point. nth_element and
3997  // partition reordered the points inplace
3998  keypoints.resize((size_t)(new_end - keypoints.begin()));
3999  }
4000 }
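// Usage sketch for the filter above: cap a detection result to the strongest
// responses (the budget of 500 is an arbitrary example). Keypoints tied with
// the boundary response are all kept, so slightly more than n_points may
// remain.
#if 0
static void exampleRetainBest(std::vector<cv::KeyPoint> &keypoints)
{
  vpKeyPoint::KeyPointsFilter::retainBest(keypoints, 500);
}
#endif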
4001 
4002 struct RoiPredicate
4003 {
4004  RoiPredicate(const cv::Rect &_r) : r(_r) { }
4005 
4006  bool operator()(const cv::KeyPoint &keyPt) const { return !r.contains(keyPt.pt); }
4007 
4008  cv::Rect r;
4009 };
4010 
4011 void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4012  int borderSize)
4013 {
4014  if (borderSize > 0) {
4015  if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4016  keypoints.clear();
4017  else
4018  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4019  RoiPredicate(cv::Rect(
4020  cv::Point(borderSize, borderSize),
4021  cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4022  keypoints.end());
4023  }
4024 }
4025 
4026 struct SizePredicate
4027 {
4028  SizePredicate(float _minSize, float _maxSize) : minSize(_minSize), maxSize(_maxSize) { }
4029 
4030  bool operator()(const cv::KeyPoint &keyPt) const
4031  {
4032  float size = keyPt.size;
4033  return (size < minSize) || (size > maxSize);
4034  }
4035 
4036  float minSize, maxSize;
4037 };
4038 
4039 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize)
4040 {
4041  CV_Assert(minSize >= 0);
4042  CV_Assert(maxSize >= 0);
4043  CV_Assert(minSize <= maxSize);
4044 
4045  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4046 }
4047 
4048 class MaskPredicate
4049 {
4050 public:
4051  MaskPredicate(const cv::Mat &_mask) : mask(_mask) { }
4052  bool operator()(const cv::KeyPoint &key_pt) const
4053  {
4054  return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
4055  }
4056 
4057 private:
4058  const cv::Mat mask;
4059 };
4060 
4061 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask)
4062 {
4063  if (mask.empty())
4064  return;
4065 
4066  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4067 }
4068 
4069 struct KeyPoint_LessThan
4070 {
4071  KeyPoint_LessThan(const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) { }
4072  bool operator()(size_t i, size_t j) const
4073  {
4074  const cv::KeyPoint &kp1 = (*kp)[i];
4075  const cv::KeyPoint &kp2 = (*kp)[j];
4076  if (!vpMath::equal(kp1.pt.x, kp2.pt.x,
4077  std::numeric_limits<float>::epsilon())) {
4078  // equivalent to the original OpenCV test: if (kp1.pt.x != kp2.pt.x)
4079  return kp1.pt.x < kp2.pt.x;
4080  }
4081 
4082  if (!vpMath::equal(kp1.pt.y, kp2.pt.y,
4083  std::numeric_limits<float>::epsilon())) {
4084  // equivalent to the original OpenCV test: if (kp1.pt.y != kp2.pt.y)
4085  return kp1.pt.y < kp2.pt.y;
4086  }
4087 
4088  if (!vpMath::equal(kp1.size, kp2.size,
4089  std::numeric_limits<float>::epsilon())) {
4090  // equivalent to the original OpenCV test: if (kp1.size != kp2.size)
4091  return kp1.size > kp2.size;
4092  }
4093 
4094  if (!vpMath::equal(kp1.angle, kp2.angle,
4095  std::numeric_limits<float>::epsilon())) {
4096  // equivalent to the original OpenCV test: if (kp1.angle != kp2.angle)
4097  return kp1.angle < kp2.angle;
4098  }
4099 
4100  if (!vpMath::equal(kp1.response, kp2.response,
4101  std::numeric_limits<float>::epsilon())) {
4102  // equivalent to the original OpenCV test: if (kp1.response != kp2.response)
4103  return kp1.response > kp2.response;
4104  }
4105 
4106  if (kp1.octave != kp2.octave) {
4107  return kp1.octave > kp2.octave;
4108  }
4109 
4110  if (kp1.class_id != kp2.class_id) {
4111  return kp1.class_id > kp2.class_id;
4112  }
4113 
4114  return i < j;
4115  }
4116  const std::vector<cv::KeyPoint> *kp;
4117 };
4118 
4119 void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4120 {
4121  size_t i, j, n = keypoints.size();
4122  std::vector<size_t> kpidx(n);
4123  std::vector<uchar> mask(n, (uchar)1);
4124 
4125  for (i = 0; i < n; i++) {
4126  kpidx[i] = i;
4127  }
4128  std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4129  for (i = 1, j = 0; i < n; i++) {
4130  cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4131  cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4132  // if (kp1.pt.x != kp2.pt.x || kp1.pt.y != kp2.pt.y || kp1.size !=
4133  // kp2.size || kp1.angle != kp2.angle) {
4134  if (!vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4135  !vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4136  !vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4137  !vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4138  j = i;
4139  }
4140  else {
4141  mask[kpidx[i]] = 0;
4142  }
4143  }
4144 
4145  for (i = j = 0; i < n; i++) {
4146  if (mask[i]) {
4147  if (i != j) {
4148  keypoints[j] = keypoints[i];
4149  }
4150  j++;
4151  }
4152  }
4153  keypoints.resize(j);
4154 }
4155 
4156 /*
4157  * PyramidAdaptedFeatureDetector
4158  */
4159 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &detector,
4160  int maxLevel)
4161  : m_detector(detector), m_maxLevel(maxLevel)
4162 { }
4163 
4164 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty() const
4165 {
4166  return m_detector.empty() || m_detector->empty();
4167 }
4168 
4169 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4170  CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4171 {
4172  detectImpl(image.getMat(), keypoints, mask.getMat());
4173 }
4174 
4175 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4176  const cv::Mat &mask) const
4177 {
4178  cv::Mat src = image;
4179  cv::Mat src_mask = mask;
4180 
4181  cv::Mat dilated_mask;
4182  if (!mask.empty()) {
4183  cv::dilate(mask, dilated_mask, cv::Mat());
4184  cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4185  mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4186  dilated_mask = mask255;
4187  }
4188 
4189  for (int l = 0, multiplier = 1; l <= m_maxLevel; ++l, multiplier *= 2) {
4190  // Detect on current level of the pyramid
4191  std::vector<cv::KeyPoint> new_pts;
4192  m_detector->detect(src, new_pts, src_mask);
4193  std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4194  for (; it != end; ++it) {
4195  it->pt.x *= multiplier;
4196  it->pt.y *= multiplier;
4197  it->size *= multiplier;
4198  it->octave = l;
4199  }
4200  keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4201 
4202  // Downsample
4203  if (l < m_maxLevel) {
4204  cv::Mat dst;
4205  pyrDown(src, dst);
4206  src = dst;
4207 
4208  if (!mask.empty())
4209  resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
4210  }
4211  }
4212 
4213  if (!mask.empty())
4214  vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4215 }
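// Usage sketch for the pyramid adapter above: wrap a single-scale detector so
// that detection is repeated on maxLevel + 1 progressively halved resolutions,
// with keypoint coordinates, sizes and octaves mapped back to the original
// image. The FAST detector choice is illustrative.
#if 0
static void examplePyramidDetect(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints)
{
  cv::Ptr<cv::FeatureDetector> fast = cv::FastFeatureDetector::create();
  vpKeyPoint::PyramidAdaptedFeatureDetector pyramid(fast, 3); // pyramid levels 0..3
  pyramid.detect(image, keypoints, cv::noArray());
}
#endif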
4216 #endif
4217 #endif
4218 
4219 #elif !defined(VISP_BUILD_SHARED_LIBS)
4220 // Work around to avoid warning: libvisp_vision.a(vpKeyPoint.cpp.o) has no symbols
4221 void dummy_vpKeyPoint() { };
4222 #endif