/*
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See https://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Key point functionalities.
 */

#include <iomanip>
#include <limits>

#include <visp3/core/vpIoTools.h>
#include <visp3/vision/vpKeyPoint.h>

#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D)

#include <pugixml.hpp>

namespace
{
// Specific type transformation functions
inline cv::DMatch knnToDMatch(const std::vector<cv::DMatch> &knnMatches)
{
  if (!knnMatches.empty()) {
    return knnMatches[0];
  }

  return cv::DMatch();
}

inline vpImagePoint matchRansacToVpImage(const std::pair<cv::KeyPoint, cv::Point3f> &pair)
{
  return vpImagePoint(pair.first.pt.y, pair.first.pt.x);
}

} // namespace

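// A minimal usage sketch for this class (illustrative only; the image setup
// and the default detector/matcher arguments are assumed from the standard
// ViSP workflow):
//
//   vpImage<unsigned char> Iref, Icur;          // reference and current images
//   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
//   keypoint.buildReference(Iref);              // detect + describe the reference keypoints
//   unsigned int nbMatches = keypoint.matchPoint(Icur); // match the current image
//   keypoint.display(Iref, Icur);               // draw crosses on the matched keypoints
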
vpKeyPoint::vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
    m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();

  m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
  m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);

  init();
}

vpKeyPoint::vpKeyPoint(const std::string &detectorName, const std::string &extractorName,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
    m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();

  m_detectorNames.push_back(detectorName);
  m_extractorNames.push_back(extractorName);

  init();
}

vpKeyPoint::vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
    m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
    m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
    m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
    m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
    m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
    m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0),
    m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(), m_ransacParallel(false),
    m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01), m_trainDescriptors(),
    m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I(), m_maxFeatures(-1)
{
  initFeatureNames();
  init();
}

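// Apply an affine transformation that simulates a change of camera viewpoint,
// in the spirit of the ASIFT approach: `phi` is an in-plane rotation in
// degrees and `tilt` an anisotropic scaling along x (preceded by a Gaussian
// blur to limit aliasing). The valid image area is returned in `mask` and the
// inverse affine transform in `Ai`.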
void vpKeyPoint::affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
{
  int h = img.rows;
  int w = img.cols;

  mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));

  cv::Mat A = cv::Mat::eye(2, 3, CV_32F);

  // if (phi != 0.0) {
  if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    phi *= M_PI / 180.;
    double s = sin(phi);
    double c = cos(phi);

    A = (cv::Mat_<float>(2, 2) << c, -s, s, c);

    cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
    cv::Mat tcorners = corners * A.t();
    cv::Mat tcorners_x, tcorners_y;
    tcorners.col(0).copyTo(tcorners_x);
    tcorners.col(1).copyTo(tcorners_y);
    std::vector<cv::Mat> channels;
    channels.push_back(tcorners_x);
    channels.push_back(tcorners_y);
    cv::merge(channels, tcorners);

    cv::Rect rect = cv::boundingRect(tcorners);
    A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);

    cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
  }
  // if (tilt != 1.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
    double s = 0.8 * sqrt(tilt * tilt - 1);
    cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
    cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
    A.row(0) = A.row(0) / tilt;
  }
  // if (tilt != 1.0 || phi != 0.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
      std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    h = img.rows;
    w = img.cols;
    cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
  }
  cv::invertAffineTransform(A, Ai);
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I) { return buildReference(I, vpRect()); }

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color) { return buildReference(I_color, vpRect()); }

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
                                        unsigned int width)
{
  return buildReference(I, vpRect(iP, width, height));
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
                                        unsigned int width)
{
  return buildReference(I_color, vpRect(iP, width, height));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpRect &rectangle)
{
  // Reset variables used when dealing with 3D models:
  // since no 3D point list is passed, we don't need these variables
  m_trainPoints.clear();
  m_mapOfImageId.clear();
  m_mapOfImages.clear();
  m_currentImageId = 1;

  if (m_useAffineDetection) {
    std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
    std::vector<cv::Mat> listOfTrainDescriptors;

    // Detect keypoints and extract descriptors on multiple images
    detectExtractAffine(I, listOfTrainKeyPoints, listOfTrainDescriptors);

    // Flatten the different train lists
    m_trainKeyPoints.clear();
    for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
         it != listOfTrainKeyPoints.end(); ++it) {
      m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
    }

    bool first = true;
    for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
         ++it) {
      if (first) {
        first = false;
        it->copyTo(m_trainDescriptors);
      }
      else {
        m_trainDescriptors.push_back(*it);
      }
    }
  }
  else {
    detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
    extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
  }

  // Save the correspondence between the keypoint class_id and the training
  // image_id in a map. Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(m_trainKeyPoints, m_referenceImagePointsList);

  m_reference_computed = true;

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
{
  vpImageConvert::convert(I_color, m_I);
  return (buildReference(m_I, rectangle));
}

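// Build the reference keypoints from an already detected list of keypoints and
// their known 3D coordinates. Since descriptor extraction can silently drop
// keypoints it cannot describe, the surviving keypoints are mapped back to
// their original indices through a hash of their fields (myKeypointHash) so
// that the 3D point list in points3f stays aligned with them.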
unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
                                        std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed.
    // Store the hash of a keypoint as the key and the index of the keypoint
    // as the value
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  return (buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &trainKeyPoints,
                                        std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed.
    // Store the hash of a keypoint as the key and the index of the keypoint
    // as the value
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  return (buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I,
                                        const std::vector<cv::KeyPoint> &trainKeyPoints,
                                        const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                        bool append, int class_id)
{
  if (!append) {
    m_trainPoints.clear();
    m_mapOfImageId.clear();
    m_mapOfImages.clear();
    m_currentImageId = 0;
    m_trainKeyPoints.clear();
  }

  m_currentImageId++;

  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
  // Set class_id if != -1
  if (class_id != -1) {
    for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
      it->class_id = class_id;
    }
  }

  // Save the correspondence between the keypoint class_id and the training
  // image_id in a map. Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
       ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Append reference lists
  m_trainKeyPoints.insert(m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
  if (!append) {
    trainDescriptors.copyTo(m_trainDescriptors);
  }
  else {
    m_trainDescriptors.push_back(trainDescriptors);
  }
  this->m_trainPoints.insert(m_trainPoints.end(), points3f.begin(), points3f.end());

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(this->m_trainKeyPoints, m_referenceImagePointsList);
  vpConvert::convertFromOpenCV(m_trainPoints, m_trainVpPoints);

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  m_reference_computed = true;

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const std::vector<cv::KeyPoint> &trainKeyPoints,
                                        const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                        bool append, int class_id)
{
  vpImageConvert::convert(I_color, m_I);
  return (buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id));
}

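// Compute the 3D object-frame coordinates of a detected keypoint: the pixel is
// back-projected to normalized coordinates (x, y), the depth is obtained by
// intersecting the viewing ray with the plane fitted on the first three ROI
// points (Z = -D / (A*x + B*y + C)), and the camera-frame point is finally
// transformed with cMo^-1.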
void vpKeyPoint::compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate.pt.x, candidate.pt.y, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point = cv::Point3f((float)point_obj[0], (float)point_obj[1], (float)point_obj[2]);
}

void vpKeyPoint::compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, vpPoint &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point.setWorldCoordinates(point_obj);
}
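
// Keep only the candidate keypoints lying inside one of the object polygons,
// compute their 3D coordinates from the polygon plane, and optionally keep the
// matching rows of the descriptor matrix. A candidate is removed from the list
// as soon as it is assigned to a polygon, so it cannot be counted twice.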
void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<cv::KeyPoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<cv::Point3f> &points, cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpImagePoint imPt;
  cv::Point3f pt;
  cv::Mat desc;

  std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      imPt.set_ij(it2->first.pt.y, it2->first.pt.x);
      if (it1->isInside(imPt)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != nullptr) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove candidate keypoint which is located on the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      }
      else {
        ++it2;
      }
    }
  }

  if (descriptors != nullptr) {
    desc.copyTo(*descriptors);
  }
}

void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<vpImagePoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<vpPoint> &points, cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpPoint pt;
  cv::Mat desc;

  std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      if (it1->isInside(it2->first)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != nullptr) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove candidate keypoint which is located on the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      }
      else {
        ++it2;
      }
    }
  }

  // Copy the filtered descriptors back to the output; mirrors the cv::KeyPoint
  // overload above (without this, the rows accumulated in desc were discarded)
  if (descriptors != nullptr) {
    desc.copyTo(*descriptors);
  }
}

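// Keep only the candidate keypoints lying on one of the object cylinders and
// compute their 3D coordinates: each pixel is back-projected to normalized
// coordinates and its depth on the cylinder is given by vpCylinder::computeZ();
// points with no valid depth (NaN or non-positive Z) are discarded.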
void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<cv::KeyPoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
    cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->pt.y, it1->pt.x)) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on cylinders
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->pt.x, it1->pt.y, xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(cv::Point3f((float)pt.get_oX(), (float)pt.get_oY(), (float)pt.get_oZ()));

            if (descriptors != nullptr) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != nullptr) {
    desc.copyTo(*descriptors);
  }
}

void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<vpImagePoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
    cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->get_i(), it1->get_j())) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on cylinders
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->get_u(), it1->get_v(), xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(pt);

            if (descriptors != nullptr) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != nullptr) {
    desc.copyTo(*descriptors);
  }
}

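// Estimate the pose cMo from 2D/3D correspondences with cv::solvePnPRansac().
// Returns false when fewer than 4 correspondences are available, when OpenCV
// throws, or when the optional user callback `func` rejects the estimated
// pose; `elapsedTime` receives the computation time in ms.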
bool vpKeyPoint::computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
                             const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
                             double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    std::cerr << "Not enough points to compute the pose (at least 4 points are needed)." << std::endl;

    return false;
  }

  cv::Mat cameraMatrix =
      (cv::Mat_<double>(3, 3) << cam.get_px(), 0, cam.get_u0(), 0, cam.get_py(), cam.get_v0(), 0, 0, 1);
  cv::Mat rvec, tvec;

  // Bug with OpenCV < 2.4.0 when zero distortion is provided by an empty array
  // (http://code.opencv.org/issues/1700, http://code.opencv.org/issues/1718):
  // what(): Distortion coefficients must be 1x4, 4x1, 1x5, 5x1, 1x8 or 8x1
  // floating-point vector in function cvProjectPoints2.
  // Fixed in OpenCV 2.4.0 (r7558).
  // cv::Mat distCoeffs;
  cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);

  try {
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
    // OpenCV 3.0.0 (2014/12/12)
    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError,
                       0.99, // confidence=0.99 (default): the probability
                             // that the algorithm produces a useful result
                       inlierIndex, cv::SOLVEPNP_ITERATIVE);
    // SOLVEPNP_ITERATIVE (default): iterative method based on Levenberg-Marquardt
    // optimization; finds the pose that minimizes the reprojection error, that is
    // the sum of squared distances between the observed projections imagePoints
    // and the objectPoints projected with projectPoints().
    // SOLVEPNP_P3P: based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
    // "Complete Solution Classification for the Perspective-Three-Point Problem";
    // requires exactly four object and image points.
    // SOLVEPNP_EPNP: introduced by F. Moreno-Noguer, V. Lepetit and P. Fua in the
    // paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation".
    // SOLVEPNP_DLS: based on the paper of Joel A. Hesch and Stergios I. Roumeliotis
    // "A Direct Least-Squares (DLS) Method for PnP".
    // SOLVEPNP_UPNP: based on the paper of A. Penate-Sanchez, J. Andrade-Cetto,
    // F. Moreno-Noguer "Exhaustive Linearization for Robust Camera Pose and Focal
    // Length Estimation"; also estimates the parameters f_x and f_y, assumed equal,
    // and updates cameraMatrix with the estimated focal length.
#else
    int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
    if (m_useConsensusPercentage) {
      nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
    }

    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
#endif
  }
  catch (cv::Exception &e) {
    std::cerr << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }
  vpTranslationVector translationVec(tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2));
  vpThetaUVector thetaUVector(rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2));
  cMo = vpHomogeneousMatrix(translationVec, thetaUVector);

  if (func != nullptr) {
    // Check the final pose returned by solvePnPRansac to discard
    // solutions that do not respect the pose criterion.
    if (!func(cMo)) {
      elapsedTime = (vpTime::measureTimeMs() - t);
      return false;
    }
  }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return true;
}

bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, double &elapsedTime,
                             bool (*func)(const vpHomogeneousMatrix &))
{
  std::vector<unsigned int> inlierIndex;
  return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
}

bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex, double &elapsedTime,
                             bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (objectVpPoints.size() < 4) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    // std::cerr << "Not enough points to compute the pose (at least 4 points are needed)." << std::endl;

    return false;
  }

  vpPose pose;

  for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
    pose.addPoint(*it);
  }

  unsigned int nbInlierToReachConsensus = (unsigned int)m_nbRansacMinInlierCount;
  if (m_useConsensusPercentage) {
    nbInlierToReachConsensus =
        (unsigned int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
  }

  pose.setRansacFilterFlag(m_ransacFilterFlag);
  pose.setUseParallelRansac(m_ransacParallel);
  pose.setNbParallelRansacThreads(m_ransacParallelNbThreads);
  pose.setRansacNbInliersToReachConsensus(nbInlierToReachConsensus);
  pose.setRansacThreshold(m_ransacThreshold);
  pose.setRansacMaxTrials(m_nbRansacIterations);

  bool isRansacPoseEstimationOk = false;
  try {
    pose.setCovarianceComputation(m_computeCovariance);
    isRansacPoseEstimationOk = pose.computePose(vpPose::RANSAC, cMo, func);
    inliers = pose.getRansacInliers();
    inlierIndex = pose.getRansacInlierIndex();

    if (m_computeCovariance) {
      m_covarianceMatrix = pose.getCovarianceMatrix();
    }
  }
  catch (const vpException &e) {
    std::cerr << "e=" << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }

  // if (func != nullptr && isRansacPoseEstimationOk) {
  //   // Check the final pose returned by the RANSAC VVS pose estimation as in
  //   // some rare cases we can converge toward a final cMo that does not respect
  //   // the pose criterion even if the 4 minimal points picked respect it.
  //   if (!func(cMo)) {
  //     elapsedTime = (vpTime::measureTimeMs() - t);
  //     return false;
  //   }
  // }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return isRansacPoseEstimationOk;
}

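// Compute the mean reprojection error, in pixels, of the matched 3D points
// projected with the estimated pose cMo_est: each 3D point is projected into
// the image and its Euclidean distance to the detected keypoint is averaged
// over all the matches.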
double vpKeyPoint::computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
                                              const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est)
{
  if (matchKeyPoints.size() == 0) {
    // return std::numeric_limits<double>::max(); // causes an error under
    // Windows; to fix it we would have to add #undef max
    return DBL_MAX;
  }

  std::vector<double> errors(matchKeyPoints.size());
  size_t cpt = 0;
  vpPoint pt;
  for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
       it != matchKeyPoints.end(); ++it, cpt++) {
    pt.set_oX(it->second.x);
    pt.set_oY(it->second.y);
    pt.set_oZ(it->second.z);
    pt.project(cMo_est);
    double u = 0.0, v = 0.0;
    vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), u, v);
    errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
  }

  return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
}

void vpKeyPoint::createImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
                                     vpImage<unsigned char> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = ((std::max))(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<unsigned char>(height, width);
}

void vpKeyPoint::createImageMatching(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent,
                                     vpImage<vpRGBa> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = ((std::max))(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<vpRGBa>(height, width);
}

void vpKeyPoint::createImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
{
  // Number of images in the training database + the current image in which we
  // want to detect the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  }
  else {
    // Multiple training images, display them as a mosaic image
    // (unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Deal with a non-square mosaic grid when the total number of images
    // does not fit
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }
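    // Example: with 4 training images, nbImg = 5 and nbImgSqrt = 2; since
    // 2 * 2 = 4 < 5, nbWidth is bumped to 3, giving a 3 x 2 grid whose 6 cells
    // can hold the 4 training images plus the current image.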

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<unsigned char>(maxH * nbHeight, maxW * nbWidth);
  }
}

void vpKeyPoint::createImageMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
{
  // Number of images in the training database + the current image in which we
  // want to detect the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  }
  else {
    // Multiple training images, display them as a mosaic image
    // (unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Deal with a non-square mosaic grid when the total number of images
    // does not fit
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<vpRGBa>(maxH * nbHeight, maxW * nbWidth);
  }
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I_color, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask)
{
  double elapsedTime;
  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I, matImg, false);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
        rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  }
  else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I_color, matImg);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
        rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  }
  else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

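// Run every configured detector on the image and concatenate the resulting
// keypoint lists; `mask` restricts the area where keypoints are searched and
// `elapsedTime` receives the total detection time in ms.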
void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const cv::Mat &mask)
{
  double t = vpTime::measureTimeMs();
  keyPoints.clear();

  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
       it != m_detectors.end(); ++it) {
    std::vector<cv::KeyPoint> kp;

    it->second->detect(matImg, kp, mask);
    keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
  }

  elapsedTime = vpTime::measureTimeMs() - t;
}

void vpKeyPoint::display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<unsigned char> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(nullptr));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
                                 unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(nullptr));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
                                 unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(nullptr));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

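// Display the matches in a mosaic view: the training images are laid out on a
// grid with the current image placed preferably in the center cell; keypoints
// are drawn as red crosses, RANSAC inliers and outliers as green and red
// circles, and a green line joins each query keypoint to its matched training
// keypoint.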
void vpKeyPoint::displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
                                 const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
                                 unsigned int lineThickness)
{
  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
    // No training images, so return
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  // Number of images in the training database + the current image in which we
  // want to detect the object
  int nbImg = (int)(m_mapOfImages.size() + 1);

  if (nbImg == 2) {
    // Only one training image, so we display the matching result side by side
    displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
  }
  else {
    // Multiple training images, display them as a mosaic image
    int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); // (int)std::floor(std::sqrt((double)nbImg) + 0.5);
    int nbWidth = nbImgSqrt;
    int nbHeight = nbImgSqrt;

    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    std::map<int, int> mapOfImageIdIndex;
    int cpt = 0;
    unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it, cpt++) {
      mapOfImageIdIndex[it->first] = cpt;

      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    // Indexes of the current image in the grid, computed to place the image
    // preferably in the center of the mosaic grid
    int medianI = nbHeight / 2;
    int medianJ = nbWidth / 2;
    int medianIndex = medianI * nbWidth + medianJ;
    for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
      vpImagePoint topLeftCorner;
      int current_class_id_index = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
      }
      else {
        // Shift by one the index of the training images that come after the
        // current image
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
      }

      int indexI = current_class_id_index / nbWidth;
      int indexJ = current_class_id_index - (indexI * nbWidth);
      topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);

      // Display cross for keypoints in the learning database
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }

    vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
    for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
      // Display cross for keypoints detected in the current image
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }
    for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
      // Display green circle for RANSAC inliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
                               (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
    }
    for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
      // Display red circle for RANSAC outliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
                               (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
    }

    for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
      int current_class_id = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
      }
      else {
        // Shift by one the index of the training images that come after the
        // current image
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
      }

      int indexI = current_class_id / nbWidth;
      int indexJ = current_class_id - (indexI * nbWidth);

      vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
                       (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
      vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
                         (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);

      // Draw a line between the matching keypoints detected in the current
      // image and those detected in the training images
      vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
    }
  }
}

void vpKeyPoint::displayMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching,
                                 const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
                                 unsigned int lineThickness)
{
  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
    // No training images, so return
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  // Number of images in the training database + the current image in which we
  // want to detect the object
  int nbImg = (int)(m_mapOfImages.size() + 1);

  if (nbImg == 2) {
    // Only one training image, so we display the matching result side by side
    displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
  }
  else {
    // Multiple training images, display them as a mosaic image
    int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); // (int)std::floor(std::sqrt((double)nbImg) + 0.5);
    int nbWidth = nbImgSqrt;
    int nbHeight = nbImgSqrt;

    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    std::map<int, int> mapOfImageIdIndex;
    int cpt = 0;
    unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it, cpt++) {
      mapOfImageIdIndex[it->first] = cpt;

      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    // Indexes of the current image in the grid, computed to place the image
    // preferably in the center of the mosaic grid
    int medianI = nbHeight / 2;
    int medianJ = nbWidth / 2;
    int medianIndex = medianI * nbWidth + medianJ;
    for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
      vpImagePoint topLeftCorner;
      int current_class_id_index = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
      }
      else {
        // Shift by one the index of the training images that come after the
        // current image
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
      }

      int indexI = current_class_id_index / nbWidth;
      int indexJ = current_class_id_index - (indexI * nbWidth);
      topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);

      // Display cross for keypoints in the learning database
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }

    vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
    for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
      // Display cross for keypoints detected in the current image
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }
    for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
      // Display green circle for RANSAC inliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
                               (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
    }
    for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
      // Display red circle for RANSAC outliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
                               (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
    }

    for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
      int current_class_id = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
      }
      else {
        // Shift by one the index of the training images that come after the
        // current image
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
      }

      int indexI = current_class_id / nbWidth;
      int indexJ = current_class_id - (indexI * nbWidth);

      vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
                       (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
      vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
                         (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);

      // Draw a line between the matching keypoints detected in the current
      // image and those detected in the training images
      vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
    }
  }
}

1395 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1396  std::vector<cv::Point3f> *trainPoints)
1397 {
1398  double elapsedTime;
1399  extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1400 }
1401 
1402 void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1403  std::vector<cv::Point3f> *trainPoints)
1404 {
1405  double elapsedTime;
1406  extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
1407 }
1408 
1409 void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1410  std::vector<cv::Point3f> *trainPoints)
1411 {
1412  double elapsedTime;
1413  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1414 }
1415 
1416 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1417  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1418 {
1419  cv::Mat matImg;
1420  vpImageConvert::convert(I, matImg, false);
1421  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1422 }
1423 
1424 void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1425  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1426 {
1427  cv::Mat matImg;
1428  vpImageConvert::convert(I_color, matImg);
1429  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1430 }
1431 
1432 void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1433  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1434 {
1435  double t = vpTime::measureTimeMs();
1436  bool first = true;
1437 
1438  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1439  itd != m_extractors.end(); ++itd) {
1440  if (first) {
1441  first = false;
1442  // Check if we have 3D object points information
1443  if (trainPoints != nullptr && !trainPoints->empty()) {
 1444  // Copy the input list of keypoints; keypoints for which a descriptor
 1445  // cannot be computed are removed by the compute() function
1446  std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1447 
1448  // Extract descriptors for the given list of keypoints
1449  itd->second->compute(matImg, keyPoints, descriptors);
1450 
1451  if (keyPoints.size() != keyPoints_tmp.size()) {
1452  // Keypoints have been removed
1453  // Store the hash of a keypoint as the key and the index of the
1454  // keypoint as the value
1455  std::map<size_t, size_t> mapOfKeypointHashes;
1456  size_t cpt = 0;
1457  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1458  ++it, cpt++) {
1459  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1460  }
1461 
1462  std::vector<cv::Point3f> trainPoints_tmp;
1463  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1464  if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1465  trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1466  }
1467  }
1468 
 1469  // Copy trainPoints_tmp back to the trainPoints output
1470  *trainPoints = trainPoints_tmp;
1471  }
1472  }
1473  else {
1474  // Extract descriptors for the given list of keypoints
1475  itd->second->compute(matImg, keyPoints, descriptors);
1476  }
1477  }
1478  else {
 1479  // Copy the input list of keypoints; keypoints for which a descriptor
 1480  // cannot be computed are removed by the compute() function
1481  std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1482 
1483  cv::Mat desc;
1484  // Extract descriptors for the given list of keypoints
1485  itd->second->compute(matImg, keyPoints, desc);
1486 
1487  if (keyPoints.size() != keyPoints_tmp.size()) {
1488  // Keypoints have been removed
1489  // Store the hash of a keypoint as the key and the index of the
1490  // keypoint as the value
1491  std::map<size_t, size_t> mapOfKeypointHashes;
1492  size_t cpt = 0;
1493  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1494  ++it, cpt++) {
1495  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1496  }
1497 
1498  std::vector<cv::Point3f> trainPoints_tmp;
1499  cv::Mat descriptors_tmp;
1500  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1501  if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1502  if (trainPoints != nullptr && !trainPoints->empty()) {
1503  trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1504  }
1505 
1506  if (!descriptors.empty()) {
1507  descriptors_tmp.push_back(descriptors.row((int)mapOfKeypointHashes[myKeypointHash(*it)]));
1508  }
1509  }
1510  }
1511 
1512  if (trainPoints != nullptr) {
 1513  // Copy trainPoints_tmp back to the trainPoints output
1514  *trainPoints = trainPoints_tmp;
1515  }
1516  // Copy descriptors_tmp to descriptors
1517  descriptors_tmp.copyTo(descriptors);
1518  }
1519 
1520  // Merge descriptors horizontally
1521  if (descriptors.empty()) {
1522  desc.copyTo(descriptors);
1523  }
1524  else {
1525  cv::hconcat(descriptors, desc, descriptors);
1526  }
1527  }
1528  }
1529 
1530  if (keyPoints.size() != (size_t)descriptors.rows) {
 1531  std::cerr << "Error: keyPoints.size() (" << keyPoints.size() << ") != descriptors.rows (" << descriptors.rows << ")" << std::endl;
1532  }
1533  elapsedTime = vpTime::measureTimeMs() - t;
1534 }
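/*
 * A minimal usage sketch of the detect/extract pipeline above (hypothetical
 * image file name, assuming the visp_io module and an OpenCV build providing
 * the "ORB" detector/extractor are available):
 *
 * \code
 * #include <visp3/io/vpImageIo.h>
 * #include <visp3/vision/vpKeyPoint.h>
 *
 * int main()
 * {
 *   vpImage<unsigned char> I;
 *   vpImageIo::read(I, "image.png"); // hypothetical input image
 *
 *   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
 *   std::vector<cv::KeyPoint> keyPoints;
 *   cv::Mat descriptors;
 *   keypoint.detect(I, keyPoints);               // detect keypoints first
 *   keypoint.extract(I, keyPoints, descriptors); // then compute their descriptors
 *   // After extraction, descriptors.rows equals keyPoints.size()
 * }
 * \endcode
 */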
1535 
1536 void vpKeyPoint::filterMatches()
1537 {
1538  std::vector<cv::KeyPoint> queryKpts;
1539  std::vector<cv::Point3f> trainPts;
1540  std::vector<cv::DMatch> m;
1541 
1542  if (m_useKnn) {
 1544  // Note: DBL_MAX is used instead of std::numeric_limits<double>::max(),
 1545  // which creates an error under Windows unless #undef max is added
 1546  double min_dist = DBL_MAX;
1547  double mean = 0.0;
1548  std::vector<double> distance_vec(m_knnMatches.size());
1549 
1550  if (m_filterType == stdAndRatioDistanceThreshold) {
1551  for (size_t i = 0; i < m_knnMatches.size(); i++) {
1552  double dist = m_knnMatches[i][0].distance;
1553  mean += dist;
1554  distance_vec[i] = dist;
1555 
1556  if (dist < min_dist) {
1557  min_dist = dist;
1558  }
1562  }
1563  mean /= m_queryDescriptors.rows;
1564  }
1565 
 1566  double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
 1567  double stdev = distance_vec.empty() ? 0.0 : std::sqrt(sq_sum / distance_vec.size() - mean * mean);
 1568  double threshold = min_dist + stdev; // only used when m_filterType == stdAndRatioDistanceThreshold
1569 
1570  for (size_t i = 0; i < m_knnMatches.size(); i++) {
1571  if (m_knnMatches[i].size() >= 2) {
1572  // Calculate ratio of the descriptor distance between the two nearest
1573  // neighbors of the keypoint
1574  float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
1579  double dist = m_knnMatches[i][0].distance;
1580 
1581  if (ratio < m_matchingRatioThreshold || (m_filterType == stdAndRatioDistanceThreshold && dist < threshold)) {
1582  m.push_back(cv::DMatch((int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
1583 
1584  if (!m_trainPoints.empty()) {
1585  trainPts.push_back(m_trainPoints[(size_t)m_knnMatches[i][0].trainIdx]);
1586  }
1587  queryKpts.push_back(m_queryKeyPoints[(size_t)m_knnMatches[i][0].queryIdx]);
1588  }
1589  }
1590  }
1591  }
1592  else {
 1594  // Note: DBL_MAX is used instead of std::numeric_limits<double>::max(),
 1595  // which creates an error under Windows unless #undef max is added
 1596  double min_dist = DBL_MAX;
1597  double mean = 0.0;
1598  std::vector<double> distance_vec(m_matches.size());
1599  for (size_t i = 0; i < m_matches.size(); i++) {
1600  double dist = m_matches[i].distance;
1601  mean += dist;
1602  distance_vec[i] = dist;
1603 
1604  if (dist < min_dist) {
1605  min_dist = dist;
1606  }
1610  }
1611  mean /= m_queryDescriptors.rows;
1612 
 1613  double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
 1614  double stdev = distance_vec.empty() ? 0.0 : std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1615 
 1616  // Define the threshold: keep every match whose descriptor distance falls
 1617  // below a factor of the minimum descriptor distance (over all the query
 1618  // keypoints), or below the minimum descriptor distance plus the standard
 1619  // deviation (computed over all the query descriptor distances)
1621  double threshold =
1622  m_filterType == constantFactorDistanceThreshold ? m_matchingFactorThreshold * min_dist : min_dist + stdev;
1623 
1624  for (size_t i = 0; i < m_matches.size(); i++) {
1625  if (m_matches[i].distance <= threshold) {
1626  m.push_back(cv::DMatch((int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
1627 
1628  if (!m_trainPoints.empty()) {
1629  trainPts.push_back(m_trainPoints[(size_t)m_matches[i].trainIdx]);
1630  }
1631  queryKpts.push_back(m_queryKeyPoints[(size_t)m_matches[i].queryIdx]);
1632  }
1633  }
1634  }
1635 
1636  if (m_useSingleMatchFilter) {
1637  // Eliminate matches where multiple query keypoints are matched to the
1638  // same train keypoint
1639  std::vector<cv::DMatch> mTmp;
1640  std::vector<cv::Point3f> trainPtsTmp;
1641  std::vector<cv::KeyPoint> queryKptsTmp;
1642 
1643  std::map<int, int> mapOfTrainIdx;
1644  // Count the number of query points matched to the same train point
1645  for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1646  mapOfTrainIdx[it->trainIdx]++;
1647  }
1648 
1649  // Keep matches with only one correspondence
1650  for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1651  if (mapOfTrainIdx[it->trainIdx] == 1) {
1652  mTmp.push_back(cv::DMatch((int)queryKptsTmp.size(), it->trainIdx, it->distance));
1653 
1654  if (!m_trainPoints.empty()) {
1655  trainPtsTmp.push_back(m_trainPoints[(size_t)it->trainIdx]);
1656  }
1657  queryKptsTmp.push_back(queryKpts[(size_t)it->queryIdx]);
1658  }
1659  }
1660 
1661  m_filteredMatches = mTmp;
1662  m_objectFilteredPoints = trainPtsTmp;
1663  m_queryFilteredKeyPoints = queryKptsTmp;
1664  }
1665  else {
1666  m_filteredMatches = m;
1667  m_objectFilteredPoints = trainPts;
1668  m_queryFilteredKeyPoints = queryKpts;
1669  }
1670 }
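/*
 * The ratio test above follows Lowe's criterion: a match is kept when its
 * best descriptor distance is clearly smaller than the distance to the
 * second-best candidate. A standalone sketch of the same idea using only
 * OpenCV types (the 0.85 threshold mirrors the default ratio threshold):
 *
 * \code
 * std::vector<std::vector<cv::DMatch> > knnMatches; // filled by knnMatch(..., 2)
 * std::vector<cv::DMatch> keptMatches;
 * const float ratioThreshold = 0.85f;
 * for (size_t i = 0; i < knnMatches.size(); i++) {
 *   if (knnMatches[i].size() >= 2 &&
 *       knnMatches[i][0].distance / knnMatches[i][1].distance < ratioThreshold) {
 *     keptMatches.push_back(knnMatches[i][0]);
 *   }
 * }
 * \endcode
 */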
1671 
1672 void vpKeyPoint::getObjectPoints(std::vector<cv::Point3f> &objectPoints) const
1673 {
1674  objectPoints = m_objectFilteredPoints;
1675 }
1676 
1677 void vpKeyPoint::getObjectPoints(std::vector<vpPoint> &objectPoints) const
1678 {
1679  vpConvert::convertFromOpenCV(m_objectFilteredPoints, objectPoints);
1680 }
1681 
1682 void vpKeyPoint::getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints, bool matches) const
1683 {
1684  if (matches) {
1685  keyPoints = m_queryFilteredKeyPoints;
1686  }
1687  else {
1688  keyPoints = m_queryKeyPoints;
1689  }
1690 }
1691 
1692 void vpKeyPoint::getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints, bool matches) const
1693 {
1694  if (matches) {
1695  keyPoints = m_currentImagePointsList;
1696  }
1697  else {
1698  vpConvert::convertFromOpenCV(m_queryKeyPoints, keyPoints);
1699  }
1700 }
1701 
1702 void vpKeyPoint::getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const { keyPoints = m_trainKeyPoints; }
1703 
1704 void vpKeyPoint::getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const { keyPoints = m_referenceImagePointsList; }
1705 
1706 void vpKeyPoint::getTrainPoints(std::vector<cv::Point3f> &points) const { points = m_trainPoints; }
1707 
1708 void vpKeyPoint::getTrainPoints(std::vector<vpPoint> &points) const { points = m_trainVpPoints; }
1709 
1710 void vpKeyPoint::init()
1711 {
1712 // Require 2.4.0 <= opencv < 3.0.0
1713 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
1714  // The following line must be called in order to use SIFT or SURF
1715  if (!cv::initModule_nonfree()) {
1716  std::cerr << "Cannot init module non free, SIFT or SURF cannot be used." << std::endl;
1717  }
1718 #endif
1719 
 1720  // Use k-nearest neighbors (knn) to retrieve the two best matches for a
 1721  // keypoint, so this is only needed for the ratio-based filtering methods
1722  if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
1723  m_useKnn = true;
1724  }
1725 
1726  initDetectors(m_detectorNames);
1727  initExtractors(m_extractorNames);
1728  initMatcher(m_matcherName);
1729 }
1730 
1731 void vpKeyPoint::initDetector(const std::string &detectorName)
1732 {
1733 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
1734  m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
1735 
1736  if (m_detectors[detectorName] == nullptr) {
1737  std::stringstream ss_msg;
1738  ss_msg << "Fail to initialize the detector: " << detectorName
1739  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
1740  throw vpException(vpException::fatalError, ss_msg.str());
1741  }
1742 #else
1743  std::string detectorNameTmp = detectorName;
1744  std::string pyramid = "Pyramid";
1745  std::size_t pos = detectorName.find(pyramid);
1746  bool usePyramid = false;
1747  if (pos != std::string::npos) {
1748  detectorNameTmp = detectorName.substr(pos + pyramid.size());
1749  usePyramid = true;
1750  }
1751 
1752  if (detectorNameTmp == "SIFT") {
1753 #if (VISP_HAVE_OPENCV_VERSION >= 0x040500) // OpenCV >= 4.5.0
1754  cv::Ptr<cv::FeatureDetector> siftDetector = cv::SiftFeatureDetector::create();
1755  if (!usePyramid) {
1756  m_detectors[detectorNameTmp] = siftDetector;
1757  }
1758  else {
1759  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1760  }
1761 #else
1762 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
1763  (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1764  // SIFT is no more patented since 09/03/2020
1765  cv::Ptr<cv::FeatureDetector> siftDetector;
1766  if (m_maxFeatures > 0) {
1767 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1768  siftDetector = cv::SIFT::create(m_maxFeatures);
1769 #else
1770  siftDetector = cv::xfeatures2d::SIFT::create(m_maxFeatures);
1771 #endif
1772  }
1773  else {
1774 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1775  siftDetector = cv::SIFT::create();
1776 #else
1777  siftDetector = cv::xfeatures2d::SIFT::create();
1778 #endif
1779  }
1780  if (!usePyramid) {
1781  m_detectors[detectorNameTmp] = siftDetector;
1782  }
1783  else {
1784  std::cerr << "You should not use SIFT with Pyramid feature detection!" << std::endl;
1785  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1786  }
1787 #else
1788  std::stringstream ss_msg;
1789  ss_msg << "Fail to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1790  << " was not build with xFeatures2d module.";
1791  throw vpException(vpException::fatalError, ss_msg.str());
1792 #endif
1793 #endif
1794  }
1795  else if (detectorNameTmp == "SURF") {
1796 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1797  cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
1798  if (!usePyramid) {
1799  m_detectors[detectorNameTmp] = surfDetector;
1800  }
1801  else {
1802  std::cerr << "You should not use SURF with Pyramid feature detection!" << std::endl;
1803  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
1804  }
1805 #else
1806  std::stringstream ss_msg;
1807  ss_msg << "Fail to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1808  << " was not build with xFeatures2d module.";
1809  throw vpException(vpException::fatalError, ss_msg.str());
1810 #endif
1811  }
1812  else if (detectorNameTmp == "FAST") {
1813  cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
1814  if (!usePyramid) {
1815  m_detectors[detectorNameTmp] = fastDetector;
1816  }
1817  else {
1818  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1819  }
1820  }
1821  else if (detectorNameTmp == "MSER") {
 1822  cv::Ptr<cv::FeatureDetector> mserDetector = cv::MSER::create();
 1823  if (!usePyramid) {
 1824  m_detectors[detectorNameTmp] = mserDetector;
 1825  }
 1826  else {
 1827  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(mserDetector);
 1828  }
1829  }
1830  else if (detectorNameTmp == "ORB") {
1831  cv::Ptr<cv::FeatureDetector> orbDetector;
1832  if (m_maxFeatures > 0) {
1833  orbDetector = cv::ORB::create(m_maxFeatures);
1834  }
1835  else {
1836  orbDetector = cv::ORB::create();
1837  }
1838  if (!usePyramid) {
1839  m_detectors[detectorNameTmp] = orbDetector;
1840  }
1841  else {
1842  std::cerr << "You should not use ORB with Pyramid feature detection!" << std::endl;
1843  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
1844  }
1845  }
1846  else if (detectorNameTmp == "BRISK") {
1847  cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
1848  if (!usePyramid) {
1849  m_detectors[detectorNameTmp] = briskDetector;
1850  }
1851  else {
1852  std::cerr << "You should not use BRISK with Pyramid feature detection!" << std::endl;
1853  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
1854  }
1855  }
1856  else if (detectorNameTmp == "KAZE") {
1857  cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
1858  if (!usePyramid) {
1859  m_detectors[detectorNameTmp] = kazeDetector;
1860  }
1861  else {
1862  std::cerr << "You should not use KAZE with Pyramid feature detection!" << std::endl;
1863  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
1864  }
1865  }
1866  else if (detectorNameTmp == "AKAZE") {
1867  cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
1868  if (!usePyramid) {
1869  m_detectors[detectorNameTmp] = akazeDetector;
1870  }
1871  else {
1872  std::cerr << "You should not use AKAZE with Pyramid feature detection!" << std::endl;
1873  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
1874  }
1875  }
1876  else if (detectorNameTmp == "GFTT") {
1877  cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
1878  if (!usePyramid) {
1879  m_detectors[detectorNameTmp] = gfttDetector;
1880  }
1881  else {
1882  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
1883  }
1884  }
1885  else if (detectorNameTmp == "SimpleBlob") {
1886  cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
1887  if (!usePyramid) {
1888  m_detectors[detectorNameTmp] = simpleBlobDetector;
1889  }
1890  else {
1891  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
1892  }
1893  }
1894  else if (detectorNameTmp == "STAR") {
1895 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1896  cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
1897  if (!usePyramid) {
1898  m_detectors[detectorNameTmp] = starDetector;
1899  }
1900  else {
1901  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
1902  }
1903 #else
1904  std::stringstream ss_msg;
1905  ss_msg << "Fail to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1906  << " was not build with xFeatures2d module.";
1907  throw vpException(vpException::fatalError, ss_msg.str());
1908 #endif
1909  }
1910  else if (detectorNameTmp == "AGAST") {
1911  cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
1912  if (!usePyramid) {
1913  m_detectors[detectorNameTmp] = agastDetector;
1914  }
1915  else {
1916  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
1917  }
1918  }
1919  else if (detectorNameTmp == "MSD") {
1920 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
1921 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
1922  cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
1923  if (!usePyramid) {
1924  m_detectors[detectorNameTmp] = msdDetector;
1925  }
1926  else {
1927  std::cerr << "You should not use MSD with Pyramid feature detection!" << std::endl;
1928  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
1929  }
1930 #else
1931  std::stringstream ss_msg;
1932  ss_msg << "Fail to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1933  << " was not build with xFeatures2d module.";
1934  throw vpException(vpException::fatalError, ss_msg.str());
1935 #endif
1936 #else
 1937  std::stringstream ss_msg;
 1938  ss_msg << "Feature " << detectorName << " is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << " (requires OpenCV >= 3.1).";
 1939  throw vpException(vpException::fatalError, ss_msg.str()); // report the missing feature to the caller
1940 #endif
1941  }
1942  else {
1943  std::cerr << "The detector:" << detectorNameTmp << " is not available." << std::endl;
1944  }
1945 
1946  bool detectorInitialized = false;
1947  if (!usePyramid) {
1948  // if not null and to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
1949  detectorInitialized = !m_detectors[detectorNameTmp].empty();
1950  }
1951  else {
1952  // if not null and to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
1953  detectorInitialized = !m_detectors[detectorName].empty();
1954  }
1955 
1956  if (!detectorInitialized) {
1957  std::stringstream ss_msg;
1958  ss_msg << "Fail to initialize the detector: " << detectorNameTmp
1959  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
1960  throw vpException(vpException::fatalError, ss_msg.str());
1961  }
1962 #endif
1963 }
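/*
 * The detector name acts as a factory key, and the "Pyramid" prefix wraps the
 * detector so that it is run on several downscaled levels of the input image.
 * A sketch of selecting detectors by name from user code (detector
 * availability depends on the OpenCV build):
 *
 * \code
 * vpKeyPoint keypoint;
 * keypoint.setDetector("FAST");        // plain FAST detector
 * keypoint.setDetector("PyramidFAST"); // FAST applied on an image pyramid
 * \endcode
 */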
1964 
1965 void vpKeyPoint::initDetectors(const std::vector<std::string> &detectorNames)
1966 {
1967  for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
1968  initDetector(*it);
1969  }
1970 }
1971 
1972 void vpKeyPoint::initExtractor(const std::string &extractorName)
1973 {
1974 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
1975  m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
1976 #else
1977  if (extractorName == "SIFT") {
1978 #if (VISP_HAVE_OPENCV_VERSION >= 0x040500) // OpenCV >= 4.5.0
1979  m_extractors[extractorName] = cv::SIFT::create();
1980 #else
1981 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) || (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || \
1982  (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1983  // SIFT is no more patented since 09/03/2020
1984 #if (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
1985  m_extractors[extractorName] = cv::SIFT::create();
1986 #else
1987  m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
1988 #endif
1989 #else
1990  std::stringstream ss_msg;
1991  ss_msg << "Fail to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1992  << " was not build with xFeatures2d module.";
1993  throw vpException(vpException::fatalError, ss_msg.str());
1994 #endif
1995 #endif
1996  }
1997  else if (extractorName == "SURF") {
1998 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1999  // Use extended set of SURF descriptors (128 instead of 64)
2000  m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3, true);
2001 #else
2002  std::stringstream ss_msg;
2003  ss_msg << "Fail to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2004  << " was not build with xFeatures2d module.";
2005  throw vpException(vpException::fatalError, ss_msg.str());
2006 #endif
2007  }
2008  else if (extractorName == "ORB") {
2009  m_extractors[extractorName] = cv::ORB::create();
2010  }
2011  else if (extractorName == "BRISK") {
2012  m_extractors[extractorName] = cv::BRISK::create();
2013  }
2014  else if (extractorName == "FREAK") {
2015 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2016  m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2017 #else
2018  std::stringstream ss_msg;
 2019  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
 2020  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2021  throw vpException(vpException::fatalError, ss_msg.str());
2022 #endif
2023  }
2024  else if (extractorName == "BRIEF") {
2025 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2026  m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2027 #else
2028  std::stringstream ss_msg;
 2029  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
 2030  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2031  throw vpException(vpException::fatalError, ss_msg.str());
2032 #endif
2033  }
2034  else if (extractorName == "KAZE") {
2035  m_extractors[extractorName] = cv::KAZE::create();
2036  }
2037  else if (extractorName == "AKAZE") {
2038  m_extractors[extractorName] = cv::AKAZE::create();
2039  }
2040  else if (extractorName == "DAISY") {
2041 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2042  m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2043 #else
2044  std::stringstream ss_msg;
 2045  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
 2046  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2047  throw vpException(vpException::fatalError, ss_msg.str());
2048 #endif
2049  }
2050  else if (extractorName == "LATCH") {
2051 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2052  m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2053 #else
2054  std::stringstream ss_msg;
 2055  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
 2056  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2057  throw vpException(vpException::fatalError, ss_msg.str());
2058 #endif
2059  }
2060  else if (extractorName == "LUCID") {
2061 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2062  // m_extractors[extractorName] = cv::xfeatures2d::LUCID::create(1, 2);
2063  // Not possible currently, need a color image
2064  throw vpException(vpException::badValue, "Not possible currently as it needs a color image.");
2065 #else
2066  std::stringstream ss_msg;
 2067  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
 2068  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2069  throw vpException(vpException::fatalError, ss_msg.str());
2070 #endif
2071  }
2072  else if (extractorName == "VGG") {
2073 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2074 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2075  m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2076 #else
2077  std::stringstream ss_msg;
 2078  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
 2079  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2080  throw vpException(vpException::fatalError, ss_msg.str());
2081 #endif
2082 #else
2083  std::stringstream ss_msg;
 2084  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version is " << std::hex
 2085  << VISP_HAVE_OPENCV_VERSION << " but at least OpenCV 3.2 is required.";
2086  throw vpException(vpException::fatalError, ss_msg.str());
2087 #endif
2088  }
2089  else if (extractorName == "BoostDesc") {
2090 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2091 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2092  m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2093 #else
2094  std::stringstream ss_msg;
 2095  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
 2096  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2097  throw vpException(vpException::fatalError, ss_msg.str());
2098 #endif
2099 #else
2100  std::stringstream ss_msg;
 2101  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version is " << std::hex
 2102  << VISP_HAVE_OPENCV_VERSION << " but at least OpenCV 3.2 is required.";
2103  throw vpException(vpException::fatalError, ss_msg.str());
2104 #endif
2105  }
2106  else {
2107  std::cerr << "The extractor:" << extractorName << " is not available." << std::endl;
2108  }
2109 #endif
2110 
2111  if (!m_extractors[extractorName]) { // if null
2112  std::stringstream ss_msg;
 2113  ss_msg << "Failed to initialize the extractor: " << extractorName
 2114  << ", or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2115  throw vpException(vpException::fatalError, ss_msg.str());
2116  }
2117 
2118 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2119  if (extractorName == "SURF") {
2120  // Use extended set of SURF descriptors (128 instead of 64)
2121  m_extractors[extractorName]->set("extended", 1);
2122  }
2123 #endif
2124 }
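/*
 * Note that for SURF the extended 128-element descriptor is requested above
 * (last argument of create() set to true). A sketch of the size difference,
 * assuming OpenCV was built with the xfeatures2d module:
 *
 * \code
 * cv::Ptr<cv::xfeatures2d::SURF> surf64 = cv::xfeatures2d::SURF::create(100, 4, 3, false);
 * cv::Ptr<cv::xfeatures2d::SURF> surf128 = cv::xfeatures2d::SURF::create(100, 4, 3, true);
 * // surf64->descriptorSize() returns 64 while surf128->descriptorSize() returns 128
 * \endcode
 */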
2125 
2126 void vpKeyPoint::initExtractors(const std::vector<std::string> &extractorNames)
2127 {
2128  for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
2129  initExtractor(*it);
2130  }
2131 
2132  int descriptorType = CV_32F;
2133  bool firstIteration = true;
2134  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2135  it != m_extractors.end(); ++it) {
2136  if (firstIteration) {
2137  firstIteration = false;
2138  descriptorType = it->second->descriptorType();
2139  }
2140  else {
2141  if (descriptorType != it->second->descriptorType()) {
 2142  throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2143  }
2144  }
2145  }
2146 }
2147 
2148 void vpKeyPoint::initFeatureNames()
2149 {
 2150 // Create the maps from enum values to detector/descriptor string names
2151 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2152  m_mapOfDetectorNames[DETECTOR_FAST] = "FAST";
2153  m_mapOfDetectorNames[DETECTOR_MSER] = "MSER";
2154  m_mapOfDetectorNames[DETECTOR_ORB] = "ORB";
2155  m_mapOfDetectorNames[DETECTOR_BRISK] = "BRISK";
2156  m_mapOfDetectorNames[DETECTOR_GFTT] = "GFTT";
2157  m_mapOfDetectorNames[DETECTOR_SimpleBlob] = "SimpleBlob";
2158 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2159  m_mapOfDetectorNames[DETECTOR_STAR] = "STAR";
2160 #endif
2161 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2162  (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2163  m_mapOfDetectorNames[DETECTOR_SIFT] = "SIFT";
2164 #endif
2165 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2166  m_mapOfDetectorNames[DETECTOR_SURF] = "SURF";
2167 #endif
2168 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2169  m_mapOfDetectorNames[DETECTOR_KAZE] = "KAZE";
2170  m_mapOfDetectorNames[DETECTOR_AKAZE] = "AKAZE";
2171  m_mapOfDetectorNames[DETECTOR_AGAST] = "AGAST";
2172 #endif
2173 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2174  m_mapOfDetectorNames[DETECTOR_MSD] = "MSD";
2175 #endif
2176 #endif
2177 
2178 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2179  m_mapOfDescriptorNames[DESCRIPTOR_ORB] = "ORB";
2180  m_mapOfDescriptorNames[DESCRIPTOR_BRISK] = "BRISK";
2181 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2182  m_mapOfDescriptorNames[DESCRIPTOR_FREAK] = "FREAK";
2183  m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] = "BRIEF";
2184 #endif
2185 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
2186  (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
2187  m_mapOfDescriptorNames[DESCRIPTOR_SIFT] = "SIFT";
2188 #endif
2189 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2190  m_mapOfDescriptorNames[DESCRIPTOR_SURF] = "SURF";
2191 #endif
2192 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2193  m_mapOfDescriptorNames[DESCRIPTOR_KAZE] = "KAZE";
2194  m_mapOfDescriptorNames[DESCRIPTOR_AKAZE] = "AKAZE";
2195 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2196  m_mapOfDescriptorNames[DESCRIPTOR_DAISY] = "DAISY";
2197  m_mapOfDescriptorNames[DESCRIPTOR_LATCH] = "LATCH";
2198 #endif
2199 #endif
2200 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2201  m_mapOfDescriptorNames[DESCRIPTOR_VGG] = "VGG";
2202  m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] = "BoostDesc";
2203 #endif
2204 #endif
2205 }
2206 
2207 void vpKeyPoint::initMatcher(const std::string &matcherName)
2208 {
2209  int descriptorType = CV_32F;
2210  bool firstIteration = true;
2211  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2212  it != m_extractors.end(); ++it) {
2213  if (firstIteration) {
2214  firstIteration = false;
2215  descriptorType = it->second->descriptorType();
2216  }
2217  else {
2218  if (descriptorType != it->second->descriptorType()) {
 2219  throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2220  }
2221  }
2222  }
2223 
2224  if (matcherName == "FlannBased") {
2225  if (m_extractors.empty()) {
2226  std::cout << "Warning: No extractor initialized, by default use "
2227  "floating values (CV_32F) "
2228  "for descriptor type !"
2229  << std::endl;
2230  }
2231 
2232  if (descriptorType == CV_8U) {
2233 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2234  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2235 #else
2236  m_matcher = new cv::FlannBasedMatcher(new cv::flann::LshIndexParams(12, 20, 2));
2237 #endif
2238  }
2239  else {
2240 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2241  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2242 #else
2243  m_matcher = new cv::FlannBasedMatcher(new cv::flann::KDTreeIndexParams());
2244 #endif
2245  }
2246  }
2247  else {
2248  m_matcher = cv::DescriptorMatcher::create(matcherName);
2249  }
2250 
2251 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2252  if (m_matcher != nullptr && !m_useKnn && matcherName == "BruteForce") {
2253  m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
2254  }
2255 #endif
2256 
2257  if (!m_matcher) { // if null
2258  std::stringstream ss_msg;
2259  ss_msg << "Fail to initialize the matcher: " << matcherName
2260  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2261  throw vpException(vpException::fatalError, ss_msg.str());
2262  }
2263 }
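/*
 * Binary descriptors (CV_8U, e.g. ORB or BRISK) are indexed with LSH, whereas
 * floating-point descriptors use a KD-tree. A standalone helper sketch that
 * makes the same choice with plain OpenCV calls (helper name is illustrative):
 *
 * \code
 * cv::Ptr<cv::DescriptorMatcher> makeFlannMatcher(int descriptorType)
 * {
 *   if (descriptorType == CV_8U) {
 *     // Locality-sensitive hashing suits binary descriptors (Hamming space)
 *     return cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
 *   }
 *   // KD-tree suits floating-point descriptors (L2 space)
 *   return cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
 * }
 * \endcode
 */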
2264 
 2265 void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
 2266  vpImage<unsigned char> &IMatching)
2267 {
2268  vpImagePoint topLeftCorner(0, 0);
2269  IMatching.insert(IRef, topLeftCorner);
2270  topLeftCorner = vpImagePoint(0, IRef.getWidth());
2271  IMatching.insert(ICurrent, topLeftCorner);
2272 }
2273 
 2274 void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent,
 2275  vpImage<vpRGBa> &IMatching)
2276 {
2277  vpImagePoint topLeftCorner(0, 0);
2278  IMatching.insert(IRef, topLeftCorner);
2279  topLeftCorner = vpImagePoint(0, IRef.getWidth());
2280  IMatching.insert(ICurrent, topLeftCorner);
2281 }
2282 
 2283 void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
 2284 {
 2285  // Number of images in the training database plus the current image in
 2286  // which we want to detect the object
2287  int nbImg = (int)(m_mapOfImages.size() + 1);
2288 
2289  if (m_mapOfImages.empty()) {
2290  std::cerr << "There is no training image loaded !" << std::endl;
2291  return;
2292  }
2293 
2294  if (nbImg == 2) {
2295  // Only one training image, so we display them side by side
2296  insertImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
2297  }
2298  else {
2299  // Multiple training images, display them as a mosaic image
2300  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
2301  int nbWidth = nbImgSqrt;
2302  int nbHeight = nbImgSqrt;
2303 
2304  if (nbImgSqrt * nbImgSqrt < nbImg) {
2305  nbWidth++;
2306  }
2307 
2308  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2309  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2310  ++it) {
2311  if (maxW < it->second.getWidth()) {
2312  maxW = it->second.getWidth();
2313  }
2314 
2315  if (maxH < it->second.getHeight()) {
2316  maxH = it->second.getHeight();
2317  }
2318  }
2319 
 2320  // Index of the current image in the grid, computed so that the image is
 2321  // placed preferably at the center of the mosaic grid
2322  int medianI = nbHeight / 2;
2323  int medianJ = nbWidth / 2;
2324  int medianIndex = medianI * nbWidth + medianJ;
2325 
2326  int cpt = 0;
2327  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2328  ++it, cpt++) {
2329  int local_cpt = cpt;
2330  if (cpt >= medianIndex) {
 2331  // Shift by one the index of the training images that come after
 2332  // the current image
2333  local_cpt++;
2334  }
2335  int indexI = local_cpt / nbWidth;
2336  int indexJ = local_cpt - (indexI * nbWidth);
2337  vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2338 
2339  IMatching.insert(it->second, topLeftCorner);
2340  }
2341 
2342  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2343  IMatching.insert(ICurrent, topLeftCorner);
2344  }
2345 }
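/*
 * The mosaic is a near-square grid. A worked example of the grid-size
 * computation used above: with 4 training images plus the current one,
 * nbImg = 5 and nbImgSqrt = round(sqrt(5)) = 2; since 2 * 2 < 5 an extra
 * column is added, giving a 2 x 3 grid with the current image placed in the
 * center cell (medianI, medianJ) = (1, 1).
 *
 * \code
 * int nbImg = 5;
 * int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); // 2
 * int nbWidth = nbImgSqrt;
 * int nbHeight = nbImgSqrt;
 * if (nbImgSqrt * nbImgSqrt < nbImg) {
 *   nbWidth++; // 2 x 3 grid for 5 images
 * }
 * \endcode
 */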
2346 
 2347 void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
 2348 {
 2349  // Number of images in the training database plus the current image in
 2350  // which we want to detect the object
2351  int nbImg = (int)(m_mapOfImages.size() + 1);
2352 
2353  if (m_mapOfImages.empty()) {
 2354  std::cerr << "There is no training image loaded!" << std::endl;
2355  return;
2356  }
2357 
2358  if (nbImg == 2) {
2359  // Only one training image, so we display them side by side
2360  vpImage<vpRGBa> IRef;
2361  vpImageConvert::convert(m_mapOfImages.begin()->second, IRef);
2362  insertImageMatching(IRef, ICurrent, IMatching);
2363  }
2364  else {
2365  // Multiple training images, display them as a mosaic image
2366  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
2367  int nbWidth = nbImgSqrt;
2368  int nbHeight = nbImgSqrt;
2369 
2370  if (nbImgSqrt * nbImgSqrt < nbImg) {
2371  nbWidth++;
2372  }
2373 
2374  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2375  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2376  ++it) {
2377  if (maxW < it->second.getWidth()) {
2378  maxW = it->second.getWidth();
2379  }
2380 
2381  if (maxH < it->second.getHeight()) {
2382  maxH = it->second.getHeight();
2383  }
2384  }
2385 
 2386  // Index of the current image in the grid, computed so that the image is
 2387  // placed preferably at the center of the mosaic grid
2388  int medianI = nbHeight / 2;
2389  int medianJ = nbWidth / 2;
2390  int medianIndex = medianI * nbWidth + medianJ;
2391 
2392  int cpt = 0;
2393  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2394  ++it, cpt++) {
2395  int local_cpt = cpt;
2396  if (cpt >= medianIndex) {
 2397  // Shift by one the index of the training images that come after
 2398  // the current image
2399  local_cpt++;
2400  }
2401  int indexI = local_cpt / nbWidth;
2402  int indexJ = local_cpt - (indexI * nbWidth);
2403  vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2404 
2405  vpImage<vpRGBa> IRef;
2406  vpImageConvert::convert(it->second, IRef);
2407  IMatching.insert(IRef, topLeftCorner);
2408  }
2409 
2410  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2411  IMatching.insert(ICurrent, topLeftCorner);
2412  }
2413 }
2414 
2415 void vpKeyPoint::loadConfigFile(const std::string &configFile)
 2416 {
 2417  vpXmlConfigParserKeyPoint xmlp;
2418 
2419  try {
2420  // Reset detector and extractor
2421  m_detectorNames.clear();
2422  m_extractorNames.clear();
2423  m_detectors.clear();
2424  m_extractors.clear();
2425 
 2426  std::cout << " *********** Parsing the XML configuration for vpKeyPoint "
 2427  "************ "
 2428  << std::endl;
2429  xmlp.parse(configFile);
2430 
2431  m_detectorNames.push_back(xmlp.getDetectorName());
2432  m_extractorNames.push_back(xmlp.getExtractorName());
2433  m_matcherName = xmlp.getMatcherName();
2434 
2435  switch (xmlp.getMatchingMethod()) {
 2436  case vpXmlConfigParserKeyPoint::constantFactorDistanceThreshold:
 2437  m_filterType = constantFactorDistanceThreshold;
2438  break;
2439 
 2440  case vpXmlConfigParserKeyPoint::stdDistanceThreshold:
 2441  m_filterType = stdDistanceThreshold;
2442  break;
2443 
 2444  case vpXmlConfigParserKeyPoint::ratioDistanceThreshold:
 2445  m_filterType = ratioDistanceThreshold;
2446  break;
2447 
 2448  case vpXmlConfigParserKeyPoint::stdAndRatioDistanceThreshold:
 2449  m_filterType = stdAndRatioDistanceThreshold;
2450  break;
2451 
 2452  case vpXmlConfigParserKeyPoint::noFilterMatching:
 2453  m_filterType = noFilterMatching;
2454  break;
2455 
2456  default:
2457  break;
2458  }
2459 
2460  m_matchingFactorThreshold = xmlp.getMatchingFactorThreshold();
2461  m_matchingRatioThreshold = xmlp.getMatchingRatioThreshold();
2462 
2463  m_useRansacVVS = xmlp.getUseRansacVVSPoseEstimation();
2464  m_useConsensusPercentage = xmlp.getUseRansacConsensusPercentage();
2465  m_nbRansacIterations = xmlp.getNbRansacIterations();
2466  m_ransacReprojectionError = xmlp.getRansacReprojectionError();
2467  m_nbRansacMinInlierCount = xmlp.getNbRansacMinInlierCount();
2468  m_ransacThreshold = xmlp.getRansacThreshold();
2469  m_ransacConsensusPercentage = xmlp.getRansacConsensusPercentage();
2470 
2471  if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
2472  m_useKnn = true;
2473  }
2474  else {
2475  m_useKnn = false;
2476  }
2477 
2478  init();
2479  }
2480  catch (...) {
 2481  throw vpException(vpException::ioError, "Cannot open or parse the XML file \"%s\"", configFile.c_str());
2482  }
2483 }
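/*
 * A minimal usage sketch (the XML file name is hypothetical; its content must
 * follow the schema handled by vpXmlConfigParserKeyPoint):
 *
 * \code
 * vpKeyPoint keypoint;
 * keypoint.loadConfigFile("detection-config.xml");
 * \endcode
 */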
2484 
2485 void vpKeyPoint::loadLearningData(const std::string &filename, bool binaryMode, bool append)
2486 {
2487  int startClassId = 0;
2488  int startImageId = 0;
2489  if (!append) {
2490  m_trainKeyPoints.clear();
2491  m_trainPoints.clear();
2492  m_mapOfImageId.clear();
2493  m_mapOfImages.clear();
2494  }
2495  else {
2496  // In append case, find the max index of keypoint class Id
2497  for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
2498  if (startClassId < it->first) {
2499  startClassId = it->first;
2500  }
2501  }
2502 
2503  // In append case, find the max index of images Id
2504  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2505  ++it) {
2506  if (startImageId < it->first) {
2507  startImageId = it->first;
2508  }
2509  }
2510  }
2511 
2512  // Get parent directory
2513  std::string parent = vpIoTools::getParent(filename);
2514  if (!parent.empty()) {
2515  parent += "/";
2516  }
2517 
2518  if (binaryMode) {
2519  std::ifstream file(filename.c_str(), std::ifstream::binary);
2520  if (!file.is_open()) {
2521  throw vpException(vpException::ioError, "Cannot open the file.");
2522  }
2523 
2524  // Read info about training images
2525  int nbImgs = 0;
2526  vpIoTools::readBinaryValueLE(file, nbImgs);
2527 
2528 #if !defined(VISP_HAVE_MODULE_IO)
2529  if (nbImgs > 0) {
 2530  std::cout << "Warning: The learning file contains image data that will "
 2531  "not be loaded since the visp_io module "
 2532  "is not available!"
 2533  << std::endl;
2534  }
2535 #endif
2536 
2537  for (int i = 0; i < nbImgs; i++) {
2538  // Read image_id
2539  int id = 0;
2540  vpIoTools::readBinaryValueLE(file, id);
2541 
2542  int length = 0;
2543  vpIoTools::readBinaryValueLE(file, length);
2544  // Will contain the path to the training images
 2545  char *path = new char[length + 1]; // heap allocation: variable-length arrays are not standard C++
2546 
2547  for (int cpt = 0; cpt < length; cpt++) {
2548  char c;
2549  file.read((char *)(&c), sizeof(c));
2550  path[cpt] = c;
2551  }
2552  path[length] = '\0';
2553 
 2554  vpImage<unsigned char> I;
 2555 #ifdef VISP_HAVE_MODULE_IO
2556  if (vpIoTools::isAbsolutePathname(std::string(path))) {
2557  vpImageIo::read(I, path);
2558  }
2559  else {
2560  vpImageIo::read(I, parent + path);
2561  }
2562 
2563  // Add the image previously loaded only if VISP_HAVE_MODULE_IO
2564  m_mapOfImages[id + startImageId] = I;
2565 #endif
2566 
2567  // Delete path
2568  delete[] path;
2569  }
2570 
 2571  // Read whether 3D point information is saved or not
2572  int have3DInfoInt = 0;
2573  vpIoTools::readBinaryValueLE(file, have3DInfoInt);
2574  bool have3DInfo = have3DInfoInt != 0;
2575 
2576  // Read the number of descriptors
2577  int nRows = 0;
2578  vpIoTools::readBinaryValueLE(file, nRows);
2579 
2580  // Read the size of the descriptor
2581  int nCols = 0;
2582  vpIoTools::readBinaryValueLE(file, nCols);
2583 
2584  // Read the type of the descriptor
2585  int descriptorType = 5; // CV_32F
2586  vpIoTools::readBinaryValueLE(file, descriptorType);
2587 
2588  cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2589  for (int i = 0; i < nRows; i++) {
2590  // Read information about keyPoint
2591  float u, v, size, angle, response;
 2592  int octave, class_id, image_id;
 2593  vpIoTools::readBinaryValueLE(file, u);
 2594  vpIoTools::readBinaryValueLE(file, v);
2595  vpIoTools::readBinaryValueLE(file, size);
2596  vpIoTools::readBinaryValueLE(file, angle);
2597  vpIoTools::readBinaryValueLE(file, response);
2598  vpIoTools::readBinaryValueLE(file, octave);
2599  vpIoTools::readBinaryValueLE(file, class_id);
2600  vpIoTools::readBinaryValueLE(file, image_id);
2601  cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
2602  m_trainKeyPoints.push_back(keyPoint);
2603 
2604  if (image_id != -1) {
2605 #ifdef VISP_HAVE_MODULE_IO
2606  // No training images if image_id == -1
2607  m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2608 #endif
2609  }
2610 
2611  if (have3DInfo) {
2612  // Read oX, oY, oZ
2613  float oX, oY, oZ;
2614  vpIoTools::readBinaryValueLE(file, oX);
2615  vpIoTools::readBinaryValueLE(file, oY);
2616  vpIoTools::readBinaryValueLE(file, oZ);
2617  m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
2618  }
2619 
2620  for (int j = 0; j < nCols; j++) {
2621  // Read the value of the descriptor
2622  switch (descriptorType) {
2623  case CV_8U: {
2624  unsigned char value;
2625  file.read((char *)(&value), sizeof(value));
2626  trainDescriptorsTmp.at<unsigned char>(i, j) = value;
2627  } break;
2628 
2629  case CV_8S: {
2630  char value;
2631  file.read((char *)(&value), sizeof(value));
2632  trainDescriptorsTmp.at<char>(i, j) = value;
2633  } break;
2634 
2635  case CV_16U: {
2636  unsigned short int value;
2637  vpIoTools::readBinaryValueLE(file, value);
2638  trainDescriptorsTmp.at<unsigned short int>(i, j) = value;
2639  } break;
2640 
2641  case CV_16S: {
2642  short int value;
2643  vpIoTools::readBinaryValueLE(file, value);
2644  trainDescriptorsTmp.at<short int>(i, j) = value;
2645  } break;
2646 
2647  case CV_32S: {
2648  int value;
2649  vpIoTools::readBinaryValueLE(file, value);
2650  trainDescriptorsTmp.at<int>(i, j) = value;
2651  } break;
2652 
2653  case CV_32F: {
2654  float value;
2655  vpIoTools::readBinaryValueLE(file, value);
2656  trainDescriptorsTmp.at<float>(i, j) = value;
2657  } break;
2658 
2659  case CV_64F: {
2660  double value;
2661  vpIoTools::readBinaryValueLE(file, value);
2662  trainDescriptorsTmp.at<double>(i, j) = value;
2663  } break;
2664 
2665  default: {
2666  float value;
2667  vpIoTools::readBinaryValueLE(file, value);
2668  trainDescriptorsTmp.at<float>(i, j) = value;
2669  } break;
2670  }
2671  }
2672  }
2673 
2674  if (!append || m_trainDescriptors.empty()) {
2675  trainDescriptorsTmp.copyTo(m_trainDescriptors);
2676  }
2677  else {
2678  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2679  }
2680 
2681  file.close();
2682  }
2683  else {
2684  pugi::xml_document doc;
2685 
 2686  /* Parse the file and get the DOM */
2687  if (!doc.load_file(filename.c_str())) {
2688  throw vpException(vpException::ioError, "Error with file: %s", filename.c_str());
2689  }
2690 
2691  pugi::xml_node root_element = doc.document_element();
2692 
2693  int descriptorType = CV_32F; // float
2694  int nRows = 0, nCols = 0;
2695  int i = 0, j = 0;
2696 
2697  cv::Mat trainDescriptorsTmp;
2698 
2699  for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node;
2700  first_level_node = first_level_node.next_sibling()) {
2701 
2702  std::string name(first_level_node.name());
2703  if (first_level_node.type() == pugi::node_element && name == "TrainingImageInfo") {
2704 
2705  for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node;
2706  image_info_node = image_info_node.next_sibling()) {
2707  name = std::string(image_info_node.name());
2708 
2709  if (name == "trainImg") {
2710  // Read image_id
2711  int id = image_info_node.attribute("image_id").as_int();
2712 
 2713  vpImage<unsigned char> I;
 2714 #ifdef VISP_HAVE_MODULE_IO
2715  std::string path(image_info_node.text().as_string());
2716  // Read path to the training images
2717  if (vpIoTools::isAbsolutePathname(std::string(path))) {
2718  vpImageIo::read(I, path);
2719  }
2720  else {
2721  vpImageIo::read(I, parent + path);
2722  }
2723 
2724  // Add the image previously loaded only if VISP_HAVE_MODULE_IO
2725  m_mapOfImages[id + startImageId] = I;
2726 #endif
2727  }
2728  }
2729  }
2730  else if (first_level_node.type() == pugi::node_element && name == "DescriptorsInfo") {
2731  for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
2732  descriptors_info_node = descriptors_info_node.next_sibling()) {
2733  if (descriptors_info_node.type() == pugi::node_element) {
2734  name = std::string(descriptors_info_node.name());
2735 
2736  if (name == "nrows") {
2737  nRows = descriptors_info_node.text().as_int();
2738  }
2739  else if (name == "ncols") {
2740  nCols = descriptors_info_node.text().as_int();
2741  }
2742  else if (name == "type") {
2743  descriptorType = descriptors_info_node.text().as_int();
2744  }
2745  }
2746  }
2747 
2748  trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2749  }
2750  else if (first_level_node.type() == pugi::node_element && name == "DescriptorInfo") {
2751  double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
2752  int octave = 0, class_id = 0, image_id = 0;
2753  double oX = 0.0, oY = 0.0, oZ = 0.0;
2754 
2755  std::stringstream ss;
2756 
2757  for (pugi::xml_node point_node = first_level_node.first_child(); point_node;
2758  point_node = point_node.next_sibling()) {
2759  if (point_node.type() == pugi::node_element) {
2760  name = std::string(point_node.name());
2761 
2762  // Read information about keypoints
2763  if (name == "u") {
2764  u = point_node.text().as_double();
2765  }
2766  else if (name == "v") {
2767  v = point_node.text().as_double();
2768  }
2769  else if (name == "size") {
2770  size = point_node.text().as_double();
2771  }
2772  else if (name == "angle") {
2773  angle = point_node.text().as_double();
2774  }
2775  else if (name == "response") {
2776  response = point_node.text().as_double();
2777  }
2778  else if (name == "octave") {
2779  octave = point_node.text().as_int();
2780  }
2781  else if (name == "class_id") {
2782  class_id = point_node.text().as_int();
2783  cv::KeyPoint keyPoint(cv::Point2f((float)u, (float)v), (float)size, (float)angle, (float)response, octave,
2784  (class_id + startClassId));
2785  m_trainKeyPoints.push_back(keyPoint);
2786  }
2787  else if (name == "image_id") {
2788  image_id = point_node.text().as_int();
2789  if (image_id != -1) {
2790 #ifdef VISP_HAVE_MODULE_IO
2791  // No training images if image_id == -1
2792  m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2793 #endif
2794  }
2795  }
2796  else if (name == "oX") {
2797  oX = point_node.text().as_double();
2798  }
2799  else if (name == "oY") {
2800  oY = point_node.text().as_double();
2801  }
2802  else if (name == "oZ") {
2803  oZ = point_node.text().as_double();
2804  m_trainPoints.push_back(cv::Point3f((float)oX, (float)oY, (float)oZ));
2805  }
2806  else if (name == "desc") {
2807  j = 0;
2808 
2809  for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
2810  descriptor_value_node = descriptor_value_node.next_sibling()) {
2811 
2812  if (descriptor_value_node.type() == pugi::node_element) {
2813  // Read descriptors values
2814  std::string parseStr(descriptor_value_node.text().as_string());
2815  ss.clear();
2816  ss.str(parseStr);
2817 
2818  if (!ss.fail()) {
2819  switch (descriptorType) {
 2820  case CV_8U: {
 2821  // Parse the numeric value in [0; 255] into an int
 2822  int parseValue;
 2823  ss >> parseValue;
 2824  trainDescriptorsTmp.at<unsigned char>(i, j) = (unsigned char)parseValue;
 2825  } break;
 2826 
 2827  case CV_8S: {
 2828  // Parse the numeric value in [-128; 127] into an int
 2829  int parseValue;
 2830  ss >> parseValue;
 2831  trainDescriptorsTmp.at<char>(i, j) = (char)parseValue;
 2832  } break;
2833 
2834  case CV_16U:
2835  ss >> trainDescriptorsTmp.at<unsigned short int>(i, j);
2836  break;
2837 
2838  case CV_16S:
2839  ss >> trainDescriptorsTmp.at<short int>(i, j);
2840  break;
2841 
2842  case CV_32S:
2843  ss >> trainDescriptorsTmp.at<int>(i, j);
2844  break;
2845 
2846  case CV_32F:
2847  ss >> trainDescriptorsTmp.at<float>(i, j);
2848  break;
2849 
2850  case CV_64F:
2851  ss >> trainDescriptorsTmp.at<double>(i, j);
2852  break;
2853 
2854  default:
2855  ss >> trainDescriptorsTmp.at<float>(i, j);
2856  break;
2857  }
2858  }
2859  else {
2860  std::cerr << "Error when converting:" << ss.str() << std::endl;
2861  }
2862 
2863  j++;
2864  }
2865  }
2866  }
2867  }
2868  }
2869  i++;
2870  }
2871  }
2872 
2873  if (!append || m_trainDescriptors.empty()) {
2874  trainDescriptorsTmp.copyTo(m_trainDescriptors);
2875  }
2876  else {
2877  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2878  }
2879  }
2880 
2881  // Convert OpenCV type to ViSP type for compatibility
 2882  vpConvert::convertFromOpenCV(m_trainKeyPoints, m_referenceImagePointsList);
 2883  vpConvert::convertFromOpenCV(this->m_trainPoints, m_trainVpPoints);
2884 
2885  // Add train descriptors in matcher object
2886  m_matcher->clear();
2887  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
2888 
2889  // Set m_reference_computed to true as we load a learning file
2890  m_reference_computed = true;
2891 
2892  // Set m_currentImageId
2893  m_currentImageId = (int)m_mapOfImages.size();
2894 }
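/*
 * A typical save/load round trip, assuming the learning file was produced
 * earlier with saveLearningData() and the visp_io module is available (file
 * names are hypothetical):
 *
 * \code
 * vpImage<unsigned char> I_train, I_query;
 * vpImageIo::read(I_train, "train.png");
 * vpImageIo::read(I_query, "query.png");
 *
 * // Offline stage, done once: build the reference and save it
 * vpKeyPoint learner;
 * learner.buildReference(I_train);
 * learner.saveLearningData("learning.bin", true); // binaryMode = true
 *
 * // Online stage: restore the database and match against it
 * vpKeyPoint keypoint;
 * keypoint.loadLearningData("learning.bin", true);
 * unsigned int nbMatches = keypoint.matchPoint(I_query);
 * \endcode
 */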
2895 
2896 void vpKeyPoint::match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors,
2897  std::vector<cv::DMatch> &matches, double &elapsedTime)
2898 {
2899  double t = vpTime::measureTimeMs();
2900 
2901  if (m_useKnn) {
2902  m_knnMatches.clear();
2903 
2904  if (m_useMatchTrainToQuery) {
2905  std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
2906 
2907  // Match train descriptors to query descriptors
2908  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
2909  matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
2910 
2911  for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
2912  it1 != knnMatchesTmp.end(); ++it1) {
2913  std::vector<cv::DMatch> tmp;
2914  for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
2915  tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
2916  }
2917  m_knnMatches.push_back(tmp);
2918  }
2919 
2920  matches.resize(m_knnMatches.size());
2921  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
2922  }
2923  else {
2924  // Match query descriptors to train descriptors
2925  m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
2926  matches.resize(m_knnMatches.size());
2927  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
2928  }
2929  }
2930  else {
2931  matches.clear();
2932 
2933  if (m_useMatchTrainToQuery) {
2934  std::vector<cv::DMatch> matchesTmp;
2935  // Match train descriptors to query descriptors
2936  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
2937  matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
2938 
2939  for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
2940  matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
2941  }
2942  }
2943  else {
2944  // Match query descriptors to train descriptors
2945  m_matcher->match(queryDescriptors, matches);
2946  }
2947  }
2948  elapsedTime = vpTime::measureTimeMs() - t;
2949 }
2950 
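// Minimal sketch (illustrative only) of the match() entry point, assuming it
// is accessible in your ViSP version and that trainDesc/queryDesc come from
// extract(); with knn matching enabled only the best neighbour is kept.
#if 0
void example_match(vpKeyPoint &keypoint, const cv::Mat &trainDesc, const cv::Mat &queryDesc)
{
  std::vector<cv::DMatch> matches;
  double matchingTime = 0.;
  keypoint.match(trainDesc, queryDesc, matches, matchingTime);
  std::cout << matches.size() << " raw matches in " << matchingTime << " ms" << std::endl;
}
#endif
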
2951 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I) { return matchPoint(I, vpRect()); }
2952 
2953 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color) { return matchPoint(I_color, vpRect()); }
2954 
2955 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
2956  unsigned int width)
2957 {
2958  return matchPoint(I, vpRect(iP, width, height));
2959 }
2960 
2961 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
2962  unsigned int width)
2963 {
2964  return matchPoint(I_color, vpRect(iP, width, height));
2965 }
2966 
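// Usage sketch (illustrative only): restrict detection and matching to a
// region of interest, given either as a vpRect or as a top-left point plus
// height and width. The coordinates below are placeholders.
#if 0
void example_match_roi(vpKeyPoint &keypoint, const vpImage<unsigned char> &I)
{
  // Top-left corner at (i=50, j=100), 200 pixels high and 300 pixels wide
  unsigned int nb1 = keypoint.matchPoint(I, vpImagePoint(50, 100), 200, 300);
  // Equivalent call with an explicit rectangle (left, top, width, height)
  unsigned int nb2 = keypoint.matchPoint(I, vpRect(100, 50, 300, 200));
  std::cout << nb1 << " and " << nb2 << " matches" << std::endl;
}
#endif
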
2967 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpRect &rectangle)
2968 {
2969  if (m_trainDescriptors.empty()) {
2970  std::cerr << "Reference is empty." << std::endl;
2971  if (!m_reference_computed) {
2972  std::cerr << "Reference is not computed." << std::endl;
2973  }
2974  std::cerr << "Matching is not possible." << std::endl;
2975 
2976  return 0;
2977  }
2978 
2979  if (m_useAffineDetection) {
2980  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
2981  std::vector<cv::Mat> listOfQueryDescriptors;
2982 
2983  // Detect keypoints and extract descriptors on multiple images
2984  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
2985 
 2986  // Flatten the different query lists
2987  m_queryKeyPoints.clear();
2988  for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
2989  it != listOfQueryKeyPoints.end(); ++it) {
2990  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
2991  }
2992 
2993  bool first = true;
2994  for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
2995  ++it) {
2996  if (first) {
2997  first = false;
2998  it->copyTo(m_queryDescriptors);
2999  }
3000  else {
3001  m_queryDescriptors.push_back(*it);
3002  }
3003  }
3004  }
3005  else {
3006  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3007  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3008  }
3009 
3010  return matchPoint(m_queryKeyPoints, m_queryDescriptors);
3011 }
3012 
3013 unsigned int vpKeyPoint::matchPoint(const std::vector<cv::KeyPoint> &queryKeyPoints, const cv::Mat &queryDescriptors)
3014 {
3015  m_queryKeyPoints = queryKeyPoints;
3016  m_queryDescriptors = queryDescriptors;
3017 
3018  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3019 
3020  if (m_filterType != noFilterMatching) {
3021  m_queryFilteredKeyPoints.clear();
3022  m_objectFilteredPoints.clear();
3023  m_filteredMatches.clear();
3024 
3025  filterMatches();
3026  }
3027  else {
3028  if (m_useMatchTrainToQuery) {
 3029  // Add only query keypoints matched with a train keypoint
3030  m_queryFilteredKeyPoints.clear();
3031  m_filteredMatches.clear();
3032  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3033  m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3034  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3035  }
3036  }
3037  else {
3038  m_queryFilteredKeyPoints = m_queryKeyPoints;
3039  m_filteredMatches = m_matches;
3040  }
3041 
3042  if (!m_trainPoints.empty()) {
3043  m_objectFilteredPoints.clear();
 3044  // Add 3D object points so that the same index in
 3045  // m_queryFilteredKeyPoints and in m_objectFilteredPoints
 3046  // refers to the same train object point
3047  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3048  // m_matches is normally ordered following the queryDescriptor index
3049  m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3050  }
3051  }
3052  }
3053 
3054  // Convert OpenCV type to ViSP type for compatibility
3055  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, m_currentImagePointsList);
 3056  vpConvert::convertFromOpenCV(m_filteredMatches, m_matchedReferencePoints);
 3057 
3058  return static_cast<unsigned int>(m_filteredMatches.size());
3059 }
3060 
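// Usage sketch (illustrative only): run detect()/extract() yourself and feed
// the results to matchPoint(); convenient when the same keypoints must also
// be reused elsewhere in the application.
#if 0
void example_match_external(vpKeyPoint &keypoint, const vpImage<unsigned char> &I)
{
  std::vector<cv::KeyPoint> kpts;
  cv::Mat desc;
  double detectionTime = 0., extractionTime = 0.;
  keypoint.detect(I, kpts, detectionTime, vpRect());
  keypoint.extract(I, kpts, desc, extractionTime);
  unsigned int nbMatches = keypoint.matchPoint(kpts, desc);
  std::cout << nbMatches << " filtered matches" << std::endl;
}
#endif
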
3061 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
3062 {
3063  vpImageConvert::convert(I_color, m_I);
3064  return matchPoint(m_I, rectangle);
3065 }
3066 
3067 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
 3068  bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3069 {
3070  double error, elapsedTime;
3071  return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3072 }
3073 
3074 bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
 3075  bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3076 {
3077  double error, elapsedTime;
3078  return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
3079 }
3080 
3081 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
 3082  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3083  const vpRect &rectangle)
3084 {
3085  // Check if we have training descriptors
3086  if (m_trainDescriptors.empty()) {
3087  std::cerr << "Reference is empty." << std::endl;
3088  if (!m_reference_computed) {
3089  std::cerr << "Reference is not computed." << std::endl;
3090  }
3091  std::cerr << "Matching is not possible." << std::endl;
3092 
3093  return false;
3094  }
3095 
3096  if (m_useAffineDetection) {
3097  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3098  std::vector<cv::Mat> listOfQueryDescriptors;
3099 
3100  // Detect keypoints and extract descriptors on multiple images
3101  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3102 
 3103  // Flatten the different query lists
3104  m_queryKeyPoints.clear();
3105  for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3106  it != listOfQueryKeyPoints.end(); ++it) {
3107  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3108  }
3109 
3110  bool first = true;
3111  for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3112  ++it) {
3113  if (first) {
3114  first = false;
3115  it->copyTo(m_queryDescriptors);
3116  }
3117  else {
3118  m_queryDescriptors.push_back(*it);
3119  }
3120  }
3121  }
3122  else {
3123  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3124  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3125  }
3126 
3127  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3128 
3129  elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3130 
3131  if (m_filterType != noFilterMatching) {
3132  m_queryFilteredKeyPoints.clear();
3133  m_objectFilteredPoints.clear();
3134  m_filteredMatches.clear();
3135 
3136  filterMatches();
3137  }
3138  else {
3139  if (m_useMatchTrainToQuery) {
 3140  // Add only query keypoints matched with a train keypoint
3141  m_queryFilteredKeyPoints.clear();
3142  m_filteredMatches.clear();
3143  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3144  m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3145  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3146  }
3147  }
3148  else {
3149  m_queryFilteredKeyPoints = m_queryKeyPoints;
3150  m_filteredMatches = m_matches;
3151  }
3152 
3153  if (!m_trainPoints.empty()) {
3154  m_objectFilteredPoints.clear();
 3155  // Add 3D object points so that the same index in
 3156  // m_queryFilteredKeyPoints and in m_objectFilteredPoints
 3157  // refers to the same train object point
3158  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3159  // m_matches is normally ordered following the queryDescriptor index
3160  m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3161  }
3162  }
3163  }
3164 
3165  // Convert OpenCV type to ViSP type for compatibility
3166  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, m_currentImagePointsList);
 3167  vpConvert::convertFromOpenCV(m_filteredMatches, m_matchedReferencePoints);
 3168 
 3169  // error = std::numeric_limits<double>::max(); // creates a compilation error
 3170  // under Windows unless max is first #undef'ed, hence DBL_MAX below
3171  error = DBL_MAX;
3172  m_ransacInliers.clear();
3173  m_ransacOutliers.clear();
3174 
3175  if (m_useRansacVVS) {
3176  std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3177  size_t cpt = 0;
3178  // Create a list of vpPoint with 2D coordinates (current keypoint
3179  // location) + 3D coordinates (world/object coordinates)
3180  for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3181  it != m_objectFilteredPoints.end(); ++it, cpt++) {
3182  vpPoint pt;
3183  pt.setWorldCoordinates(it->x, it->y, it->z);
3184 
3185  vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3186 
3187  double x = 0.0, y = 0.0;
3188  vpPixelMeterConversion::convertPoint(cam, imP, x, y);
3189  pt.set_x(x);
3190  pt.set_y(y);
3191 
3192  objectVpPoints[cpt] = pt;
3193  }
3194 
3195  std::vector<vpPoint> inliers;
3196  std::vector<unsigned int> inlierIndex;
3197 
3198  bool res = computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3199 
3200  std::map<unsigned int, bool> mapOfInlierIndex;
3201  m_matchRansacKeyPointsToPoints.clear();
3202 
3203  for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3204  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3205  m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3206  mapOfInlierIndex[*it] = true;
3207  }
3208 
3209  for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3210  if (mapOfInlierIndex.find((unsigned int)i) == mapOfInlierIndex.end()) {
3211  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3212  }
3213  }
3214 
3215  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3216 
3217  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3218  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3219  m_ransacInliers.begin(), matchRansacToVpImage);
3220 
3221  elapsedTime += m_poseTime;
3222 
3223  return res;
3224  }
3225  else {
3226  std::vector<cv::Point2f> imageFilteredPoints;
3227  cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3228  std::vector<int> inlierIndex;
3229  bool res = computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3230 
3231  std::map<int, bool> mapOfInlierIndex;
3232  m_matchRansacKeyPointsToPoints.clear();
3233 
3234  for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3235  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3236  m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3237  mapOfInlierIndex[*it] = true;
3238  }
3239 
3240  for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3241  if (mapOfInlierIndex.find((int)i) == mapOfInlierIndex.end()) {
3242  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3243  }
3244  }
3245 
3246  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3247 
3248  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3249  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3250  m_ransacInliers.begin(), matchRansacToVpImage);
3251 
3252  elapsedTime += m_poseTime;
3253 
3254  return res;
3255  }
3256 }
3257 
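// Usage sketch (illustrative only): match and estimate the object pose in a
// single call. The camera intrinsics are placeholders; a real application
// would obtain them from calibration.
#if 0
void example_match_pose(vpKeyPoint &keypoint, const vpImage<unsigned char> &I)
{
  vpCameraParameters cam(600.0, 600.0, 320.0, 240.0); // px, py, u0, v0 (assumed)
  vpHomogeneousMatrix cMo;
  double error = 0., elapsedTime = 0.;
  if (keypoint.matchPoint(I, cam, cMo, error, elapsedTime, nullptr, vpRect())) {
    std::cout << "Pose found, mean reprojection error: " << error << std::endl;
  }
}
#endif
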
3258 bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
 3259  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3260  const vpRect &rectangle)
3261 {
3262  vpImageConvert::convert(I_color, m_I);
3263  return (matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
3264 }
3265 
3266 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox,
 3267  vpImagePoint &centerOfGravity, const bool isPlanarObject,
3268  std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3269  double *meanDescriptorDistance, double *detectionScore, const vpRect &rectangle)
3270 {
3271  if (imPts1 != nullptr && imPts2 != nullptr) {
3272  imPts1->clear();
3273  imPts2->clear();
3274  }
3275 
3276  matchPoint(I, rectangle);
3277 
3278  double meanDescriptorDistanceTmp = 0.0;
3279  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3280  meanDescriptorDistanceTmp += (double)it->distance;
3281  }
3282 
3283  meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3284  double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3285 
3286  if (meanDescriptorDistance != nullptr) {
3287  *meanDescriptorDistance = meanDescriptorDistanceTmp;
3288  }
3289  if (detectionScore != nullptr) {
3290  *detectionScore = score;
3291  }
3292 
3293  if (m_filteredMatches.size() >= 4) {
3294  // Training / Reference 2D points
3295  std::vector<cv::Point2f> points1(m_filteredMatches.size());
3296  // Query / Current 2D points
3297  std::vector<cv::Point2f> points2(m_filteredMatches.size());
3298 
3299  for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3300  points1[i] = cv::Point2f(m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx].pt);
3301  points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx].pt);
3302  }
3303 
3304  std::vector<vpImagePoint> inliers;
3305  if (isPlanarObject) {
3306 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3307  cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3308 #else
3309  cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3310 #endif
3311 
3312  for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3313  // Compute reprojection error
3314  cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3315  realPoint.at<double>(0, 0) = points1[i].x;
3316  realPoint.at<double>(1, 0) = points1[i].y;
 3317  realPoint.at<double>(2, 0) = 1.0;
3318 
3319  cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3320  double err_x = (reprojectedPoint.at<double>(0, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].x;
3321  double err_y = (reprojectedPoint.at<double>(1, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].y;
3322  double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3323 
3324  if (reprojectionError < 6.0) {
3325  inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3326  if (imPts1 != nullptr) {
3327  imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
3328  }
3329 
3330  if (imPts2 != nullptr) {
3331  imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3332  }
3333  }
3334  }
3335  }
3336  else if (m_filteredMatches.size() >= 8) {
3337  cv::Mat fundamentalInliers;
3338  cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3339 
3340  for (size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
3341  if (fundamentalInliers.at<uchar>((int)i, 0)) {
3342  inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3343 
3344  if (imPts1 != nullptr) {
3345  imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
3346  }
3347 
3348  if (imPts2 != nullptr) {
3349  imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3350  }
3351  }
3352  }
3353  }
3354 
3355  if (!inliers.empty()) {
3356  // Build a polygon with the list of inlier keypoints detected in the
3357  // current image to get the bounding box
3358  vpPolygon polygon(inliers);
3359  boundingBox = polygon.getBoundingBox();
3360 
3361  // Compute the center of gravity
3362  double meanU = 0.0, meanV = 0.0;
3363  for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
3364  meanU += it->get_u();
3365  meanV += it->get_v();
3366  }
3367 
3368  meanU /= (double)inliers.size();
3369  meanV /= (double)inliers.size();
3370 
3371  centerOfGravity.set_u(meanU);
3372  centerOfGravity.set_v(meanV);
3373  }
3374  }
3375  else {
3376  // Too few matches
3377  return false;
3378  }
3379 
3380  if (m_detectionMethod == detectionThreshold) {
3381  return meanDescriptorDistanceTmp < m_detectionThreshold;
3382  }
3383  else {
3384  return score > m_detectionScore;
3385  }
3386 }
3387 
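// Usage sketch (illustrative only): purely image-based detection. For a
// planar object the matches are validated with a RANSAC homography; the
// non-planar branch needs at least 8 matches for the fundamental matrix.
#if 0
void example_detect_2d(vpKeyPoint &keypoint, const vpImage<unsigned char> &I)
{
  vpRect boundingBox;
  vpImagePoint cog;
  if (keypoint.matchPointAndDetect(I, boundingBox, cog)) {
    std::cout << "Object detected, center of gravity: " << cog << std::endl;
  }
}
#endif
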
3388 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam,
 3389  vpHomogeneousMatrix &cMo, double &error, double &elapsedTime, vpRect &boundingBox,
3390  vpImagePoint &centerOfGravity, bool (*func)(const vpHomogeneousMatrix &),
3391  const vpRect &rectangle)
3392 {
3393  bool isMatchOk = matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3394  if (isMatchOk) {
 3395  // Use the estimated pose to project the model points in the image
3396  vpPoint pt;
3397  vpImagePoint imPt;
3398  std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
3399  size_t cpt = 0;
3400  for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
3401  pt = *it;
3402  pt.project(cMo);
3403  vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), imPt);
3404  modelImagePoints[cpt] = imPt;
3405  }
3406 
3407  // Build a polygon with the list of model image points to get the bounding
3408  // box
3409  vpPolygon polygon(modelImagePoints);
3410  boundingBox = polygon.getBoundingBox();
3411 
3412  // Compute the center of gravity of the current inlier keypoints
3413  double meanU = 0.0, meanV = 0.0;
3414  for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
3415  meanU += it->get_u();
3416  meanV += it->get_v();
3417  }
3418 
3419  meanU /= (double)m_ransacInliers.size();
3420  meanV /= (double)m_ransacInliers.size();
3421 
3422  centerOfGravity.set_u(meanU);
3423  centerOfGravity.set_v(meanV);
3424  }
3425 
3426  return isMatchOk;
3427 }
3428 
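// Usage sketch (illustrative only): detection with pose estimation. Unlike
// the purely 2D variant above, the bounding box is obtained by projecting
// the learned 3D model points with the estimated pose.
#if 0
void example_detect_pose(vpKeyPoint &keypoint, const vpImage<unsigned char> &I,
                         const vpCameraParameters &cam)
{
  vpHomogeneousMatrix cMo;
  double error = 0., elapsedTime = 0.;
  vpRect boundingBox;
  vpImagePoint cog;
  if (keypoint.matchPointAndDetect(I, cam, cMo, error, elapsedTime, boundingBox, cog, nullptr, vpRect())) {
    std::cout << "Estimated pose:\n" << cMo << std::endl;
  }
}
#endif
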
3429 void vpKeyPoint::detectExtractAffine(const vpImage<unsigned char> &I,
 3430  std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
3431  std::vector<cv::Mat> &listOfDescriptors,
3432  std::vector<vpImage<unsigned char> > *listOfAffineI)
3433 {
3434 #if 0
3435  cv::Mat img;
3436  vpImageConvert::convert(I, img);
3437  listOfKeypoints.clear();
3438  listOfDescriptors.clear();
3439 
3440  for (int tl = 1; tl < 6; tl++) {
3441  double t = pow(2, 0.5 * tl);
3442  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3443  std::vector<cv::KeyPoint> keypoints;
3444  cv::Mat descriptors;
3445 
3446  cv::Mat timg, mask, Ai;
3447  img.copyTo(timg);
3448 
3449  affineSkew(t, phi, timg, mask, Ai);
3450 
3451 
3452  if (listOfAffineI != nullptr) {
3453  cv::Mat img_disp;
3454  bitwise_and(mask, timg, img_disp);
 3455  vpImage<unsigned char> tI;
 3456  vpImageConvert::convert(img_disp, tI);
3457  listOfAffineI->push_back(tI);
3458  }
3459 #if 0
3460  cv::Mat img_disp;
3461  cv::bitwise_and(mask, timg, img_disp);
3462  cv::namedWindow("Skew", cv::WINDOW_AUTOSIZE); // Create a window for display.
3463  cv::imshow("Skew", img_disp);
3464  cv::waitKey(0);
3465 #endif
3466 
3467  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3468  it != m_detectors.end(); ++it) {
3469  std::vector<cv::KeyPoint> kp;
3470  it->second->detect(timg, kp, mask);
3471  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3472  }
3473 
3474  double elapsedTime;
3475  extract(timg, keypoints, descriptors, elapsedTime);
3476 
3477  for (unsigned int i = 0; i < keypoints.size(); i++) {
3478  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3479  cv::Mat kpt_t = Ai * cv::Mat(kpt);
3480  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
3481  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
3482  }
3483 
3484  listOfKeypoints.push_back(keypoints);
3485  listOfDescriptors.push_back(descriptors);
3486  }
3487  }
3488 
3489 #else
3490  cv::Mat img;
3491  vpImageConvert::convert(I, img);
3492 
3493  // Create a vector for storing the affine skew parameters
3494  std::vector<std::pair<double, int> > listOfAffineParams;
3495  for (int tl = 1; tl < 6; tl++) {
3496  double t = pow(2, 0.5 * tl);
3497  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3498  listOfAffineParams.push_back(std::pair<double, int>(t, phi));
3499  }
3500  }
3501 
3502  listOfKeypoints.resize(listOfAffineParams.size());
3503  listOfDescriptors.resize(listOfAffineParams.size());
3504 
3505  if (listOfAffineI != nullptr) {
3506  listOfAffineI->resize(listOfAffineParams.size());
3507  }
3508 
3509 #ifdef VISP_HAVE_OPENMP
3510 #pragma omp parallel for
3511 #endif
3512  for (int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
3513  std::vector<cv::KeyPoint> keypoints;
3514  cv::Mat descriptors;
3515 
3516  cv::Mat timg, mask, Ai;
3517  img.copyTo(timg);
3518 
3519  affineSkew(listOfAffineParams[(size_t)cpt].first, listOfAffineParams[(size_t)cpt].second, timg, mask, Ai);
3520 
3521  if (listOfAffineI != nullptr) {
3522  cv::Mat img_disp;
3523  bitwise_and(mask, timg, img_disp);
 3524  vpImage<unsigned char> tI;
 3525  vpImageConvert::convert(img_disp, tI);
3526  (*listOfAffineI)[(size_t)cpt] = tI;
3527  }
3528 
3529 #if 0
3530  cv::Mat img_disp;
3531  cv::bitwise_and(mask, timg, img_disp);
3532  cv::namedWindow("Skew", cv::WINDOW_AUTOSIZE); // Create a window for display.
3533  cv::imshow("Skew", img_disp);
3534  cv::waitKey(0);
3535 #endif
3536 
3537  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3538  it != m_detectors.end(); ++it) {
3539  std::vector<cv::KeyPoint> kp;
3540  it->second->detect(timg, kp, mask);
3541  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3542  }
3543 
3544  double elapsedTime;
3545  extract(timg, keypoints, descriptors, elapsedTime);
3546 
3547  for (size_t i = 0; i < keypoints.size(); i++) {
3548  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3549  cv::Mat kpt_t = Ai * cv::Mat(kpt);
3550  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
3551  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
3552  }
3553 
3554  listOfKeypoints[(size_t)cpt] = keypoints;
3555  listOfDescriptors[(size_t)cpt] = descriptors;
3556  }
3557 #endif
3558 }
3559 
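// Usage sketch (illustrative only): ASIFT-style matching. Enabling affine
// detection (assuming the setUseAffineDetection() setter of vpKeyPoint) makes
// matchPoint() simulate the series of affine skews above before matching, at
// a significant computational cost.
#if 0
void example_affine(vpKeyPoint &keypoint, const vpImage<unsigned char> &I)
{
  keypoint.setUseAffineDetection(true);
  unsigned int nbMatches = keypoint.matchPoint(I);
  std::cout << nbMatches << " matches with affine simulation" << std::endl;
}
#endif
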
3560 void vpKeyPoint::reset()
 3561 {
3562  // vpBasicKeyPoint class
 3563  m_referenceImagePointsList.clear();
 3564  m_currentImagePointsList.clear();
3565  m_matchedReferencePoints.clear();
3566  m_reference_computed = false;
3567 
3568  m_computeCovariance = false;
3569  m_covarianceMatrix = vpMatrix();
3570  m_currentImageId = 0;
3571  m_detectionMethod = detectionScore;
3572  m_detectionScore = 0.15;
3573  m_detectionThreshold = 100.0;
3574  m_detectionTime = 0.0;
3575  m_detectorNames.clear();
3576  m_detectors.clear();
3577  m_extractionTime = 0.0;
3578  m_extractorNames.clear();
3579  m_extractors.clear();
3580  m_filteredMatches.clear();
3581  m_filterType = ratioDistanceThreshold;
3582  m_imageFormat = jpgImageFormat;
3583  m_knnMatches.clear();
3584  m_mapOfImageId.clear();
3585  m_mapOfImages.clear();
3586  m_matcher = cv::Ptr<cv::DescriptorMatcher>();
3587  m_matcherName = "BruteForce-Hamming";
3588  m_matches.clear();
3589  m_matchingFactorThreshold = 2.0;
3590  m_matchingRatioThreshold = 0.85;
3591  m_matchingTime = 0.0;
3592  m_matchRansacKeyPointsToPoints.clear();
3593  m_nbRansacIterations = 200;
3594  m_nbRansacMinInlierCount = 100;
3595  m_objectFilteredPoints.clear();
3596  m_poseTime = 0.0;
3597  m_queryDescriptors = cv::Mat();
3598  m_queryFilteredKeyPoints.clear();
3599  m_queryKeyPoints.clear();
3600  m_ransacConsensusPercentage = 20.0;
3601  m_ransacFilterFlag = vpPose::NO_FILTER;
3602  m_ransacInliers.clear();
3603  m_ransacOutliers.clear();
3604  m_ransacParallel = true;
3605  m_ransacParallelNbThreads = 0;
3606  m_ransacReprojectionError = 6.0;
3607  m_ransacThreshold = 0.01;
3608  m_trainDescriptors = cv::Mat();
3609  m_trainKeyPoints.clear();
3610  m_trainPoints.clear();
3611  m_trainVpPoints.clear();
3612  m_useAffineDetection = false;
3613 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
3614  m_useBruteForceCrossCheck = true;
3615 #endif
3616  m_useConsensusPercentage = false;
3617  m_useKnn = true; // as m_filterType == ratioDistanceThreshold
3618  m_useMatchTrainToQuery = false;
3619  m_useRansacVVS = true;
3620  m_useSingleMatchFilter = true;
3621 
3622  m_detectorNames.push_back("ORB");
3623  m_extractorNames.push_back("ORB");
3624 
3625  init();
3626 }
3627 
3628 void vpKeyPoint::saveLearningData(const std::string &filename, bool binaryMode, bool saveTrainingImages)
3629 {
3630  std::string parent = vpIoTools::getParent(filename);
3631  if (!parent.empty()) {
3632  vpIoTools::makeDirectory(parent);
3633  }
3634 
3635  std::map<int, std::string> mapOfImgPath;
3636  if (saveTrainingImages) {
3637 #ifdef VISP_HAVE_MODULE_IO
3638  // Save the training image files in the same directory
3639  unsigned int cpt = 0;
3640 
3641  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3642  ++it, cpt++) {
3643  if (cpt > 999) {
3644  throw vpException(vpException::fatalError, "The number of training images to save is too big !");
3645  }
3646 
3647  std::stringstream ss;
3648  ss << "train_image_" << std::setfill('0') << std::setw(3) << cpt;
3649 
3650  switch (m_imageFormat) {
3651  case jpgImageFormat:
3652  ss << ".jpg";
3653  break;
3654 
3655  case pngImageFormat:
3656  ss << ".png";
3657  break;
3658 
3659  case ppmImageFormat:
3660  ss << ".ppm";
3661  break;
3662 
3663  case pgmImageFormat:
3664  ss << ".pgm";
3665  break;
3666 
3667  default:
3668  ss << ".png";
3669  break;
3670  }
3671 
3672  std::string imgFilename = ss.str();
3673  mapOfImgPath[it->first] = imgFilename;
3674  vpImageIo::write(it->second, parent + (!parent.empty() ? "/" : "") + imgFilename);
3675  }
3676 #else
3677  std::cout << "Warning: in vpKeyPoint::saveLearningData() training images "
3678  "are not saved because "
3679  "visp_io module is not available !"
3680  << std::endl;
3681 #endif
3682  }
3683 
3684  bool have3DInfo = m_trainPoints.size() > 0;
3685  if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
3686  throw vpException(vpException::fatalError, "List of keypoints and list of 3D points have different size !");
3687  }
3688 
3689  if (binaryMode) {
 3690  // Save the learning data into a little endian binary file.
3691  std::ofstream file(filename.c_str(), std::ofstream::binary);
3692  if (!file.is_open()) {
3693  throw vpException(vpException::ioError, "Cannot create the file.");
3694  }
3695 
3696  // Write info about training images
3697  int nbImgs = (int)mapOfImgPath.size();
3698  vpIoTools::writeBinaryValueLE(file, nbImgs);
3699 
3700 #ifdef VISP_HAVE_MODULE_IO
3701  for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3702  // Write image_id
3703  int id = it->first;
 3704  vpIoTools::writeBinaryValueLE(file, id);
 3705 
3706  // Write image path
3707  std::string path = it->second;
3708  int length = (int)path.length();
3709  vpIoTools::writeBinaryValueLE(file, length);
3710 
3711  for (int cpt = 0; cpt < length; cpt++) {
3712  file.write((char *)(&path[(size_t)cpt]), sizeof(path[(size_t)cpt]));
3713  }
3714  }
3715 #endif
3716 
3717  // Write if we have 3D point information
3718  int have3DInfoInt = have3DInfo ? 1 : 0;
3719  vpIoTools::writeBinaryValueLE(file, have3DInfoInt);
3720 
3721  int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3722  int descriptorType = m_trainDescriptors.type();
3723 
3724  // Write the number of descriptors
3725  vpIoTools::writeBinaryValueLE(file, nRows);
3726 
3727  // Write the size of the descriptor
3728  vpIoTools::writeBinaryValueLE(file, nCols);
3729 
3730  // Write the type of the descriptor
3731  vpIoTools::writeBinaryValueLE(file, descriptorType);
3732 
3733  for (int i = 0; i < nRows; i++) {
3734  unsigned int i_ = (unsigned int)i;
3735  // Write u
3736  float u = m_trainKeyPoints[i_].pt.x;
 3737  vpIoTools::writeBinaryValueLE(file, u);
 3738 
3739  // Write v
3740  float v = m_trainKeyPoints[i_].pt.y;
 3741  vpIoTools::writeBinaryValueLE(file, v);
 3742 
3743  // Write size
3744  float size = m_trainKeyPoints[i_].size;
3745  vpIoTools::writeBinaryValueLE(file, size);
3746 
3747  // Write angle
3748  float angle = m_trainKeyPoints[i_].angle;
3749  vpIoTools::writeBinaryValueLE(file, angle);
3750 
3751  // Write response
3752  float response = m_trainKeyPoints[i_].response;
3753  vpIoTools::writeBinaryValueLE(file, response);
3754 
3755  // Write octave
3756  int octave = m_trainKeyPoints[i_].octave;
3757  vpIoTools::writeBinaryValueLE(file, octave);
3758 
3759  // Write class_id
3760  int class_id = m_trainKeyPoints[i_].class_id;
3761  vpIoTools::writeBinaryValueLE(file, class_id);
3762 
3763 // Write image_id
3764 #ifdef VISP_HAVE_MODULE_IO
3765  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3766  int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
3767  vpIoTools::writeBinaryValueLE(file, image_id);
3768 #else
3769  int image_id = -1;
3770  // file.write((char *)(&image_id), sizeof(image_id));
3771  vpIoTools::writeBinaryValueLE(file, image_id);
3772 #endif
3773 
3774  if (have3DInfo) {
3775  float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
 3776  // Write oX
 3777  vpIoTools::writeBinaryValueLE(file, oX);
 3778 
 3779  // Write oY
 3780  vpIoTools::writeBinaryValueLE(file, oY);
 3781 
 3782  // Write oZ
 3783  vpIoTools::writeBinaryValueLE(file, oZ);
 3784  }
3785 
3786  for (int j = 0; j < nCols; j++) {
3787  // Write the descriptor value
3788  switch (descriptorType) {
3789  case CV_8U:
3790  file.write((char *)(&m_trainDescriptors.at<unsigned char>(i, j)),
3791  sizeof(m_trainDescriptors.at<unsigned char>(i, j)));
3792  break;
3793 
3794  case CV_8S:
3795  file.write((char *)(&m_trainDescriptors.at<char>(i, j)), sizeof(m_trainDescriptors.at<char>(i, j)));
3796  break;
3797 
3798  case CV_16U:
3799  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<unsigned short int>(i, j));
3800  break;
3801 
3802  case CV_16S:
3803  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<short int>(i, j));
3804  break;
3805 
3806  case CV_32S:
3807  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<int>(i, j));
3808  break;
3809 
3810  case CV_32F:
3811  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<float>(i, j));
3812  break;
3813 
3814  case CV_64F:
3815  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<double>(i, j));
3816  break;
3817 
3818  default:
3819  throw vpException(vpException::fatalError, "Problem with the data type of descriptors !");
3820  break;
3821  }
3822  }
3823  }
3824 
3825  file.close();
3826  }
3827  else {
3828  pugi::xml_document doc;
3829  pugi::xml_node node = doc.append_child(pugi::node_declaration);
3830  node.append_attribute("version") = "1.0";
3831  node.append_attribute("encoding") = "UTF-8";
3832 
3833  if (!doc) {
3834  throw vpException(vpException::ioError, "Error with file: ", filename.c_str());
3835  }
3836 
3837  pugi::xml_node root_node = doc.append_child("LearningData");
3838 
3839  // Write the training images info
3840  pugi::xml_node image_node = root_node.append_child("TrainingImageInfo");
3841 
3842 #ifdef VISP_HAVE_MODULE_IO
3843  for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3844  pugi::xml_node image_info_node = image_node.append_child("trainImg");
3845  image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
3846  std::stringstream ss;
3847  ss << it->first;
3848  image_info_node.append_attribute("image_id") = ss.str().c_str();
3849  }
3850 #endif
3851 
3852  // Write information about descriptors
3853  pugi::xml_node descriptors_info_node = root_node.append_child("DescriptorsInfo");
3854 
3855  int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3856  int descriptorType = m_trainDescriptors.type();
3857 
3858  // Write the number of rows
3859  descriptors_info_node.append_child("nrows").append_child(pugi::node_pcdata).text() = nRows;
3860 
3861  // Write the number of cols
3862  descriptors_info_node.append_child("ncols").append_child(pugi::node_pcdata).text() = nCols;
3863 
3864  // Write the descriptors type
3865  descriptors_info_node.append_child("type").append_child(pugi::node_pcdata).text() = descriptorType;
3866 
3867  for (int i = 0; i < nRows; i++) {
3868  unsigned int i_ = (unsigned int)i;
3869  pugi::xml_node descriptor_node = root_node.append_child("DescriptorInfo");
3870 
3871  descriptor_node.append_child("u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
3872  descriptor_node.append_child("v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
3873  descriptor_node.append_child("size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
3874  descriptor_node.append_child("angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
3875  descriptor_node.append_child("response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
3876  descriptor_node.append_child("octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
3877  descriptor_node.append_child("class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
3878 
3879 #ifdef VISP_HAVE_MODULE_IO
3880  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3881  descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() =
3882  ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
3883 #else
3884  descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() = -1;
3885 #endif
3886 
3887  if (have3DInfo) {
3888  descriptor_node.append_child("oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
3889  descriptor_node.append_child("oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
3890  descriptor_node.append_child("oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
3891  }
3892 
3893  pugi::xml_node desc_node = descriptor_node.append_child("desc");
3894 
3895  for (int j = 0; j < nCols; j++) {
3896  switch (descriptorType) {
3897  case CV_8U: {
3898  // Promote an unsigned char to an int
3899  // val_tmp holds the numeric value that will be written
 3900  // We save the value in numeric form, otherwise the xml library will
 3901  // not be able to parse it. A better solution could be possible
3902  int val_tmp = m_trainDescriptors.at<unsigned char>(i, j);
3903  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
3904  } break;
3905 
3906  case CV_8S: {
3907  // Promote a char to an int
3908  // val_tmp holds the numeric value that will be written
 3909  // We save the value in numeric form, otherwise the xml library will
 3910  // not be able to parse it. A better solution could be possible
3911  int val_tmp = m_trainDescriptors.at<char>(i, j);
3912  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
3913  } break;
3914 
3915  case CV_16U:
3916  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
3917  m_trainDescriptors.at<unsigned short int>(i, j);
3918  break;
3919 
3920  case CV_16S:
3921  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<short int>(i, j);
3922  break;
3923 
3924  case CV_32S:
3925  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<int>(i, j);
3926  break;
3927 
3928  case CV_32F:
3929  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<float>(i, j);
3930  break;
3931 
3932  case CV_64F:
3933  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = m_trainDescriptors.at<double>(i, j);
3934  break;
3935 
3936  default:
3937  throw vpException(vpException::fatalError, "Problem with the data type of descriptors !");
3938  break;
3939  }
3940  }
3941  }
3942 
3943  doc.save_file(filename.c_str(), PUGIXML_TEXT(" "), pugi::format_default, pugi::encoding_utf8);
3944  }
3945 }
3946 
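// Usage sketch (illustrative only): build a reference from a training image
// and save it once in binary form and once as XML for later calls to
// loadLearningData(). File names are placeholders.
#if 0
void example_save(const vpImage<unsigned char> &Itrain)
{
  vpKeyPoint keypoint(vpKeyPoint::DETECTOR_ORB, vpKeyPoint::DESCRIPTOR_ORB, "BruteForce-Hamming");
  keypoint.buildReference(Itrain);
  keypoint.saveLearningData("learning_data.bin", true);  // little endian binary
  keypoint.saveLearningData("learning_data.xml", false); // pugixml-based XML
}
#endif
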
3947 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
3948 #ifndef DOXYGEN_SHOULD_SKIP_THIS
3949 // From OpenCV 2.4.11 source code.
3950 struct KeypointResponseGreaterThanThreshold
3951 {
3952  KeypointResponseGreaterThanThreshold(float _value) : value(_value) { }
3953  inline bool operator()(const cv::KeyPoint &kpt) const { return kpt.response >= value; }
3954  float value;
3955 };
3956 
3957 struct KeypointResponseGreater
3958 {
3959  inline bool operator()(const cv::KeyPoint &kp1, const cv::KeyPoint &kp2) const { return kp1.response > kp2.response; }
3960 };
3961 
3962 // takes keypoints and culls them by the response
3963 void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints, int n_points)
3964 {
3965  // this is only necessary if the keypoints size is greater than the number
3966  // of desired points.
3967  if (n_points >= 0 && keypoints.size() > (size_t)n_points) {
3968  if (n_points == 0) {
3969  keypoints.clear();
3970  return;
3971  }
3972  // first use nth element to partition the keypoints into the best and
3973  // worst.
3974  std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
3975  // this is the boundary response, and in the case of FAST may be ambiguous
3976  float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
3977  // use std::partition to grab all of the keypoints with the boundary
3978  // response.
3979  std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
3980  keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
3981  // resize the keypoints, given this new end point. nth_element and
3982  // partition reordered the points inplace
3983  keypoints.resize((size_t)(new_end - keypoints.begin()));
3984  }
3985 }
3986 
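// Minimal sketch (illustrative only): keep only the 500 strongest keypoints.
// The nth_element/partition pair selects them in O(n) on average instead of
// fully sorting the vector.
#if 0
void example_retain_best(std::vector<cv::KeyPoint> &keypoints)
{
  vpKeyPoint::KeyPointsFilter::retainBest(keypoints, 500);
}
#endif
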
3987 struct RoiPredicate
3988 {
3989  RoiPredicate(const cv::Rect &_r) : r(_r) { }
3990 
3991  bool operator()(const cv::KeyPoint &keyPt) const { return !r.contains(keyPt.pt); }
3992 
3993  cv::Rect r;
3994 };
3995 
3996 void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
3997  int borderSize)
3998 {
3999  if (borderSize > 0) {
4000  if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4001  keypoints.clear();
4002  else
4003  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4004  RoiPredicate(cv::Rect(
4005  cv::Point(borderSize, borderSize),
4006  cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4007  keypoints.end());
4008  }
4009 }
4010 
4011 struct SizePredicate
4012 {
4013  SizePredicate(float _minSize, float _maxSize) : minSize(_minSize), maxSize(_maxSize) { }
4014 
4015  bool operator()(const cv::KeyPoint &keyPt) const
4016  {
4017  float size = keyPt.size;
4018  return (size < minSize) || (size > maxSize);
4019  }
4020 
4021  float minSize, maxSize;
4022 };
4023 
4024 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize)
4025 {
4026  CV_Assert(minSize >= 0);
4027  CV_Assert(maxSize >= 0);
4028  CV_Assert(minSize <= maxSize);
4029 
4030  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4031 }
4032 
4033 class MaskPredicate
4034 {
4035 public:
4036  MaskPredicate(const cv::Mat &_mask) : mask(_mask) { }
4037  bool operator()(const cv::KeyPoint &key_pt) const
4038  {
4039  return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
4040  }
4041 
4042 private:
4043  const cv::Mat mask;
4044 };
4045 
4046 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask)
4047 {
4048  if (mask.empty())
4049  return;
4050 
4051  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4052 }
4053 
4054 struct KeyPoint_LessThan
4055 {
4056  KeyPoint_LessThan(const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) { }
 4057  bool operator()(size_t i, size_t j) const
 4058  {
 4059  const cv::KeyPoint &kp1 = (*kp)[i];
 4060  const cv::KeyPoint &kp2 = (*kp)[j];
4061  if (!vpMath::equal(kp1.pt.x, kp2.pt.x,
4062  std::numeric_limits<float>::epsilon())) { // if (kp1.pt.x !=
4063  // kp2.pt.x) {
4064  return kp1.pt.x < kp2.pt.x;
4065  }
4066 
4067  if (!vpMath::equal(kp1.pt.y, kp2.pt.y,
4068  std::numeric_limits<float>::epsilon())) { // if (kp1.pt.y !=
4069  // kp2.pt.y) {
4070  return kp1.pt.y < kp2.pt.y;
4071  }
4072 
4073  if (!vpMath::equal(kp1.size, kp2.size,
4074  std::numeric_limits<float>::epsilon())) { // if (kp1.size !=
4075  // kp2.size) {
4076  return kp1.size > kp2.size;
4077  }
4078 
4079  if (!vpMath::equal(kp1.angle, kp2.angle,
4080  std::numeric_limits<float>::epsilon())) { // if (kp1.angle !=
4081  // kp2.angle) {
4082  return kp1.angle < kp2.angle;
4083  }
4084 
4085  if (!vpMath::equal(kp1.response, kp2.response,
4086  std::numeric_limits<float>::epsilon())) { // if (kp1.response !=
4087  // kp2.response) {
4088  return kp1.response > kp2.response;
4089  }
4090 
4091  if (kp1.octave != kp2.octave) {
4092  return kp1.octave > kp2.octave;
4093  }
4094 
4095  if (kp1.class_id != kp2.class_id) {
4096  return kp1.class_id > kp2.class_id;
4097  }
4098 
4099  return i < j;
4100  }
4101  const std::vector<cv::KeyPoint> *kp;
4102 };
4103 
4104 void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4105 {
4106  size_t i, j, n = keypoints.size();
4107  std::vector<size_t> kpidx(n);
4108  std::vector<uchar> mask(n, (uchar)1);
4109 
4110  for (i = 0; i < n; i++) {
4111  kpidx[i] = i;
4112  }
4113  std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4114  for (i = 1, j = 0; i < n; i++) {
4115  cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4116  cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4117  // if (kp1.pt.x != kp2.pt.x || kp1.pt.y != kp2.pt.y || kp1.size !=
4118  // kp2.size || kp1.angle != kp2.angle) {
4119  if (!vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4120  !vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4121  !vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4122  !vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4123  j = i;
4124  }
4125  else {
4126  mask[kpidx[i]] = 0;
4127  }
4128  }
4129 
4130  for (i = j = 0; i < n; i++) {
4131  if (mask[i]) {
4132  if (i != j) {
4133  keypoints[j] = keypoints[i];
4134  }
4135  j++;
4136  }
4137  }
4138  keypoints.resize(j);
4139 }
4140 
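// Usage sketch (illustrative only) chaining the filters above: drop keypoints
// close to the image border, outside a mask, and finally the duplicates that
// several detectors may produce at the same location.
#if 0
void example_filters(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask, cv::Size imageSize)
{
  vpKeyPoint::KeyPointsFilter::runByImageBorder(keypoints, imageSize, 16);
  vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
  vpKeyPoint::KeyPointsFilter::removeDuplicated(keypoints);
}
#endif
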
4141 /*
4142  * PyramidAdaptedFeatureDetector
4143  */
4144 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &detector,
4145  int maxLevel)
4146  : m_detector(detector), m_maxLevel(maxLevel)
4147 { }
4148 
4149 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty() const
4150 {
 4151  return m_detector.empty() || m_detector->empty();
4152 }
4153 
4154 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4155  CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4156 {
4157  detectImpl(image.getMat(), keypoints, mask.getMat());
4158 }
4159 
4160 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4161  const cv::Mat &mask) const
4162 {
4163  cv::Mat src = image;
4164  cv::Mat src_mask = mask;
4165 
4166  cv::Mat dilated_mask;
4167  if (!mask.empty()) {
4168  cv::dilate(mask, dilated_mask, cv::Mat());
4169  cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4170  mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4171  dilated_mask = mask255;
4172  }
4173 
4174  for (int l = 0, multiplier = 1; l <= m_maxLevel; ++l, multiplier *= 2) {
4175  // Detect on current level of the pyramid
4176  std::vector<cv::KeyPoint> new_pts;
4177  m_detector->detect(src, new_pts, src_mask);
4178  std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4179  for (; it != end; ++it) {
4180  it->pt.x *= multiplier;
4181  it->pt.y *= multiplier;
4182  it->size *= multiplier;
4183  it->octave = l;
4184  }
4185  keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4186 
4187  // Downsample
4188  if (l < m_maxLevel) {
4189  cv::Mat dst;
4190  pyrDown(src, dst);
4191  src = dst;
4192 
4193  if (!mask.empty())
 4194  resize(dilated_mask, src_mask, src.size(), 0, 0, cv::INTER_AREA);
4195  }
4196  }
4197 
4198  if (!mask.empty())
4199  vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4200 }
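
// Usage sketch (illustrative only): wrap a FAST detector so that detection is
// repeated over a 3-level image pyramid, with coordinates, sizes and octaves
// mapped back to the original resolution.
#if 0
void example_pyramid_detector(const cv::Mat &image)
{
  cv::Ptr<cv::FeatureDetector> fast = cv::FastFeatureDetector::create();
  vpKeyPoint::PyramidAdaptedFeatureDetector pyramid(fast, 2); // levels 0, 1, 2
  std::vector<cv::KeyPoint> keypoints;
  pyramid.detect(image, keypoints, cv::noArray());
  std::cout << keypoints.size() << " keypoints over all levels" << std::endl;
}
#endif
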
4201 #endif
4202 #endif
4203 
4204 #elif !defined(VISP_BUILD_SHARED_LIBS)
4205 // Workaround to avoid warning: libvisp_vision.a(vpKeyPoint.cpp.o) has no symbols
4206 void dummy_vpKeyPoint() { }
4207 #endif