vpKeyPoint.cpp
1 /****************************************************************************
2  *
3  * ViSP, open source Visual Servoing Platform software.
4  * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
5  *
6  * This software is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  * See the file LICENSE.txt at the root directory of this source
11  * distribution for additional information about the GNU GPL.
12  *
13  * For using ViSP with software that can not be combined with the GNU
14  * GPL, please contact Inria about acquiring a ViSP Professional
15  * Edition License.
16  *
17  * See http://visp.inria.fr for more information.
18  *
19  * This software was developed at:
20  * Inria Rennes - Bretagne Atlantique
21  * Campus Universitaire de Beaulieu
22  * 35042 Rennes Cedex
23  * France
24  *
25  * If you have questions regarding the use of this file, please contact
26  * Inria at visp@inria.fr
27  *
28  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
29  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
30  *
31  * Description:
32  * Key point functionalities.
33  *
34  * Authors:
35  * Souriya Trinh
36  *
37  *****************************************************************************/

#include <iomanip>
#include <limits>

#include <visp3/core/vpIoTools.h>
#include <visp3/vision/vpKeyPoint.h>

#if (VISP_HAVE_OPENCV_VERSION >= 0x020101)

#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
#include <opencv2/calib3d/calib3d.hpp>
#endif

#ifdef VISP_HAVE_PUGIXML
#include <pugixml.hpp>
#endif

namespace
{
// Specific type transformation functions
inline cv::DMatch knnToDMatch(const std::vector<cv::DMatch> &knnMatches)
{
  if (knnMatches.size() > 0) {
    return knnMatches[0];
  }

  return cv::DMatch();
}

inline vpImagePoint matchRansacToVpImage(const std::pair<cv::KeyPoint, cv::Point3f> &pair)
{
  return vpImagePoint(pair.first.pt.y, pair.first.pt.x);
}

} // namespace

vpKeyPoint::vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
    m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
    m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I()
{
  initFeatureNames();

  m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
  m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);

  init();
}

vpKeyPoint::vpKeyPoint(const std::string &detectorName, const std::string &extractorName,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
    m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
    m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I()
{
  initFeatureNames();

  m_detectorNames.push_back(detectorName);
  m_extractorNames.push_back(extractorName);

  init();
}

vpKeyPoint::vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
    m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
    m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
    m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
    m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
    m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
    m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0),
    m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(), m_ransacParallel(false),
    m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
    m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true), m_I()
{
  initFeatureNames();
  init();
}

void vpKeyPoint::affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
{
  int h = img.rows;
  int w = img.cols;

  mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));

  cv::Mat A = cv::Mat::eye(2, 3, CV_32F);

  // if (phi != 0.0) {
  if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    phi *= M_PI / 180.;
    double s = sin(phi);
    double c = cos(phi);

    A = (cv::Mat_<float>(2, 2) << c, -s, s, c);

    cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
    cv::Mat tcorners = corners * A.t();
    cv::Mat tcorners_x, tcorners_y;
    tcorners.col(0).copyTo(tcorners_x);
    tcorners.col(1).copyTo(tcorners_y);
    std::vector<cv::Mat> channels;
    channels.push_back(tcorners_x);
    channels.push_back(tcorners_y);
    cv::merge(channels, tcorners);

    cv::Rect rect = cv::boundingRect(tcorners);
    A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);

    cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
  }
  // if (tilt != 1.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
    double s = 0.8 * sqrt(tilt * tilt - 1);
    cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
    cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
    A.row(0) = A.row(0) / tilt;
  }
  // if (tilt != 1.0 || phi != 0.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
      std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    h = img.rows;
    w = img.cols;
    cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
  }
  cv::invertAffineTransform(A, Ai);
}
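
/*
  Illustrative sketch (not part of the original source): affineSkew() is the
  building block used for affine-invariant (ASIFT-style) detection, where the
  image is warped to simulate viewpoint changes before running the detector.
  Assuming a grayscale cv::Mat input, a typical tilt/rotation sampling loop
  could look like:

  \code
  cv::Mat img; // input image, CV_8UC1
  for (int t = 1; t <= 5; t++) {
    double tilt = std::pow(2.0, 0.5 * t); // tilt series 2^(t/2)
    for (double phi = 0.; phi < 180.; phi += 72.0 / tilt) {
      cv::Mat timg = img.clone(), mask, Ai;
      affineSkew(tilt, phi, timg, mask, Ai); // warp image and mask, get the inverse map
      // detect/extract on timg restricted to mask, then map the keypoints
      // back to the original image with the inverse affine transform Ai
    }
  }
  \endcode
*/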

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color) { return buildReference(I_color, vpRect()); }

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpImagePoint &iP,
                                        unsigned int height, unsigned int width)
{
  return buildReference(I, vpRect(iP, width, height));
}

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP,
                                        unsigned int height, unsigned int width)
{
  return buildReference(I_color, vpRect(iP, width, height));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpRect &rectangle)
{
  // Reset variables used when dealing with 3D models
  // Since no 3D point list is passed, we don't need these variables
  m_trainPoints.clear();
  m_mapOfImageId.clear();
  m_mapOfImages.clear();
  m_currentImageId = 1;

  if (m_useAffineDetection) {
    std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
    std::vector<cv::Mat> listOfTrainDescriptors;

    // Detect keypoints and extract descriptors on multiple images
    detectExtractAffine(I, listOfTrainKeyPoints, listOfTrainDescriptors);

    // Flatten the different train lists
    m_trainKeyPoints.clear();
    for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
         it != listOfTrainKeyPoints.end(); ++it) {
      m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
    }

    bool first = true;
    for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
         ++it) {
      if (first) {
        first = false;
        it->copyTo(m_trainDescriptors);
      } else {
        m_trainDescriptors.push_back(*it);
      }
    }
  } else {
    detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
    extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
  }

  // Save the correspondence between the keypoint class_id and the training
  // image_id in a map. Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(m_trainKeyPoints, referenceImagePointsList);

  _reference_computed = true;

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}
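
/*
  Usage sketch (illustrative, not from the original file): building the
  reference keypoint database from a full image and from a region of interest.
  The detector/extractor/matcher names are example values:

  \code
  vpImage<unsigned char> I;
  // ... acquire or load I ...
  vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
  unsigned int nbFull = keypoint.buildReference(I);                           // whole image
  unsigned int nbRoi  = keypoint.buildReference(I, vpRect(10, 10, 200, 150)); // ROI only
  \endcode
*/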

unsigned int vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
{
  vpImageConvert::convert(I_color, m_I);
  return (buildReference(m_I, rectangle));
}

void vpKeyPoint::buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
                                std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed.
    // Store the hash of a keypoint as the key and the index of the keypoint
    // as the value.
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id);
}

void vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &trainKeyPoints,
                                std::vector<cv::Point3f> &points3f, bool append, int class_id)
{
  cv::Mat trainDescriptors;
  // Copy the input list of keypoints
  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;

  extract(I_color, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);

  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
    // Keypoints have been removed.
    // Store the hash of a keypoint as the key and the index of the keypoint
    // as the value.
    std::map<size_t, size_t> mapOfKeypointHashes;
    size_t cpt = 0;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
         ++it, cpt++) {
      mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
    }

    std::vector<cv::Point3f> trainPoints_tmp;
    for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
      if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
        trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
      }
    }

    // Copy trainPoints_tmp to points3f
    points3f = trainPoints_tmp;
  }

  buildReference(I_color, trainKeyPoints, trainDescriptors, points3f, append, class_id);
}

void vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
                                const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                bool append, int class_id)
{
  if (!append) {
    m_currentImageId = 0;
    m_mapOfImageId.clear();
    m_mapOfImages.clear();
    this->m_trainKeyPoints.clear();
    this->m_trainPoints.clear();
  }

  m_currentImageId++;

  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
  // Set class_id if != -1
  if (class_id != -1) {
    for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
      it->class_id = class_id;
    }
  }

  // Save the correspondence between the keypoint class_id and the training
  // image_id in a map. Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
       ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Append reference lists
  this->m_trainKeyPoints.insert(this->m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
  if (!append) {
    trainDescriptors.copyTo(this->m_trainDescriptors);
  } else {
    this->m_trainDescriptors.push_back(trainDescriptors);
  }
  this->m_trainPoints.insert(this->m_trainPoints.end(), points3f.begin(), points3f.end());

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(this->m_trainKeyPoints, referenceImagePointsList);
  vpConvert::convertFromOpenCV(this->m_trainPoints, m_trainVpPoints);

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  _reference_computed = true;
}

void vpKeyPoint::buildReference(const vpImage<vpRGBa> &I_color, const std::vector<cv::KeyPoint> &trainKeyPoints,
                                const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                bool append, int class_id)
{
  vpImageConvert::convert(I_color, m_I);
  buildReference(m_I, trainKeyPoints, trainDescriptors, points3f, append, class_id);
}

void vpKeyPoint::compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate.pt.x, candidate.pt.y, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point = cv::Point3f((float)point_obj[0], (float)point_obj[1], (float)point_obj[2]);
}
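
/*
  Geometry note (added commentary): the plane through the first three ROI
  points satisfies A*X + B*Y + C*Z + D = 0 in the camera frame, and the
  normalized image point (xc, yc) defines the viewing ray X = xc*Z, Y = yc*Z.
  Substituting the ray into the plane equation gives
    Z * (A*xc + B*yc + C) = -D   =>   Z = -D / (A*xc + B*yc + C),
  which is the depth used above before transforming the camera-frame point
  into the object frame with cMo^-1.
*/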

void vpKeyPoint::compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, vpPoint &point)
{
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point.setWorldCoordinates(point_obj);
}

void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<cv::KeyPoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<cv::Point3f> &points, cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpImagePoint imPt;
  cv::Point3f pt;
  cv::Mat desc;

  std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      imPt.set_ij(it2->first.pt.y, it2->first.pt.x);
      if (it1->isInside(imPt)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != NULL) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove candidate keypoint which is located on the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      } else {
        ++it2;
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}
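
/*
  Usage sketch (illustrative; the variable setup is hypothetical): the polygon
  variant above is typically fed with the visible faces of a CAD model, so that
  each 2D keypoint lying on a face receives the 3D coordinates of its
  intersection with that face:

  \code
  std::vector<cv::KeyPoint> candidates;       // detected 2D keypoints
  std::vector<vpPolygon> polygons;            // projected contours of the visible faces
  std::vector<std::vector<vpPoint> > roisPt;  // 3D corners of each face
  std::vector<cv::Point3f> points;            // output: 3D coordinates of the kept keypoints
  vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, candidates, polygons, roisPt, points);
  \endcode
*/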

void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
                                              std::vector<vpImagePoint> &candidates,
                                              const std::vector<vpPolygon> &polygons,
                                              const std::vector<std::vector<vpPoint> > &roisPt,
                                              std::vector<vpPoint> &points, cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  vpPoint pt;
  cv::Mat desc;

  std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
    pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
  }

  size_t cpt1 = 0;
  std::vector<vpPolygon> polygons_tmp = polygons;
  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
    std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();

    while (it2 != pairOfCandidatesToCheck.end()) {
      if (it1->isInside(it2->first)) {
        candidates.push_back(it2->first);
        vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if (descriptors != NULL) {
          desc.push_back(descriptors->row((int)it2->second));
        }

        // Remove candidate keypoint which is located on the current polygon
        it2 = pairOfCandidatesToCheck.erase(it2);
      } else {
        ++it2;
      }
    }
  }
}

void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<cv::KeyPoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
    cv::Mat *descriptors)
{
  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->pt.y, it1->pt.x)) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on cylinders
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->pt.x, it1->pt.y, xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(cv::Point3f((float)pt.get_oX(), (float)pt.get_oY(), (float)pt.get_oZ()));

            if (descriptors != NULL) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}
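
/*
  Note (added commentary): for each candidate keypoint lying inside a cylinder
  bounding box, the normalized image coordinates (xm, ym) define a viewing ray,
  and vpCylinder::computeZ() returns the depth of the intersection between that
  ray and the cylinder. The 3D point (and its descriptor) is only kept when
  this depth is a valid, strictly positive value.
*/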

void vpKeyPoint::compute3DForPointsOnCylinders(
    const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<vpImagePoint> &candidates,
    const std::vector<vpCylinder> &cylinders,
    const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
    cv::Mat *descriptors)
{
  std::vector<vpImagePoint> candidatesToCheck = candidates;
  candidates.clear();
  points.clear();
  cv::Mat desc;

  // Keep only keypoints on cylinders
  size_t cpt_keypoint = 0;
  for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
       ++it1, cpt_keypoint++) {
    size_t cpt_cylinder = 0;

    // Iterate through the list of vpCylinders
    for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
         it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
      // Iterate through the list of the bounding boxes of the current vpCylinder
      for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
        if (vpPolygon::isInside(*it3, it1->get_i(), it1->get_j())) {
          candidates.push_back(*it1);

          // Calculate the 3D coordinates for each keypoint located on cylinders
          double xm = 0.0, ym = 0.0;
          vpPixelMeterConversion::convertPoint(cam, it1->get_u(), it1->get_v(), xm, ym);
          double Z = cylinders[cpt_cylinder].computeZ(xm, ym);

          if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
            vpColVector point_cam(4);
            point_cam[0] = xm * Z;
            point_cam[1] = ym * Z;
            point_cam[2] = Z;
            point_cam[3] = 1;
            vpColVector point_obj(4);
            point_obj = cMo.inverse() * point_cam;
            vpPoint pt;
            pt.setWorldCoordinates(point_obj);
            points.push_back(pt);

            if (descriptors != NULL) {
              desc.push_back(descriptors->row((int)cpt_keypoint));
            }

            break;
          }
        }
      }
    }
  }

  if (descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

bool vpKeyPoint::computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
                             const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
                             double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    std::cerr << "Not enough points to compute the pose (at least 4 points are needed)." << std::endl;

    return false;
  }

  cv::Mat cameraMatrix =
      (cv::Mat_<double>(3, 3) << cam.get_px(), 0, cam.get_u0(), 0, cam.get_py(), cam.get_v0(), 0, 0, 1);
  cv::Mat rvec, tvec;

  // Bug with OpenCV < 2.4.0 when zero distortion is provided by an empty
  // array (http://code.opencv.org/issues/1700 and
  // http://code.opencv.org/issues/1718):
  // what(): Distortion coefficients must be 1x4, 4x1, 1x5, 5x1, 1x8 or 8x1
  // floating-point vector in function cvProjectPoints2.
  // Fixed in OpenCV 2.4.0 (r7558).
  // cv::Mat distCoeffs;
  cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);

  try {
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
    // OpenCV 3.0.0 (2014/12/12)
    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError,
                       0.99, // confidence = 0.99 (default): the probability
                             // that the algorithm produces a useful result
                       inlierIndex, cv::SOLVEPNP_ITERATIVE);
    // SOLVEPNP_ITERATIVE (default): iterative method based on a
    // Levenberg-Marquardt optimization. In this case the function finds a pose
    // that minimizes the reprojection error, that is the sum of squared
    // distances between the observed projections imagePoints and the
    // objectPoints projected using projectPoints().
    // SOLVEPNP_P3P: method based on the paper of X.S. Gao, X.-R. Hou, J. Tang,
    // H.-F. Chang "Complete Solution Classification for the
    // Perspective-Three-Point Problem". In this case the function requires
    // exactly four object and image points.
    // SOLVEPNP_EPNP: method introduced by F. Moreno-Noguer, V. Lepetit and
    // P. Fua in the paper "EPnP: Efficient Perspective-n-Point Camera Pose
    // Estimation".
    // SOLVEPNP_DLS: method based on the paper of Joel A. Hesch and Stergios I.
    // Roumeliotis "A Direct Least-Squares (DLS) Method for PnP".
    // SOLVEPNP_UPNP: method based on the paper of A. Penate-Sanchez,
    // J. Andrade-Cetto, F. Moreno-Noguer "Exhaustive Linearization for Robust
    // Camera Pose and Focal Length Estimation". In this case the function also
    // estimates the parameters f_x and f_y assuming that both have the same
    // value. Then the cameraMatrix is updated with the estimated focal length.
#else
    int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
    if (m_useConsensusPercentage) {
      nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
    }

    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
#endif
  } catch (cv::Exception &e) {
    std::cerr << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }
  vpTranslationVector translationVec(tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2));
  vpThetaUVector thetaUVector(rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2));
  cMo = vpHomogeneousMatrix(translationVec, thetaUVector);

  if (func != NULL) {
    // Check the final pose returned by solvePnPRansac to discard
    // solutions which do not respect the pose criterion.
    if (!func(cMo)) {
      elapsedTime = (vpTime::measureTimeMs() - t);
      return false;
    }
  }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return true;
}
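
/*
  Usage sketch (illustrative; the correspondences and camera parameters are
  placeholders): estimating the pose from 2D/3D correspondences with the
  OpenCV-based RANSAC path:

  \code
  std::vector<cv::Point2f> imagePoints;  // matched 2D points
  std::vector<cv::Point3f> objectPoints; // corresponding 3D model points
  vpCameraParameters cam(600.0, 600.0, 320.0, 240.0); // px, py, u0, v0
  vpHomogeneousMatrix cMo;
  std::vector<int> inlierIndex;
  double elapsedTime = 0.0;
  vpKeyPoint keypoint;
  if (keypoint.computePose(imagePoints, objectPoints, cam, cMo, inlierIndex, elapsedTime)) {
    std::cout << "Estimated pose:\n" << cMo << std::endl;
  }
  \endcode
*/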

bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, double &elapsedTime,
                             bool (*func)(const vpHomogeneousMatrix &))
{
  std::vector<unsigned int> inlierIndex;
  return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
}

bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
                             double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  if (objectVpPoints.size() < 4) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    // std::cerr << "Not enough points to compute the pose (at least 4 points
    // are needed)." << std::endl;

    return false;
  }

  vpPose pose;

  for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
    pose.addPoint(*it);
  }

  unsigned int nbInlierToReachConsensus = (unsigned int)m_nbRansacMinInlierCount;
  if (m_useConsensusPercentage) {
    nbInlierToReachConsensus =
        (unsigned int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
  }

  pose.setRansacFilterFlag(m_ransacFilterFlag);
  pose.setUseParallelRansac(m_ransacParallel);
  pose.setNbParallelRansacThreads(m_ransacParallelNbThreads);
  pose.setRansacNbInliersToReachConsensus(nbInlierToReachConsensus);
  pose.setRansacThreshold(m_ransacThreshold);
  pose.setRansacMaxTrials(m_nbRansacIterations);

  bool isRansacPoseEstimationOk = false;
  try {
    pose.setCovarianceComputation(m_computeCovariance);
    isRansacPoseEstimationOk = pose.computePose(vpPose::RANSAC, cMo, func);
    inliers = pose.getRansacInliers();
    inlierIndex = pose.getRansacInlierIndex();

    if (m_computeCovariance) {
      m_covarianceMatrix = pose.getCovarianceMatrix();
    }
  } catch (const vpException &e) {
    std::cerr << "e=" << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }

  // if (func != NULL && isRansacPoseEstimationOk) {
  //   // Check the final pose returned by the RANSAC VVS pose estimation, as
  //   // in some rare cases we can converge toward a final cMo that does not
  //   // respect the pose criterion even if the 4 minimal points picked do
  //   // respect it.
  //   if (!func(&cMo)) {
  //     elapsedTime = (vpTime::measureTimeMs() - t);
  //     return false;
  //   }
  // }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return isRansacPoseEstimationOk;
}
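
/*
  Note (added commentary): unlike the overload above that relies on
  cv::solvePnPRansac(), this overload delegates to ViSP's own vpPose RANSAC
  estimation, so the RANSAC members configured through the setters (threshold,
  maximum number of iterations, consensus percentage, filter flag, parallel
  execution) are the ones that take effect here.
*/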

double vpKeyPoint::computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
                                              const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est)
{
  if (matchKeyPoints.size() == 0) {
    // return std::numeric_limits<double>::max(); // creates an error under
    // Windows. To fix it we have to add #undef max
    return DBL_MAX;
  }

  std::vector<double> errors(matchKeyPoints.size());
  size_t cpt = 0;
  vpPoint pt;
  for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
       it != matchKeyPoints.end(); ++it, cpt++) {
    pt.set_oX(it->second.x);
    pt.set_oY(it->second.y);
    pt.set_oZ(it->second.z);
    pt.project(cMo_est);
    double u = 0.0, v = 0.0;
    vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), u, v);
    errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
  }

  return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
}
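
/*
  Formula note (added commentary): the value returned above is the mean
  reprojection error in pixels,
    error = (1/N) * sum_i sqrt((u_i - u'_i)^2 + (v_i - v'_i)^2),
  where (u_i, v_i) is the detected keypoint location and (u'_i, v'_i) is the
  projection of the matched 3D point through the estimated pose cMo_est.
*/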

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &IRef, vpImage<unsigned char> &ICurrent,
                                     vpImage<unsigned char> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = ((std::max))(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<unsigned char>(height, width);
}

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &IRef, vpImage<vpRGBa> &ICurrent,
                                     vpImage<vpRGBa> &IMatching)
{
  // Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = ((std::max))(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<vpRGBa>(height, width);
}

void vpKeyPoint::createImageMatching(vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
{
  // Number of images in the training database + the current image in which we
  // want to detect the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  } else {
    // Multiple training images, display them as a mosaic image
    // (unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Deal with a non-square mosaic grid and the total number of images
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<unsigned char>(maxH * nbHeight, maxW * nbWidth);
  }
}
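
/*
  Sizing example (added commentary): with 5 training images, nbImg = 5 + 1 = 6
  and nbImgSqrt = round(sqrt(6)) = 2, so the grid starts at 2x2; since
  2 * 2 < 6, one column is added, giving a 3x2 mosaic whose cells all have the
  size maxW x maxH of the largest image.
*/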

void vpKeyPoint::createImageMatching(vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
{
  // Number of images in the training database + the current image in which we
  // want to detect the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  } else {
    // Multiple training images, display them as a mosaic image
    // (unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Deal with a non-square mosaic grid and the total number of images
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<vpRGBa>(maxH * nbHeight, maxW * nbWidth);
  }
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
{
  double elapsedTime;
  detect(I_color, keyPoints, elapsedTime, rectangle);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask)
{
  double elapsedTime;
  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I, matImg, false);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
        rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  } else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle)
{
  cv::Mat matImg;
  vpImageConvert::convert(I_color, matImg);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
        rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  } else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const cv::Mat &mask)
{
  double t = vpTime::measureTimeMs();
  keyPoints.clear();

  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
       it != m_detectors.end(); ++it) {
    std::vector<cv::KeyPoint> kp;
    it->second->detect(matImg, kp, mask);
    keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
  }

  elapsedTime = vpTime::measureTimeMs() - t;
}
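
/*
  Usage sketch (illustrative): detecting keypoints on a whole image and inside
  a rectangular region of interest with the overloads above:

  \code
  vpImage<unsigned char> I;
  // ... acquire or load I ...
  vpKeyPoint keypoint;
  std::vector<cv::KeyPoint> keyPoints;
  keypoint.detect(I, keyPoints);                          // whole image
  keypoint.detect(I, keyPoints, vpRect(20, 20, 100, 80)); // restricted to a ROI
  \endcode
*/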

void vpKeyPoint::display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, unsigned int size)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<unsigned char> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::display(const vpImage<vpRGBa> &ICurrent, unsigned int size, const vpColor &color)
{
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<vpRGBa> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
                                 const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
                                 unsigned int lineThickness)
{
  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
    // No training images so return
    std::cerr << "There is no training image loaded!" << std::endl;
    return;
  }

  // Number of images in the training database + the current image in which we
  // want to detect the object
  int nbImg = (int)(m_mapOfImages.size() + 1);

  if (nbImg == 2) {
    // Only one training image, so we display the matching result side-by-side
    displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
  } else {
    // Multiple training images, display them as a mosaic image
    int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); // (int) std::floor(std::sqrt((double) nbImg) + 0.5);
    int nbWidth = nbImgSqrt;
    int nbHeight = nbImgSqrt;

    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    std::map<int, int> mapOfImageIdIndex;
    int cpt = 0;
    unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it, cpt++) {
      mapOfImageIdIndex[it->first] = cpt;

      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    // Indexes of the current image in the grid, computed to put the image
    // preferably at the center of the mosaic grid
    int medianI = nbHeight / 2;
    int medianJ = nbWidth / 2;
    int medianIndex = medianI * nbWidth + medianJ;
    for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
      vpImagePoint topLeftCorner;
      int current_class_id_index = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
      } else {
        // Shift by one the index of the training images that come after the
        // current image
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
      }

      int indexI = current_class_id_index / nbWidth;
      int indexJ = current_class_id_index - (indexI * nbWidth);
      topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);

      // Display cross for keypoints in the learning database
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }

    vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
    for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
      // Display cross for keypoints detected in the current image
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }
    for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
      // Display green circle for RANSAC inliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
                               (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
    }
    for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
      // Display red circle for RANSAC outliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
                               (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
    }

    for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
      int current_class_id = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
      } else {
        // Shift by one the index of the training images that come after the
        // current image
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
      }

      int indexI = current_class_id / nbWidth;
      int indexJ = current_class_id - (indexI * nbWidth);

      vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
                       (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
      vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
                         (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);

      // Draw line for matching keypoints detected in the current image and
      // those detected in the training images
      vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
    }
  }
}
1692 
1705  const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
1706  unsigned int lineThickness)
1707 {
1708  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1709  // No training images so return
1710  std::cerr << "There is no training image loaded !" << std::endl;
1711  return;
1712  }
1713 
1714  // Nb images in the training database + the current image we want to detect
1715  // the object
1716  int nbImg = (int)(m_mapOfImages.size() + 1);
1717 
1718  if (nbImg == 2) {
1719  // Only one training image, so we display the matching result side-by-side
1720  displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
1721  } else {
1722  // Multiple training images, display them as a mosaic image
1723  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
1724  int nbWidth = nbImgSqrt;
1725  int nbHeight = nbImgSqrt;
1726 
1727  if (nbImgSqrt * nbImgSqrt < nbImg) {
1728  nbWidth++;
1729  }
1730 
1731  std::map<int, int> mapOfImageIdIndex;
1732  int cpt = 0;
1733  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
1734  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
1735  ++it, cpt++) {
1736  mapOfImageIdIndex[it->first] = cpt;
1737 
1738  if (maxW < it->second.getWidth()) {
1739  maxW = it->second.getWidth();
1740  }
1741 
1742  if (maxH < it->second.getHeight()) {
1743  maxH = it->second.getHeight();
1744  }
1745  }
1746 
1747  // Indexes of the current image in the grid computed to put preferably the
1748  // image in the center of the mosaic grid
1749  int medianI = nbHeight / 2;
1750  int medianJ = nbWidth / 2;
1751  int medianIndex = medianI * nbWidth + medianJ;
1752  for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1753  vpImagePoint topLeftCorner;
1754  int current_class_id_index = 0;
1755  if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1756  current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1757  } else {
1758  // Shift of one unity the index of the training images which are after
1759  // the current image
1760  current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1761  }
1762 
1763  int indexI = current_class_id_index / nbWidth;
1764  int indexJ = current_class_id_index - (indexI * nbWidth);
1765  topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);
1766 
1767  // Display cross for keypoints in the learning database
1768  vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
1769  (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
1770  }
1771 
1772  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
1773  for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1774  // Display cross for keypoints detected in the current image
1775  vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
1776  (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
1777  }
1778  for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
1779  // Display green circle for RANSAC inliers
1780  vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
1781  (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
1782  }
1783  for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1784  // Display red circle for RANSAC outliers
1785  vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
1786  (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
1787  }
1788 
1789  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1790  int current_class_id = 0;
1791  if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
1792  current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
1793  } else {
1794  // Shift of one unity the index of the training images which are after
1795  // the current image
1796  current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
1797  }
1798 
1799  int indexI = current_class_id / nbWidth;
1800  int indexJ = current_class_id - (indexI * nbWidth);
1801 
1802  vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
1803  (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
1804  vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
1805  (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);
1806 
1807  // Draw line for matching keypoints detected in the current image and
1808  // those detected in the training images
1809  vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
1810  }
1811  }
1812 }
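
// Usage sketch (illustrative, not from the original file): the display above
// assumes the mosaic was created and filled beforehand. With a vpKeyPoint
// instance "keypoint" whose reference database is already built, and a
// display attached to IMatching:
//
//   vpImage<unsigned char> IMatching;
//   keypoint.createImageMatching(ICurrent, IMatching); // allocate the mosaic
//   keypoint.matchPoint(ICurrent);
//   keypoint.insertImageMatching(ICurrent, IMatching); // paste the images
//   keypoint.displayMatching(ICurrent, IMatching);     // crosses + match lines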
1813 
1824 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1825  std::vector<cv::Point3f> *trainPoints)
1826 {
1827  double elapsedTime;
1828  extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1829 }
1830 
1841 void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1842  std::vector<cv::Point3f> *trainPoints)
1843 {
1844  double elapsedTime;
1845  extract(I_color, keyPoints, descriptors, elapsedTime, trainPoints);
1846 }
1847 
1858 void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1859  std::vector<cv::Point3f> *trainPoints)
1860 {
1861  double elapsedTime;
1862  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1863 }
1864 
1875 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1876  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1877 {
1878  cv::Mat matImg;
1879  vpImageConvert::convert(I, matImg, false);
1880  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1881 }
1882 
1893 void vpKeyPoint::extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1894  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1895 {
1896  cv::Mat matImg;
1897  vpImageConvert::convert(I_color, matImg);
1898  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1899 }
1900 
1911 void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1912  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1913 {
1914  double t = vpTime::measureTimeMs();
1915  bool first = true;
1916 
1917  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1918  itd != m_extractors.end(); ++itd) {
1919  if (first) {
1920  first = false;
1921  // Check if we have 3D object points information
1922  if (trainPoints != NULL && !trainPoints->empty()) {
1923  // Copy the input list of keypoints; keypoints for which a descriptor
1924  // cannot be computed are removed by the compute() function
1925  std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1926 
1927  // Extract descriptors for the given list of keypoints
1928  itd->second->compute(matImg, keyPoints, descriptors);
1929 
1930  if (keyPoints.size() != keyPoints_tmp.size()) {
1931  // Keypoints have been removed
1932  // Store the hash of a keypoint as the key and the index of the
1933  // keypoint as the value
1934  std::map<size_t, size_t> mapOfKeypointHashes;
1935  size_t cpt = 0;
1936  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1937  ++it, cpt++) {
1938  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1939  }
1940 
1941  std::vector<cv::Point3f> trainPoints_tmp;
1942  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1943  if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1944  trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1945  }
1946  }
1947 
1948  // Copy trainPoints_tmp back to trainPoints
1949  *trainPoints = trainPoints_tmp;
1950  }
1951  } else {
1952  // Extract descriptors for the given list of keypoints
1953  itd->second->compute(matImg, keyPoints, descriptors);
1954  }
1955  } else {
1956  // Copy the input list of keypoints; keypoints for which a descriptor
1957  // cannot be computed are removed by the compute() function
1958  std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1959 
1960  cv::Mat desc;
1961  // Extract descriptors for the given list of keypoints
1962  itd->second->compute(matImg, keyPoints, desc);
1963 
1964  if (keyPoints.size() != keyPoints_tmp.size()) {
1965  // Keypoints have been removed
1966  // Store the hash of a keypoint as the key and the index of the
1967  // keypoint as the value
1968  std::map<size_t, size_t> mapOfKeypointHashes;
1969  size_t cpt = 0;
1970  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
1971  ++it, cpt++) {
1972  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1973  }
1974 
1975  std::vector<cv::Point3f> trainPoints_tmp;
1976  cv::Mat descriptors_tmp;
1977  for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1978  if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1979  if (trainPoints != NULL && !trainPoints->empty()) {
1980  trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1981  }
1982 
1983  if (!descriptors.empty()) {
1984  descriptors_tmp.push_back(descriptors.row((int)mapOfKeypointHashes[myKeypointHash(*it)]));
1985  }
1986  }
1987  }
1988 
1989  if (trainPoints != NULL) {
1990  // Copy trainPoints_tmp back to trainPoints
1991  *trainPoints = trainPoints_tmp;
1992  }
1993  // Copy descriptors_tmp to descriptors
1994  descriptors_tmp.copyTo(descriptors);
1995  }
1996 
1997  // Merge descriptors horizontally
1998  if (descriptors.empty()) {
1999  desc.copyTo(descriptors);
2000  } else {
2001  cv::hconcat(descriptors, desc, descriptors);
2002  }
2003  }
2004  }
2005 
2006  if (keyPoints.size() != (size_t)descriptors.rows) {
2007  std::cerr << "keyPoints.size() != (size_t) descriptors.rows" << std::endl;
2008  }
2009  elapsedTime = vpTime::measureTimeMs() - t;
2010 }
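
// Note (illustrative): the hash-based bookkeeping above keeps keyPoints,
// *trainPoints and the descriptor rows index-aligned when an extractor
// silently drops keypoints it cannot describe. A minimal sketch of the
// resulting invariant, assuming a grayscale image I and a configured
// vpKeyPoint instance "keypoint":
//
//   std::vector<cv::KeyPoint> kpts;
//   cv::Mat desc;
//   keypoint.detect(I, kpts);
//   keypoint.extract(I, kpts, desc);
//   // one descriptor row per remaining keypoint
//   assert(kpts.size() == (size_t)desc.rows);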
2011 
2015 void vpKeyPoint::filterMatches()
2016 {
2017  std::vector<cv::KeyPoint> queryKpts;
2018  std::vector<cv::Point3f> trainPts;
2019  std::vector<cv::DMatch> m;
2020 
2021  if (m_useKnn) {
2022  // double max_dist = 0;
2023  // double min_dist = std::numeric_limits<double>::max(); // creates an
2024  // error under Windows; to fix it we would have to add #undef max
2025  double min_dist = DBL_MAX;
2026  double mean = 0.0;
2027  std::vector<double> distance_vec(m_knnMatches.size());
2028 
2029  if (m_filterType == stdAndRatioDistanceThreshold) {
2030  for (size_t i = 0; i < m_knnMatches.size(); i++) {
2031  double dist = m_knnMatches[i][0].distance;
2032  mean += dist;
2033  distance_vec[i] = dist;
2034 
2035  if (dist < min_dist) {
2036  min_dist = dist;
2037  }
2038  // if (dist > max_dist) {
2039  // max_dist = dist;
2040  //}
2041  }
2042  mean /= m_queryDescriptors.rows;
2043  }
2044 
2045  double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2046  double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
2047  double threshold = min_dist + stdev;
2048 
2049  for (size_t i = 0; i < m_knnMatches.size(); i++) {
2050  if (m_knnMatches[i].size() >= 2) {
2051  // Calculate ratio of the descriptor distance between the two nearest
2052  // neighbors of the keypoint
2053  float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
2054  // float ratio = std::sqrt((vecMatches[i][0].distance *
2055  // vecMatches[i][0].distance)
2056  // / (vecMatches[i][1].distance *
2057  // vecMatches[i][1].distance));
2058  double dist = m_knnMatches[i][0].distance;
2059 
2060  if (ratio < m_matchingRatioThreshold || (m_filterType == stdAndRatioDistanceThreshold && dist < threshold)) {
2061  m.push_back(cv::DMatch((int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
2062 
2063  if (!m_trainPoints.empty()) {
2064  trainPts.push_back(m_trainPoints[(size_t)m_knnMatches[i][0].trainIdx]);
2065  }
2066  queryKpts.push_back(m_queryKeyPoints[(size_t)m_knnMatches[i][0].queryIdx]);
2067  }
2068  }
2069  }
2070  } else {
2071  // double max_dist = 0;
2072  // creates an error under Windows; to fix it we would have to add #undef max
2073  // double min_dist = std::numeric_limits<double>::max();
2074  double min_dist = DBL_MAX;
2075  double mean = 0.0;
2076  std::vector<double> distance_vec(m_matches.size());
2077  for (size_t i = 0; i < m_matches.size(); i++) {
2078  double dist = m_matches[i].distance;
2079  mean += dist;
2080  distance_vec[i] = dist;
2081 
2082  if (dist < min_dist) {
2083  min_dist = dist;
2084  }
2085  // if (dist > max_dist) {
2086  // max_dist = dist;
2087  // }
2088  }
2089  mean /= m_queryDescriptors.rows;
2090 
2091  double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
2092  double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
2093 
2094  // Define a threshold so that we keep all keypoints whose descriptor
2095  // distance falls below a factor of the minimum descriptor distance (over
2096  // all the query keypoints) or below the minimum descriptor distance +
2097  // the standard deviation (computed over all the query descriptor
2098  // distances)
2099  double threshold =
2100  m_filterType == constantFactorDistanceThreshold ? m_matchingFactorThreshold * min_dist : min_dist + stdev;
2101 
2102  for (size_t i = 0; i < m_matches.size(); i++) {
2103  if (m_matches[i].distance <= threshold) {
2104  m.push_back(cv::DMatch((int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
2105 
2106  if (!m_trainPoints.empty()) {
2107  trainPts.push_back(m_trainPoints[(size_t)m_matches[i].trainIdx]);
2108  }
2109  queryKpts.push_back(m_queryKeyPoints[(size_t)m_matches[i].queryIdx]);
2110  }
2111  }
2112  }
2113 
2114  if (m_useSingleMatchFilter) {
2115  // Eliminate matches where multiple query keypoints are matched to the
2116  // same train keypoint
2117  std::vector<cv::DMatch> mTmp;
2118  std::vector<cv::Point3f> trainPtsTmp;
2119  std::vector<cv::KeyPoint> queryKptsTmp;
2120 
2121  std::map<int, int> mapOfTrainIdx;
2122  // Count the number of query points matched to the same train point
2123  for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2124  mapOfTrainIdx[it->trainIdx]++;
2125  }
2126 
2127  // Keep matches with only one correspondence
2128  for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
2129  if (mapOfTrainIdx[it->trainIdx] == 1) {
2130  mTmp.push_back(cv::DMatch((int)queryKptsTmp.size(), it->trainIdx, it->distance));
2131 
2132  if (!m_trainPoints.empty()) {
2133  trainPtsTmp.push_back(m_trainPoints[(size_t)it->trainIdx]);
2134  }
2135  queryKptsTmp.push_back(queryKpts[(size_t)it->queryIdx]);
2136  }
2137  }
2138 
2139  m_filteredMatches = mTmp;
2140  m_objectFilteredPoints = trainPtsTmp;
2141  m_queryFilteredKeyPoints = queryKptsTmp;
2142  } else {
2143  m_filteredMatches = m;
2144  m_objectFilteredPoints = trainPts;
2145  m_queryFilteredKeyPoints = queryKpts;
2146  }
2147 }
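
// Worked example (illustrative, with assumed distances): for descriptor
// distances {10, 12, 30}, min_dist = 10, mean ~= 17.3 and stdev ~= 9.0, so
// stdDistanceThreshold keeps matches with distance <= 10 + 9.0 = 19.0 (the
// 10 and 12 matches), while constantFactorDistanceThreshold with the default
// factor 2.0 keeps distances <= 2.0 * 10 = 20.0. For the knn-based filters, a
// match is kept when the ratio d1/d2 between its two nearest neighbors is
// below the ratio threshold (0.85 by default), i.e. when the best match is
// clearly unambiguous.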
2148 
2156 void vpKeyPoint::getObjectPoints(std::vector<cv::Point3f> &objectPoints) const
2157 {
2158  objectPoints = m_objectFilteredPoints;
2159 }
2160 
2168 void vpKeyPoint::getObjectPoints(std::vector<vpPoint> &objectPoints) const
2169 {
2170  vpConvert::convertFromOpenCV(m_objectFilteredPoints, objectPoints);
2171 }
2172 
2179 void vpKeyPoint::getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const { keyPoints = m_queryFilteredKeyPoints; }
2180 
2187 void vpKeyPoint::getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints) const { keyPoints = currentImagePointsList; }
2188 
2194 void vpKeyPoint::getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const { keyPoints = m_trainKeyPoints; }
2195 
2201 void vpKeyPoint::getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const { keyPoints = referenceImagePointsList; }
2202 
2209 void vpKeyPoint::getTrainPoints(std::vector<cv::Point3f> &points) const { points = m_trainPoints; }
2210 
2217 void vpKeyPoint::getTrainPoints(std::vector<vpPoint> &points) const { points = m_trainVpPoints; }
2218 
2223 void vpKeyPoint::init()
2224 {
2225 // Require 2.4.0 <= opencv < 3.0.0
2226 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
2227  // The following line must be called in order to use SIFT or SURF
2228  if (!cv::initModule_nonfree()) {
2229  std::cerr << "Cannot init the nonfree module; SIFT and SURF cannot be used." << std::endl;
2230  }
2231 #endif
2232 
2233  // Use k-nearest neighbors (knn) to retrieve the two best matches for a
2234  // keypoint, so this is only needed for the ratio-based filtering methods
2235  if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
2236  m_useKnn = true;
2237  }
2238 
2239  initDetectors(m_detectorNames);
2240  initExtractors(m_extractorNames);
2241  initMatcher(m_matcherName);
2242 }
2243 
2249 void vpKeyPoint::initDetector(const std::string &detectorName)
2250 {
2251 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2252  m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
2253 
2254  if (m_detectors[detectorName] == NULL) {
2255  std::stringstream ss_msg;
2256  ss_msg << "Failed to initialize the detector: " << detectorName
2257  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2258  throw vpException(vpException::fatalError, ss_msg.str());
2259  }
2260 #else
2261  std::string detectorNameTmp = detectorName;
2262  std::string pyramid = "Pyramid";
2263  std::size_t pos = detectorName.find(pyramid);
2264  bool usePyramid = false;
2265  if (pos != std::string::npos) {
2266  detectorNameTmp = detectorName.substr(pos + pyramid.size());
2267  usePyramid = true;
2268  }
2269 
2270  if (detectorNameTmp == "SIFT") {
2271 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2272  cv::Ptr<cv::FeatureDetector> siftDetector = cv::xfeatures2d::SIFT::create();
2273  if (!usePyramid) {
2274  m_detectors[detectorNameTmp] = siftDetector;
2275  } else {
2276  std::cerr << "You should not use SIFT with Pyramid feature detection!" << std::endl;
2277  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
2278  }
2279 #else
2280  std::stringstream ss_msg;
2281  ss_msg << "Failed to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2282  << " was not built with the xFeatures2d module.";
2283  throw vpException(vpException::fatalError, ss_msg.str());
2284 #endif
2285  } else if (detectorNameTmp == "SURF") {
2286 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2287  cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
2288  if (!usePyramid) {
2289  m_detectors[detectorNameTmp] = surfDetector;
2290  } else {
2291  std::cerr << "You should not use SURF with Pyramid feature detection!" << std::endl;
2292  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
2293  }
2294 #else
2295  std::stringstream ss_msg;
2296  ss_msg << "Failed to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2297  << " was not built with the xFeatures2d module.";
2298  throw vpException(vpException::fatalError, ss_msg.str());
2299 #endif
2300  } else if (detectorNameTmp == "FAST") {
2301  cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
2302  if (!usePyramid) {
2303  m_detectors[detectorNameTmp] = fastDetector;
2304  } else {
2305  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
2306  }
2307  } else if (detectorNameTmp == "MSER") {
2308  cv::Ptr<cv::FeatureDetector> mserDetector = cv::MSER::create();
2309  if (!usePyramid) {
2310  m_detectors[detectorNameTmp] = mserDetector;
2311  } else {
2312  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(mserDetector);
2313  }
2314  } else if (detectorNameTmp == "ORB") {
2315  cv::Ptr<cv::FeatureDetector> orbDetector = cv::ORB::create();
2316  if (!usePyramid) {
2317  m_detectors[detectorNameTmp] = orbDetector;
2318  } else {
2319  std::cerr << "You should not use ORB with Pyramid feature detection!" << std::endl;
2320  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
2321  }
2322  } else if (detectorNameTmp == "BRISK") {
2323  cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
2324  if (!usePyramid) {
2325  m_detectors[detectorNameTmp] = briskDetector;
2326  } else {
2327  std::cerr << "You should not use BRISK with Pyramid feature detection!" << std::endl;
2328  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
2329  }
2330  } else if (detectorNameTmp == "KAZE") {
2331  cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
2332  if (!usePyramid) {
2333  m_detectors[detectorNameTmp] = kazeDetector;
2334  } else {
2335  std::cerr << "You should not use KAZE with Pyramid feature detection!" << std::endl;
2336  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
2337  }
2338  } else if (detectorNameTmp == "AKAZE") {
2339  cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
2340  if (!usePyramid) {
2341  m_detectors[detectorNameTmp] = akazeDetector;
2342  } else {
2343  std::cerr << "You should not use AKAZE with Pyramid feature detection!" << std::endl;
2344  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
2345  }
2346  } else if (detectorNameTmp == "GFTT") {
2347  cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
2348  if (!usePyramid) {
2349  m_detectors[detectorNameTmp] = gfttDetector;
2350  } else {
2351  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
2352  }
2353  } else if (detectorNameTmp == "SimpleBlob") {
2354  cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
2355  if (!usePyramid) {
2356  m_detectors[detectorNameTmp] = simpleBlobDetector;
2357  } else {
2358  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
2359  }
2360  } else if (detectorNameTmp == "STAR") {
2361 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2362  cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
2363  if (!usePyramid) {
2364  m_detectors[detectorNameTmp] = starDetector;
2365  } else {
2366  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
2367  }
2368 #else
2369  std::stringstream ss_msg;
2370  ss_msg << "Failed to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2371  << " was not built with the xFeatures2d module.";
2372  throw vpException(vpException::fatalError, ss_msg.str());
2373 #endif
2374  } else if (detectorNameTmp == "AGAST") {
2375  cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
2376  if (!usePyramid) {
2377  m_detectors[detectorNameTmp] = agastDetector;
2378  } else {
2379  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
2380  }
2381  } else if (detectorNameTmp == "MSD") {
2382 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
2383 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2384  cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
2385  if (!usePyramid) {
2386  m_detectors[detectorNameTmp] = msdDetector;
2387  } else {
2388  std::cerr << "You should not use MSD with Pyramid feature detection!" << std::endl;
2389  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
2390  }
2391 #else
2392  std::stringstream ss_msg;
2393  ss_msg << "Failed to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2394  << " was not built with the xFeatures2d module.";
2395  throw vpException(vpException::fatalError, ss_msg.str());
2396 #endif
2397 #else
2398  std::stringstream ss_msg;
2399  ss_msg << "Feature " << detectorName << " is not available in OpenCV version: " << std::hex
2400  << VISP_HAVE_OPENCV_VERSION << " (requires OpenCV >= 3.1).";
  throw vpException(vpException::fatalError, ss_msg.str());
2401 #endif
2402  } else {
2403  std::cerr << "The detector: " << detectorNameTmp << " is not available." << std::endl;
2404  }
2405 
2406  bool detectorInitialized = false;
2407  if (!usePyramid) {
2408  //if not null and to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
2409  detectorInitialized = !m_detectors[detectorNameTmp].empty();
2410  } else {
2411  //if not null and to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
2412  detectorInitialized = !m_detectors[detectorName].empty();
2413  }
2414 
2415  if (!detectorInitialized) {
2416  std::stringstream ss_msg;
2417  ss_msg << "Fail to initialize the detector: " << detectorNameTmp
2418  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2419  throw vpException(vpException::fatalError, ss_msg.str());
2420  }
2421 
2422 #endif
2423 }
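
// Usage sketch (illustrative): the "Pyramid" prefix handled above is stripped
// to create the underlying detector, which is then wrapped in a pyramid
// adapter so that detection runs on several downscaled levels of the image:
//
//   keypoint.setDetector("FAST");        // plain FAST detector
//   keypoint.setDetector("PyramidFAST"); // FAST run on an image pyramid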
2424 
2431 void vpKeyPoint::initDetectors(const std::vector<std::string> &detectorNames)
2432 {
2433  for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
2434  initDetector(*it);
2435  }
2436 }
2437 
2443 void vpKeyPoint::initExtractor(const std::string &extractorName)
2444 {
2445 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2446  m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
2447 #else
2448  if (extractorName == "SIFT") {
2449 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2450  m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
2451 #else
2452  std::stringstream ss_msg;
2453  ss_msg << "Failed to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2454  << " was not built with the xFeatures2d module.";
2455  throw vpException(vpException::fatalError, ss_msg.str());
2456 #endif
2457  } else if (extractorName == "SURF") {
2458 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2459  // Use extended set of SURF descriptors (128 instead of 64)
2460  m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3, true);
2461 #else
2462  std::stringstream ss_msg;
2463  ss_msg << "Failed to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
2464  << " was not built with the xFeatures2d module.";
2465  throw vpException(vpException::fatalError, ss_msg.str());
2466 #endif
2467  } else if (extractorName == "ORB") {
2468  m_extractors[extractorName] = cv::ORB::create();
2469  } else if (extractorName == "BRISK") {
2470  m_extractors[extractorName] = cv::BRISK::create();
2471  } else if (extractorName == "FREAK") {
2472 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2473  m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2474 #else
2475  std::stringstream ss_msg;
2476  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2477  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2478  throw vpException(vpException::fatalError, ss_msg.str());
2479 #endif
2480  } else if (extractorName == "BRIEF") {
2481 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2482  m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2483 #else
2484  std::stringstream ss_msg;
2485  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2486  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2487  throw vpException(vpException::fatalError, ss_msg.str());
2488 #endif
2489  } else if (extractorName == "KAZE") {
2490  m_extractors[extractorName] = cv::KAZE::create();
2491  } else if (extractorName == "AKAZE") {
2492  m_extractors[extractorName] = cv::AKAZE::create();
2493  } else if (extractorName == "DAISY") {
2494 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2495  m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2496 #else
2497  std::stringstream ss_msg;
2498  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2499  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2500  throw vpException(vpException::fatalError, ss_msg.str());
2501 #endif
2502  } else if (extractorName == "LATCH") {
2503 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2504  m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2505 #else
2506  std::stringstream ss_msg;
2507  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2508  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2509  throw vpException(vpException::fatalError, ss_msg.str());
2510 #endif
2511  } else if (extractorName == "LUCID") {
2512 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2513  // m_extractors[extractorName] = cv::xfeatures2d::LUCID::create(1, 2);
2514  // Not possible currently, LUCID needs a color image
2515  throw vpException(vpException::badValue, "Not possible currently as it needs a color image.");
2516 #else
2517  std::stringstream ss_msg;
2518  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2519  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2520  throw vpException(vpException::fatalError, ss_msg.str());
2521 #endif
2522  } else if (extractorName == "VGG") {
2523 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2524 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2525  m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2526 #else
2527  std::stringstream ss_msg;
2528  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2529  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2530  throw vpException(vpException::fatalError, ss_msg.str());
2531 #endif
2532 #else
2533  std::stringstream ss_msg;
2534  ss_msg << "Failed to initialize the extractor: " << extractorName << ". It requires at least OpenCV 3.2, "
2535  << "but the current OpenCV version is " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2536  throw vpException(vpException::fatalError, ss_msg.str());
2537 #endif
2538  } else if (extractorName == "BoostDesc") {
2539 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2540 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2541  m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2542 #else
2543  std::stringstream ss_msg;
2544  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
2545  << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2546  throw vpException(vpException::fatalError, ss_msg.str());
2547 #endif
2548 #else
2549  std::stringstream ss_msg;
2550  ss_msg << "Failed to initialize the extractor: " << extractorName << ". It requires at least OpenCV 3.2, "
2551  << "but the current OpenCV version is " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2552  throw vpException(vpException::fatalError, ss_msg.str());
2553 #endif
2554  } else {
2555  std::cerr << "The extractor: " << extractorName << " is not available." << std::endl;
2556  }
2557 #endif
2558 
2559  if (!m_extractors[extractorName]) { //if null
2560  std::stringstream ss_msg;
2561  ss_msg << "Failed to initialize the extractor: " << extractorName
2562  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2563  throw vpException(vpException::fatalError, ss_msg.str());
2564  }
2565 
2566 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2567  if (extractorName == "SURF") {
2568  // Use extended set of SURF descriptors (128 instead of 64)
2569  m_extractors[extractorName]->set("extended", 1);
2570  }
2571 #endif
2572 }
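
// Note (illustrative): the SURF arguments above map to
// cv::xfeatures2d::SURF::create(hessianThreshold, nOctaves, nOctaveLayers,
// extended); passing extended = true switches from 64-element to 128-element
// descriptors, which are more discriminative but slower to match. For
// OpenCV 2.x the same option is enabled through the set("extended", 1) call
// just above.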
2573 
2580 void vpKeyPoint::initExtractors(const std::vector<std::string> &extractorNames)
2581 {
2582  for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
2583  initExtractor(*it);
2584  }
2585 
2586  int descriptorType = CV_32F;
2587  bool firstIteration = true;
2588  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2589  it != m_extractors.end(); ++it) {
2590  if (firstIteration) {
2591  firstIteration = false;
2592  descriptorType = it->second->descriptorType();
2593  } else {
2594  if (descriptorType != it->second->descriptorType()) {
2595  throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2596  }
2597  }
2598  }
2599 }
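
// Note (illustrative): the same-type check above exists because the
// descriptors of all configured extractors are later concatenated into a
// single cv::Mat. Mixing, for example, ORB (binary CV_8U descriptors) with
// SIFT (floating-point CV_32F descriptors) would make cv::hconcat fail, so
// such a configuration is rejected at initialization time.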
2600 
2601 void vpKeyPoint::initFeatureNames()
2602 {
2603 // Create the maps from enum values to detector/descriptor names
2604 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2605  m_mapOfDetectorNames[DETECTOR_FAST] = "FAST";
2606  m_mapOfDetectorNames[DETECTOR_MSER] = "MSER";
2607  m_mapOfDetectorNames[DETECTOR_ORB] = "ORB";
2608  m_mapOfDetectorNames[DETECTOR_BRISK] = "BRISK";
2609  m_mapOfDetectorNames[DETECTOR_GFTT] = "GFTT";
2610  m_mapOfDetectorNames[DETECTOR_SimpleBlob] = "SimpleBlob";
2611 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2612  m_mapOfDetectorNames[DETECTOR_STAR] = "STAR";
2613 #endif
2614 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2615  m_mapOfDetectorNames[DETECTOR_SIFT] = "SIFT";
2616  m_mapOfDetectorNames[DETECTOR_SURF] = "SURF";
2617 #endif
2618 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2619  m_mapOfDetectorNames[DETECTOR_KAZE] = "KAZE";
2620  m_mapOfDetectorNames[DETECTOR_AKAZE] = "AKAZE";
2621  m_mapOfDetectorNames[DETECTOR_AGAST] = "AGAST";
2622 #endif
2623 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2624  m_mapOfDetectorNames[DETECTOR_MSD] = "MSD";
2625 #endif
2626 #endif
2627 
2628 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2629  m_mapOfDescriptorNames[DESCRIPTOR_ORB] = "ORB";
2630  m_mapOfDescriptorNames[DESCRIPTOR_BRISK] = "BRISK";
2631 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
2632  m_mapOfDescriptorNames[DESCRIPTOR_FREAK] = "FREAK";
2633  m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] = "BRIEF";
2634 #endif
2635 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
2636  m_mapOfDescriptorNames[DESCRIPTOR_SIFT] = "SIFT";
2637  m_mapOfDescriptorNames[DESCRIPTOR_SURF] = "SURF";
2638 #endif
2639 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2640  m_mapOfDescriptorNames[DESCRIPTOR_KAZE] = "KAZE";
2641  m_mapOfDescriptorNames[DESCRIPTOR_AKAZE] = "AKAZE";
2642 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
2643  m_mapOfDescriptorNames[DESCRIPTOR_DAISY] = "DAISY";
2644  m_mapOfDescriptorNames[DESCRIPTOR_LATCH] = "LATCH";
2645 #endif
2646 #endif
2647 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
2648  m_mapOfDescriptorNames[DESCRIPTOR_VGG] = "VGG";
2649  m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] = "BoostDesc";
2650 #endif
2651 #endif
2652 }
2653 
2659 void vpKeyPoint::initMatcher(const std::string &matcherName)
2660 {
2661  int descriptorType = CV_32F;
2662  bool firstIteration = true;
2663  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2664  it != m_extractors.end(); ++it) {
2665  if (firstIteration) {
2666  firstIteration = false;
2667  descriptorType = it->second->descriptorType();
2668  } else {
2669  if (descriptorType != it->second->descriptorType()) {
2670  throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2671  }
2672  }
2673  }
2674 
2675  if (matcherName == "FlannBased") {
2676  if (m_extractors.empty()) {
2677  std::cout << "Warning: No extractor initialized; by default, "
2678  "floating-point values (CV_32F) "
2679  "are used for the descriptor type!"
2680  << std::endl;
2681  }
2682 
2683  if (descriptorType == CV_8U) {
2684 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2685  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2686 #else
2687  m_matcher = new cv::FlannBasedMatcher(new cv::flann::LshIndexParams(12, 20, 2));
2688 #endif
2689  } else {
2690 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2691  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2692 #else
2693  m_matcher = new cv::FlannBasedMatcher(new cv::flann::KDTreeIndexParams());
2694 #endif
2695  }
2696  } else {
2697  m_matcher = cv::DescriptorMatcher::create(matcherName);
2698  }
2699 
2700 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2701  if (m_matcher != NULL && !m_useKnn && matcherName == "BruteForce") {
2702  m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
2703  }
2704 #endif
2705 
2706  if (!m_matcher) { //if null
2707  std::stringstream ss_msg;
2708  ss_msg << "Failed to initialize the matcher: " << matcherName
2709  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2710  throw vpException(vpException::fatalError, ss_msg.str());
2711  }
2712 }
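
// Note (illustrative): FLANN cannot build a KD-tree over binary descriptors,
// hence the descriptor-type switch above. A sketch of the resulting behavior,
// with assumed extractor choices:
//
//   // ORB descriptors are CV_8U   -> FLANN with LSH (locality-sensitive hashing)
//   // SIFT descriptors are CV_32F -> FLANN with randomized KD-trees
//   keypoint.setMatcher("FlannBased");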
2713 
2722 void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
2723  vpImage<unsigned char> &IMatching)
2724 {
2725  vpImagePoint topLeftCorner(0, 0);
2726  IMatching.insert(IRef, topLeftCorner);
2727  topLeftCorner = vpImagePoint(0, IRef.getWidth());
2728  IMatching.insert(ICurrent, topLeftCorner);
2729 }
2730 
2739 void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent,
2740  vpImage<vpRGBa> &IMatching)
2741 {
2742  vpImagePoint topLeftCorner(0, 0);
2743  IMatching.insert(IRef, topLeftCorner);
2744  topLeftCorner = vpImagePoint(0, IRef.getWidth());
2745  IMatching.insert(ICurrent, topLeftCorner);
2746 }
2747 
2755 void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching)
2756 {
2757  // Number of images in the training database + the current image in which
2758  // we want to detect the object
2759  int nbImg = (int)(m_mapOfImages.size() + 1);
2760 
2761  if (m_mapOfImages.empty()) {
2762  std::cerr << "There is no training image loaded!" << std::endl;
2763  return;
2764  }
2765 
2766  if (nbImg == 2) {
2767  // Only one training image, so we display them side by side
2768  insertImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
2769  } else {
2770  // Multiple training images, display them as a mosaic image
2771  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
2772  int nbWidth = nbImgSqrt;
2773  int nbHeight = nbImgSqrt;
2774 
2775  if (nbImgSqrt * nbImgSqrt < nbImg) {
2776  nbWidth++;
2777  }
2778 
2779  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2780  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2781  ++it) {
2782  if (maxW < it->second.getWidth()) {
2783  maxW = it->second.getWidth();
2784  }
2785 
2786  if (maxH < it->second.getHeight()) {
2787  maxH = it->second.getHeight();
2788  }
2789  }
2790 
2791  // Indices of the current image in the grid, computed so that the image
2792  // lies preferably in the center square of the mosaic grid
2793  int medianI = nbHeight / 2;
2794  int medianJ = nbWidth / 2;
2795  int medianIndex = medianI * nbWidth + medianJ;
2796 
2797  int cpt = 0;
2798  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2799  ++it, cpt++) {
2800  int local_cpt = cpt;
2801  if (cpt >= medianIndex) {
2802  // Shift the index by one for the training images that come after
2803  // the current image
2804  local_cpt++;
2805  }
2806  int indexI = local_cpt / nbWidth;
2807  int indexJ = local_cpt - (indexI * nbWidth);
2808  vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2809 
2810  IMatching.insert(it->second, topLeftCorner);
2811  }
2812 
2813  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2814  IMatching.insert(ICurrent, topLeftCorner);
2815  }
2816 }
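
// Worked example (illustrative, with assumed numbers): for 4 training images,
// nbImg = 5, so nbImgSqrt = round(sqrt(5)) = 2; since 2 * 2 < 5, nbWidth is
// bumped to 3, giving a 3 x 2 mosaic. Then medianI = 1, medianJ = 1 and
// medianIndex = 4: the current image occupies cell (1, 1) and the training
// images fill the surrounding cells.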
2817 
2825 void vpKeyPoint::insertImageMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching)
2826 {
2827  // Number of images in the training database + the current image in which
2828  // we want to detect the object
2829  int nbImg = (int)(m_mapOfImages.size() + 1);
2830 
2831  if (m_mapOfImages.empty()) {
2832  std::cerr << "There is no training image loaded!" << std::endl;
2833  return;
2834  }
2835 
2836  if (nbImg == 2) {
2837  // Only one training image, so we display them side by side
2838  vpImage<vpRGBa> IRef;
2839  vpImageConvert::convert(m_mapOfImages.begin()->second, IRef);
2840  insertImageMatching(IRef, ICurrent, IMatching);
2841  } else {
2842  // Multiple training images, display them as a mosaic image
2843  int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
2844  int nbWidth = nbImgSqrt;
2845  int nbHeight = nbImgSqrt;
2846 
2847  if (nbImgSqrt * nbImgSqrt < nbImg) {
2848  nbWidth++;
2849  }
2850 
2851  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2852  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2853  ++it) {
2854  if (maxW < it->second.getWidth()) {
2855  maxW = it->second.getWidth();
2856  }
2857 
2858  if (maxH < it->second.getHeight()) {
2859  maxH = it->second.getHeight();
2860  }
2861  }
2862 
2863  // Indices of the current image in the grid, computed so that the image
2864  // lies preferably in the center square of the mosaic grid
2865  int medianI = nbHeight / 2;
2866  int medianJ = nbWidth / 2;
2867  int medianIndex = medianI * nbWidth + medianJ;
2868 
2869  int cpt = 0;
2870  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2871  ++it, cpt++) {
2872  int local_cpt = cpt;
2873  if (cpt >= medianIndex) {
2874  // Shift the index by one for the training images that come after
2875  // the current image
2876  local_cpt++;
2877  }
2878  int indexI = local_cpt / nbWidth;
2879  int indexJ = local_cpt - (indexI * nbWidth);
2880  vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);
2881 
2882  vpImage<vpRGBa> IRef;
2883  vpImageConvert::convert(it->second, IRef);
2884  IMatching.insert(IRef, topLeftCorner);
2885  }
2886 
2887  vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
2888  IMatching.insert(ICurrent, topLeftCorner);
2889  }
2890 }
2891 
2892 #ifdef VISP_HAVE_PUGIXML
2893 
2898 void vpKeyPoint::loadConfigFile(const std::string &configFile)
2899 {
2900  vpXmlConfigParserKeyPoint xmlp;
2901 
2902  try {
2903  // Reset detector and extractor
2904  m_detectorNames.clear();
2905  m_extractorNames.clear();
2906  m_detectors.clear();
2907  m_extractors.clear();
2908 
2909  std::cout << " *********** Parsing XML for configuration for vpKeyPoint "
2910  "************ "
2911  << std::endl;
2912  xmlp.parse(configFile);
2913 
2914  m_detectorNames.push_back(xmlp.getDetectorName());
2915  m_extractorNames.push_back(xmlp.getExtractorName());
2916  m_matcherName = xmlp.getMatcherName();
2917 
2918  switch (xmlp.getMatchingMethod()) {
2919  case vpXmlConfigParserKeyPoint::constantFactorDistanceThreshold:
2920  m_filterType = constantFactorDistanceThreshold;
2921  break;
2922 
2923  case vpXmlConfigParserKeyPoint::stdDistanceThreshold:
2924  m_filterType = stdDistanceThreshold;
2925  break;
2926 
2927  case vpXmlConfigParserKeyPoint::ratioDistanceThreshold:
2928  m_filterType = ratioDistanceThreshold;
2929  break;
2930 
2931  case vpXmlConfigParserKeyPoint::stdAndRatioDistanceThreshold:
2932  m_filterType = stdAndRatioDistanceThreshold;
2933  break;
2934 
2935  case vpXmlConfigParserKeyPoint::noFilterMatching:
2936  m_filterType = noFilterMatching;
2937  break;
2938 
2939  default:
2940  break;
2941  }
2942 
2943  m_matchingFactorThreshold = xmlp.getMatchingFactorThreshold();
2944  m_matchingRatioThreshold = xmlp.getMatchingRatioThreshold();
2945 
2946  m_useRansacVVS = xmlp.getUseRansacVVSPoseEstimation();
2947  m_useConsensusPercentage = xmlp.getUseRansacConsensusPercentage();
2948  m_nbRansacIterations = xmlp.getNbRansacIterations();
2949  m_ransacReprojectionError = xmlp.getRansacReprojectionError();
2950  m_nbRansacMinInlierCount = xmlp.getNbRansacMinInlierCount();
2951  m_ransacThreshold = xmlp.getRansacThreshold();
2952  m_ransacConsensusPercentage = xmlp.getRansacConsensusPercentage();
2953 
2954  if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
2955  m_useKnn = true;
2956  } else {
2957  m_useKnn = false;
2958  }
2959 
2960  init();
2961  } catch (...) {
2962  throw vpException(vpException::ioError, "Can't open XML file \"%s\"\n ", configFile.c_str());
2963  }
2964 }
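
// Usage sketch (illustrative): the file name is hypothetical and the XML tags
// are those understood by vpXmlConfigParserKeyPoint (detector, extractor and
// matcher names, matching method, RANSAC settings):
//
//   vpKeyPoint keypoint;
//   keypoint.loadConfigFile("detection-config.xml");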
2965 #endif
2966 
2975 void vpKeyPoint::loadLearningData(const std::string &filename, bool binaryMode, bool append)
2976 {
2977  int startClassId = 0;
2978  int startImageId = 0;
2979  if (!append) {
2980  m_trainKeyPoints.clear();
2981  m_trainPoints.clear();
2982  m_mapOfImageId.clear();
2983  m_mapOfImages.clear();
2984  } else {
2985  // In append mode, find the max index of the keypoint class ids
2986  for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
2987  if (startClassId < it->first) {
2988  startClassId = it->first;
2989  }
2990  }
2991 
2992  // In append mode, find the max index of the image ids
2993  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
2994  ++it) {
2995  if (startImageId < it->first) {
2996  startImageId = it->first;
2997  }
2998  }
2999  }
3000 
3001  // Get parent directory
3002  std::string parent = vpIoTools::getParent(filename);
3003  if (!parent.empty()) {
3004  parent += "/";
3005  }
3006 
3007  if (binaryMode) {
3008  std::ifstream file(filename.c_str(), std::ifstream::binary);
3009  if (!file.is_open()) {
3010  throw vpException(vpException::ioError, "Cannot open the file.");
3011  }
3012 
3013  // Read info about training images
3014  int nbImgs = 0;
3015  vpIoTools::readBinaryValueLE(file, nbImgs);
3016 
3017 #if !defined(VISP_HAVE_MODULE_IO)
3018  if (nbImgs > 0) {
3019  std::cout << "Warning: The learning file contains image data that will "
3020  "not be loaded as visp_io module "
3021  "is not available !"
3022  << std::endl;
3023  }
3024 #endif
3025 
3026  for (int i = 0; i < nbImgs; i++) {
3027  // Read image_id
3028  int id = 0;
3029  vpIoTools::readBinaryValueLE(file, id);
3030 
3031  int length = 0;
3032  vpIoTools::readBinaryValueLE(file, length);
3033  // Will contain the path to the training images
3034  char *path = new char[length + 1]; // dynamically allocated, since the length is only known at runtime
3035 
3036  for (int cpt = 0; cpt < length; cpt++) {
3037  char c;
3038  file.read((char *)(&c), sizeof(c));
3039  path[cpt] = c;
3040  }
3041  path[length] = '\0';
3042 
3044 #ifdef VISP_HAVE_MODULE_IO
3045  if (vpIoTools::isAbsolutePathname(std::string(path))) {
3046  vpImageIo::read(I, path);
3047  } else {
3048  vpImageIo::read(I, parent + path);
3049  }
3050 
3051  // Add the image previously loaded only if VISP_HAVE_MODULE_IO
3052  m_mapOfImages[id + startImageId] = I;
3053 #endif
3054 
3055  // Delete path
3056  delete[] path;
3057  }
3058 
3059  // Read if 3D point information is saved or not
3060  int have3DInfoInt = 0;
3061  vpIoTools::readBinaryValueLE(file, have3DInfoInt);
3062  bool have3DInfo = have3DInfoInt != 0;
3063 
3064  // Read the number of descriptors
3065  int nRows = 0;
3066  vpIoTools::readBinaryValueLE(file, nRows);
3067 
3068  // Read the size of the descriptor
3069  int nCols = 0;
3070  vpIoTools::readBinaryValueLE(file, nCols);
3071 
3072  // Read the type of the descriptor
3073  int descriptorType = 5; // CV_32F
3074  vpIoTools::readBinaryValueLE(file, descriptorType);
3075 
3076  cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3077  for (int i = 0; i < nRows; i++) {
3078  // Read information about keyPoint
3079  float u, v, size, angle, response;
3080  int octave, class_id, image_id;
3081  vpIoTools::readBinaryValueLE(file, u);
3082  vpIoTools::readBinaryValueLE(file, v);
3083  vpIoTools::readBinaryValueLE(file, size);
3084  vpIoTools::readBinaryValueLE(file, angle);
3085  vpIoTools::readBinaryValueLE(file, response);
3086  vpIoTools::readBinaryValueLE(file, octave);
3087  vpIoTools::readBinaryValueLE(file, class_id);
3088  vpIoTools::readBinaryValueLE(file, image_id);
3089  cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
3090  m_trainKeyPoints.push_back(keyPoint);
3091 
3092  if (image_id != -1) {
3093 #ifdef VISP_HAVE_MODULE_IO
3094  // No training images if image_id == -1
3095  m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3096 #endif
3097  }
3098 
3099  if (have3DInfo) {
3100  // Read oX, oY, oZ
3101  float oX, oY, oZ;
3102  vpIoTools::readBinaryValueLE(file, oX);
3103  vpIoTools::readBinaryValueLE(file, oY);
3104  vpIoTools::readBinaryValueLE(file, oZ);
3105  m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
3106  }
3107 
3108  for (int j = 0; j < nCols; j++) {
3109  // Read the value of the descriptor
3110  switch (descriptorType) {
3111  case CV_8U: {
3112  unsigned char value;
3113  file.read((char *)(&value), sizeof(value));
3114  trainDescriptorsTmp.at<unsigned char>(i, j) = value;
3115  } break;
3116 
3117  case CV_8S: {
3118  char value;
3119  file.read((char *)(&value), sizeof(value));
3120  trainDescriptorsTmp.at<char>(i, j) = value;
3121  } break;
3122 
3123  case CV_16U: {
3124  unsigned short int value;
3125  vpIoTools::readBinaryValueLE(file, value);
3126  trainDescriptorsTmp.at<unsigned short int>(i, j) = value;
3127  } break;
3128 
3129  case CV_16S: {
3130  short int value;
3131  vpIoTools::readBinaryValueLE(file, value);
3132  trainDescriptorsTmp.at<short int>(i, j) = value;
3133  } break;
3134 
3135  case CV_32S: {
3136  int value;
3137  vpIoTools::readBinaryValueLE(file, value);
3138  trainDescriptorsTmp.at<int>(i, j) = value;
3139  } break;
3140 
3141  case CV_32F: {
3142  float value;
3143  vpIoTools::readBinaryValueLE(file, value);
3144  trainDescriptorsTmp.at<float>(i, j) = value;
3145  } break;
3146 
3147  case CV_64F: {
3148  double value;
3149  vpIoTools::readBinaryValueLE(file, value);
3150  trainDescriptorsTmp.at<double>(i, j) = value;
3151  } break;
3152 
3153  default: {
3154  float value;
3155  vpIoTools::readBinaryValueLE(file, value);
3156  trainDescriptorsTmp.at<float>(i, j) = value;
3157  } break;
3158  }
3159  }
3160  }
3161 
3162  if (!append || m_trainDescriptors.empty()) {
3163  trainDescriptorsTmp.copyTo(m_trainDescriptors);
3164  } else {
3165  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3166  }
3167 
3168  file.close();
3169  } else {
3170 #ifdef VISP_HAVE_PUGIXML
3171  pugi::xml_document doc;
3172 
3173  /* Parse the file and get the DOM */
3174  if (!doc.load_file(filename.c_str())) {
3175  throw vpException(vpException::ioError, "Error with file: %s", filename.c_str());
3176  }
3177 
3178  pugi::xml_node root_element = doc.document_element();
3179 
3180  int descriptorType = CV_32F; // float
3181  int nRows = 0, nCols = 0;
3182  int i = 0, j = 0;
3183 
3184  cv::Mat trainDescriptorsTmp;
3185 
3186  for (pugi::xml_node first_level_node = root_element.first_child(); first_level_node; first_level_node = first_level_node.next_sibling()) {
3187 
3188  std::string name(first_level_node.name());
3189  if (first_level_node.type() == pugi::node_element && name == "TrainingImageInfo") {
3190 
3191  for (pugi::xml_node image_info_node = first_level_node.first_child(); image_info_node; image_info_node = image_info_node.next_sibling()) {
3192  name = std::string(image_info_node.name());
3193 
3194  if (name == "trainImg") {
3195  // Read image_id
3196  int id = image_info_node.attribute("image_id").as_int();
3197 
3199 #ifdef VISP_HAVE_MODULE_IO
3200  std::string path(image_info_node.text().as_string());
3201  // Read path to the training images
3202  if (vpIoTools::isAbsolutePathname(std::string(path))) {
3203  vpImageIo::read(I, path);
3204  } else {
3205  vpImageIo::read(I, parent + path);
3206  }
3207 
3208  // Add the image previously loaded only if VISP_HAVE_MODULE_IO
3209  m_mapOfImages[id + startImageId] = I;
3210 #endif
3211  }
3212  }
3213  } else if (first_level_node.type() == pugi::node_element && name == "DescriptorsInfo") {
3214  for (pugi::xml_node descriptors_info_node = first_level_node.first_child(); descriptors_info_node;
3215  descriptors_info_node = descriptors_info_node.next_sibling()) {
3216  if (descriptors_info_node.type() == pugi::node_element) {
3217  name = std::string(descriptors_info_node.name());
3218 
3219  if (name == "nrows") {
3220  nRows = descriptors_info_node.text().as_int();
3221  } else if (name == "ncols") {
3222  nCols = descriptors_info_node.text().as_int();
3223  } else if (name == "type") {
3224  descriptorType = descriptors_info_node.text().as_int();
3225  }
3226  }
3227  }
3228 
3229  trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
3230  } else if (first_level_node.type() == pugi::node_element && name == "DescriptorInfo") {
3231  double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
3232  int octave = 0, class_id = 0, image_id = 0;
3233  double oX = 0.0, oY = 0.0, oZ = 0.0;
3234 
3235  std::stringstream ss;
3236 
3237  for (pugi::xml_node point_node = first_level_node.first_child(); point_node; point_node = point_node.next_sibling()) {
3238  if (point_node.type() == pugi::node_element) {
3239  name = std::string(point_node.name());
3240 
3241  // Read information about keypoints
3242  if (name == "u") {
3243  u = point_node.text().as_double();
3244  } else if (name == "v") {
3245  v = point_node.text().as_double();
3246  } else if (name == "size") {
3247  size = point_node.text().as_double();
3248  } else if (name == "angle") {
3249  angle = point_node.text().as_double();
3250  } else if (name == "response") {
3251  response = point_node.text().as_double();
3252  } else if (name == "octave") {
3253  octave = point_node.text().as_int();
3254  } else if (name == "class_id") {
3255  class_id = point_node.text().as_int();
3256  cv::KeyPoint keyPoint(cv::Point2f((float)u, (float)v), (float)size, (float)angle, (float)response, octave,
3257  (class_id + startClassId));
3258  m_trainKeyPoints.push_back(keyPoint);
3259  } else if (name == "image_id") {
3260  image_id = point_node.text().as_int();
3261  if (image_id != -1) {
3262 #ifdef VISP_HAVE_MODULE_IO
3263  // No training images if image_id == -1
3264  m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
3265 #endif
3266  }
3267  } else if (name == "oX") {
3268  oX = point_node.text().as_double();
3269  } else if (name == "oY") {
3270  oY = point_node.text().as_double();
3271  } else if (name == "oZ") {
3272  oZ = point_node.text().as_double();
3273  m_trainPoints.push_back(cv::Point3f((float)oX, (float)oY, (float)oZ));
3274  } else if (name == "desc") {
3275  j = 0;
3276 
3277  for (pugi::xml_node descriptor_value_node = point_node.first_child(); descriptor_value_node;
3278  descriptor_value_node = descriptor_value_node.next_sibling()) {
3279 
3280  if (descriptor_value_node.type() == pugi::node_element) {
3281  // Read descriptors values
3282  std::string parseStr(descriptor_value_node.text().as_string());
3283  ss.clear();
3284  ss.str(parseStr);
3285 
3286  if (!ss.fail()) {
3287  switch (descriptorType) {
3288  case CV_8U: {
3289  // Parse the numeric value [0 ; 255] to an int
3290  int parseValue;
3291  ss >> parseValue;
3292  trainDescriptorsTmp.at<unsigned char>(i, j) = (unsigned char)parseValue;
3293  } break;
3294 
3295  case CV_8S: {
3296  // Parse the numeric value [-128 ; 127] to an int
3297  int parseValue;
3298  ss >> parseValue;
3299  trainDescriptorsTmp.at<char>(i, j) = (char)parseValue;
3300  } break;
3301 
3302  case CV_16U:
3303  ss >> trainDescriptorsTmp.at<unsigned short int>(i, j);
3304  break;
3305 
3306  case CV_16S:
3307  ss >> trainDescriptorsTmp.at<short int>(i, j);
3308  break;
3309 
3310  case CV_32S:
3311  ss >> trainDescriptorsTmp.at<int>(i, j);
3312  break;
3313 
3314  case CV_32F:
3315  ss >> trainDescriptorsTmp.at<float>(i, j);
3316  break;
3317 
3318  case CV_64F:
3319  ss >> trainDescriptorsTmp.at<double>(i, j);
3320  break;
3321 
3322  default:
3323  ss >> trainDescriptorsTmp.at<float>(i, j);
3324  break;
3325  }
3326  } else {
3327  std::cerr << "Error when converting: " << ss.str() << std::endl;
3328  }
3329 
3330  j++;
3331  }
3332  }
3333  }
3334  }
3335  }
3336  i++;
3337  }
3338  }
3339 
3340  if (!append || m_trainDescriptors.empty()) {
3341  trainDescriptorsTmp.copyTo(m_trainDescriptors);
3342  } else {
3343  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
3344  }
3345 #else
3346  std::cout << "Error: pugixml is not properly built!" << std::endl;
3347 #endif
3348  }
3349 
3350  // Convert OpenCV type to ViSP type for compatibility
3351  vpConvert::convertFromOpenCV(m_trainKeyPoints, referenceImagePointsList);
3352  vpConvert::convertFromOpenCV(this->m_trainPoints, m_trainVpPoints);
3353 
3354  // Add train descriptors in matcher object
3355  m_matcher->clear();
3356  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
3357 
3358  // Set _reference_computed to true as we load a learning file
3359  _reference_computed = true;
3360 
3361  // Set m_currentImageId
3362  m_currentImageId = (int)m_mapOfImages.size();
3363 }
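
// Usage sketch (illustrative): a typical save/load round trip, assuming the
// learning data were written with vpKeyPoint::saveLearningData() using the
// same binary flag:
//
//   keypoint.buildReference(Itrain);                      // train on an image
//   keypoint.saveLearningData("learning-data.bin", true); // binary mode
//   ...
//   keypoint.loadLearningData("learning-data.bin", true); // reload later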
3364 
3373 void vpKeyPoint::match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors,
3374  std::vector<cv::DMatch> &matches, double &elapsedTime)
3375 {
3376  double t = vpTime::measureTimeMs();
3377 
3378  if (m_useKnn) {
3379  m_knnMatches.clear();
3380 
3381  if (m_useMatchTrainToQuery) {
3382  std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
3383 
3384  // Match train descriptors to query descriptors
3385  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
3386  matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
3387 
3388  for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
3389  it1 != knnMatchesTmp.end(); ++it1) {
3390  std::vector<cv::DMatch> tmp;
3391  for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
3392  tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
3393  }
3394  m_knnMatches.push_back(tmp);
3395  }
3396 
3397  matches.resize(m_knnMatches.size());
3398  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3399  } else {
3400  // Match query descriptors to train descriptors
3401  m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
3402  matches.resize(m_knnMatches.size());
3403  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3404  }
3405  } else {
3406  matches.clear();
3407 
3408  if (m_useMatchTrainToQuery) {
3409  std::vector<cv::DMatch> matchesTmp;
3410  // Match train descriptors to query descriptors
3411  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
3412  matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
3413 
3414  for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
3415  matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
3416  }
3417  } else {
3418  // Match query descriptors to train descriptors
3419  m_matcher->match(queryDescriptors, matches);
3420  }
3421  }
3422  elapsedTime = vpTime::measureTimeMs() - t;
3423 }
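
// Note (illustrative): when m_useMatchTrainToQuery is set, the matcher is run
// in the opposite direction (train -> query), so the queryIdx/trainIdx fields
// of each cv::DMatch are swapped above to keep the convention used everywhere
// else in this class: queryIdx indexes the current image, trainIdx indexes
// the training database.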
3424 
3432 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I) { return matchPoint(I, vpRect()); }
3433 
3441 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color) { return matchPoint(I_color, vpRect()); }
3442 
3453 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
3454  unsigned int width)
3455 {
3456  return matchPoint(I, vpRect(iP, width, height));
3457 }
3458 
3469 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
3470  unsigned int width)
3471 {
3472  return matchPoint(I_color, vpRect(iP, width, height));
3473 }
3474 
3483 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpRect &rectangle)
3484 {
3485  if (m_trainDescriptors.empty()) {
3486  std::cerr << "Reference is empty." << std::endl;
3487  if (!_reference_computed) {
3488  std::cerr << "Reference is not computed." << std::endl;
3489  }
3490  std::cerr << "Matching is not possible." << std::endl;
3491 
3492  return 0;
3493  }
3494 
3495  if (m_useAffineDetection) {
3496  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3497  std::vector<cv::Mat> listOfQueryDescriptors;
3498 
3499  // Detect keypoints and extract descriptors on multiple images
3500  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3501 
3502  // Flatten the per-affine-view lists of query keypoints
3503  m_queryKeyPoints.clear();
3504  for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3505  it != listOfQueryKeyPoints.end(); ++it) {
3506  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3507  }
3508 
3509  bool first = true;
3510  for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3511  ++it) {
3512  if (first) {
3513  first = false;
3514  it->copyTo(m_queryDescriptors);
3515  } else {
3516  m_queryDescriptors.push_back(*it);
3517  }
3518  }
3519  } else {
3520  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3521  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3522  }
3523 
3524  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3525 
3526  if (m_filterType != noFilterMatching) {
3527  m_queryFilteredKeyPoints.clear();
3528  m_objectFilteredPoints.clear();
3529  m_filteredMatches.clear();
3530 
3531  filterMatches();
3532  } else {
3533  if (m_useMatchTrainToQuery) {
3534  // Add only the query keypoints matched with a train keypoint
3535  m_queryFilteredKeyPoints.clear();
3536  m_filteredMatches.clear();
3537  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3538  m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3539  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3540  }
3541  } else {
3542  m_queryFilteredKeyPoints = m_queryKeyPoints;
3543  m_filteredMatches = m_matches;
3544  }
3545 
3546  if (!m_trainPoints.empty()) {
3547  m_objectFilteredPoints.clear();
3548  // Add the 3D object points so that a given index in
3549  // m_queryFilteredKeyPoints and in m_objectFilteredPoints
3550  // refers to the same train object point
3551  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3552  // m_matches is normally ordered following the queryDescriptor index
3553  m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3554  }
3555  }
3556  }
3557 
3558  // Convert OpenCV type to ViSP type for compatibility
3559  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
3560  vpConvert::convertFromOpenCV(m_filteredMatches, matchedReferencePoints);
3561 
3562  return static_cast<unsigned int>(m_filteredMatches.size());
3563 }
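
// A minimal usage sketch of the matching pipeline above; Iref and Icur are
// assumed grayscale images of the same object:
//
//   vpKeyPoint keypoint(vpKeyPoint::DETECTOR_ORB, vpKeyPoint::DESCRIPTOR_ORB, "BruteForce-Hamming");
//   keypoint.buildReference(Iref);                    // detect, extract and train the matcher
//   unsigned int nbMatch = keypoint.matchPoint(Icur); // match against the whole current image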
3564 
3573 unsigned int vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpRect &rectangle)
3574 {
3575  vpImageConvert::convert(I_color, m_I);
3576  return matchPoint(m_I, rectangle);
3577 }
3578 
3591 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3592  bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3593 {
3594  double error, elapsedTime;
3595  return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3596 }
3597 
3610 bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3611  bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
3612 {
3613  double error, elapsedTime;
3614  return matchPoint(I_color, cam, cMo, error, elapsedTime, func, rectangle);
3615 }
3616 
3632 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3633  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3634  const vpRect &rectangle)
3635 {
3636  // Check if we have training descriptors
3637  if (m_trainDescriptors.empty()) {
3638  std::cerr << "Reference is empty." << std::endl;
3639  if (!_reference_computed) {
3640  std::cerr << "Reference is not computed." << std::endl;
3641  }
3642  std::cerr << "Matching is not possible." << std::endl;
3643 
3644  return false;
3645  }
3646 
3647  if (m_useAffineDetection) {
3648  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3649  std::vector<cv::Mat> listOfQueryDescriptors;
3650 
3651  // Detect keypoints and extract descriptors on multiple images
3652  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3653 
3654  // Flatten the different query lists
3655  m_queryKeyPoints.clear();
3656  for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3657  it != listOfQueryKeyPoints.end(); ++it) {
3658  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3659  }
3660 
3661  bool first = true;
3662  for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3663  ++it) {
3664  if (first) {
3665  first = false;
3666  it->copyTo(m_queryDescriptors);
3667  } else {
3668  m_queryDescriptors.push_back(*it);
3669  }
3670  }
3671  } else {
3672  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3673  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3674  }
3675 
3676  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3677 
3678  elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3679 
3680  if (m_filterType != noFilterMatching) {
3681  m_queryFilteredKeyPoints.clear();
3682  m_objectFilteredPoints.clear();
3683  m_filteredMatches.clear();
3684 
3685  filterMatches();
3686  } else {
3687  if (m_useMatchTrainToQuery) {
3688  // Add only the query keypoints matched with a train keypoint
3689  m_queryFilteredKeyPoints.clear();
3690  m_filteredMatches.clear();
3691  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3692  m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3693  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3694  }
3695  } else {
3696  m_queryFilteredKeyPoints = m_queryKeyPoints;
3697  m_filteredMatches = m_matches;
3698  }
3699 
3700  if (!m_trainPoints.empty()) {
3701  m_objectFilteredPoints.clear();
3702  // Add the 3D object points so that a given index in
3703  // m_queryFilteredKeyPoints and in m_objectFilteredPoints
3704  // refers to the same train object point
3705  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3706  // m_matches is normally ordered following the queryDescriptor index
3707  m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3708  }
3709  }
3710  }
3711 
3712  // Convert OpenCV type to ViSP type for compatibility
3713  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
3714  vpConvert::convertFromOpenCV(m_filteredMatches, matchedReferencePoints);
3715 
3716  // error = std::numeric_limits<double>::max(); // create an error under
3717  // Windows. To fix it we have to add #undef max
3718  error = DBL_MAX;
3719  m_ransacInliers.clear();
3720  m_ransacOutliers.clear();
3721 
3722  if (m_useRansacVVS) {
3723  std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3724  size_t cpt = 0;
3725  // Create a list of vpPoint with 2D coordinates (current keypoint
3726  // location) + 3D coordinates (world/object coordinates)
3727  for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3728  it != m_objectFilteredPoints.end(); ++it, cpt++) {
3729  vpPoint pt;
3730  pt.setWorldCoordinates(it->x, it->y, it->z);
3731 
3732  vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3733 
3734  double x = 0.0, y = 0.0;
3735  vpPixelMeterConversion::convertPoint(cam, imP, x, y);
3736  pt.set_x(x);
3737  pt.set_y(y);
3738 
3739  objectVpPoints[cpt] = pt;
3740  }
3741 
3742  std::vector<vpPoint> inliers;
3743  std::vector<unsigned int> inlierIndex;
3744 
3745  bool res = computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3746 
3747  std::map<unsigned int, bool> mapOfInlierIndex;
3748  m_matchRansacKeyPointsToPoints.clear();
3749 
3750  for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3751  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3752  m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3753  mapOfInlierIndex[*it] = true;
3754  }
3755 
3756  for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3757  if (mapOfInlierIndex.find((unsigned int)i) == mapOfInlierIndex.end()) {
3758  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3759  }
3760  }
3761 
3762  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3763 
3764  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3765  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3766  m_ransacInliers.begin(), matchRansacToVpImage);
3767 
3768  elapsedTime += m_poseTime;
3769 
3770  return res;
3771  } else {
3772  std::vector<cv::Point2f> imageFilteredPoints;
3773  cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3774  std::vector<int> inlierIndex;
3775  bool res = computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3776 
3777  std::map<int, bool> mapOfInlierIndex;
3778  m_matchRansacKeyPointsToPoints.clear();
3779 
3780  for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3781  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3782  m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3783  mapOfInlierIndex[*it] = true;
3784  }
3785 
3786  for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3787  if (mapOfInlierIndex.find((int)i) == mapOfInlierIndex.end()) {
3788  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3789  }
3790  }
3791 
3792  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3793 
3794  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3795  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3796  m_ransacInliers.begin(), matchRansacToVpImage);
3797 
3798  elapsedTime += m_poseTime;
3799 
3800  return res;
3801  }
3802 }
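
// A minimal pose-estimation sketch for the overload above; the camera intrinsics
// and the 3D training data are assumed to be already available (names are illustrative):
//
//   vpCameraParameters cam;  // camera intrinsics
//   vpHomogeneousMatrix cMo; // estimated pose of the object frame in the camera frame
//   double err, t;
//   if (keypoint.matchPoint(Icur, cam, cMo, err, t)) {
//     // cMo was computed from the RANSAC inliers, err holds the residual reprojection error
//   }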
3803 
3819 bool vpKeyPoint::matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3820  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3821  const vpRect &rectangle)
3822 {
3823  vpImageConvert::convert(I_color, m_I);
3824  return (matchPoint(m_I, cam, cMo, error, elapsedTime, func, rectangle));
3825 }
3826 
3827 
3846 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox,
3847  vpImagePoint &centerOfGravity, const bool isPlanarObject,
3848  std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3849  double *meanDescriptorDistance, double *detection_score, const vpRect &rectangle)
3850 {
3851  if (imPts1 != NULL && imPts2 != NULL) {
3852  imPts1->clear();
3853  imPts2->clear();
3854  }
3855 
3856  matchPoint(I, rectangle);
3857 
3858  double meanDescriptorDistanceTmp = 0.0;
3859  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3860  meanDescriptorDistanceTmp += (double)it->distance;
3861  }
3862 
3863  meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
3864  double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;
3865 
3866  if (meanDescriptorDistance != NULL) {
3867  *meanDescriptorDistance = meanDescriptorDistanceTmp;
3868  }
3869  if (detection_score != NULL) {
3870  *detection_score = score;
3871  }
3872 
3873  if (m_filteredMatches.size() >= 4) {
3874  // Training / Reference 2D points
3875  std::vector<cv::Point2f> points1(m_filteredMatches.size());
3876  // Query / Current 2D points
3877  std::vector<cv::Point2f> points2(m_filteredMatches.size());
3878 
3879  for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3880  points1[i] = cv::Point2f(m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx].pt);
3881  points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx].pt);
3882  }
3883 
3884  std::vector<vpImagePoint> inliers;
3885  if (isPlanarObject) {
3886 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3887  cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3888 #else
3889  cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3890 #endif
3891 
3892  for (size_t i = 0; i < m_filteredMatches.size(); i++) {
3893  // Compute reprojection error
3894  cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3895  realPoint.at<double>(0, 0) = points1[i].x;
3896  realPoint.at<double>(1, 0) = points1[i].y;
3897  realPoint.at<double>(2, 0) = 1.0;
3898 
3899  cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3900  double err_x = (reprojectedPoint.at<double>(0, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].x;
3901  double err_y = (reprojectedPoint.at<double>(1, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].y;
3902  double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);
3903 
3904  if (reprojectionError < 6.0) {
3905  inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3906  if (imPts1 != NULL) {
3907  imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
3908  }
3909 
3910  if (imPts2 != NULL) {
3911  imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3912  }
3913  }
3914  }
3915  } else if (m_filteredMatches.size() >= 8) {
3916  cv::Mat fundamentalInliers;
3917  cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3918 
3919  for (size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
3920  if (fundamentalInliers.at<uchar>((int)i, 0)) {
3921  inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3922 
3923  if (imPts1 != NULL) {
3924  imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
3925  }
3926 
3927  if (imPts2 != NULL) {
3928  imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
3929  }
3930  }
3931  }
3932  }
3933 
3934  if (!inliers.empty()) {
3935  // Build a polygon with the list of inlier keypoints detected in the
3936  // current image to get the bounding box
3937  vpPolygon polygon(inliers);
3938  boundingBox = polygon.getBoundingBox();
3939 
3940  // Compute the center of gravity
3941  double meanU = 0.0, meanV = 0.0;
3942  for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
3943  meanU += it->get_u();
3944  meanV += it->get_v();
3945  }
3946 
3947  meanU /= (double)inliers.size();
3948  meanV /= (double)inliers.size();
3949 
3950  centerOfGravity.set_u(meanU);
3951  centerOfGravity.set_v(meanV);
3952  }
3953  } else {
3954  // Too few matches
3955  return false;
3956  }
3957 
3958  if (m_detectionMethod == detectionThreshold) {
3959  return meanDescriptorDistanceTmp < m_detectionThreshold;
3960  } else {
3961  return score > m_detectionScore;
3962  }
3963 }
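
// Usage sketch for the detection entry point above, assuming a planar object so
// that the homography test is used (names are illustrative):
//
//   vpRect bb;
//   vpImagePoint cog;
//   if (keypoint.matchPointAndDetect(Icur, bb, cog)) {
//     // bb and cog locate the detected object in the current image
//   }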
3964 
3984 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam,
3985  vpHomogeneousMatrix &cMo, double &error, double &elapsedTime, vpRect &boundingBox,
3986  vpImagePoint &centerOfGravity, bool (*func)(const vpHomogeneousMatrix &),
3987  const vpRect &rectangle)
3988 {
3989  bool isMatchOk = matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3990  if (isMatchOk) {
3991  // Use the pose estimated to project the model points in the image
3992  vpPoint pt;
3993  vpImagePoint imPt;
3994  std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
3995  size_t cpt = 0;
3996  for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
3997  pt = *it;
3998  pt.project(cMo);
3999  vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), imPt);
4000  modelImagePoints[cpt] = imPt;
4001  }
4002 
4003  // Build a polygon with the list of model image points to get the bounding
4004  // box
4005  vpPolygon polygon(modelImagePoints);
4006  boundingBox = polygon.getBoundingBox();
4007 
4008  // Compute the center of gravity of the current inlier keypoints
4009  double meanU = 0.0, meanV = 0.0;
4010  for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
4011  meanU += it->get_u();
4012  meanV += it->get_v();
4013  }
4014 
4015  meanU /= (double)m_ransacInliers.size();
4016  meanV /= (double)m_ransacInliers.size();
4017 
4018  centerOfGravity.set_u(meanU);
4019  centerOfGravity.set_v(meanV);
4020  }
4021 
4022  return isMatchOk;
4023 }
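
// Same idea combined with pose estimation (sketch; cam, cMo, err, t, bb and cog
// declared as in the previous examples):
//
//   if (keypoint.matchPointAndDetect(Icur, cam, cMo, err, t, bb, cog)) {
//     // the pose is estimated, then the model is reprojected to get bb
//   }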
4024 
4037 void vpKeyPoint::detectExtractAffine(const vpImage<unsigned char> &I,
4038  std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
4039  std::vector<cv::Mat> &listOfDescriptors,
4040  std::vector<vpImage<unsigned char> > *listOfAffineI)
4041 {
4042 #if 0
4043  cv::Mat img;
4044  vpImageConvert::convert(I, img);
4045  listOfKeypoints.clear();
4046  listOfDescriptors.clear();
4047 
4048  for (int tl = 1; tl < 6; tl++) {
4049  double t = pow(2, 0.5 * tl);
4050  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4051  std::vector<cv::KeyPoint> keypoints;
4052  cv::Mat descriptors;
4053 
4054  cv::Mat timg, mask, Ai;
4055  img.copyTo(timg);
4056 
4057  affineSkew(t, phi, timg, mask, Ai);
4058 
4059 
4060  if(listOfAffineI != NULL) {
4061  cv::Mat img_disp;
4062  bitwise_and(mask, timg, img_disp);
4063  vpImage<unsigned char> tI;
4064  vpImageConvert::convert(img_disp, tI);
4065  listOfAffineI->push_back(tI);
4066  }
4067 #if 0
4068  cv::Mat img_disp;
4069  cv::bitwise_and(mask, timg, img_disp);
4070  cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE ); // Create a window for display.
4071  cv::imshow( "Skew", img_disp );
4072  cv::waitKey(0);
4073 #endif
4074 
4075  for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4076  it != m_detectors.end(); ++it) {
4077  std::vector<cv::KeyPoint> kp;
4078  it->second->detect(timg, kp, mask);
4079  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4080  }
4081 
4082  double elapsedTime;
4083  extract(timg, keypoints, descriptors, elapsedTime);
4084 
4085  for(unsigned int i = 0; i < keypoints.size(); i++) {
4086  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4087  cv::Mat kpt_t = Ai * cv::Mat(kpt);
4088  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
4089  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
4090  }
4091 
4092  listOfKeypoints.push_back(keypoints);
4093  listOfDescriptors.push_back(descriptors);
4094  }
4095  }
4096 
4097 #else
4098  cv::Mat img;
4099  vpImageConvert::convert(I, img);
4100 
4101  // Create a vector for storing the affine skew parameters
4102  std::vector<std::pair<double, int> > listOfAffineParams;
4103  for (int tl = 1; tl < 6; tl++) {
4104  double t = pow(2, 0.5 * tl);
4105  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
4106  listOfAffineParams.push_back(std::pair<double, int>(t, phi));
4107  }
4108  }
4109 
4110  listOfKeypoints.resize(listOfAffineParams.size());
4111  listOfDescriptors.resize(listOfAffineParams.size());
4112 
4113  if (listOfAffineI != NULL) {
4114  listOfAffineI->resize(listOfAffineParams.size());
4115  }
4116 
4117 #ifdef VISP_HAVE_OPENMP
4118 #pragma omp parallel for
4119 #endif
4120  for (int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
4121  std::vector<cv::KeyPoint> keypoints;
4122  cv::Mat descriptors;
4123 
4124  cv::Mat timg, mask, Ai;
4125  img.copyTo(timg);
4126 
4127  affineSkew(listOfAffineParams[(size_t)cpt].first, listOfAffineParams[(size_t)cpt].second, timg, mask, Ai);
4128 
4129  if (listOfAffineI != NULL) {
4130  cv::Mat img_disp;
4131  bitwise_and(mask, timg, img_disp);
4132  vpImage<unsigned char> tI;
4133  vpImageConvert::convert(img_disp, tI);
4134  (*listOfAffineI)[(size_t)cpt] = tI;
4135  }
4136 
4137 #if 0
4138  cv::Mat img_disp;
4139  cv::bitwise_and(mask, timg, img_disp);
4140  cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE ); // Create a window for display.
4141  cv::imshow( "Skew", img_disp );
4142  cv::waitKey(0);
4143 #endif
4144 
4145  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
4146  it != m_detectors.end(); ++it) {
4147  std::vector<cv::KeyPoint> kp;
4148  it->second->detect(timg, kp, mask);
4149  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
4150  }
4151 
4152  double elapsedTime;
4153  extract(timg, keypoints, descriptors, elapsedTime);
4154 
4155  for (size_t i = 0; i < keypoints.size(); i++) {
4156  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
4157  cv::Mat kpt_t = Ai * cv::Mat(kpt);
4158  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
4159  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
4160  }
4161 
4162  listOfKeypoints[(size_t)cpt] = keypoints;
4163  listOfDescriptors[(size_t)cpt] = descriptors;
4164  }
4165 #endif
4166 }
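
// For reference, the simulated view grid above: the tilt is t = 2^(tl / 2) for
// tl = 1..5, i.e. t ~= 1.41, 2, 2.83, 4 and 5.66, and phi is sampled every
// (int)(72 / t) degrees in [0, 180), giving 4 + 5 + 8 + 10 + 15 = 42 affine
// views per input image (counts derived from the loop bounds above).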
4167 
4171 void vpKeyPoint::reset()
4172 {
4173  // vpBasicKeyPoint class
4174  referenceImagePointsList.clear();
4175  currentImagePointsList.clear();
4176  matchedReferencePoints.clear();
4177  _reference_computed = false;
4178 
4179  m_computeCovariance = false;
4180  m_covarianceMatrix = vpMatrix();
4181  m_currentImageId = 0;
4182  m_detectionMethod = detectionScore;
4183  m_detectionScore = 0.15;
4184  m_detectionThreshold = 100.0;
4185  m_detectionTime = 0.0;
4186  m_detectorNames.clear();
4187  m_detectors.clear();
4188  m_extractionTime = 0.0;
4189  m_extractorNames.clear();
4190  m_extractors.clear();
4191  m_filteredMatches.clear();
4192  m_filterType = ratioDistanceThreshold;
4193  m_imageFormat = jpgImageFormat;
4194  m_knnMatches.clear();
4195  m_mapOfImageId.clear();
4196  m_mapOfImages.clear();
4197  m_matcher = cv::Ptr<cv::DescriptorMatcher>();
4198  m_matcherName = "BruteForce-Hamming";
4199  m_matches.clear();
4200  m_matchingFactorThreshold = 2.0;
4201  m_matchingRatioThreshold = 0.85;
4202  m_matchingTime = 0.0;
4203  m_matchRansacKeyPointsToPoints.clear();
4204  m_nbRansacIterations = 200;
4205  m_nbRansacMinInlierCount = 100;
4206  m_objectFilteredPoints.clear();
4207  m_poseTime = 0.0;
4208  m_queryDescriptors = cv::Mat();
4209  m_queryFilteredKeyPoints.clear();
4210  m_queryKeyPoints.clear();
4211  m_ransacConsensusPercentage = 20.0;
4212  m_ransacFilterFlag = vpPose::NO_FILTER;
4213  m_ransacInliers.clear();
4214  m_ransacOutliers.clear();
4215  m_ransacParallel = true;
4216  m_ransacParallelNbThreads = 0;
4217  m_ransacReprojectionError = 6.0;
4218  m_ransacThreshold = 0.01;
4219  m_trainDescriptors = cv::Mat();
4220  m_trainKeyPoints.clear();
4221  m_trainPoints.clear();
4222  m_trainVpPoints.clear();
4223  m_useAffineDetection = false;
4224 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
4225  m_useBruteForceCrossCheck = true;
4226 #endif
4227  m_useConsensusPercentage = false;
4228  m_useKnn = true; // as m_filterType == ratioDistanceThreshold
4229  m_useMatchTrainToQuery = false;
4230  m_useRansacVVS = true;
4231  m_useSingleMatchFilter = true;
4232 
4233  m_detectorNames.push_back("ORB");
4234  m_extractorNames.push_back("ORB");
4235 
4236  init();
4237 }
4238 
4248 void vpKeyPoint::saveLearningData(const std::string &filename, bool binaryMode, bool saveTrainingImages)
4249 {
4250  std::string parent = vpIoTools::getParent(filename);
4251  if (!parent.empty()) {
4252  vpIoTools::makeDirectory(parent);
4253  }
4254 
4255  std::map<int, std::string> mapOfImgPath;
4256  if (saveTrainingImages) {
4257 #ifdef VISP_HAVE_MODULE_IO
4258  // Save the training image files in the same directory
4259  unsigned int cpt = 0;
4260 
4261  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
4262  ++it, cpt++) {
4263  if (cpt > 999) {
4264  throw vpException(vpException::fatalError, "The number of training images to save is too big!");
4265  }
4266 
4267  std::stringstream ss;
4268  ss << "train_image_" << std::setfill('0') << std::setw(3) << cpt;
4269 
4270  switch (m_imageFormat) {
4271  case jpgImageFormat:
4272  ss << ".jpg";
4273  break;
4274 
4275  case pngImageFormat:
4276  ss << ".png";
4277  break;
4278 
4279  case ppmImageFormat:
4280  ss << ".ppm";
4281  break;
4282 
4283  case pgmImageFormat:
4284  ss << ".pgm";
4285  break;
4286 
4287  default:
4288  ss << ".png";
4289  break;
4290  }
4291 
4292  std::string imgFilename = ss.str();
4293  mapOfImgPath[it->first] = imgFilename;
4294  vpImageIo::write(it->second, parent + (!parent.empty() ? "/" : "") + imgFilename);
4295  }
4296 #else
4297  std::cout << "Warning: in vpKeyPoint::saveLearningData() training images "
4298  "are not saved because "
4299  "the visp_io module is not available!"
4300  << std::endl;
4301 #endif
4302  }
4303 
4304  bool have3DInfo = m_trainPoints.size() > 0;
4305  if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
4306  throw vpException(vpException::fatalError, "List of keypoints and list of 3D points have different sizes!");
4307  }
4308 
4309  if (binaryMode) {
4310  // Save the learning data into little endian binary file.
4311  std::ofstream file(filename.c_str(), std::ofstream::binary);
4312  if (!file.is_open()) {
4313  throw vpException(vpException::ioError, "Cannot create the file.");
4314  }
4315 
4316  // Write info about training images
4317  int nbImgs = (int)mapOfImgPath.size();
4318  vpIoTools::writeBinaryValueLE(file, nbImgs);
4319 
4320 #ifdef VISP_HAVE_MODULE_IO
4321  for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4322  // Write image_id
4323  int id = it->first;
4324  vpIoTools::writeBinaryValueLE(file, id);
4325 
4326  // Write image path
4327  std::string path = it->second;
4328  int length = (int)path.length();
4329  vpIoTools::writeBinaryValueLE(file, length);
4330 
4331  for (int cpt = 0; cpt < length; cpt++) {
4332  file.write((char *)(&path[(size_t)cpt]), sizeof(path[(size_t)cpt]));
4333  }
4334  }
4335 #endif
4336 
4337  // Write if we have 3D point information
4338  int have3DInfoInt = have3DInfo ? 1 : 0;
4339  vpIoTools::writeBinaryValueLE(file, have3DInfoInt);
4340 
4341  int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4342  int descriptorType = m_trainDescriptors.type();
4343 
4344  // Write the number of descriptors
4345  vpIoTools::writeBinaryValueLE(file, nRows);
4346 
4347  // Write the size of the descriptor
4348  vpIoTools::writeBinaryValueLE(file, nCols);
4349 
4350  // Write the type of the descriptor
4351  vpIoTools::writeBinaryValueLE(file, descriptorType);
4352 
4353  for (int i = 0; i < nRows; i++) {
4354  unsigned int i_ = (unsigned int)i;
4355  // Write u
4356  float u = m_trainKeyPoints[i_].pt.x;
4357  vpIoTools::writeBinaryValueLE(file, u);
4358 
4359  // Write v
4360  float v = m_trainKeyPoints[i_].pt.y;
4361  vpIoTools::writeBinaryValueLE(file, v);
4362 
4363  // Write size
4364  float size = m_trainKeyPoints[i_].size;
4365  vpIoTools::writeBinaryValueLE(file, size);
4366 
4367  // Write angle
4368  float angle = m_trainKeyPoints[i_].angle;
4369  vpIoTools::writeBinaryValueLE(file, angle);
4370 
4371  // Write response
4372  float response = m_trainKeyPoints[i_].response;
4373  vpIoTools::writeBinaryValueLE(file, response);
4374 
4375  // Write octave
4376  int octave = m_trainKeyPoints[i_].octave;
4377  vpIoTools::writeBinaryValueLE(file, octave);
4378 
4379  // Write class_id
4380  int class_id = m_trainKeyPoints[i_].class_id;
4381  vpIoTools::writeBinaryValueLE(file, class_id);
4382 
4383 // Write image_id
4384 #ifdef VISP_HAVE_MODULE_IO
4385  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4386  int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
4387  vpIoTools::writeBinaryValueLE(file, image_id);
4388 #else
4389  int image_id = -1;
4390  // file.write((char *)(&image_id), sizeof(image_id));
4391  vpIoTools::writeBinaryValueLE(file, image_id);
4392 #endif
4393 
4394  if (have3DInfo) {
4395  float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
4396  // Write oX
4397  vpIoTools::writeBinaryValueLE(file, oX);
4398 
4399  // Write oY
4400  vpIoTools::writeBinaryValueLE(file, oY);
4401 
4402  // Write oZ
4403  vpIoTools::writeBinaryValueLE(file, oZ);
4404 
4405 
4406  for (int j = 0; j < nCols; j++) {
4407  // Write the descriptor value
4408  switch (descriptorType) {
4409  case CV_8U:
4410  file.write((char *)(&m_trainDescriptors.at<unsigned char>(i, j)),
4411  sizeof(m_trainDescriptors.at<unsigned char>(i, j)));
4412  break;
4413 
4414  case CV_8S:
4415  file.write((char *)(&m_trainDescriptors.at<char>(i, j)), sizeof(m_trainDescriptors.at<char>(i, j)));
4416  break;
4417 
4418  case CV_16U:
4419  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<unsigned short int>(i, j));
4420  break;
4421 
4422  case CV_16S:
4423  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<short int>(i, j));
4424  break;
4425 
4426  case CV_32S:
4427  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<int>(i, j));
4428  break;
4429 
4430  case CV_32F:
4431  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<float>(i, j));
4432  break;
4433 
4434  case CV_64F:
4435  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<double>(i, j));
4436  break;
4437 
4438  default:
4439  throw vpException(vpException::fatalError, "Problem with the data type of descriptors!");
4440  break;
4441  }
4442  }
4443  }
4444 
4445  file.close();
4446  } else {
4447 #ifdef VISP_HAVE_PUGIXML
4448  pugi::xml_document doc;
4449  pugi::xml_node node = doc.append_child(pugi::node_declaration);
4450  node.append_attribute("version") = "1.0";
4451  node.append_attribute("encoding") = "UTF-8";
4452 
4453  if (!doc) {
4454  throw vpException(vpException::ioError, "Error with file: ", filename.c_str());
4455  }
4456 
4457  pugi::xml_node root_node = doc.append_child("LearningData");
4458 
4459  // Write the training images info
4460  pugi::xml_node image_node = root_node.append_child("TrainingImageInfo");
4461 
4462 #ifdef VISP_HAVE_MODULE_IO
4463  for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
4464  pugi::xml_node image_info_node = image_node.append_child("trainImg");
4465  image_info_node.append_child(pugi::node_pcdata).set_value(it->second.c_str());
4466  std::stringstream ss;
4467  ss << it->first;
4468  image_info_node.append_attribute("image_id") = ss.str().c_str();
4469  }
4470 #endif
4471 
4472  // Write information about descriptors
4473  pugi::xml_node descriptors_info_node = root_node.append_child("DescriptorsInfo");
4474 
4475  int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
4476  int descriptorType = m_trainDescriptors.type();
4477 
4478  // Write the number of rows
4479  descriptors_info_node.append_child("nrows").append_child(pugi::node_pcdata).text() = nRows;
4480 
4481  // Write the number of cols
4482  descriptors_info_node.append_child("ncols").append_child(pugi::node_pcdata).text() = nCols;
4483 
4484  // Write the descriptors type
4485  descriptors_info_node.append_child("type").append_child(pugi::node_pcdata).text() = descriptorType;
4486 
4487  for (int i = 0; i < nRows; i++) {
4488  unsigned int i_ = (unsigned int)i;
4489  pugi::xml_node descriptor_node = root_node.append_child("DescriptorInfo");
4490 
4491  descriptor_node.append_child("u").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.x;
4492  descriptor_node.append_child("v").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].pt.y;
4493  descriptor_node.append_child("size").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].size;
4494  descriptor_node.append_child("angle").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].angle;
4495  descriptor_node.append_child("response").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].response;
4496  descriptor_node.append_child("octave").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].octave;
4497  descriptor_node.append_child("class_id").append_child(pugi::node_pcdata).text() = m_trainKeyPoints[i_].class_id;
4498 
4499 #ifdef VISP_HAVE_MODULE_IO
4500  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4501  descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() =
4502  ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
4503 #else
4504  descriptor_node.append_child("image_id").append_child(pugi::node_pcdata).text() = -1;
4505 #endif
4506 
4507  if (have3DInfo) {
4508  descriptor_node.append_child("oX").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].x;
4509  descriptor_node.append_child("oY").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].y;
4510  descriptor_node.append_child("oZ").append_child(pugi::node_pcdata).text() = m_trainPoints[i_].z;
4511  }
4512 
4513  pugi::xml_node desc_node = descriptor_node.append_child("desc");
4514 
4515  for (int j = 0; j < nCols; j++) {
4516  switch (descriptorType) {
4517  case CV_8U: {
4518  // Promote an unsigned char to an int
4519  // val_tmp holds the numeric value that will be written
4520  // We save the value in numeric form, otherwise the xml library will not
4521  // be able to parse it. A better solution could be possible.
4522  int val_tmp = m_trainDescriptors.at<unsigned char>(i, j);
4523  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
4524  } break;
4525 
4526  case CV_8S: {
4527  // Promote a char to an int
4528  // val_tmp holds the numeric value that will be written
4529  // We save the value in numeric form, otherwise the xml library will not
4530  // be able to parse it. A better solution could be possible.
4531  int val_tmp = m_trainDescriptors.at<char>(i, j);
4532  desc_node.append_child("val").append_child(pugi::node_pcdata).text() = val_tmp;
4533  } break;
4534 
4535  case CV_16U:
4536  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4537  m_trainDescriptors.at<unsigned short int>(i, j);
4538  break;
4539 
4540  case CV_16S:
4541  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4542  m_trainDescriptors.at<short int>(i, j);
4543  break;
4544 
4545  case CV_32S:
4546  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4547  m_trainDescriptors.at<int>(i, j);
4548  break;
4549 
4550  case CV_32F:
4551  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4552  m_trainDescriptors.at<float>(i, j);
4553  break;
4554 
4555  case CV_64F:
4556  desc_node.append_child("val").append_child(pugi::node_pcdata).text() =
4557  m_trainDescriptors.at<double>(i, j);
4558  break;
4559 
4560  default:
4561  throw vpException(vpException::fatalError, "Problem with the data type of descriptors!");
4562  break;
4563  }
4564  }
4565  }
4566 
4567  doc.save_file(filename.c_str(), PUGIXML_TEXT(" "), pugi::format_default, pugi::encoding_utf8);
4568 #else
4569  std::cerr << "Error: pugixml is not properly built!" << std::endl;
4570 #endif
4571  }
4572 }
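
// A minimal save / reload sketch for the learning data written above (the file
// path is illustrative):
//
//   keypoint.saveLearningData("learning/ORB.bin", true);  // binary little endian mode
//   vpKeyPoint keypoint2;
//   keypoint2.loadLearningData("learning/ORB.bin", true); // keypoint2 can now call matchPoint()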
4573 
4574 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
4575 #ifndef DOXYGEN_SHOULD_SKIP_THIS
4576 // From OpenCV 2.4.11 source code.
4577 struct KeypointResponseGreaterThanThreshold {
4578  KeypointResponseGreaterThanThreshold(float _value) : value(_value) {}
4579  inline bool operator()(const cv::KeyPoint &kpt) const { return kpt.response >= value; }
4580  float value;
4581 };
4582 
4583 struct KeypointResponseGreater {
4584  inline bool operator()(const cv::KeyPoint &kp1, const cv::KeyPoint &kp2) const { return kp1.response > kp2.response; }
4585 };
4586 
4587 // takes keypoints and culls them by the response
4588 void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints, int n_points)
4589 {
4590  // this is only necessary if the keypoints size is greater than the number
4591  // of desired points.
4592  if (n_points >= 0 && keypoints.size() > (size_t)n_points) {
4593  if (n_points == 0) {
4594  keypoints.clear();
4595  return;
4596  }
4597  // first use nth element to partition the keypoints into the best and
4598  // worst.
4599  std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
4600  // this is the boundary response, and in the case of FAST may be ambiguous
4601  float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
4602  // use std::partition to grab all of the keypoints with the boundary
4603  // response.
4604  std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4605  keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
4606  // resize the keypoints, given this new end point. nth_element and
4607  // partition reordered the points inplace
4608  keypoints.resize((size_t)(new_end - keypoints.begin()));
4609  }
4610 }
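
// Sketch: keep only the 500 strongest detections (the value is arbitrary):
//
//   vpKeyPoint::KeyPointsFilter::retainBest(keypoints, 500);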
4611 
4612 struct RoiPredicate {
4613  RoiPredicate(const cv::Rect &_r) : r(_r) {}
4614 
4615  bool operator()(const cv::KeyPoint &keyPt) const { return !r.contains(keyPt.pt); }
4616 
4617  cv::Rect r;
4618 };
4619 
4620 void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4621  int borderSize)
4622 {
4623  if (borderSize > 0) {
4624  if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4625  keypoints.clear();
4626  else
4627  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4628  RoiPredicate(cv::Rect(
4629  cv::Point(borderSize, borderSize),
4630  cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4631  keypoints.end());
4632  }
4633 }
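
// Sketch: drop keypoints closer than 16 pixels to the border of a 640 x 480
// image (values are arbitrary):
//
//   vpKeyPoint::KeyPointsFilter::runByImageBorder(keypoints, cv::Size(640, 480), 16);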
4634 
4635 struct SizePredicate {
4636  SizePredicate(float _minSize, float _maxSize) : minSize(_minSize), maxSize(_maxSize) {}
4637 
4638  bool operator()(const cv::KeyPoint &keyPt) const
4639  {
4640  float size = keyPt.size;
4641  return (size < minSize) || (size > maxSize);
4642  }
4643 
4644  float minSize, maxSize;
4645 };
4646 
4647 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize)
4648 {
4649  CV_Assert(minSize >= 0);
4650  CV_Assert(maxSize >= 0);
4651  CV_Assert(minSize <= maxSize);
4652 
4653  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4654 }
4655 
4656 class MaskPredicate
4657 {
4658 public:
4659  MaskPredicate(const cv::Mat &_mask) : mask(_mask) {}
4660  bool operator()(const cv::KeyPoint &key_pt) const
4661  {
4662  return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
4663  }
4664 
4665 private:
4666  const cv::Mat mask;
4667  MaskPredicate &operator=(const MaskPredicate &);
4668 };
4669 
4670 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask)
4671 {
4672  if (mask.empty())
4673  return;
4674 
4675  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4676 }
4677 
4678 struct KeyPoint_LessThan {
4679  KeyPoint_LessThan(const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) {}
4680  bool operator()(size_t i, size_t j) const
4681  {
4682  const cv::KeyPoint &kp1 = (*kp)[i];
4683  const cv::KeyPoint &kp2 = (*kp)[j];
4684  if (!vpMath::equal(kp1.pt.x, kp2.pt.x,
4685  std::numeric_limits<float>::epsilon())) { // i.e. kp1.pt.x != kp2.pt.x
4687  return kp1.pt.x < kp2.pt.x;
4688  }
4689 
4690  if (!vpMath::equal(kp1.pt.y, kp2.pt.y,
4691  std::numeric_limits<float>::epsilon())) { // i.e. kp1.pt.y != kp2.pt.y
4693  return kp1.pt.y < kp2.pt.y;
4694  }
4695 
4696  if (!vpMath::equal(kp1.size, kp2.size,
4697  std::numeric_limits<float>::epsilon())) { // i.e. kp1.size != kp2.size
4699  return kp1.size > kp2.size;
4700  }
4701 
4702  if (!vpMath::equal(kp1.angle, kp2.angle,
4703  std::numeric_limits<float>::epsilon())) { // i.e. kp1.angle != kp2.angle
4705  return kp1.angle < kp2.angle;
4706  }
4707 
4708  if (!vpMath::equal(kp1.response, kp2.response,
4709  std::numeric_limits<float>::epsilon())) { // i.e. kp1.response != kp2.response
4711  return kp1.response > kp2.response;
4712  }
4713 
4714  if (kp1.octave != kp2.octave) {
4715  return kp1.octave > kp2.octave;
4716  }
4717 
4718  if (kp1.class_id != kp2.class_id) {
4719  return kp1.class_id > kp2.class_id;
4720  }
4721 
4722  return i < j;
4723  }
4724  const std::vector<cv::KeyPoint> *kp;
4725 };
4726 
4727 void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
4728 {
4729  size_t i, j, n = keypoints.size();
4730  std::vector<size_t> kpidx(n);
4731  std::vector<uchar> mask(n, (uchar)1);
4732 
4733  for (i = 0; i < n; i++) {
4734  kpidx[i] = i;
4735  }
4736  std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4737  for (i = 1, j = 0; i < n; i++) {
4738  cv::KeyPoint &kp1 = keypoints[kpidx[i]];
4739  cv::KeyPoint &kp2 = keypoints[kpidx[j]];
4740  // Two keypoints are considered duplicates when their position, size and
4741  // angle are all equal, up to the float epsilon
4742  if (!vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4743  !vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4744  !vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4745  !vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4746  j = i;
4747  } else {
4748  mask[kpidx[i]] = 0;
4749  }
4750  }
4751 
4752  for (i = j = 0; i < n; i++) {
4753  if (mask[i]) {
4754  if (i != j) {
4755  keypoints[j] = keypoints[i];
4756  }
4757  j++;
4758  }
4759  }
4760  keypoints.resize(j);
4761 }
4762 
4763 /*
4764  * PyramidAdaptedFeatureDetector
4765  */
4766 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &_detector,
4767  int _maxLevel)
4768  : detector(_detector), maxLevel(_maxLevel)
4769 {
4770 }
4771 
4772 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty() const
4773 {
4774  return detector.empty() || detector->empty();
4775 }
4776 
4777 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
4778  CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
4779 {
4780  detectImpl(image.getMat(), keypoints, mask.getMat());
4781 }
4782 
4783 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
4784  const cv::Mat &mask) const
4785 {
4786  cv::Mat src = image;
4787  cv::Mat src_mask = mask;
4788 
4789  cv::Mat dilated_mask;
4790  if (!mask.empty()) {
4791  cv::dilate(mask, dilated_mask, cv::Mat());
4792  cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4793  mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4794  dilated_mask = mask255;
4795  }
4796 
4797  for (int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2) {
4798  // Detect on current level of the pyramid
4799  std::vector<cv::KeyPoint> new_pts;
4800  detector->detect(src, new_pts, src_mask);
4801  std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
4802  for (; it != end; ++it) {
4803  it->pt.x *= multiplier;
4804  it->pt.y *= multiplier;
4805  it->size *= multiplier;
4806  it->octave = l;
4807  }
4808  keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4809 
4810  // Downsample
4811  if (l < maxLevel) {
4812  cv::Mat dst;
4813  pyrDown(src, dst);
4814  src = dst;
4815 
4816  if (!mask.empty())
4817  resize(dilated_mask, src_mask, src.size(), 0, 0, cv::INTER_AREA);
4818  }
4819  }
4820 
4821  if (!mask.empty())
4822  vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4823 }
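
// A minimal usage sketch for the pyramid adapter above, assuming the OpenCV 3.x
// FAST factory (3 levels is an arbitrary choice):
//
//   cv::Ptr<cv::FeatureDetector> fast = cv::FastFeatureDetector::create();
//   vpKeyPoint::PyramidAdaptedFeatureDetector pyramidDetector(fast, 3);
//   std::vector<cv::KeyPoint> kpts;
//   pyramidDetector.detect(img, kpts, cv::noArray());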
4824 #endif
4825 #endif
4826 
4827 #elif !defined(VISP_BUILD_SHARED_LIBS)
4828 // Work around to avoid warning: libvisp_vision.a(vpKeyPoint.cpp.o) has no
4829 // symbols
4830 void dummy_vpKeyPoint(){};
4831 #endif