Visual Servoing Platform  version 3.2.0 under development (2019-01-22)
vpKeyPoint.cpp
1 /****************************************************************************
2  *
3  * ViSP, open source Visual Servoing Platform software.
4  * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
5  *
6  * This software is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  * See the file LICENSE.txt at the root directory of this source
11  * distribution for additional information about the GNU GPL.
12  *
13  * For using ViSP with software that can not be combined with the GNU
14  * GPL, please contact Inria about acquiring a ViSP Professional
15  * Edition License.
16  *
17  * See http://visp.inria.fr for more information.
18  *
19  * This software was developed at:
20  * Inria Rennes - Bretagne Atlantique
21  * Campus Universitaire de Beaulieu
22  * 35042 Rennes Cedex
23  * France
24  *
25  * If you have questions regarding the use of this file, please contact
26  * Inria at visp@inria.fr
27  *
28  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
29  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
30  *
31  * Description:
32  * Key point functionalities.
33  *
34  * Authors:
35  * Souriya Trinh
36  *
37  *****************************************************************************/
38 
39 #include <iomanip>
40 #include <limits>
41 
42 #include <visp3/core/vpIoTools.h>
43 #include <visp3/vision/vpKeyPoint.h>
44 
45 #if (VISP_HAVE_OPENCV_VERSION >= 0x020101)
46 
47 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
48 #include <opencv2/calib3d/calib3d.hpp>
49 #endif
50 
51 namespace
52 {
53 // Specific Type transformation functions
54 inline cv::DMatch knnToDMatch(const std::vector<cv::DMatch> &knnMatches)
55 {
56  if (knnMatches.size() > 0) {
57  return knnMatches[0];
58  }
59 
60  return cv::DMatch();
61 }
62 
63 inline vpImagePoint matchRansacToVpImage(const std::pair<cv::KeyPoint, cv::Point3f> &pair)
64 {
65  return vpImagePoint(pair.first.pt.y, pair.first.pt.x);
66 }
67 
68 }
69 
/*!
  Constructor to initialize the specified detector, descriptor, matcher and
  filtering method.

  \param detectorType : Type of the detector.
  \param descriptorType : Type of the descriptor.
  \param matcherName : Name of the matcher.
  \param filterType : Filtering matching method chosen.
 */
vpKeyPoint::vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
    m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
    m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    // Cross-check is only available with the brute-force matcher on
    // OpenCV 2.4.x; the option was removed/changed in OpenCV 3.x
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true)
{
  initFeatureNames();

  // Translate the enum values into the corresponding OpenCV detector /
  // extractor string names
  m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
  m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);

  init();
}
105 
/*!
  Constructor to initialize the specified detector, descriptor, matcher and
  filtering method from their string names.

  \param detectorName : Name of the detector.
  \param extractorName : Name of the extractor.
  \param matcherName : Name of the matcher.
  \param filterType : Filtering matching method chosen.
 */
vpKeyPoint::vpKeyPoint(const std::string &detectorName, const std::string &extractorName,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(), m_detectors(),
    m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(),
    m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85),
    m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100),
    m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(), m_ransacOutliers(),
    m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0),
    m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(),
    m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    // Cross-check is only available with the brute-force matcher on
    // OpenCV 2.4.x; the option was removed/changed in OpenCV 3.x
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true)
{
  initFeatureNames();

  m_detectorNames.push_back(detectorName);
  m_extractorNames.push_back(extractorName);

  init();
}
141 
/*!
  Constructor to initialize multiple detectors and extractors, plus the
  specified matcher and filtering method.

  \param detectorNames : List of detector names.
  \param extractorNames : List of extractor names.
  \param matcherName : Name of the matcher.
  \param filterType : Filtering matching method chosen.
 */
vpKeyPoint::vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
    m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
    m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
    m_matcher(), m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0),
    m_matchingRatioThreshold(0.85), m_matchingTime(0.), m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200),
    m_nbRansacMinInlierCount(100), m_objectFilteredPoints(), m_poseTime(0.), m_queryDescriptors(),
    m_queryFilteredKeyPoints(), m_queryKeyPoints(), m_ransacConsensusPercentage(20.0), m_ransacFilterFlag(vpPose::NO_FILTER), m_ransacInliers(),
    m_ransacOutliers(), m_ransacParallel(false), m_ransacParallelNbThreads(0), m_ransacReprojectionError(6.0), m_ransacThreshold(0.01),
    m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(), m_trainVpPoints(), m_useAffineDetection(false),
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    // Cross-check is only available with the brute-force matcher on
    // OpenCV 2.4.x; the option was removed/changed in OpenCV 3.x
    m_useBruteForceCrossCheck(true),
#endif
    m_useConsensusPercentage(false), m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true),
    m_useSingleMatchFilter(true)
{
  initFeatureNames();
  init();
}
172 
181 void vpKeyPoint::affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai)
182 {
183  int h = img.rows;
184  int w = img.cols;
185 
186  mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));
187 
188  cv::Mat A = cv::Mat::eye(2, 3, CV_32F);
189 
190  // if (phi != 0.0) {
191  if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
192  phi *= M_PI / 180.;
193  double s = sin(phi);
194  double c = cos(phi);
195 
196  A = (cv::Mat_<float>(2, 2) << c, -s, s, c);
197 
198  cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
199  cv::Mat tcorners = corners * A.t();
200  cv::Mat tcorners_x, tcorners_y;
201  tcorners.col(0).copyTo(tcorners_x);
202  tcorners.col(1).copyTo(tcorners_y);
203  std::vector<cv::Mat> channels;
204  channels.push_back(tcorners_x);
205  channels.push_back(tcorners_y);
206  cv::merge(channels, tcorners);
207 
208  cv::Rect rect = cv::boundingRect(tcorners);
209  A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
210 
211  cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height), cv::INTER_LINEAR, cv::BORDER_REPLICATE);
212  }
213  // if (tilt != 1.0) {
214  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
215  double s = 0.8 * sqrt(tilt * tilt - 1);
216  cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
217  cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
218  A.row(0) = A.row(0) / tilt;
219  }
220  // if (tilt != 1.0 || phi != 0.0) {
221  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() ||
222  std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
223  h = img.rows;
224  w = img.cols;
225  cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
226  }
227  cv::invertAffineTransform(A, Ai);
228 }
229 
237 
248  const unsigned int height, const unsigned int width)
249 {
250 
251  return buildReference(I, vpRect(iP, width, height));
252 }
253 
/*!
  Build the reference keypoints list from a region of interest of the image.

  \param I : Input reference image.
  \param rectangle : Rectangle of the region of interest where keypoints are
  detected.

  \return The number of detected (train) keypoints.
 */
unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const vpRect &rectangle)
{
  // Reset variables used when dealing with 3D models.
  // Since no 3D point list is passed, we don't need these variables.
  m_trainPoints.clear();
  m_mapOfImageId.clear();
  m_mapOfImages.clear();
  m_currentImageId = 1;

  if (m_useAffineDetection) {
    std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
    std::vector<cv::Mat> listOfTrainDescriptors;

    // Detect keypoints and extract descriptors on multiple images
    // (simulated affine views of I)
    detectExtractAffine(I, listOfTrainKeyPoints, listOfTrainDescriptors);

    // Flatten the different train lists
    m_trainKeyPoints.clear();
    for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
         it != listOfTrainKeyPoints.end(); ++it) {
      m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
    }

    // Copy the first descriptor matrix, then append the others row-wise
    bool first = true;
    for (std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end();
         ++it) {
      if (first) {
        first = false;
        it->copyTo(m_trainDescriptors);
      } else {
        m_trainDescriptors.push_back(*it);
      }
    }
  } else {
    detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
    extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
  }

  // Save the correspondence keypoint class_id with the training image_id in a
  // map. Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Convert OpenCV type to ViSP type for compatibility

  _reference_computed = true;

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}
319 
331 void vpKeyPoint::buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
332  std::vector<cv::Point3f> &points3f, const bool append, const int class_id)
333 {
334  cv::Mat trainDescriptors;
335  // Copy the input list of keypoints
336  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
337 
338  extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
339 
340  if (trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
341  // Keypoints have been removed
342  // Store the hash of a keypoint as the key and the index of the keypoint
343  // as the value
344  std::map<size_t, size_t> mapOfKeypointHashes;
345  size_t cpt = 0;
346  for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
347  ++it, cpt++) {
348  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
349  }
350 
351  std::vector<cv::Point3f> trainPoints_tmp;
352  for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
353  if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
354  trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
355  }
356  }
357 
358  // Copy trainPoints_tmp to points3f
359  points3f = trainPoints_tmp;
360  }
361 
362  buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id);
363 }
364 
/*!
  Build the reference database from already computed keypoints, descriptors
  and corresponding 3D points.

  \param I : Input image.
  \param trainKeyPoints : List of train keypoints.
  \param trainDescriptors : Descriptors of the train keypoints.
  \param points3f : 3D point corresponding to each train keypoint.
  \param append : If true, append the data to the current reference lists,
  otherwise reset them first.
  \param class_id : Class id to assign to the keypoints (-1 keeps the
  existing class ids).
 */
void vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
                                const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
                                const bool append, const int class_id)
{
  if (!append) {
    // Start a fresh reference database
    m_currentImageId = 0;
    m_mapOfImageId.clear();
    m_mapOfImages.clear();
    this->m_trainKeyPoints.clear();
    this->m_trainPoints.clear();
  }

  m_currentImageId++;

  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
  // Override the keypoint class ids when a specific class_id is requested
  if (class_id != -1) {
    for (std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
      it->class_id = class_id;
    }
  }

  // Save the correspondence keypoint class_id with the training image_id in a
  // map. Used to display the matching with all the training images.
  for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end();
       ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  // Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  // Append reference lists: descriptors are copied on a fresh database,
  // appended row-wise otherwise
  this->m_trainKeyPoints.insert(this->m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
  if (!append) {
    trainDescriptors.copyTo(this->m_trainDescriptors);
  } else {
    this->m_trainDescriptors.push_back(trainDescriptors);
  }
  this->m_trainPoints.insert(this->m_trainPoints.end(), points3f.begin(), points3f.end());

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(this->m_trainPoints, m_trainVpPoints);

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  _reference_computed = true;
}
428 
444 void vpKeyPoint::compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi,
445  const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
446 {
447  /* compute plane equation */
448  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
449  vpPoint pts[3];
450  pts[0] = *it_roi;
451  ++it_roi;
452  pts[1] = *it_roi;
453  ++it_roi;
454  pts[2] = *it_roi;
455  vpPlane Po(pts[0], pts[1], pts[2]);
456  double xc = 0.0, yc = 0.0;
457  vpPixelMeterConversion::convertPoint(cam, candidate.pt.x, candidate.pt.y, xc, yc);
458  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
459  double X = xc * Z;
460  double Y = yc * Z;
461  vpColVector point_cam(4);
462  point_cam[0] = X;
463  point_cam[1] = Y;
464  point_cam[2] = Z;
465  point_cam[3] = 1;
466  vpColVector point_obj(4);
467  point_obj = cMo.inverse() * point_cam;
468  point = cv::Point3f((float)point_obj[0], (float)point_obj[1], (float)point_obj[2]);
469 }
470 
486 void vpKeyPoint::compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi,
487  const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, vpPoint &point)
488 {
489  /* compute plane equation */
490  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
491  vpPoint pts[3];
492  pts[0] = *it_roi;
493  ++it_roi;
494  pts[1] = *it_roi;
495  ++it_roi;
496  pts[2] = *it_roi;
497  vpPlane Po(pts[0], pts[1], pts[2]);
498  double xc = 0.0, yc = 0.0;
499  vpPixelMeterConversion::convertPoint(cam, candidate, xc, yc);
500  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
501  double X = xc * Z;
502  double Y = yc * Z;
503  vpColVector point_cam(4);
504  point_cam[0] = X;
505  point_cam[1] = Y;
506  point_cam[2] = Z;
507  point_cam[3] = 1;
508  vpColVector point_obj(4);
509  point_obj = cMo.inverse() * point_cam;
510  point.setWorldCoordinates(point_obj);
511 }
512 
529  std::vector<cv::KeyPoint> &candidates,
530  const std::vector<vpPolygon> &polygons,
531  const std::vector<std::vector<vpPoint> > &roisPt,
532  std::vector<cv::Point3f> &points, cv::Mat *descriptors)
533 {
534 
535  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
536  candidates.clear();
537  points.clear();
538  vpImagePoint imPt;
539  cv::Point3f pt;
540  cv::Mat desc;
541 
542  std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
543  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
544  pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
545  }
546 
547  size_t cpt1 = 0;
548  std::vector<vpPolygon> polygons_tmp = polygons;
549  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
550  std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
551 
552  while (it2 != pairOfCandidatesToCheck.end()) {
553  imPt.set_ij(it2->first.pt.y, it2->first.pt.x);
554  if (it1->isInside(imPt)) {
555  candidates.push_back(it2->first);
556  vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
557  points.push_back(pt);
558 
559  if (descriptors != NULL) {
560  desc.push_back(descriptors->row((int)it2->second));
561  }
562 
563  // Remove candidate keypoint which is located on the current polygon
564  it2 = pairOfCandidatesToCheck.erase(it2);
565  } else {
566  ++it2;
567  }
568  }
569  }
570 
571  if (descriptors != NULL) {
572  desc.copyTo(*descriptors);
573  }
574 }
575 
593  std::vector<vpImagePoint> &candidates,
594  const std::vector<vpPolygon> &polygons,
595  const std::vector<std::vector<vpPoint> > &roisPt,
596  std::vector<vpPoint> &points, cv::Mat *descriptors)
597 {
598 
599  std::vector<vpImagePoint> candidatesToCheck = candidates;
600  candidates.clear();
601  points.clear();
602  vpPoint pt;
603  cv::Mat desc;
604 
605  std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
606  for (size_t i = 0; i < candidatesToCheck.size(); i++) {
607  pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
608  }
609 
610  size_t cpt1 = 0;
611  std::vector<vpPolygon> polygons_tmp = polygons;
612  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
613  std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
614 
615  while (it2 != pairOfCandidatesToCheck.end()) {
616  if (it1->isInside(it2->first)) {
617  candidates.push_back(it2->first);
618  vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
619  points.push_back(pt);
620 
621  if (descriptors != NULL) {
622  desc.push_back(descriptors->row((int)it2->second));
623  }
624 
625  // Remove candidate keypoint which is located on the current polygon
626  it2 = pairOfCandidatesToCheck.erase(it2);
627  } else {
628  ++it2;
629  }
630  }
631  }
632 }
633 
650  const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<cv::KeyPoint> &candidates,
651  const std::vector<vpCylinder> &cylinders,
652  const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<cv::Point3f> &points,
653  cv::Mat *descriptors)
654 {
655  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
656  candidates.clear();
657  points.clear();
658  cv::Mat desc;
659 
660  // Keep only keypoints on cylinders
661  size_t cpt_keypoint = 0;
662  for (std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
663  ++it1, cpt_keypoint++) {
664  size_t cpt_cylinder = 0;
665 
666  // Iterate through the list of vpCylinders
667  for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
668  it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
669  // Iterate through the list of the bounding boxes of the current
670  // vpCylinder
671  for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
672  if (vpPolygon::isInside(*it3, it1->pt.y, it1->pt.x)) {
673  candidates.push_back(*it1);
674 
675  // Calculate the 3D coordinates for each keypoint located on
676  // cylinders
677  double xm = 0.0, ym = 0.0;
678  vpPixelMeterConversion::convertPoint(cam, it1->pt.x, it1->pt.y, xm, ym);
679  double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
680 
681  if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
682  vpColVector point_cam(4);
683  point_cam[0] = xm * Z;
684  point_cam[1] = ym * Z;
685  point_cam[2] = Z;
686  point_cam[3] = 1;
687  vpColVector point_obj(4);
688  point_obj = cMo.inverse() * point_cam;
689  vpPoint pt;
690  pt.setWorldCoordinates(point_obj);
691  points.push_back(cv::Point3f((float)pt.get_oX(), (float)pt.get_oY(), (float)pt.get_oZ()));
692 
693  if (descriptors != NULL) {
694  desc.push_back(descriptors->row((int)cpt_keypoint));
695  }
696 
697  break;
698  }
699  }
700  }
701  }
702  }
703 
704  if (descriptors != NULL) {
705  desc.copyTo(*descriptors);
706  }
707 }
708 
725  const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector<vpImagePoint> &candidates,
726  const std::vector<vpCylinder> &cylinders,
727  const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois, std::vector<vpPoint> &points,
728  cv::Mat *descriptors)
729 {
730  std::vector<vpImagePoint> candidatesToCheck = candidates;
731  candidates.clear();
732  points.clear();
733  cv::Mat desc;
734 
735  // Keep only keypoints on cylinders
736  size_t cpt_keypoint = 0;
737  for (std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin(); it1 != candidatesToCheck.end();
738  ++it1, cpt_keypoint++) {
739  size_t cpt_cylinder = 0;
740 
741  // Iterate through the list of vpCylinders
742  for (std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
743  it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
744  // Iterate through the list of the bounding boxes of the current
745  // vpCylinder
746  for (std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
747  if (vpPolygon::isInside(*it3, it1->get_i(), it1->get_j())) {
748  candidates.push_back(*it1);
749 
750  // Calculate the 3D coordinates for each keypoint located on
751  // cylinders
752  double xm = 0.0, ym = 0.0;
753  vpPixelMeterConversion::convertPoint(cam, it1->get_u(), it1->get_v(), xm, ym);
754  double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
755 
756  if (!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
757  vpColVector point_cam(4);
758  point_cam[0] = xm * Z;
759  point_cam[1] = ym * Z;
760  point_cam[2] = Z;
761  point_cam[3] = 1;
762  vpColVector point_obj(4);
763  point_obj = cMo.inverse() * point_cam;
764  vpPoint pt;
765  pt.setWorldCoordinates(point_obj);
766  points.push_back(pt);
767 
768  if (descriptors != NULL) {
769  desc.push_back(descriptors->row((int)cpt_keypoint));
770  }
771 
772  break;
773  }
774  }
775  }
776  }
777  }
778 
779  if (descriptors != NULL) {
780  desc.copyTo(*descriptors);
781  }
782 }
783 
/*!
  Compute the pose using OpenCV solvePnPRansac from 2D/3D point
  correspondences.

  \param imagePoints : List of 2D image points.
  \param objectPoints : List of corresponding 3D object points.
  \param cam : Camera parameters.
  \param cMo : Output homogeneous matrix with the estimated pose.
  \param inlierIndex : Output list of the RANSAC inlier indexes.
  \param elapsedTime : Output elapsed time of the computation, in ms.
  \param func : Optional user function to reject a final pose that does not
  respect some criterion; pass NULL to skip this check.

  \return True if the pose has been computed (and accepted by \e func when
  provided), false otherwise.
 */
bool vpKeyPoint::computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
                             const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
                             double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &))
{
  double t = vpTime::measureTimeMs();

  // solvePnP needs at least 4 correspondences, and both lists must match
  if (imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    std::cerr << "Not enough points to compute the pose (at least 4 points "
                 "are needed)."
              << std::endl;

    return false;
  }

  // Intrinsic matrix built from the ViSP camera parameters
  cv::Mat cameraMatrix =
      (cv::Mat_<double>(3, 3) << cam.get_px(), 0, cam.get_u0(), 0, cam.get_py(), cam.get_v0(), 0, 0, 1);
  cv::Mat rvec, tvec;

  // Bug with OpenCV < 2.4.0 when zero distorsion is provided by an empty
  // array. http://code.opencv.org/issues/1700 ;
  // http://code.opencv.org/issues/1718 what(): Distortion coefficients must
  // be 1x4, 4x1, 1x5, 5x1, 1x8 or 8x1 floating-point vector in function
  // cvProjectPoints2 Fixed in OpenCV 2.4.0 (r7558)
  // cv::Mat distCoeffs;
  cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);

  try {
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
    // OpenCV 3.0.0 (2014/12/12) signature: the RANSAC confidence replaces
    // the minimum inlier count of the 2.x API.
    // SOLVEPNP_ITERATIVE (default): Levenberg-Marquardt optimization that
    // minimizes the reprojection error between the observed imagePoints and
    // the projected objectPoints.
    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError,
                       0.99, // confidence=0.99 (default) – The probability
                             // that the algorithm produces a useful result.
                       inlierIndex, cv::SOLVEPNP_ITERATIVE);
#else
    // OpenCV 2.x API: stop when the consensus reaches a given inlier count,
    // either an absolute count or a percentage of the query keypoints
    int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
    if (m_useConsensusPercentage) {
      nbInlierToReachConsensus = (int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
    }

    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec, false, m_nbRansacIterations,
                       (float)m_ransacReprojectionError, nbInlierToReachConsensus, inlierIndex);
#endif
  } catch (cv::Exception &e) {
    std::cerr << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }
  // Convert the OpenCV rotation (axis-angle) / translation vectors into a
  // ViSP homogeneous matrix
  vpTranslationVector translationVec(tvec.at<double>(0), tvec.at<double>(1), tvec.at<double>(2));
  vpThetaUVector thetaUVector(rvec.at<double>(0), rvec.at<double>(1), rvec.at<double>(2));
  cMo = vpHomogeneousMatrix(translationVec, thetaUVector);

  if (func != NULL) {
    // Check the final pose returned by solvePnPRansac to discard
    // solutions which do not respect the pose criterion.
    if (!func(cMo)) {
      elapsedTime = (vpTime::measureTimeMs() - t);
      return false;
    }
  }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return true;
}
880 
893 bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
894  std::vector<vpPoint> &inliers, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &))
895 {
896  std::vector<unsigned int> inlierIndex;
897  return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
898 }
899 
913 bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
914  std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex, double &elapsedTime,
915  bool (*func)(const vpHomogeneousMatrix &))
916 {
917  double t = vpTime::measureTimeMs();
918 
919  if (objectVpPoints.size() < 4) {
920  elapsedTime = (vpTime::measureTimeMs() - t);
921  // std::cerr << "Not enough points to compute the pose (at least 4
922  // points are needed)." << std::endl;
923 
924  return false;
925  }
926 
927  vpPose pose;
928 
929  for (std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
930  pose.addPoint(*it);
931  }
932 
933  unsigned int nbInlierToReachConsensus = (unsigned int)m_nbRansacMinInlierCount;
934  if (m_useConsensusPercentage) {
935  nbInlierToReachConsensus =
936  (unsigned int)(m_ransacConsensusPercentage / 100.0 * (double)m_queryFilteredKeyPoints.size());
937  }
938 
939  pose.setRansacFilterFlag(m_ransacFilterFlag);
940  pose.setUseParallelRansac(m_ransacParallel);
941  pose.setNbParallelRansacThreads(m_ransacParallelNbThreads);
942  pose.setRansacNbInliersToReachConsensus(nbInlierToReachConsensus);
943  pose.setRansacThreshold(m_ransacThreshold);
944  pose.setRansacMaxTrials(m_nbRansacIterations);
945 
946  bool isRansacPoseEstimationOk = false;
947  try {
948  pose.setCovarianceComputation(m_computeCovariance);
949  isRansacPoseEstimationOk = pose.computePose(vpPose::RANSAC, cMo, func);
950  inliers = pose.getRansacInliers();
951  inlierIndex = pose.getRansacInlierIndex();
952 
953  if (m_computeCovariance) {
954  m_covarianceMatrix = pose.getCovarianceMatrix();
955  }
956  } catch (const vpException &e) {
957  std::cerr << "e=" << e.what() << std::endl;
958  elapsedTime = (vpTime::measureTimeMs() - t);
959  return false;
960  }
961 
962  // if(func != NULL && isRansacPoseEstimationOk) {
963  // //Check the final pose returned by the Ransac VVS pose estimation as
964  // in rare some cases
965  // //we can converge toward a final cMo that does not respect the pose
966  // criterion even
967  // //if the 4 minimal points picked to respect the pose criterion.
968  // if(!func(&cMo)) {
969  // elapsedTime = (vpTime::measureTimeMs() - t);
970  // return false;
971  // }
972  // }
973 
974  elapsedTime = (vpTime::measureTimeMs() - t);
975  return isRansacPoseEstimationOk;
976 }
977 
991 double vpKeyPoint::computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
992  const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est)
993 {
994  if (matchKeyPoints.size() == 0) {
995  // return std::numeric_limits<double>::max(); // create an error under
996  // Windows. To fix it we have to add #undef max
997  return DBL_MAX;
998  }
999 
1000  std::vector<double> errors(matchKeyPoints.size());
1001  size_t cpt = 0;
1002  vpPoint pt;
1003  for (std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
1004  it != matchKeyPoints.end(); ++it, cpt++) {
1005  pt.set_oX(it->second.x);
1006  pt.set_oY(it->second.y);
1007  pt.set_oZ(it->second.z);
1008  pt.project(cMo_est);
1009  double u = 0.0, v = 0.0;
1010  vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), u, v);
1011  errors[cpt] = std::sqrt((u - it->first.pt.x) * (u - it->first.pt.x) + (v - it->first.pt.y) * (v - it->first.pt.y));
1012  }
1013 
1014  return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
1015 }
1016 
                                     vpImage<unsigned char> &IMatching)
{
  // Allocate the side-by-side matching image: reference image on the left,
  // current image on the right, tall enough for the larger of the two.
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = ((std::max))(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<unsigned char>(height, width);
}
1034 
{
  // Nb images in the training database + the current image we want to detect
  // the object
  unsigned int nbImg = (unsigned int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded !" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  } else {
    // Multiple training images, display them as a mosaic image
    //(unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
    unsigned int nbImgSqrt = (unsigned int)vpMath::round(std::sqrt((double)nbImg));

    // Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    // Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    // Deals with non square mosaic grid and the total number of images
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    // Each mosaic cell must be large enough for the biggest image,
    // training or current
    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    // Allocate the full mosaic image
    IMatching = vpImage<unsigned char>(maxH * nbHeight, maxW * nbWidth);
  }
}
1089 
1097 void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle)
1098 {
1099  double elapsedTime;
1100  detect(I, keyPoints, elapsedTime, rectangle);
1101 }
1102 
1111 void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask)
1112 {
1113  double elapsedTime;
1114  detect(matImg, keyPoints, elapsedTime, mask);
1115 }
1116 
1125 void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
1126  const vpRect &rectangle)
1127 {
1128  cv::Mat matImg;
1129  vpImageConvert::convert(I, matImg, false);
1130  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1131 
1132  if (rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
1133  cv::Point leftTop((int)rectangle.getLeft(), (int)rectangle.getTop()),
1134  rightBottom((int)rectangle.getRight(), (int)rectangle.getBottom());
1135  cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1136  } else {
1137  mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1138  }
1139 
1140  detect(matImg, keyPoints, elapsedTime, mask);
1141 }
1142 
1152 void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
1153  const cv::Mat &mask)
1154 {
1155  double t = vpTime::measureTimeMs();
1156  keyPoints.clear();
1157 
1158  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
1159  it != m_detectors.end(); ++it) {
1160  std::vector<cv::KeyPoint> kp;
1161  it->second->detect(matImg, kp, mask);
1162  keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
1163  }
1164 
1165  elapsedTime = vpTime::measureTimeMs() - t;
1166 }
1167 
1175 void vpKeyPoint::display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size)
1176 {
1177  std::vector<vpImagePoint> vpQueryImageKeyPoints;
1178  getQueryKeyPoints(vpQueryImageKeyPoints);
1179  std::vector<vpImagePoint> vpTrainImageKeyPoints;
1180  getTrainKeyPoints(vpTrainImageKeyPoints);
1181 
1182  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1183  vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
1184  vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
1185  }
1186 }
1187 
1195 void vpKeyPoint::display(const vpImage<unsigned char> &ICurrent, unsigned int size, const vpColor &color)
1196 {
1197  std::vector<vpImagePoint> vpQueryImageKeyPoints;
1198  getQueryKeyPoints(vpQueryImageKeyPoints);
1199 
1200  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1201  vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
1202  }
1203 }
1204 
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color)
{
  // With vpColor::none, a random color is drawn for every match
  bool randomColor = (color == vpColor::none);
  srand((unsigned int)time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if (randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    // The train keypoint lies in the reference image (left half of the
    // matching image); the query keypoint is shifted right by the reference
    // image width (right half)
    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}
1242 
                                 const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize,
                                 unsigned int lineThickness)
{
  if (m_mapOfImages.empty() || m_mapOfImageId.empty()) {
    // No training images so return
    std::cerr << "There is no training image loaded !" << std::endl;
    return;
  }

  // Nb images in the training database + the current image we want to detect
  // the object
  int nbImg = (int)(m_mapOfImages.size() + 1);

  if (nbImg == 2) {
    // Only one training image, so we display the matching result side-by-side
    displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
  } else {
    // Multiple training images, display them as a mosaic image
    int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
    int nbWidth = nbImgSqrt;
    int nbHeight = nbImgSqrt;

    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    // Map each training image id to its position in the mosaic and compute
    // the size of a mosaic cell (the largest image must fit in a cell)
    std::map<int, int> mapOfImageIdIndex;
    int cpt = 0;
    unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it, cpt++) {
      mapOfImageIdIndex[it->first] = cpt;

      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    // Indexes of the current image in the grid computed to put preferably the
    // image in the center of the mosaic grid
    int medianI = nbHeight / 2;
    int medianJ = nbWidth / 2;
    int medianIndex = medianI * nbWidth + medianJ;
    for (std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
      vpImagePoint topLeftCorner;
      int current_class_id_index = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
      } else {
        // Shift of one unity the index of the training images which are after
        // the current image
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
      }

      // Convert the linear cell index into (row, column) grid coordinates
      int indexI = current_class_id_index / nbWidth;
      int indexJ = current_class_id_index - (indexI * nbWidth);
      topLeftCorner.set_ij((int)maxH * indexI, (int)maxW * indexJ);

      // Display cross for keypoints in the learning database
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }

    // The current image occupies the central cell of the mosaic
    vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
    for (std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
      // Display cross for keypoints detected in the current image
      vpDisplay::displayCross(IMatching, (int)(it->pt.y + topLeftCorner.get_i()),
                              (int)(it->pt.x + topLeftCorner.get_j()), crossSize, vpColor::red);
    }
    for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
      // Display green circle for RANSAC inliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_v() + topLeftCorner.get_i()),
                               (int)(it->get_u() + topLeftCorner.get_j()), 4, vpColor::green);
    }
    for (std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
      // Display red circle for RANSAC outliers
      vpDisplay::displayCircle(IMatching, (int)(it->get_i() + topLeftCorner.get_i()),
                               (int)(it->get_j() + topLeftCorner.get_j()), 4, vpColor::red);
    }

    for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
      int current_class_id = 0;
      if (mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] < medianIndex) {
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]];
      } else {
        // Shift of one unity the index of the training images which are after
        // the current image
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t)it->trainIdx].class_id]] + 1;
      }

      int indexI = current_class_id / nbWidth;
      int indexJ = current_class_id - (indexI * nbWidth);

      vpImagePoint end((int)maxH * indexI + m_trainKeyPoints[(size_t)it->trainIdx].pt.y,
                       (int)maxW * indexJ + m_trainKeyPoints[(size_t)it->trainIdx].pt.x);
      vpImagePoint start((int)maxH * medianI + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.y,
                         (int)maxW * medianJ + m_queryFilteredKeyPoints[(size_t)it->queryIdx].pt.x);

      // Draw line for matching keypoints detected in the current image and
      // those detected in the training images
      vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
    }
  }
}
1363 
1374 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1375  std::vector<cv::Point3f> *trainPoints)
1376 {
1377  double elapsedTime;
1378  extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1379 }
1380 
1391 void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1392  std::vector<cv::Point3f> *trainPoints)
1393 {
1394  double elapsedTime;
1395  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1396 }
1397 
1408 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1409  double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
1410 {
1411  cv::Mat matImg;
1412  vpImageConvert::convert(I, matImg, false);
1413  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1414 }
1415 
/*!
  Extract descriptors for the given keypoints using every configured
  extractor, concatenating the descriptors horizontally when several
  extractors are chained. Keypoints whose descriptor cannot be computed are
  removed, and the optional list of associated 3D points is kept in sync.

  \param matImg : Input image.
  \param keyPoints : Input/output list of keypoints; entries removed by an
  extractor's compute() are dropped.
  \param descriptors : Output matrix of descriptors (one row per keypoint).
  \param elapsedTime : Output computation time in ms.
  \param trainPoints : Optional 3D points associated to the keypoints,
  filtered consistently with \e keyPoints (may be NULL).
*/
void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
                         double &elapsedTime, std::vector<cv::Point3f> *trainPoints)
{
  double t = vpTime::measureTimeMs();
  bool first = true;

  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
       itd != m_extractors.end(); ++itd) {
    if (first) {
      first = false;
      // Check if we have 3D object points information
      if (trainPoints != NULL && !trainPoints->empty()) {
        // Copy the input list of keypoints, keypoints that cannot be computed
        // are removed in the function compute
        std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;

        // Extract descriptors for the given list of keypoints
        itd->second->compute(matImg, keyPoints, descriptors);

        if (keyPoints.size() != keyPoints_tmp.size()) {
          // Keypoints have been removed
          // Store the hash of a keypoint as the key and the index of the
          // keypoint as the value
          std::map<size_t, size_t> mapOfKeypointHashes;
          size_t cpt = 0;
          for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
               ++it, cpt++) {
            mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
          }

          // Keep only the 3D points whose keypoint survived compute()
          std::vector<cv::Point3f> trainPoints_tmp;
          for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
            if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
              trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
            }
          }

          // Copy trainPoints_tmp to m_trainPoints
          *trainPoints = trainPoints_tmp;
        }
      } else {
        // Extract descriptors for the given list of keypoints
        itd->second->compute(matImg, keyPoints, descriptors);
      }
    } else {
      // Copy the input list of keypoints, keypoints that cannot be computed
      // are removed in the function compute
      std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;

      cv::Mat desc;
      // Extract descriptors for the given list of keypoints
      itd->second->compute(matImg, keyPoints, desc);

      if (keyPoints.size() != keyPoints_tmp.size()) {
        // Keypoints have been removed
        // Store the hash of a keypoint as the key and the index of the
        // keypoint as the value
        std::map<size_t, size_t> mapOfKeypointHashes;
        size_t cpt = 0;
        for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end();
             ++it, cpt++) {
          mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
        }

        // Filter the 3D points and the descriptors computed by the previous
        // extractors so they stay aligned with the surviving keypoints
        std::vector<cv::Point3f> trainPoints_tmp;
        cv::Mat descriptors_tmp;
        for (std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
          if (mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
            if (trainPoints != NULL && !trainPoints->empty()) {
              trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
            }

            if (!descriptors.empty()) {
              descriptors_tmp.push_back(descriptors.row((int)mapOfKeypointHashes[myKeypointHash(*it)]));
            }
          }
        }

        if (trainPoints != NULL) {
          // Copy trainPoints_tmp to m_trainPoints
          *trainPoints = trainPoints_tmp;
        }
        // Copy descriptors_tmp to descriptors
        descriptors_tmp.copyTo(descriptors);
      }

      // Merge descriptors horizontally
      if (descriptors.empty()) {
        desc.copyTo(descriptors);
      } else {
        cv::hconcat(descriptors, desc, descriptors);
      }
    }
  }

  if (keyPoints.size() != (size_t)descriptors.rows) {
    std::cerr << "keyPoints.size() != (size_t) descriptors.rows" << std::endl;
  }
  elapsedTime = vpTime::measureTimeMs() - t;
}
1526 
/*!
  Filter the raw matches according to the configured filter type and fill
  m_filteredMatches, m_objectFilteredPoints and m_queryFilteredKeyPoints.
*/
void vpKeyPoint::filterMatches()
{
  std::vector<cv::KeyPoint> queryKpts;
  std::vector<cv::Point3f> trainPts;
  std::vector<cv::DMatch> m;

  if (m_useKnn) {
    // double max_dist = 0;
    // double min_dist = std::numeric_limits<double>::max(); // create an
    // error under Windows. To fix it we have to add #undef max
    double min_dist = DBL_MAX;
    double mean = 0.0;
    std::vector<double> distance_vec(m_knnMatches.size());

    // Distance statistics are only needed by the std+ratio filter
    if (m_filterType == stdAndRatioDistanceThreshold) {
      for (size_t i = 0; i < m_knnMatches.size(); i++) {
        double dist = m_knnMatches[i][0].distance;
        mean += dist;
        distance_vec[i] = dist;

        if (dist < min_dist) {
          min_dist = dist;
        }
        // if (dist > max_dist) {
        //  max_dist = dist;
        //}
      }
      mean /= m_queryDescriptors.rows;
    }

    double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
    double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
    double threshold = min_dist + stdev;

    for (size_t i = 0; i < m_knnMatches.size(); i++) {
      if (m_knnMatches[i].size() >= 2) {
        // Calculate ratio of the descriptor distance between the two nearest
        // neighbors of the keypoint
        float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
        // float ratio = std::sqrt((vecMatches[i][0].distance *
        // vecMatches[i][0].distance)
        //        / (vecMatches[i][1].distance *
        //        vecMatches[i][1].distance));
        double dist = m_knnMatches[i][0].distance;

        // Keep the match if it passes the ratio test, or (for the combined
        // filter) if its distance is below min_dist + stdev
        if (ratio < m_matchingRatioThreshold || (m_filterType == stdAndRatioDistanceThreshold && dist < threshold)) {
          m.push_back(cv::DMatch((int)queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));

          if (!m_trainPoints.empty()) {
            trainPts.push_back(m_trainPoints[(size_t)m_knnMatches[i][0].trainIdx]);
          }
          queryKpts.push_back(m_queryKeyPoints[(size_t)m_knnMatches[i][0].queryIdx]);
        }
      }
    }
  } else {
    // double max_dist = 0;
    // create an error under Windows. To fix it we have to add #undef max
    // double min_dist = std::numeric_limits<double>::max();
    double min_dist = DBL_MAX;
    double mean = 0.0;
    std::vector<double> distance_vec(m_matches.size());
    for (size_t i = 0; i < m_matches.size(); i++) {
      double dist = m_matches[i].distance;
      mean += dist;
      distance_vec[i] = dist;

      if (dist < min_dist) {
        min_dist = dist;
      }
      // if (dist > max_dist) {
      //  max_dist = dist;
      // }
    }
    mean /= m_queryDescriptors.rows;

    double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
    double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);

    // Define a threshold where we keep all keypoints whose the descriptor
    // distance falls below a factor of the minimum descriptor distance (for
    // all the query keypoints) or below the minimum descriptor distance +
    // the standard deviation (calculated on all the query descriptor
    // distances)
    double threshold =
        m_filterType == constantFactorDistanceThreshold ? m_matchingFactorThreshold * min_dist : min_dist + stdev;

    for (size_t i = 0; i < m_matches.size(); i++) {
      if (m_matches[i].distance <= threshold) {
        m.push_back(cv::DMatch((int)queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));

        if (!m_trainPoints.empty()) {
          trainPts.push_back(m_trainPoints[(size_t)m_matches[i].trainIdx]);
        }
        queryKpts.push_back(m_queryKeyPoints[(size_t)m_matches[i].queryIdx]);
      }
    }
  }

  if (m_useSingleMatchFilter) {
    // Eliminate matches where multiple query keypoints are matched to the
    // same train keypoint
    std::vector<cv::DMatch> mTmp;
    std::vector<cv::Point3f> trainPtsTmp;
    std::vector<cv::KeyPoint> queryKptsTmp;

    std::map<int, int> mapOfTrainIdx;
    // Count the number of query points matched to the same train point
    for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
      mapOfTrainIdx[it->trainIdx]++;
    }

    // Keep matches with only one correspondence
    for (std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
      if (mapOfTrainIdx[it->trainIdx] == 1) {
        mTmp.push_back(cv::DMatch((int)queryKptsTmp.size(), it->trainIdx, it->distance));

        if (!m_trainPoints.empty()) {
          trainPtsTmp.push_back(m_trainPoints[(size_t)it->trainIdx]);
        }
        queryKptsTmp.push_back(queryKpts[(size_t)it->queryIdx]);
      }
    }

    m_filteredMatches = mTmp;
    m_objectFilteredPoints = trainPtsTmp;
    m_queryFilteredKeyPoints = queryKptsTmp;
  } else {
    m_filteredMatches = m;
    m_objectFilteredPoints = trainPts;
    m_queryFilteredKeyPoints = queryKpts;
  }
}
1663 
1671 void vpKeyPoint::getObjectPoints(std::vector<cv::Point3f> &objectPoints) const
1672 {
1673  objectPoints = m_objectFilteredPoints;
1674 }
1675 
/*!
  Get the 3D object points kept after the matching filters, converted to
  vpPoint.

  \param objectPoints : Output list of filtered object points.
*/
void vpKeyPoint::getObjectPoints(std::vector<vpPoint> &objectPoints) const
{
  vpConvert::convertFromOpenCV(m_objectFilteredPoints, objectPoints);
}
1687 
1694 void vpKeyPoint::getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const { keyPoints = m_queryFilteredKeyPoints; }
1695 
1702 void vpKeyPoint::getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints) const { keyPoints = currentImagePointsList; }
1703 
1709 void vpKeyPoint::getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const { keyPoints = m_trainKeyPoints; }
1710 
1716 void vpKeyPoint::getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const { keyPoints = referenceImagePointsList; }
1717 
1724 void vpKeyPoint::getTrainPoints(std::vector<cv::Point3f> &points) const { points = m_trainPoints; }
1725 
1732 void vpKeyPoint::getTrainPoints(std::vector<vpPoint> &points) const { points = m_trainVpPoints; }
1733 
1738 void vpKeyPoint::init()
1739 {
1740 // Require 2.4.0 <= opencv < 3.0.0
1741 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
1742  // The following line must be called in order to use SIFT or SURF
1743  if (!cv::initModule_nonfree()) {
1744  std::cerr << "Cannot init module non free, SIFT or SURF cannot be used." << std::endl;
1745  }
1746 #endif
1747 
1748  // Use k-nearest neighbors (knn) to retrieve the two best matches for a
1749  // keypoint So this is useful only for ratioDistanceThreshold method
1750  if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
1751  m_useKnn = true;
1752  }
1753 
1754  initDetectors(m_detectorNames);
1755  initExtractors(m_extractorNames);
1756  initMatcher(m_matcherName);
1757 }
1758 
1764 void vpKeyPoint::initDetector(const std::string &detectorName)
1765 {
1766 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
1767  m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
1768 
1769  if (m_detectors[detectorName] == NULL) {
1770  std::stringstream ss_msg;
1771  ss_msg << "Fail to initialize the detector: " << detectorName
1772  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
1773  throw vpException(vpException::fatalError, ss_msg.str());
1774  }
1775 #else
1776  std::string detectorNameTmp = detectorName;
1777  std::string pyramid = "Pyramid";
1778  std::size_t pos = detectorName.find(pyramid);
1779  bool usePyramid = false;
1780  if (pos != std::string::npos) {
1781  detectorNameTmp = detectorName.substr(pos + pyramid.size());
1782  usePyramid = true;
1783  }
1784 
1785  if (detectorNameTmp == "SIFT") {
1786 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1787  cv::Ptr<cv::FeatureDetector> siftDetector = cv::xfeatures2d::SIFT::create();
1788  if (!usePyramid) {
1789  m_detectors[detectorNameTmp] = siftDetector;
1790  } else {
1791  std::cerr << "You should not use SIFT with Pyramid feature detection!" << std::endl;
1792  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1793  }
1794 #else
1795  std::stringstream ss_msg;
1796  ss_msg << "Fail to initialize the detector: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1797  << " was not build with xFeatures2d module.";
1798  throw vpException(vpException::fatalError, ss_msg.str());
1799 #endif
1800  } else if (detectorNameTmp == "SURF") {
1801 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1802  cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
1803  if (!usePyramid) {
1804  m_detectors[detectorNameTmp] = surfDetector;
1805  } else {
1806  std::cerr << "You should not use SURF with Pyramid feature detection!" << std::endl;
1807  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
1808  }
1809 #else
1810  std::stringstream ss_msg;
1811  ss_msg << "Fail to initialize the detector: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1812  << " was not build with xFeatures2d module.";
1813  throw vpException(vpException::fatalError, ss_msg.str());
1814 #endif
1815  } else if (detectorNameTmp == "FAST") {
1816  cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
1817  if (!usePyramid) {
1818  m_detectors[detectorNameTmp] = fastDetector;
1819  } else {
1820  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1821  }
1822  } else if (detectorNameTmp == "MSER") {
1823  cv::Ptr<cv::FeatureDetector> fastDetector = cv::MSER::create();
1824  if (!usePyramid) {
1825  m_detectors[detectorNameTmp] = fastDetector;
1826  } else {
1827  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1828  }
1829  } else if (detectorNameTmp == "ORB") {
1830  cv::Ptr<cv::FeatureDetector> orbDetector = cv::ORB::create();
1831  if (!usePyramid) {
1832  m_detectors[detectorNameTmp] = orbDetector;
1833  } else {
1834  std::cerr << "You should not use ORB with Pyramid feature detection!" << std::endl;
1835  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
1836  }
1837  } else if (detectorNameTmp == "BRISK") {
1838  cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
1839  if (!usePyramid) {
1840  m_detectors[detectorNameTmp] = briskDetector;
1841  } else {
1842  std::cerr << "You should not use BRISK with Pyramid feature detection!" << std::endl;
1843  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
1844  }
1845  } else if (detectorNameTmp == "KAZE") {
1846  cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
1847  if (!usePyramid) {
1848  m_detectors[detectorNameTmp] = kazeDetector;
1849  } else {
1850  std::cerr << "You should not use KAZE with Pyramid feature detection!" << std::endl;
1851  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
1852  }
1853  } else if (detectorNameTmp == "AKAZE") {
1854  cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
1855  if (!usePyramid) {
1856  m_detectors[detectorNameTmp] = akazeDetector;
1857  } else {
1858  std::cerr << "You should not use AKAZE with Pyramid feature detection!" << std::endl;
1859  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
1860  }
1861  } else if (detectorNameTmp == "GFTT") {
1862  cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
1863  if (!usePyramid) {
1864  m_detectors[detectorNameTmp] = gfttDetector;
1865  } else {
1866  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
1867  }
1868  } else if (detectorNameTmp == "SimpleBlob") {
1869  cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
1870  if (!usePyramid) {
1871  m_detectors[detectorNameTmp] = simpleBlobDetector;
1872  } else {
1873  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
1874  }
1875  } else if (detectorNameTmp == "STAR") {
1876 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1877  cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
1878  if (!usePyramid) {
1879  m_detectors[detectorNameTmp] = starDetector;
1880  } else {
1881  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
1882  }
1883 #else
1884  std::stringstream ss_msg;
1885  ss_msg << "Fail to initialize the detector: STAR. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1886  << " was not build with xFeatures2d module.";
1887  throw vpException(vpException::fatalError, ss_msg.str());
1888 #endif
1889  } else if (detectorNameTmp == "AGAST") {
1890  cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
1891  if (!usePyramid) {
1892  m_detectors[detectorNameTmp] = agastDetector;
1893  } else {
1894  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
1895  }
1896  } else if (detectorNameTmp == "MSD") {
1897 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
1898 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
1899  cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
1900  if (!usePyramid) {
1901  m_detectors[detectorNameTmp] = msdDetector;
1902  } else {
1903  std::cerr << "You should not use MSD with Pyramid feature detection!" << std::endl;
1904  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
1905  }
1906 #else
1907  std::stringstream ss_msg;
1908  ss_msg << "Fail to initialize the detector: MSD. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
1909  << " was not build with xFeatures2d module.";
1910  throw vpException(vpException::fatalError, ss_msg.str());
1911 #endif
1912 #else
1913  std::stringstream ss_msg;
1914  ss_msg << "Feature " << detectorName << " is not available in OpenCV version: " << std::hex
1915  << VISP_HAVE_OPENCV_VERSION << " (require >= OpenCV 3.1).";
1916 #endif
1917  } else {
1918  std::cerr << "The detector:" << detectorNameTmp << " is not available." << std::endl;
1919  }
1920 
1921  bool detectorInitialized = false;
1922  if (!usePyramid) {
1923  //if not null and to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
1924  detectorInitialized = !m_detectors[detectorNameTmp].empty();
1925  } else {
1926  //if not null and to avoid warning C4800: forcing value to bool 'true' or 'false' (performance warning)
1927  detectorInitialized = !m_detectors[detectorName].empty();
1928  }
1929 
1930  if (!detectorInitialized) {
1931  std::stringstream ss_msg;
1932  ss_msg << "Fail to initialize the detector: " << detectorNameTmp
1933  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
1934  throw vpException(vpException::fatalError, ss_msg.str());
1935  }
1936 
1937 #endif
1938 }
1939 
1946 void vpKeyPoint::initDetectors(const std::vector<std::string> &detectorNames)
1947 {
1948  for (std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
1949  initDetector(*it);
1950  }
1951 }
1952 
/*!
  Initialize a descriptor extractor based on its name.

  \param extractorName : Name of the descriptor extractor (e.g. "SIFT",
  "SURF", "ORB", "BRISK", "FREAK", "BRIEF", "KAZE", "AKAZE", "DAISY",
  "LATCH", "LUCID", "VGG", "BoostDesc").

  Throws a vpException::fatalError when the extractor cannot be created
  (missing contrib module or too old OpenCV version).
*/
void vpKeyPoint::initExtractor(const std::string &extractorName)
{
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
  // OpenCV 2.x: the generic string-based factory handles every extractor.
  m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
#else
  // OpenCV 3.x: the generic factory was removed, so each extractor is
  // created explicitly; several of them live in the xfeatures2d contrib module.
  if (extractorName == "SIFT") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the extractor: SIFT. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
           << " was not build with xFeatures2d module.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
  } else if (extractorName == "SURF") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    // Use extended set of SURF descriptors (128 instead of 64)
    m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3, true);
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the extractor: SURF. OpenCV version " << std::hex << VISP_HAVE_OPENCV_VERSION
           << " was not build with xFeatures2d module.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
  } else if (extractorName == "ORB") {
    m_extractors[extractorName] = cv::ORB::create();
  } else if (extractorName == "BRISK") {
    m_extractors[extractorName] = cv::BRISK::create();
  } else if (extractorName == "FREAK") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
           << VISP_HAVE_OPENCV_VERSION << " was not build with xFeatures2d module.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
  } else if (extractorName == "BRIEF") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
           << VISP_HAVE_OPENCV_VERSION << " was not build with xFeatures2d module.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
  } else if (extractorName == "KAZE") {
    m_extractors[extractorName] = cv::KAZE::create();
  } else if (extractorName == "AKAZE") {
    m_extractors[extractorName] = cv::AKAZE::create();
  } else if (extractorName == "DAISY") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
           << VISP_HAVE_OPENCV_VERSION << " was not build with xFeatures2d module.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
  } else if (extractorName == "LATCH") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
           << VISP_HAVE_OPENCV_VERSION << " was not build with xFeatures2d module.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
  } else if (extractorName == "LUCID") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    // m_extractors[extractorName] = cv::xfeatures2d::LUCID::create(1, 2);
    // Not possible currently, need a color image
    throw vpException(vpException::badValue, "Not possible currently as it needs a color image.");
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
           << VISP_HAVE_OPENCV_VERSION << " was not build with xFeatures2d module.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
  } else if (extractorName == "VGG") {
// VGG descriptor only exists from OpenCV 3.2 on, inside xfeatures2d.
#if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
    m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
           << VISP_HAVE_OPENCV_VERSION << " was not build with xFeatures2d module.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
           << VISP_HAVE_OPENCV_VERSION << " but requires at least OpenCV 3.2.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
  } else if (extractorName == "BoostDesc") {
// BoostDesc descriptor only exists from OpenCV 3.2 on, inside xfeatures2d.
#if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
    m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
           << VISP_HAVE_OPENCV_VERSION << " was not build with xFeatures2d module.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
#else
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the extractor: " << extractorName << ". OpenCV version " << std::hex
           << VISP_HAVE_OPENCV_VERSION << " but requires at least OpenCV 3.2.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
  } else {
    // Unknown name: not fatal here, the generic null check below will throw.
    std::cerr << "The extractor:" << extractorName << " is not available." << std::endl;
  }
#endif

  if (!m_extractors[extractorName]) { //if null
    std::stringstream ss_msg;
    ss_msg << "Fail to initialize the extractor: " << extractorName
           << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
    throw vpException(vpException::fatalError, ss_msg.str());
  }

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  if (extractorName == "SURF") {
    // Use extended set of SURF descriptors (128 instead of 64)
    m_extractors[extractorName]->set("extended", 1);
  }
#endif
}
2088 
2095 void vpKeyPoint::initExtractors(const std::vector<std::string> &extractorNames)
2096 {
2097  for (std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
2098  initExtractor(*it);
2099  }
2100 
2101  int descriptorType = CV_32F;
2102  bool firstIteration = true;
2103  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2104  it != m_extractors.end(); ++it) {
2105  if (firstIteration) {
2106  firstIteration = false;
2107  descriptorType = it->second->descriptorType();
2108  } else {
2109  if (descriptorType != it->second->descriptorType()) {
2110  throw vpException(vpException::fatalError, "All the descriptors must have the same type !");
2111  }
2112  }
2113  }
2114 }
2115 
/*!
  Fill the maps used to convert the detector and descriptor enums to their
  OpenCV string names, depending on the OpenCV version and on the optional
  modules (nonfree / xfeatures2d) present in the build.
*/
void vpKeyPoint::initFeatureNames()
{
// Create map enum to string
#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
  m_mapOfDetectorNames[DETECTOR_FAST] = "FAST";
  m_mapOfDetectorNames[DETECTOR_MSER] = "MSER";
  m_mapOfDetectorNames[DETECTOR_ORB] = "ORB";
  m_mapOfDetectorNames[DETECTOR_BRISK] = "BRISK";
  m_mapOfDetectorNames[DETECTOR_GFTT] = "GFTT";
  m_mapOfDetectorNames[DETECTOR_SimpleBlob] = "SimpleBlob";
// STAR: available in OpenCV 2.x, or in 3.x through the xfeatures2d module
#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
  m_mapOfDetectorNames[DETECTOR_STAR] = "STAR";
#endif
// SIFT/SURF: require the nonfree (2.x) or xfeatures2d (3.x) module
#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
  m_mapOfDetectorNames[DETECTOR_SIFT] = "SIFT";
  m_mapOfDetectorNames[DETECTOR_SURF] = "SURF";
#endif
// Detectors introduced in OpenCV 3.0
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
  m_mapOfDetectorNames[DETECTOR_KAZE] = "KAZE";
  m_mapOfDetectorNames[DETECTOR_AKAZE] = "AKAZE";
  m_mapOfDetectorNames[DETECTOR_AGAST] = "AGAST";
#endif
// MSD: requires OpenCV >= 3.1 with the xfeatures2d module
#if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
  m_mapOfDetectorNames[DETECTOR_MSD] = "MSD";
#endif
#endif

#if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
  m_mapOfDescriptorNames[DESCRIPTOR_ORB] = "ORB";
  m_mapOfDescriptorNames[DESCRIPTOR_BRISK] = "BRISK";
// FREAK/BRIEF: OpenCV 2.x, or 3.x through the xfeatures2d module
#if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
  m_mapOfDescriptorNames[DESCRIPTOR_FREAK] = "FREAK";
  m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] = "BRIEF";
#endif
// SIFT/SURF: require the nonfree (2.x) or xfeatures2d (3.x) module
#if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
  m_mapOfDescriptorNames[DESCRIPTOR_SIFT] = "SIFT";
  m_mapOfDescriptorNames[DESCRIPTOR_SURF] = "SURF";
#endif
// Descriptors introduced in OpenCV 3.0
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
  m_mapOfDescriptorNames[DESCRIPTOR_KAZE] = "KAZE";
  m_mapOfDescriptorNames[DESCRIPTOR_AKAZE] = "AKAZE";
#if defined(VISP_HAVE_OPENCV_XFEATURES2D)
  m_mapOfDescriptorNames[DESCRIPTOR_DAISY] = "DAISY";
  m_mapOfDescriptorNames[DESCRIPTOR_LATCH] = "LATCH";
#endif
#endif
// VGG/BoostDesc: require OpenCV >= 3.2 with the xfeatures2d module
#if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
  m_mapOfDescriptorNames[DESCRIPTOR_VGG] = "VGG";
  m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] = "BoostDesc";
#endif
#endif
}
2168 
2174 void vpKeyPoint::initMatcher(const std::string &matcherName)
2175 {
2176  int descriptorType = CV_32F;
2177  bool firstIteration = true;
2178  for (std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2179  it != m_extractors.end(); ++it) {
2180  if (firstIteration) {
2181  firstIteration = false;
2182  descriptorType = it->second->descriptorType();
2183  } else {
2184  if (descriptorType != it->second->descriptorType()) {
2185  throw vpException(vpException::fatalError, "All the descriptors must have the same type !");
2186  }
2187  }
2188  }
2189 
2190  if (matcherName == "FlannBased") {
2191  if (m_extractors.empty()) {
2192  std::cout << "Warning: No extractor initialized, by default use "
2193  "floating values (CV_32F) "
2194  "for descriptor type !"
2195  << std::endl;
2196  }
2197 
2198  if (descriptorType == CV_8U) {
2199 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2200  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2201 #else
2202  m_matcher = new cv::FlannBasedMatcher(new cv::flann::LshIndexParams(12, 20, 2));
2203 #endif
2204  } else {
2205 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2206  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2207 #else
2208  m_matcher = new cv::FlannBasedMatcher(new cv::flann::KDTreeIndexParams());
2209 #endif
2210  }
2211  } else {
2212  m_matcher = cv::DescriptorMatcher::create(matcherName);
2213  }
2214 
2215 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2216  if (m_matcher != NULL && !m_useKnn && matcherName == "BruteForce") {
2217  m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
2218  }
2219 #endif
2220 
2221  if (!m_matcher) { //if null
2222  std::stringstream ss_msg;
2223  ss_msg << "Fail to initialize the matcher: " << matcherName
2224  << " or it is not available in OpenCV version: " << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2225  throw vpException(vpException::fatalError, ss_msg.str());
2226  }
2227 }
2228 
                                     vpImage<unsigned char> &IMatching)
{
  // Paste the reference image at the top-left corner of the matching image...
  vpImagePoint topLeftCorner(0, 0);
  IMatching.insert(IRef, topLeftCorner);
  // ...then paste the current image directly to its right, side by side.
  topLeftCorner = vpImagePoint(0, IRef.getWidth());
  IMatching.insert(ICurrent, topLeftCorner);
}
2245 
{
  // Nb images in the training database + the current image we want to detect
  // the object
  int nbImg = (int)(m_mapOfImages.size() + 1);

  if (m_mapOfImages.empty()) {
    std::cerr << "There is no training image loaded !" << std::endl;
    return;
  }

  if (nbImg == 2) {
    // Only one training image, so we display them side by side
    insertImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  } else {
    // Multiple training images, display them as a mosaic image
    int nbImgSqrt = vpMath::round(std::sqrt((double)nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
    int nbWidth = nbImgSqrt;
    int nbHeight = nbImgSqrt;

    // Add one extra column when nbImg is not a perfect square.
    if (nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    // Cell size of the mosaic: the largest width/height over all the images.
    unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if (maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    // Indexes of the current image in the grid made in order to the image is
    // in the center square in the mosaic grid
    int medianI = nbHeight / 2;
    int medianJ = nbWidth / 2;
    int medianIndex = medianI * nbWidth + medianJ;

    int cpt = 0;
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it, cpt++) {
      int local_cpt = cpt;
      if (cpt >= medianIndex) {
        // Shift of one unity the index of the training images which are after
        // the current image
        local_cpt++;
      }
      // Grid coordinates (row indexI, column indexJ) of this training image.
      int indexI = local_cpt / nbWidth;
      int indexJ = local_cpt - (indexI * nbWidth);
      vpImagePoint topLeftCorner((int)maxH * indexI, (int)maxW * indexJ);

      IMatching.insert(it->second, topLeftCorner);
    }

    // Finally paste the current image into the reserved center cell.
    vpImagePoint topLeftCorner((int)maxH * medianI, (int)maxW * medianJ);
    IMatching.insert(ICurrent, topLeftCorner);
  }
}
2315 
2316 #ifdef VISP_HAVE_XML2
2317 
/*!
  Load configuration parameters (detector, extractor, matcher, match filtering
  and RANSAC settings) from an XML file and re-initialize the keypoint
  machinery accordingly.

  \param configFile : Path to the XML configuration file.

  Throws a vpException::ioError if the file cannot be parsed.
*/
void vpKeyPoint::loadConfigFile(const std::string &configFile)
{

  try {
    // Reset detector and extractor
    m_detectorNames.clear();
    m_extractorNames.clear();
    m_detectors.clear();
    m_extractors.clear();

    std::cout << " *********** Parsing XML for configuration for vpKeyPoint "
                 "************ "
              << std::endl;
    xmlp.parse(configFile);

    m_detectorNames.push_back(xmlp.getDetectorName());
    m_extractorNames.push_back(xmlp.getExtractorName());
    m_matcherName = xmlp.getMatcherName();

    // Map the matching method read from the XML file to the internal
    // match-filtering strategy.
    switch (xmlp.getMatchingMethod()) {
      // keep matches whose distance is below a constant factor of the minimum
      m_filterType = constantFactorDistanceThreshold;
      break;

      // keep matches whose distance is below mean + k * stdev
      m_filterType = stdDistanceThreshold;
      break;

      // keep matches passing the Lowe ratio test (needs knn matching)
      m_filterType = ratioDistanceThreshold;
      break;

      // combine the stdev threshold and the ratio test (needs knn matching)
      m_filterType = stdAndRatioDistanceThreshold;
      break;

      // keep every match
      m_filterType = noFilterMatching;
      break;

    default:
      break;
    }

    m_matchingFactorThreshold = xmlp.getMatchingFactorThreshold();
    m_matchingRatioThreshold = xmlp.getMatchingRatioThreshold();

    // RANSAC pose-estimation settings.
    m_useRansacVVS = xmlp.getUseRansacVVSPoseEstimation();
    m_useConsensusPercentage = xmlp.getUseRansacConsensusPercentage();
    m_nbRansacIterations = xmlp.getNbRansacIterations();
    m_ransacReprojectionError = xmlp.getRansacReprojectionError();
    m_nbRansacMinInlierCount = xmlp.getNbRansacMinInlierCount();
    m_ransacThreshold = xmlp.getRansacThreshold();
    m_ransacConsensusPercentage = xmlp.getRansacConsensusPercentage();

    // The ratio test needs the two best candidates, hence knn matching.
    if (m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
      m_useKnn = true;
    } else {
      m_useKnn = false;
    }

    init();
  } catch (...) {
    throw vpException(vpException::ioError, "Can't open XML file \"%s\"\n ", configFile.c_str());
  }
}
2389 #endif
2390 
/*!
  Load learning data (training keypoints, optional 3D object points,
  descriptors and training images) saved either in binary or in XML format,
  and feed the descriptors to the matcher.

  \param filename : Path to the learning file.
  \param binaryMode : If true the file is read as binary, otherwise as XML
  (requires libxml2).
  \param append : If true the loaded data are appended to the current training
  data instead of replacing them.
*/
void vpKeyPoint::loadLearningData(const std::string &filename, const bool binaryMode, const bool append)
{
  // Offsets applied to the loaded ids so that appended data do not collide
  // with the ids already present in the training set.
  int startClassId = 0;
  int startImageId = 0;
  if (!append) {
    m_trainKeyPoints.clear();
    m_trainPoints.clear();
    m_mapOfImageId.clear();
    m_mapOfImages.clear();
  } else {
    // In append case, find the max index of keypoint class Id
    for (std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
      if (startClassId < it->first) {
        startClassId = it->first;
      }
    }

    // In append case, find the max index of images Id
    for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
         ++it) {
      if (startImageId < it->first) {
        startImageId = it->first;
      }
    }
  }

  // Get parent directory (used to resolve relative training-image paths)
  std::string parent = vpIoTools::getParent(filename);
  if (!parent.empty()) {
    parent += "/";
  }

  if (binaryMode) {
    std::ifstream file(filename.c_str(), std::ifstream::binary);
    if (!file.is_open()) {
      throw vpException(vpException::ioError, "Cannot open the file.");
    }

    // Read info about training images
    int nbImgs = 0;
    vpIoTools::readBinaryValueLE(file, nbImgs);

#if !defined(VISP_HAVE_MODULE_IO)
    if (nbImgs > 0) {
      std::cout << "Warning: The learning file contains image data that will "
                   "not be loaded as visp_io module "
                   "is not available !"
                << std::endl;
    }
#endif

    for (int i = 0; i < nbImgs; i++) {
      // Read image_id
      int id = 0;
      vpIoTools::readBinaryValueLE(file, id);

      // Read the length of the image path, then the path characters.
      int length = 0;
      vpIoTools::readBinaryValueLE(file, length);
      // Will contain the path to the training images
      char *path = new char[length + 1]; // char path[length + 1];

      for (int cpt = 0; cpt < length; cpt++) {
        char c;
        file.read((char *)(&c), sizeof(c));
        path[cpt] = c;
      }
      path[length] = '\0';

#ifdef VISP_HAVE_MODULE_IO
      if (vpIoTools::isAbsolutePathname(std::string(path))) {
        vpImageIo::read(I, path);
      } else {
        vpImageIo::read(I, parent + path);
      }

      // Add the image previously loaded only if VISP_HAVE_MODULE_IO
      m_mapOfImages[id + startImageId] = I;
#endif

      // Delete path
      delete[] path;
    }

    // Read if 3D point information are saved or not
    int have3DInfoInt = 0;
    vpIoTools::readBinaryValueLE(file, have3DInfoInt);
    bool have3DInfo = have3DInfoInt != 0;

    // Read the number of descriptors
    int nRows = 0;
    vpIoTools::readBinaryValueLE(file, nRows);

    // Read the size of the descriptor
    int nCols = 0;
    vpIoTools::readBinaryValueLE(file, nCols);

    // Read the type of the descriptor
    int descriptorType = 5; // CV_32F
    vpIoTools::readBinaryValueLE(file, descriptorType);

    // One keypoint record (fields + optional 3D point + descriptor row) per row.
    cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
    for (int i = 0; i < nRows; i++) {
      // Read information about keyPoint
      float u, v, size, angle, response;
      int octave, class_id, image_id;
      vpIoTools::readBinaryValueLE(file, size);
      vpIoTools::readBinaryValueLE(file, angle);
      vpIoTools::readBinaryValueLE(file, response);
      vpIoTools::readBinaryValueLE(file, octave);
      vpIoTools::readBinaryValueLE(file, class_id);
      vpIoTools::readBinaryValueLE(file, image_id);
      // class_id is shifted so appended keypoints get fresh ids.
      cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
      m_trainKeyPoints.push_back(keyPoint);

      if (image_id != -1) {
#ifdef VISP_HAVE_MODULE_IO
        // No training images if image_id == -1
        m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
#endif
      }

      if (have3DInfo) {
        // Read oX, oY, oZ
        float oX, oY, oZ;
        vpIoTools::readBinaryValueLE(file, oX);
        vpIoTools::readBinaryValueLE(file, oY);
        vpIoTools::readBinaryValueLE(file, oZ);
        m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
      }

      for (int j = 0; j < nCols; j++) {
        // Read the value of the descriptor, sized according to its OpenCV type.
        switch (descriptorType) {
        case CV_8U: {
          unsigned char value;
          file.read((char *)(&value), sizeof(value));
          trainDescriptorsTmp.at<unsigned char>(i, j) = value;
        } break;

        case CV_8S: {
          char value;
          file.read((char *)(&value), sizeof(value));
          trainDescriptorsTmp.at<char>(i, j) = value;
        } break;

        case CV_16U: {
          unsigned short int value;
          vpIoTools::readBinaryValueLE(file, value);
          trainDescriptorsTmp.at<unsigned short int>(i, j) = value;
        } break;

        case CV_16S: {
          short int value;
          vpIoTools::readBinaryValueLE(file, value);
          trainDescriptorsTmp.at<short int>(i, j) = value;
        } break;

        case CV_32S: {
          int value;
          vpIoTools::readBinaryValueLE(file, value);
          trainDescriptorsTmp.at<int>(i, j) = value;
        } break;

        case CV_32F: {
          float value;
          vpIoTools::readBinaryValueLE(file, value);
          trainDescriptorsTmp.at<float>(i, j) = value;
        } break;

        case CV_64F: {
          double value;
          vpIoTools::readBinaryValueLE(file, value);
          trainDescriptorsTmp.at<double>(i, j) = value;
        } break;

        default: {
          // Unknown type tag: fall back to reading a float.
          float value;
          vpIoTools::readBinaryValueLE(file, value);
          trainDescriptorsTmp.at<float>(i, j) = value;
        } break;
        }
      }
    }

    // Replace or append the descriptor matrix.
    if (!append || m_trainDescriptors.empty()) {
      trainDescriptorsTmp.copyTo(m_trainDescriptors);
    } else {
      cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
    }

    file.close();
  } else {
#ifdef VISP_HAVE_XML2
    xmlDocPtr doc = NULL;
    xmlNodePtr root_element = NULL;

    /*
     * this initialize the library and check potential ABI mismatches
     * between the version it was compiled for and the actual shared
     * library used.
     */
    LIBXML_TEST_VERSION

    /*parse the file and get the DOM */
    doc = xmlReadFile(filename.c_str(), NULL, 0);

    if (doc == NULL) {
      throw vpException(vpException::ioError, "Error with file " + filename);
    }

    root_element = xmlDocGetRootElement(doc);

    xmlNodePtr first_level_node = NULL;
    char *pEnd = NULL;

    int descriptorType = CV_32F; // float
    int nRows = 0, nCols = 0;
    int i = 0, j = 0;

    cv::Mat trainDescriptorsTmp;

    // Walk over the top-level XML nodes: training-image info, global
    // descriptor info, then one DescriptorInfo node per keypoint.
    for (first_level_node = root_element->children; first_level_node; first_level_node = first_level_node->next) {

      std::string name((char *)first_level_node->name);
      if (first_level_node->type == XML_ELEMENT_NODE && name == "TrainingImageInfo") {
        xmlNodePtr image_info_node = NULL;

        for (image_info_node = first_level_node->children; image_info_node; image_info_node = image_info_node->next) {
          name = std::string((char *)image_info_node->name);

          if (name == "trainImg") {
            // Read image_id
            xmlChar *image_id_property = xmlGetProp(image_info_node, BAD_CAST "image_id");
            int id = 0;
            if (image_id_property) {
              id = std::atoi((char *)image_id_property);
            }
            xmlFree(image_id_property);

#ifdef VISP_HAVE_MODULE_IO
            std::string path((char *)image_info_node->children->content);
            // Read path to the training images
            if (vpIoTools::isAbsolutePathname(std::string(path))) {
              vpImageIo::read(I, path);
            } else {
              vpImageIo::read(I, parent + path);
            }

            // Add the image previously loaded only if VISP_HAVE_MODULE_IO
            m_mapOfImages[id + startImageId] = I;
#endif
          }
        }
      } else if (first_level_node->type == XML_ELEMENT_NODE && name == "DescriptorsInfo") {
        // Matrix dimensions and element type of the descriptor matrix.
        xmlNodePtr descriptors_info_node = NULL;
        for (descriptors_info_node = first_level_node->children; descriptors_info_node;
             descriptors_info_node = descriptors_info_node->next) {
          if (descriptors_info_node->type == XML_ELEMENT_NODE) {
            name = std::string((char *)descriptors_info_node->name);

            if (name == "nrows") {
              nRows = std::atoi((char *)descriptors_info_node->children->content);
            } else if (name == "ncols") {
              nCols = std::atoi((char *)descriptors_info_node->children->content);
            } else if (name == "type") {
              descriptorType = std::atoi((char *)descriptors_info_node->children->content);
            }
          }
        }

        trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
      } else if (first_level_node->type == XML_ELEMENT_NODE && name == "DescriptorInfo") {
        xmlNodePtr point_node = NULL;
        double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
        int octave = 0, class_id = 0, image_id = 0;
        double oX = 0.0, oY = 0.0, oZ = 0.0;

        std::stringstream ss;

        for (point_node = first_level_node->children; point_node; point_node = point_node->next) {
          if (point_node->type == XML_ELEMENT_NODE) {
            name = std::string((char *)point_node->name);

            // Read information about keypoints
            if (name == "u") {
              u = std::strtod((char *)point_node->children->content, &pEnd);
            } else if (name == "v") {
              v = std::strtod((char *)point_node->children->content, &pEnd);
            } else if (name == "size") {
              size = std::strtod((char *)point_node->children->content, &pEnd);
            } else if (name == "angle") {
              angle = std::strtod((char *)point_node->children->content, &pEnd);
            } else if (name == "response") {
              response = std::strtod((char *)point_node->children->content, &pEnd);
            } else if (name == "octave") {
              octave = std::atoi((char *)point_node->children->content);
            } else if (name == "class_id") {
              class_id = std::atoi((char *)point_node->children->content);
              // class_id is the last scalar keypoint field: build the keypoint
              // here, shifted for the append case.
              cv::KeyPoint keyPoint(cv::Point2f((float)u, (float)v), (float)size, (float)angle, (float)response, octave,
                                    (class_id + startClassId));
              m_trainKeyPoints.push_back(keyPoint);
            } else if (name == "image_id") {
              image_id = std::atoi((char *)point_node->children->content);
              if (image_id != -1) {
#ifdef VISP_HAVE_MODULE_IO
                // No training images if image_id == -1
                m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
#endif
              }
            } else if (name == "oX") {
              oX = std::atof((char *)point_node->children->content);
            } else if (name == "oY") {
              oY = std::atof((char *)point_node->children->content);
            } else if (name == "oZ") {
              // oZ is the last 3D coordinate: store the complete 3D point.
              oZ = std::atof((char *)point_node->children->content);
              m_trainPoints.push_back(cv::Point3f((float)oX, (float)oY, (float)oZ));
            } else if (name == "desc") {
              xmlNodePtr descriptor_value_node = NULL;
              j = 0;

              for (descriptor_value_node = point_node->children; descriptor_value_node;
                   descriptor_value_node = descriptor_value_node->next) {

                if (descriptor_value_node->type == XML_ELEMENT_NODE) {
                  // Read descriptors values
                  std::string parseStr((char *)descriptor_value_node->children->content);
                  ss.clear();
                  ss.str(parseStr);

                  if (!ss.fail()) {
                    switch (descriptorType) {
                    case CV_8U: {
                      // Parse the numeric value [0 ; 255] to an int
                      int parseValue;
                      ss >> parseValue;
                      trainDescriptorsTmp.at<unsigned char>(i, j) = (unsigned char)parseValue;
                    } break;

                    case CV_8S:
                      // Parse the numeric value [-128 ; 127] to an int
                      int parseValue;
                      ss >> parseValue;
                      trainDescriptorsTmp.at<char>(i, j) = (char)parseValue;
                      break;

                    case CV_16U:
                      ss >> trainDescriptorsTmp.at<unsigned short int>(i, j);
                      break;

                    case CV_16S:
                      ss >> trainDescriptorsTmp.at<short int>(i, j);
                      break;

                    case CV_32S:
                      ss >> trainDescriptorsTmp.at<int>(i, j);
                      break;

                    case CV_32F:
                      ss >> trainDescriptorsTmp.at<float>(i, j);
                      break;

                    case CV_64F:
                      ss >> trainDescriptorsTmp.at<double>(i, j);
                      break;

                    default:
                      // Unknown type tag: fall back to float parsing.
                      ss >> trainDescriptorsTmp.at<float>(i, j);
                      break;
                    }
                  } else {
                    std::cerr << "Error when converting:" << ss.str() << std::endl;
                  }

                  j++;
                }
              }
            }
          }
        }
        i++;
      }
    }

    // Replace or append the descriptor matrix.
    if (!append || m_trainDescriptors.empty()) {
      trainDescriptorsTmp.copyTo(m_trainDescriptors);
    } else {
      cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
    }

    /*free the document */
    xmlFreeDoc(doc);

    /*
     *Free the global variables that may
     *have been allocated by the parser.
     */
    xmlCleanupParser();
#else
    std::cout << "Error: libxml2 is required !" << std::endl;
#endif
  }

  // Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(this->m_trainPoints, m_trainVpPoints);

  // Add train descriptors in matcher object
  m_matcher->clear();
  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));

  // Set _reference_computed to true as we load a learning file
  _reference_computed = true;

  // Set m_currentImageId
  m_currentImageId = (int)m_mapOfImages.size();
}
2819 
2828 void vpKeyPoint::match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors,
2829  std::vector<cv::DMatch> &matches, double &elapsedTime)
2830 {
2831  double t = vpTime::measureTimeMs();
2832 
2833  if (m_useKnn) {
2834  m_knnMatches.clear();
2835 
2836  if (m_useMatchTrainToQuery) {
2837  std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
2838 
2839  // Match train descriptors to query descriptors
2840  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
2841  matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
2842 
2843  for (std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin();
2844  it1 != knnMatchesTmp.end(); ++it1) {
2845  std::vector<cv::DMatch> tmp;
2846  for (std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
2847  tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
2848  }
2849  m_knnMatches.push_back(tmp);
2850  }
2851 
2852  matches.resize(m_knnMatches.size());
2853  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
2854  } else {
2855  // Match query descriptors to train descriptors
2856  m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
2857  matches.resize(m_knnMatches.size());
2858  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
2859  }
2860  } else {
2861  matches.clear();
2862 
2863  if (m_useMatchTrainToQuery) {
2864  std::vector<cv::DMatch> matchesTmp;
2865  // Match train descriptors to query descriptors
2866  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
2867  matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
2868 
2869  for (std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
2870  matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
2871  }
2872  } else {
2873  // Match query descriptors to train descriptors
2874  m_matcher->match(queryDescriptors, matches);
2875  }
2876  }
2877  elapsedTime = vpTime::measureTimeMs() - t;
2878 }
2879 
2887 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I) { return matchPoint(I, vpRect()); }
2888 
2899 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP, const unsigned int height,
2900  const unsigned int width)
2901 {
2902  return matchPoint(I, vpRect(iP, width, height));
2903 }
2904 
2913 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpRect &rectangle)
2914 {
2915  if (m_trainDescriptors.empty()) {
2916  std::cerr << "Reference is empty." << std::endl;
2917  if (!_reference_computed) {
2918  std::cerr << "Reference is not computed." << std::endl;
2919  }
2920  std::cerr << "Matching is not possible." << std::endl;
2921 
2922  return 0;
2923  }
2924 
2925  if (m_useAffineDetection) {
2926  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
2927  std::vector<cv::Mat> listOfQueryDescriptors;
2928 
2929  // Detect keypoints and extract descriptors on multiple images
2930  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
2931 
2932  // Flatten the different train lists
2933  m_queryKeyPoints.clear();
2934  for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
2935  it != listOfQueryKeyPoints.end(); ++it) {
2936  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
2937  }
2938 
2939  bool first = true;
2940  for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
2941  ++it) {
2942  if (first) {
2943  first = false;
2944  it->copyTo(m_queryDescriptors);
2945  } else {
2946  m_queryDescriptors.push_back(*it);
2947  }
2948  }
2949  } else {
2950  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
2951  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
2952  }
2953 
2954  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
2955 
2956  if (m_filterType != noFilterMatching) {
2957  m_queryFilteredKeyPoints.clear();
2958  m_objectFilteredPoints.clear();
2959  m_filteredMatches.clear();
2960 
2961  filterMatches();
2962  } else {
2963  if (m_useMatchTrainToQuery) {
2964  // Add only query keypoints matched with a train keypoints
2965  m_queryFilteredKeyPoints.clear();
2966  m_filteredMatches.clear();
2967  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
2968  m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
2969  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
2970  }
2971  } else {
2972  m_queryFilteredKeyPoints = m_queryKeyPoints;
2973  m_filteredMatches = m_matches;
2974  }
2975 
2976  if (!m_trainPoints.empty()) {
2977  m_objectFilteredPoints.clear();
2978  // Add 3D object points such as the same index in
2979  // m_queryFilteredKeyPoints and in m_objectFilteredPoints
2980  // matches to the same train object
2981  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
2982  // m_matches is normally ordered following the queryDescriptor index
2983  m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
2984  }
2985  }
2986  }
2987 
2988  // Convert OpenCV type to ViSP type for compatibility
2989  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
2991 
2992  return static_cast<unsigned int>(m_filteredMatches.size());
2993 }
2994 
                            bool (*func)(const vpHomogeneousMatrix &), const vpRect &rectangle)
{
  // Local sinks: this overload discards the pose-estimation error and the
  // elapsed time, reporting only the boolean result of the full overload.
  double error, elapsedTime;
  return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
}
3013 
3030  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &),
3031  const vpRect &rectangle)
3032 {
3033  // Check if we have training descriptors
3034  if (m_trainDescriptors.empty()) {
3035  std::cerr << "Reference is empty." << std::endl;
3036  if (!_reference_computed) {
3037  std::cerr << "Reference is not computed." << std::endl;
3038  }
3039  std::cerr << "Matching is not possible." << std::endl;
3040 
3041  return false;
3042  }
3043 
3044  if (m_useAffineDetection) {
3045  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3046  std::vector<cv::Mat> listOfQueryDescriptors;
3047 
3048  // Detect keypoints and extract descriptors on multiple images
3049  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3050 
3051  // Flatten the different train lists
3052  m_queryKeyPoints.clear();
3053  for (std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3054  it != listOfQueryKeyPoints.end(); ++it) {
3055  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3056  }
3057 
3058  bool first = true;
3059  for (std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end();
3060  ++it) {
3061  if (first) {
3062  first = false;
3063  it->copyTo(m_queryDescriptors);
3064  } else {
3065  m_queryDescriptors.push_back(*it);
3066  }
3067  }
3068  } else {
3069  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3070  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3071  }
3072 
3073  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3074 
3075  elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3076 
3077  if (m_filterType != noFilterMatching) {
3078  m_queryFilteredKeyPoints.clear();
3079  m_objectFilteredPoints.clear();
3080  m_filteredMatches.clear();
3081 
3082  filterMatches();
3083  } else {
3084  if (m_useMatchTrainToQuery) {
3085  // Add only query keypoints matched with a train keypoints
3086  m_queryFilteredKeyPoints.clear();
3087  m_filteredMatches.clear();
3088  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3089  m_filteredMatches.push_back(cv::DMatch((int)m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3090  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t)it->queryIdx]);
3091  }
3092  } else {
3093  m_queryFilteredKeyPoints = m_queryKeyPoints;
3094  m_filteredMatches = m_matches;
3095  }
3096 
3097  if (!m_trainPoints.empty()) {
3098  m_objectFilteredPoints.clear();
3099  // Add 3D object points such as the same index in
3100  // m_queryFilteredKeyPoints and in m_objectFilteredPoints
3101  // matches to the same train object
3102  for (std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3103  // m_matches is normally ordered following the queryDescriptor index
3104  m_objectFilteredPoints.push_back(m_trainPoints[(size_t)it->trainIdx]);
3105  }
3106  }
3107  }
3108 
3109  // Convert OpenCV type to ViSP type for compatibility
3110  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
3112 
3113  // error = std::numeric_limits<double>::max(); // create an error under
3114  // Windows. To fix it we have to add #undef max
3115  error = DBL_MAX;
3116  m_ransacInliers.clear();
3117  m_ransacOutliers.clear();
3118 
3119  if (m_useRansacVVS) {
3120  std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3121  size_t cpt = 0;
3122  // Create a list of vpPoint with 2D coordinates (current keypoint
3123  // location) + 3D coordinates (world/object coordinates)
3124  for (std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin();
3125  it != m_objectFilteredPoints.end(); ++it, cpt++) {
3126  vpPoint pt;
3127  pt.setWorldCoordinates(it->x, it->y, it->z);
3128 
3129  vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3130 
3131  double x = 0.0, y = 0.0;
3132  vpPixelMeterConversion::convertPoint(cam, imP, x, y);
3133  pt.set_x(x);
3134  pt.set_y(y);
3135 
3136  objectVpPoints[cpt] = pt;
3137  }
3138 
3139  std::vector<vpPoint> inliers;
3140  std::vector<unsigned int> inlierIndex;
3141 
3142  bool res = computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3143 
3144  std::map<unsigned int, bool> mapOfInlierIndex;
3145  m_matchRansacKeyPointsToPoints.clear();
3146 
3147  for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3148  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3149  m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3150  mapOfInlierIndex[*it] = true;
3151  }
3152 
3153  for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3154  if (mapOfInlierIndex.find((unsigned int)i) == mapOfInlierIndex.end()) {
3155  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3156  }
3157  }
3158 
3159  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3160 
3161  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3162  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3163  m_ransacInliers.begin(), matchRansacToVpImage);
3164 
3165  elapsedTime += m_poseTime;
3166 
3167  return res;
3168  } else {
3169  std::vector<cv::Point2f> imageFilteredPoints;
3170  cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3171  std::vector<int> inlierIndex;
3172  bool res = computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3173 
3174  std::map<int, bool> mapOfInlierIndex;
3175  m_matchRansacKeyPointsToPoints.clear();
3176 
3177  for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3178  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(
3179  m_queryFilteredKeyPoints[(size_t)(*it)], m_objectFilteredPoints[(size_t)(*it)]));
3180  mapOfInlierIndex[*it] = true;
3181  }
3182 
3183  for (size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3184  if (mapOfInlierIndex.find((int)i) == mapOfInlierIndex.end()) {
3185  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3186  }
3187  }
3188 
3189  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3190 
3191  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3192  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(),
3193  m_ransacInliers.begin(), matchRansacToVpImage);
3194 
3195  elapsedTime += m_poseTime;
3196 
3197  return res;
3198  }
3199 }
3200 
                                     vpImagePoint &centerOfGravity, const bool isPlanarObject,
                                     std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
                                     double *meanDescriptorDistance, double *detection_score, const vpRect &rectangle)
{
  // Optional outputs: reference (imPts1) / current (imPts2) inlier locations.
  if (imPts1 != NULL && imPts2 != NULL) {
    imPts1->clear();
    imPts2->clear();
  }

  // Run the standard 2D matching pipeline first.
  matchPoint(I, rectangle);

  // Mean descriptor distance over the filtered matches: used as the decision
  // value in detectionThreshold mode.
  double meanDescriptorDistanceTmp = 0.0;
  for (std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    meanDescriptorDistanceTmp += (double)it->distance;
  }

  // NOTE(review): if m_filteredMatches is empty this divides 0 by 0 and the
  // mean/score become NaN; the >= 4 test below still returns false, but the
  // optional outputs would carry NaN — confirm callers tolerate this.
  meanDescriptorDistanceTmp /= (double)m_filteredMatches.size();
  double score = (double)m_filteredMatches.size() / meanDescriptorDistanceTmp;

  if (meanDescriptorDistance != NULL) {
    *meanDescriptorDistance = meanDescriptorDistanceTmp;
  }
  if (detection_score != NULL) {
    *detection_score = score;
  }

  if (m_filteredMatches.size() >= 4) {
    // Training / Reference 2D points
    std::vector<cv::Point2f> points1(m_filteredMatches.size());
    // Query / Current 2D points
    std::vector<cv::Point2f> points2(m_filteredMatches.size());

    for (size_t i = 0; i < m_filteredMatches.size(); i++) {
      points1[i] = cv::Point2f(m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx].pt);
      points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx].pt);
    }

    std::vector<vpImagePoint> inliers;
    if (isPlanarObject) {
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
      cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
#else
      cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
#endif

      for (size_t i = 0; i < m_filteredMatches.size(); i++) {
        // Compute reprojection error
        cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
        realPoint.at<double>(0, 0) = points1[i].x;
        realPoint.at<double>(1, 0) = points1[i].y;
        realPoint.at<double>(2, 0) = 1.f;

        // Project the reference point through the estimated homography and
        // measure its distance to the observed current point.
        cv::Mat reprojectedPoint = homographyMatrix * realPoint;
        double err_x = (reprojectedPoint.at<double>(0, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].x;
        double err_y = (reprojectedPoint.at<double>(1, 0) / reprojectedPoint.at<double>(2, 0)) - points2[i].y;
        double reprojectionError = std::sqrt(err_x * err_x + err_y * err_y);

        // 6.0 pixels: hard-coded inlier reprojection-error threshold.
        if (reprojectionError < 6.0) {
          inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
          if (imPts1 != NULL) {
            imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
          }

          if (imPts2 != NULL) {
            imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
          }
        }
      }
    } else if (m_filteredMatches.size() >= 8) {
      // Non-planar object: use the fundamental matrix (needs >= 8 points).
      // NOTE(review): with 4-7 matches and isPlanarObject == false neither
      // branch runs, so inliers stays empty — behavior kept as-is.
      cv::Mat fundamentalInliers;
      cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);

      for (size_t i = 0; i < (size_t)fundamentalInliers.rows; i++) {
        if (fundamentalInliers.at<uchar>((int)i, 0)) {
          inliers.push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));

          if (imPts1 != NULL) {
            imPts1->push_back(vpImagePoint((double)points1[i].y, (double)points1[i].x));
          }

          if (imPts2 != NULL) {
            imPts2->push_back(vpImagePoint((double)points2[i].y, (double)points2[i].x));
          }
        }
      }
    }

    if (!inliers.empty()) {
      // Build a polygon with the list of inlier keypoints detected in the
      // current image to get the bounding box
      vpPolygon polygon(inliers);
      boundingBox = polygon.getBoundingBox();

      // Compute the center of gravity
      double meanU = 0.0, meanV = 0.0;
      for (std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
        meanU += it->get_u();
        meanV += it->get_v();
      }

      meanU /= (double)inliers.size();
      meanV /= (double)inliers.size();

      centerOfGravity.set_u(meanU);
      centerOfGravity.set_v(meanV);
    }
  } else {
    // Too few matches
    return false;
  }

  // Final detection decision: threshold on the mean descriptor distance or
  // on the matches/distance score, depending on the configured method.
  if (m_detectionMethod == detectionThreshold) {
    return meanDescriptorDistanceTmp < m_detectionThreshold;
  } else {
    return score > m_detectionScore;
  }
}
3337 
                                     vpHomogeneousMatrix &cMo, double &error, double &elapsedTime, vpRect &boundingBox,
                                     vpImagePoint &centerOfGravity, bool (*func)(const vpHomogeneousMatrix &),
                                     const vpRect &rectangle)
{
  // Matching + pose estimation; on success the pose is used to localize the
  // object in the image.
  bool isMatchOk = matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
  if (isMatchOk) {
    // Use the pose estimated to project the model points in the image
    vpPoint pt;
    vpImagePoint imPt;
    std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
    size_t cpt = 0;
    for (std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
      pt = *it;
      pt.project(cMo);
      vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), imPt);
      modelImagePoints[cpt] = imPt;
    }

    // Build a polygon with the list of model image points to get the bounding
    // box
    vpPolygon polygon(modelImagePoints);
    boundingBox = polygon.getBoundingBox();

    // Compute the center of gravity of the current inlier keypoints
    // NOTE(review): if m_ransacInliers is empty this divides by zero and the
    // center of gravity becomes NaN — confirm matchPoint() guarantees at
    // least one inlier on success.
    double meanU = 0.0, meanV = 0.0;
    for (std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end(); ++it) {
      meanU += it->get_u();
      meanV += it->get_v();
    }

    meanU /= (double)m_ransacInliers.size();
    meanV /= (double)m_ransacInliers.size();

    centerOfGravity.set_u(meanU);
    centerOfGravity.set_v(meanV);
  }

  return isMatchOk;
}
3397 
3411  std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
3412  std::vector<cv::Mat> &listOfDescriptors,
3413  std::vector<vpImage<unsigned char> > *listOfAffineI)
3414 {
3415 #if 0
3416  cv::Mat img;
3417  vpImageConvert::convert(I, img);
3418  listOfKeypoints.clear();
3419  listOfDescriptors.clear();
3420 
3421  for (int tl = 1; tl < 6; tl++) {
3422  double t = pow(2, 0.5 * tl);
3423  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3424  std::vector<cv::KeyPoint> keypoints;
3425  cv::Mat descriptors;
3426 
3427  cv::Mat timg, mask, Ai;
3428  img.copyTo(timg);
3429 
3430  affineSkew(t, phi, timg, mask, Ai);
3431 
3432 
3433  if(listOfAffineI != NULL) {
3434  cv::Mat img_disp;
3435  bitwise_and(mask, timg, img_disp);
3437  vpImageConvert::convert(img_disp, tI);
3438  listOfAffineI->push_back(tI);
3439  }
3440 #if 0
3441  cv::Mat img_disp;
3442  cv::bitwise_and(mask, timg, img_disp);
3443  cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE ); // Create a window for display.
3444  cv::imshow( "Skew", img_disp );
3445  cv::waitKey(0);
3446 #endif
3447 
3448  for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3449  it != m_detectors.end(); ++it) {
3450  std::vector<cv::KeyPoint> kp;
3451  it->second->detect(timg, kp, mask);
3452  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3453  }
3454 
3455  double elapsedTime;
3456  extract(timg, keypoints, descriptors, elapsedTime);
3457 
3458  for(unsigned int i = 0; i < keypoints.size(); i++) {
3459  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3460  cv::Mat kpt_t = Ai * cv::Mat(kpt);
3461  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
3462  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
3463  }
3464 
3465  listOfKeypoints.push_back(keypoints);
3466  listOfDescriptors.push_back(descriptors);
3467  }
3468  }
3469 
3470 #else
3471  cv::Mat img;
3472  vpImageConvert::convert(I, img);
3473 
3474  // Create a vector for storing the affine skew parameters
3475  std::vector<std::pair<double, int> > listOfAffineParams;
3476  for (int tl = 1; tl < 6; tl++) {
3477  double t = pow(2, 0.5 * tl);
3478  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3479  listOfAffineParams.push_back(std::pair<double, int>(t, phi));
3480  }
3481  }
3482 
3483  listOfKeypoints.resize(listOfAffineParams.size());
3484  listOfDescriptors.resize(listOfAffineParams.size());
3485 
3486  if (listOfAffineI != NULL) {
3487  listOfAffineI->resize(listOfAffineParams.size());
3488  }
3489 
3490 #ifdef VISP_HAVE_OPENMP
3491 #pragma omp parallel for
3492 #endif
3493  for (int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
3494  std::vector<cv::KeyPoint> keypoints;
3495  cv::Mat descriptors;
3496 
3497  cv::Mat timg, mask, Ai;
3498  img.copyTo(timg);
3499 
3500  affineSkew(listOfAffineParams[(size_t)cpt].first, listOfAffineParams[(size_t)cpt].second, timg, mask, Ai);
3501 
3502  if (listOfAffineI != NULL) {
3503  cv::Mat img_disp;
3504  bitwise_and(mask, timg, img_disp);
3506  vpImageConvert::convert(img_disp, tI);
3507  (*listOfAffineI)[(size_t)cpt] = tI;
3508  }
3509 
3510 #if 0
3511  cv::Mat img_disp;
3512  cv::bitwise_and(mask, timg, img_disp);
3513  cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE ); // Create a window for display.
3514  cv::imshow( "Skew", img_disp );
3515  cv::waitKey(0);
3516 #endif
3517 
3518  for (std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3519  it != m_detectors.end(); ++it) {
3520  std::vector<cv::KeyPoint> kp;
3521  it->second->detect(timg, kp, mask);
3522  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3523  }
3524 
3525  double elapsedTime;
3526  extract(timg, keypoints, descriptors, elapsedTime);
3527 
3528  for (size_t i = 0; i < keypoints.size(); i++) {
3529  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3530  cv::Mat kpt_t = Ai * cv::Mat(kpt);
3531  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
3532  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
3533  }
3534 
3535  listOfKeypoints[(size_t)cpt] = keypoints;
3536  listOfDescriptors[(size_t)cpt] = descriptors;
3537  }
3538 #endif
3539 }
3540 
{
  // Restore every member to its construction-time default, then re-run
  // init() with the default ORB detector/extractor configured below.

  // vpBasicKeyPoint class
  referenceImagePointsList.clear();
  currentImagePointsList.clear();
  matchedReferencePoints.clear();
  _reference_computed = false;

  // Covariance / timing / detection state
  m_computeCovariance = false;
  m_covarianceMatrix = vpMatrix();
  m_currentImageId = 0;
  m_detectionMethod = detectionScore;
  m_detectionScore = 0.15;
  m_detectionThreshold = 100.0;
  m_detectionTime = 0.0;
  m_detectorNames.clear();
  m_detectors.clear();
  m_extractionTime = 0.0;
  m_extractorNames.clear();
  m_extractors.clear();
  // Matching state
  m_filteredMatches.clear();
  m_filterType = ratioDistanceThreshold;
  m_imageFormat = jpgImageFormat;
  m_knnMatches.clear();
  m_mapOfImageId.clear();
  m_mapOfImages.clear();
  m_matcher = cv::Ptr<cv::DescriptorMatcher>();
  // Brute-force Hamming matcher: suits the default binary ORB descriptors
  // configured at the end of this function.
  m_matcherName = "BruteForce-Hamming";
  m_matches.clear();
  m_matchingFactorThreshold = 2.0;
  m_matchingRatioThreshold = 0.85;
  m_matchingTime = 0.0;
  m_matchRansacKeyPointsToPoints.clear();
  // RANSAC / pose-estimation state
  m_nbRansacIterations = 200;
  m_nbRansacMinInlierCount = 100;
  m_objectFilteredPoints.clear();
  m_poseTime = 0.0;
  m_queryDescriptors = cv::Mat();
  m_queryFilteredKeyPoints.clear();
  m_queryKeyPoints.clear();
  m_ransacConsensusPercentage = 20.0;
  m_ransacFilterFlag = vpPose::NO_FILTER;
  m_ransacInliers.clear();
  m_ransacOutliers.clear();
  m_ransacParallel = true;
  m_ransacParallelNbThreads = 0;
  m_ransacReprojectionError = 6.0;
  m_ransacThreshold = 0.01;
  // Training data
  m_trainDescriptors = cv::Mat();
  m_trainKeyPoints.clear();
  m_trainPoints.clear();
  m_trainVpPoints.clear();
  m_useAffineDetection = false;
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  m_useBruteForceCrossCheck = true;
#endif
  m_useConsensusPercentage = false;
  m_useKnn = true; // as m_filterType == ratioDistanceThreshold
  m_useMatchTrainToQuery = false;
  m_useRansacVVS = true;
  m_useSingleMatchFilter = true;

  // Default detector/extractor pair.
  m_detectorNames.push_back("ORB");
  m_extractorNames.push_back("ORB");

  init();
}
3611 
3620 void vpKeyPoint::saveLearningData(const std::string &filename, bool binaryMode, const bool saveTrainingImages)
3621 {
3622  std::string parent = vpIoTools::getParent(filename);
3623  if (!parent.empty()) {
3624  vpIoTools::makeDirectory(parent);
3625  }
3626 
3627  std::map<int, std::string> mapOfImgPath;
3628  if (saveTrainingImages) {
3629 #ifdef VISP_HAVE_MODULE_IO
3630  // Save the training image files in the same directory
3631  unsigned int cpt = 0;
3632 
3633  for (std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end();
3634  ++it, cpt++) {
3635  if (cpt > 999) {
3636  throw vpException(vpException::fatalError, "The number of training images to save is too big !");
3637  }
3638 
3639  std::stringstream ss;
3640  ss << "train_image_" << std::setfill('0') << std::setw(3) << cpt;
3641 
3642  switch (m_imageFormat) {
3643  case jpgImageFormat:
3644  ss << ".jpg";
3645  break;
3646 
3647  case pngImageFormat:
3648  ss << ".png";
3649  break;
3650 
3651  case ppmImageFormat:
3652  ss << ".ppm";
3653  break;
3654 
3655  case pgmImageFormat:
3656  ss << ".pgm";
3657  break;
3658 
3659  default:
3660  ss << ".png";
3661  break;
3662  }
3663 
3664  std::string imgFilename = ss.str();
3665  mapOfImgPath[it->first] = imgFilename;
3666  vpImageIo::write(it->second, parent + (!parent.empty() ? "/" : "") + imgFilename);
3667  }
3668 #else
3669  std::cout << "Warning: in vpKeyPoint::saveLearningData() training images "
3670  "are not saved because "
3671  "visp_io module is not available !"
3672  << std::endl;
3673 #endif
3674  }
3675 
3676  bool have3DInfo = m_trainPoints.size() > 0;
3677  if (have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
3678  throw vpException(vpException::fatalError, "List of keypoints and list of 3D points have different size !");
3679  }
3680 
3681  if (binaryMode) {
3682  // Save the learning data into little endian binary file.
3683  std::ofstream file(filename.c_str(), std::ofstream::binary);
3684  if (!file.is_open()) {
3685  throw vpException(vpException::ioError, "Cannot create the file.");
3686  }
3687 
3688  // Write info about training images
3689  int nbImgs = (int)mapOfImgPath.size();
3690  vpIoTools::writeBinaryValueLE(file, nbImgs);
3691 
3692 #ifdef VISP_HAVE_MODULE_IO
3693  for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3694  // Write image_id
3695  int id = it->first;
3697 
3698  // Write image path
3699  std::string path = it->second;
3700  int length = (int)path.length();
3701  vpIoTools::writeBinaryValueLE(file, length);
3702 
3703  for (int cpt = 0; cpt < length; cpt++) {
3704  file.write((char *)(&path[(size_t)cpt]), sizeof(path[(size_t)cpt]));
3705  }
3706  }
3707 #endif
3708 
3709  // Write if we have 3D point information
3710  int have3DInfoInt = have3DInfo ? 1 : 0;
3711  vpIoTools::writeBinaryValueLE(file, have3DInfoInt);
3712 
3713  int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3714  int descriptorType = m_trainDescriptors.type();
3715 
3716  // Write the number of descriptors
3717  vpIoTools::writeBinaryValueLE(file, nRows);
3718 
3719  // Write the size of the descriptor
3720  vpIoTools::writeBinaryValueLE(file, nCols);
3721 
3722  // Write the type of the descriptor
3723  vpIoTools::writeBinaryValueLE(file, descriptorType);
3724 
3725  for (int i = 0; i < nRows; i++) {
3726  unsigned int i_ = (unsigned int)i;
3727  // Write u
3728  float u = m_trainKeyPoints[i_].pt.x;
3730 
3731  // Write v
3732  float v = m_trainKeyPoints[i_].pt.y;
3734 
3735  // Write size
3736  float size = m_trainKeyPoints[i_].size;
3737  vpIoTools::writeBinaryValueLE(file, size);
3738 
3739  // Write angle
3740  float angle = m_trainKeyPoints[i_].angle;
3741  vpIoTools::writeBinaryValueLE(file, angle);
3742 
3743  // Write response
3744  float response = m_trainKeyPoints[i_].response;
3745  vpIoTools::writeBinaryValueLE(file, response);
3746 
3747  // Write octave
3748  int octave = m_trainKeyPoints[i_].octave;
3749  vpIoTools::writeBinaryValueLE(file, octave);
3750 
3751  // Write class_id
3752  int class_id = m_trainKeyPoints[i_].class_id;
3753  vpIoTools::writeBinaryValueLE(file, class_id);
3754 
3755 // Write image_id
3756 #ifdef VISP_HAVE_MODULE_IO
3757  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3758  int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
3759  vpIoTools::writeBinaryValueLE(file, image_id);
3760 #else
3761  int image_id = -1;
3762  // file.write((char *)(&image_id), sizeof(image_id));
3763  vpIoTools::writeBinaryValueLE(file, image_id);
3764 #endif
3765 
3766  if (have3DInfo) {
3767  float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
3768  // Write oX
3770 
3771  // Write oY
3773 
3774  // Write oZ
3776  }
3777 
3778  for (int j = 0; j < nCols; j++) {
3779  // Write the descriptor value
3780  switch (descriptorType) {
3781  case CV_8U:
3782  file.write((char *)(&m_trainDescriptors.at<unsigned char>(i, j)),
3783  sizeof(m_trainDescriptors.at<unsigned char>(i, j)));
3784  break;
3785 
3786  case CV_8S:
3787  file.write((char *)(&m_trainDescriptors.at<char>(i, j)), sizeof(m_trainDescriptors.at<char>(i, j)));
3788  break;
3789 
3790  case CV_16U:
3791  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<unsigned short int>(i, j));
3792  break;
3793 
3794  case CV_16S:
3795  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<short int>(i, j));
3796  break;
3797 
3798  case CV_32S:
3799  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<int>(i, j));
3800  break;
3801 
3802  case CV_32F:
3803  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<float>(i, j));
3804  break;
3805 
3806  case CV_64F:
3807  vpIoTools::writeBinaryValueLE(file, m_trainDescriptors.at<double>(i, j));
3808  break;
3809 
3810  default:
3811  throw vpException(vpException::fatalError, "Problem with the data type of descriptors !");
3812  break;
3813  }
3814  }
3815  }
3816 
3817  file.close();
3818  } else {
3819 #ifdef VISP_HAVE_XML2
3820  xmlDocPtr doc = NULL;
3821  xmlNodePtr root_node = NULL, image_node = NULL, image_info_node = NULL, descriptors_info_node = NULL,
3822  descriptor_node = NULL, desc_node = NULL;
3823 
3824  /*
3825  * this initialize the library and check potential ABI mismatches
3826  * between the version it was compiled for and the actual shared
3827  * library used.
3828  */
3829  LIBXML_TEST_VERSION
3830 
3831  doc = xmlNewDoc(BAD_CAST "1.0");
3832  if (doc == NULL) {
3833  throw vpException(vpException::ioError, "Error with file " + filename);
3834  }
3835 
3836  root_node = xmlNewNode(NULL, BAD_CAST "LearningData");
3837  xmlDocSetRootElement(doc, root_node);
3838 
3839  std::stringstream ss;
3840 
3841  // Write the training images info
3842  image_node = xmlNewChild(root_node, NULL, BAD_CAST "TrainingImageInfo", NULL);
3843 
3844 #ifdef VISP_HAVE_MODULE_IO
3845  for (std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3846  image_info_node = xmlNewChild(image_node, NULL, BAD_CAST "trainImg", BAD_CAST it->second.c_str());
3847  ss.str("");
3848  ss << it->first;
3849  xmlNewProp(image_info_node, BAD_CAST "image_id", BAD_CAST ss.str().c_str());
3850  }
3851 #endif
3852 
3853  // Write information about descriptors
3854  descriptors_info_node = xmlNewChild(root_node, NULL, BAD_CAST "DescriptorsInfo", NULL);
3855 
3856  int nRows = m_trainDescriptors.rows, nCols = m_trainDescriptors.cols;
3857  int descriptorType = m_trainDescriptors.type();
3858 
3859  // Write the number of rows
3860  ss.str("");
3861  ss << nRows;
3862  xmlNewChild(descriptors_info_node, NULL, BAD_CAST "nrows", BAD_CAST ss.str().c_str());
3863 
3864  // Write the number of cols
3865  ss.str("");
3866  ss << nCols;
3867  xmlNewChild(descriptors_info_node, NULL, BAD_CAST "ncols", BAD_CAST ss.str().c_str());
3868 
3869  // Write the descriptors type
3870  ss.str("");
3871  ss << descriptorType;
3872  xmlNewChild(descriptors_info_node, NULL, BAD_CAST "type", BAD_CAST ss.str().c_str());
3873 
3874  for (int i = 0; i < nRows; i++) {
3875  unsigned int i_ = (unsigned int)i;
3876  descriptor_node = xmlNewChild(root_node, NULL, BAD_CAST "DescriptorInfo", NULL);
3877 
3878  ss.str("");
3879  // max_digits10 == 9 for float
3880  ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].pt.x;
3881  xmlNewChild(descriptor_node, NULL, BAD_CAST "u", BAD_CAST ss.str().c_str());
3882 
3883  ss.str("");
3884  // max_digits10 == 9 for float
3885  ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].pt.y;
3886  xmlNewChild(descriptor_node, NULL, BAD_CAST "v", BAD_CAST ss.str().c_str());
3887 
3888  ss.str("");
3889  // max_digits10 == 9 for float
3890  ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].size;
3891  xmlNewChild(descriptor_node, NULL, BAD_CAST "size", BAD_CAST ss.str().c_str());
3892 
3893  ss.str("");
3894  // max_digits10 == 9 for float
3895  ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].angle;
3896  xmlNewChild(descriptor_node, NULL, BAD_CAST "angle", BAD_CAST ss.str().c_str());
3897 
3898  ss.str("");
3899  // max_digits10 == 9 for float
3900  ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].response;
3901  xmlNewChild(descriptor_node, NULL, BAD_CAST "response", BAD_CAST ss.str().c_str());
3902 
3903  ss.str("");
3904  ss << m_trainKeyPoints[i_].octave;
3905  xmlNewChild(descriptor_node, NULL, BAD_CAST "octave", BAD_CAST ss.str().c_str());
3906 
3907  ss.str("");
3908  ss << m_trainKeyPoints[i_].class_id;
3909  xmlNewChild(descriptor_node, NULL, BAD_CAST "class_id", BAD_CAST ss.str().c_str());
3910 
3911  ss.str("");
3912 #ifdef VISP_HAVE_MODULE_IO
3913  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3914  ss << ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
3915  xmlNewChild(descriptor_node, NULL, BAD_CAST "image_id", BAD_CAST ss.str().c_str());
3916 #else
3917  ss << -1;
3918  xmlNewChild(descriptor_node, NULL, BAD_CAST "image_id", BAD_CAST ss.str().c_str());
3919 #endif
3920 
3921  if (have3DInfo) {
3922  ss.str("");
3923  // max_digits10 == 9 for float
3924  ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].x;
3925  xmlNewChild(descriptor_node, NULL, BAD_CAST "oX", BAD_CAST ss.str().c_str());
3926 
3927  ss.str("");
3928  // max_digits10 == 9 for float
3929  ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].y;
3930  xmlNewChild(descriptor_node, NULL, BAD_CAST "oY", BAD_CAST ss.str().c_str());
3931 
3932  ss.str("");
3933  // max_digits10 == 9 for float
3934  ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].z;
3935  xmlNewChild(descriptor_node, NULL, BAD_CAST "oZ", BAD_CAST ss.str().c_str());
3936  }
3937 
3938  desc_node = xmlNewChild(descriptor_node, NULL, BAD_CAST "desc", NULL);
3939 
3940  for (int j = 0; j < nCols; j++) {
3941  ss.str("");
3942 
3943  switch (descriptorType) {
3944  case CV_8U: {
3945  // Promote an unsigned char to an int
3946  // val_tmp holds the numeric value that will be written
3947  // We save the value in numeric form otherwise libxml2 will not be
3948  // able to parse A better solution could be possible
3949  int val_tmp = m_trainDescriptors.at<unsigned char>(i, j);
3950  ss << val_tmp;
3951  } break;
3952 
3953  case CV_8S: {
3954  // Promote a char to an int
3955  // val_tmp holds the numeric value that will be written
3956  // We save the value in numeric form otherwise libxml2 will not be
3957  // able to parse A better solution could be possible
3958  int val_tmp = m_trainDescriptors.at<char>(i, j);
3959  ss << val_tmp;
3960  } break;
3961 
3962  case CV_16U:
3963  ss << m_trainDescriptors.at<unsigned short int>(i, j);
3964  break;
3965 
3966  case CV_16S:
3967  ss << m_trainDescriptors.at<short int>(i, j);
3968  break;
3969 
3970  case CV_32S:
3971  ss << m_trainDescriptors.at<int>(i, j);
3972  break;
3973 
3974  case CV_32F:
3975  // max_digits10 == 9 for float
3976  ss << std::fixed << std::setprecision(9) << m_trainDescriptors.at<float>(i, j);
3977  break;
3978 
3979  case CV_64F:
3980  // max_digits10 == 17 for double
3981  ss << std::fixed << std::setprecision(17) << m_trainDescriptors.at<double>(i, j);
3982  break;
3983 
3984  default:
3985  throw vpException(vpException::fatalError, "Problem with the data type of descriptors !");
3986  break;
3987  }
3988  xmlNewChild(desc_node, NULL, BAD_CAST "val", BAD_CAST ss.str().c_str());
3989  }
3990  }
3991 
3992  xmlSaveFormatFileEnc(filename.c_str(), doc, "UTF-8", 1);
3993 
3994  /*free the document */
3995  xmlFreeDoc(doc);
3996 
3997  /*
3998  *Free the global variables that may
3999  *have been allocated by the parser.
4000  */
4001  xmlCleanupParser();
4002 #else
4003  std::cerr << "Error: libxml2 is required !" << std::endl;
4004 #endif
4005  }
4006 }
4007 
4008 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
4009 // From OpenCV 2.4.11 source code.
4011  KeypointResponseGreaterThanThreshold(float _value) : value(_value) {}
4012  inline bool operator()(const cv::KeyPoint &kpt) const { return kpt.response >= value; }
4013  float value;
4014 };
4015 
4017  inline bool operator()(const cv::KeyPoint &kp1, const cv::KeyPoint &kp2) const { return kp1.response > kp2.response; }
4018 };
4019 
4020 // takes keypoints and culls them by the response
4021 void vpKeyPoint::KeyPointsFilter::retainBest(std::vector<cv::KeyPoint> &keypoints, int n_points)
4022 {
4023  // this is only necessary if the keypoints size is greater than the number
4024  // of desired points.
4025  if (n_points >= 0 && keypoints.size() > (size_t)n_points) {
4026  if (n_points == 0) {
4027  keypoints.clear();
4028  return;
4029  }
4030  // first use nth element to partition the keypoints into the best and
4031  // worst.
4032  std::nth_element(keypoints.begin(), keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreater());
4033  // this is the boundary response, and in the case of FAST may be ambiguous
4034  float ambiguous_response = keypoints[(size_t)(n_points - 1)].response;
4035  // use std::partition to grab all of the keypoints with the boundary
4036  // response.
4037  std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4038  keypoints.begin() + n_points, keypoints.end(), KeypointResponseGreaterThanThreshold(ambiguous_response));
4039  // resize the keypoints, given this new end point. nth_element and
4040  // partition reordered the points inplace
4041  keypoints.resize((size_t)(new_end - keypoints.begin()));
4042  }
4043 }
4044 
4046  RoiPredicate(const cv::Rect &_r) : r(_r) {}
4047 
4048  bool operator()(const cv::KeyPoint &keyPt) const { return !r.contains(keyPt.pt); }
4049 
4050  cv::Rect r;
4051 };
4052 
4053 void vpKeyPoint::KeyPointsFilter::runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize,
4054  int borderSize)
4055 {
4056  if (borderSize > 0) {
4057  if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4058  keypoints.clear();
4059  else
4060  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(),
4061  RoiPredicate(cv::Rect(
4062  cv::Point(borderSize, borderSize),
4063  cv::Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
4064  keypoints.end());
4065  }
4066 }
4067 
4069  SizePredicate(float _minSize, float _maxSize) : minSize(_minSize), maxSize(_maxSize) {}
4070 
4071  bool operator()(const cv::KeyPoint &keyPt) const
4072  {
4073  float size = keyPt.size;
4074  return (size < minSize) || (size > maxSize);
4075  }
4076 
4077  float minSize, maxSize;
4078 };
4079 
4080 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize)
4081 {
4082  CV_Assert(minSize >= 0);
4083  CV_Assert(maxSize >= 0);
4084  CV_Assert(minSize <= maxSize);
4085 
4086  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), SizePredicate(minSize, maxSize)), keypoints.end());
4087 }
4088 
4090 {
4091 public:
4092  MaskPredicate(const cv::Mat &_mask) : mask(_mask) {}
4093  bool operator()(const cv::KeyPoint &key_pt) const
4094  {
4095  return mask.at<uchar>((int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f)) == 0;
4096  }
4097 
4098 private:
4099  const cv::Mat mask;
4100  MaskPredicate &operator=(const MaskPredicate &);
4101 };
4102 
4103 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask)
4104 {
4105  if (mask.empty())
4106  return;
4107 
4108  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
4109 }
4110 
4112  KeyPoint_LessThan(const std::vector<cv::KeyPoint> &_kp) : kp(&_kp) {}
4113  bool operator()(/*int i, int j*/ size_t i, size_t j) const
4114  {
4115  const cv::KeyPoint &kp1 = (*kp)[/*(size_t)*/ i];
4116  const cv::KeyPoint &kp2 = (*kp)[/*(size_t)*/ j];
4117  if (!vpMath::equal(kp1.pt.x, kp2.pt.x,
4118  std::numeric_limits<float>::epsilon())) { // if (kp1.pt.x !=
4119  // kp2.pt.x) {
4120  return kp1.pt.x < kp2.pt.x;
4121  }
4122 
4123  if (!vpMath::equal(kp1.pt.y, kp2.pt.y,
4124  std::numeric_limits<float>::epsilon())) { // if (kp1.pt.y !=
4125  // kp2.pt.y) {
4126  return kp1.pt.y < kp2.pt.y;
4127  }
4128 
4129  if (!vpMath::equal(kp1.size, kp2.size,
4130  std::numeric_limits<float>::epsilon())) { // if (kp1.size !=
4131  // kp2.size) {
4132  return kp1.size > kp2.size;
4133  }
4134 
4135  if (!vpMath::equal(kp1.angle, kp2.angle,
4136  std::numeric_limits<float>::epsilon())) { // if (kp1.angle !=
4137  // kp2.angle) {
4138  return kp1.angle < kp2.angle;
4139  }
4140 
4141  if (!vpMath::equal(kp1.response, kp2.response,
4142  std::numeric_limits<float>::epsilon())) { // if (kp1.response !=
4143  // kp2.response) {
4144  return kp1.response > kp2.response;
4145  }
4146 
4147  if (kp1.octave != kp2.octave) {
4148  return kp1.octave > kp2.octave;
4149  }
4150 
4151  if (kp1.class_id != kp2.class_id) {
4152  return kp1.class_id > kp2.class_id;
4153  }
4154 
4155  return i < j;
4156  }
4157  const std::vector<cv::KeyPoint> *kp;
4158 };
4159 
/*!
  Remove duplicated keypoints: keypoints sharing the same position, size and
  angle (within float epsilon) are collapsed to a single instance, while the
  survivors keep their original relative order.

  \param keypoints : Keypoint list deduplicated in place.
*/
void vpKeyPoint::KeyPointsFilter::removeDuplicated(std::vector<cv::KeyPoint> &keypoints)
{
  size_t i, j, n = keypoints.size();
  std::vector<size_t> kpidx(n);
  std::vector<uchar> mask(n, (uchar)1);

  for (i = 0; i < n; i++) {
    kpidx[i] = i;
  }
  // Sort indices (not the keypoints) so duplicates become adjacent;
  // KeyPoint_LessThan is a total order over all keypoint fields.
  std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
  // Walk the sorted index list: j marks the first element of the current
  // group of equal keypoints; every later element equal to it is flagged.
  for (i = 1, j = 0; i < n; i++) {
    cv::KeyPoint &kp1 = keypoints[kpidx[i]];
    cv::KeyPoint &kp2 = keypoints[kpidx[j]];
    // if (kp1.pt.x != kp2.pt.x || kp1.pt.y != kp2.pt.y || kp1.size !=
    // kp2.size || kp1.angle != kp2.angle) {
    if (!vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
        !vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
        !vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
        !vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
      j = i; // new group starts here
    } else {
      mask[kpidx[i]] = 0; // duplicate of group leader: flag for removal
    }
  }

  // Compact the vector in place, keeping only unflagged keypoints
  // (this preserves the original order of the survivors).
  for (i = j = 0; i < n; i++) {
    if (mask[i]) {
      if (i != j) {
        keypoints[j] = keypoints[i];
      }
      j++;
    }
  }
  keypoints.resize(j);
}
4195 
4196 /*
4197  * PyramidAdaptedFeatureDetector
4198  */
/*!
  Build a pyramid-adapted feature detector that runs \p _detector on every
  level of a Gaussian image pyramid (see detectImpl()).

  \param _detector : Underlying feature detector applied at each level.
  \param _maxLevel : Index of the coarsest pyramid level; level 0 is the
  original image resolution.
*/
vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &_detector,
                                                                         int _maxLevel)
  : detector(_detector), maxLevel(_maxLevel)
{
}
4204 
4205 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty() const
4206 {
4207  return detector.empty() || (cv::FeatureDetector *)detector->empty();
4208 }
4209 
/*!
  Detect keypoints over a Gaussian image pyramid.

  \param image : Input image.
  \param keypoints : Detected keypoints, expressed in the coordinates of the
  full-resolution image.
  \param mask : Optional detection mask.
*/
void vpKeyPoint::PyramidAdaptedFeatureDetector::detect(cv::InputArray image,
                                                       CV_OUT std::vector<cv::KeyPoint> &keypoints, cv::InputArray mask)
{
  // Delegate to the pyramid implementation on the underlying matrices.
  detectImpl(image.getMat(), keypoints, mask.getMat());
}
4215 
/*!
  Run the underlying detector on each level of a Gaussian pyramid
  (levels 0..maxLevel), rescale the detected keypoints back to the
  full-resolution image, and finally discard keypoints falling outside the
  original mask.

  \param image : Input image (pyramid level 0).
  \param keypoints : Accumulated keypoints from all pyramid levels, in
  full-resolution coordinates; the pyramid level is stored in the octave field.
  \param mask : Optional detection mask at full resolution.
*/
void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
                                                           const cv::Mat &mask) const
{
  cv::Mat src = image;
  cv::Mat src_mask = mask;

  cv::Mat dilated_mask;
  if (!mask.empty()) {
    // Dilate the mask so keypoints near the mask boundary survive detection
    // at coarse levels, and normalize it to strict 0/255 values.
    cv::dilate(mask, dilated_mask, cv::Mat());
    cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
    mask255.setTo(cv::Scalar(255), dilated_mask != 0);
    dilated_mask = mask255;
  }

  // multiplier is the scale factor from the current level back to level 0.
  for (int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2) {
    // Detect on current level of the pyramid
    std::vector<cv::KeyPoint> new_pts;
    detector->detect(src, new_pts, src_mask);
    std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end = new_pts.end();
    for (; it != end; ++it) {
      // Map position and size back to full resolution; record the pyramid
      // level in the octave field.
      it->pt.x *= multiplier;
      it->pt.y *= multiplier;
      it->size *= multiplier;
      it->octave = l;
    }
    keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());

    // Downsample
    if (l < maxLevel) {
      cv::Mat dst;
      pyrDown(src, dst);
      src = dst;

      // Resize the dilated mask to match the new pyramid level.
      if (!mask.empty())
        resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
    }
  }

  // Final pass with the original (non-dilated) mask: drop keypoints whose
  // upscaled position lands on masked-out pixels.
  if (!mask.empty())
    vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
}
4257 #endif
4258 
4259 #elif !defined(VISP_BUILD_SHARED_LIBS)
4260 // Work around to avoid warning: libvisp_vision.a(vpKeyPoint.cpp.o) has no
4261 // symbols
// Intentionally empty: gives the translation unit a symbol when OpenCV is
// absent. The stray trailing semicolon after the body (an empty declaration
// that triggers -pedantic warnings) has been removed.
void dummy_vpKeyPoint() {}
4263 #endif
void getTrainKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
Used to indicate that a value is not in the allowed range.
Definition: vpException.h:97
Implementation of a matrix and operations on matrices.
Definition: vpMatrix.h:104
void displayMatching(const vpImage< unsigned char > &IRef, vpImage< unsigned char > &IMatching, unsigned int crossSize, unsigned int lineThickness=1, const vpColor &color=vpColor::green)
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, bool(*func)(const vpHomogeneousMatrix &)=NULL)
Definition: vpPose.cpp:362
double getTop() const
Definition: vpRect.h:175
static void compute3D(const cv::KeyPoint &candidate, const std::vector< vpPoint > &roi, const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point)
Definition: vpKeyPoint.cpp:444
RoiPredicate(const cv::Rect &_r)
void set_oZ(const double oZ)
Set the point Z coordinate in the object frame.
Definition: vpPoint.cpp:465
void detectExtractAffine(const vpImage< unsigned char > &I, std::vector< std::vector< cv::KeyPoint > > &listOfKeypoints, std::vector< cv::Mat > &listOfDescriptors, std::vector< vpImage< unsigned char > > *listOfAffineI=NULL)
double get_u0() const
void getQueryKeyPoints(std::vector< cv::KeyPoint > &keyPoints) const
double get_i() const
Definition: vpImagePoint.h:204
unsigned int getWidth() const
Definition: vpImage.h:239
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static bool isAbsolutePathname(const std::string &pathname)
Definition: vpIoTools.cpp:1582
vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType, const std::string &matcherName, const vpFilterMatchingType &filterType=ratioDistanceThreshold)
Definition: vpKeyPoint.cpp:79
void createImageMatching(vpImage< unsigned char > &IRef, vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void readBinaryValueLE(std::ifstream &file, int16_t &short_value)
Definition: vpIoTools.cpp:1864
static void convertPoint(const vpCameraParameters &cam, const double &x, const double &y, double &u, double &v)
static bool isNaN(const double value)
Definition: vpMath.cpp:84
Class to define colors available for display functionalities.
Definition: vpColor.h:120
static bool equal(double x, double y, double s=0.001)
Definition: vpMath.h:290
static const vpColor none
Definition: vpColor.h:192
double get_oY() const
Get the point Y coordinate in the object frame.
Definition: vpPoint.cpp:422
Error that can be emitted by ViSP classes.
Definition: vpException.h:71
void setRansacThreshold(const double &t)
Definition: vpPose.h:248
void set_x(const double x)
Set the point x coordinate in the image plane.
Definition: vpPoint.cpp:470
double getHeight() const
Definition: vpRect.h:149
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
std::vector< unsigned int > getRansacInlierIndex() const
Definition: vpPose.h:259
double get_py() const
double get_y() const
Get the point y coordinate in the image plane.
Definition: vpPoint.cpp:431
bool operator()(const cv::KeyPoint &keyPt) const
double getRight() const
Definition: vpRect.h:162
static std::string getParent(const std::string &pathname)
Definition: vpIoTools.cpp:1477
static const vpColor green
Definition: vpColor.h:183
void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector< cv::DMatch > &matches, double &elapsedTime)
static int round(const double x)
Definition: vpMath.h:235
double get_j() const
Definition: vpImagePoint.h:215
void insertImageMatching(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, vpImage< unsigned char > &IMatching)
VISP_EXPORT double measureTimeMs()
Definition: vpTime.cpp:88
static const vpColor red
Definition: vpColor.h:180
Class that defines what is a point.
Definition: vpPoint.h:58
bool isInside(const vpImagePoint &iP, const PointInPolygonMethod &method=PnPolyRayCasting) const
Definition: vpPolygon.cpp:317
static void write(const vpImage< unsigned char > &I, const std::string &filename)
Definition: vpImageIo.cpp:375
static void makeDirectory(const char *dirname)
Definition: vpIoTools.cpp:597
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
Definition: vpKeyPoint.cpp:528
Defines a generic 2D polygon.
Definition: vpPolygon.h:103
const char * what() const
double getBottom() const
Definition: vpRect.h:94
vpRect getBoundingBox() const
Definition: vpPolygon.h:177
double getWidth() const
Definition: vpRect.h:197
double get_v0() const
vpFeatureDetectorType
Definition: vpKeyPoint.h:264
void set_u(const double u)
Definition: vpImagePoint.h:226
static void compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpCylinder > &cylinders, const std::vector< std::vector< std::vector< vpImagePoint > > > &vectorOfCylinderRois, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
Definition: vpKeyPoint.cpp:649
bool _reference_computed
flag to indicate if the reference has been built.
void set_v(const double v)
Definition: vpImagePoint.h:237
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
Definition: vpPose.h:78
void setNbParallelRansacThreads(const int nb)
Definition: vpPose.h:316
Generic class defining intrinsic camera parameters.
KeyPoint_LessThan(const std::vector< cv::KeyPoint > &_kp)
double get_oZ() const
Get the point Z coordinate in the object frame.
Definition: vpPoint.cpp:424
void set_y(const double y)
Set the point y coordinate in the image plane.
Definition: vpPoint.cpp:472
void set_oX(const double oX)
Set the point X coordinate in the object frame.
Definition: vpPoint.cpp:461
bool operator()(const cv::KeyPoint &kp1, const cv::KeyPoint &kp2) const
void display(const vpImage< unsigned char > &IRef, const vpImage< unsigned char > &ICurrent, unsigned int size=3)
vpFeatureDescriptorType
Definition: vpKeyPoint.h:292
unsigned int buildReference(const vpImage< unsigned char > &I)
Definition: vpKeyPoint.cpp:236
double get_x() const
Get the point x coordinate in the image plane.
Definition: vpPoint.cpp:429
bool operator()(const cv::KeyPoint &keyPt) const
vpMatrix getCovarianceMatrix() const
Definition: vpPose.h:279
void insert(const vpImage< Type > &src, const vpImagePoint &topLeft)
Definition: vpImage.h:1188
std::vector< vpImagePoint > referenceImagePointsList
void getObjectPoints(std::vector< cv::Point3f > &objectPoints) const
double get_px() const
static void writeBinaryValueLE(std::ofstream &file, const int16_t short_value)
Definition: vpIoTools.cpp:1942
unsigned int matchPoint(const vpImage< unsigned char > &I)
void setRansacMaxTrials(const int &rM)
Definition: vpPose.h:257
void setRansacNbInliersToReachConsensus(const unsigned int &nbC)
Definition: vpPose.h:247
KeypointResponseGreaterThanThreshold(float _value)
static void displayCircle(const vpImage< unsigned char > &I, const vpImagePoint &center, unsigned int radius, const vpColor &color, bool fill=false, unsigned int thickness=1)
double get_oX() const
Get the point X coordinate in the object frame.
Definition: vpPoint.cpp:420
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
void setUseParallelRansac(const bool use)
Definition: vpPose.h:330
void loadLearningData(const std::string &filename, const bool binaryMode=false, const bool append=false)
void setWorldCoordinates(const double oX, const double oY, const double oZ)
Definition: vpPoint.cpp:113
static void read(vpImage< unsigned char > &I, const std::string &filename)
Definition: vpImageIo.cpp:207
Implementation of column vector and the associated operations.
Definition: vpColVector.h:72
const std::vector< cv::KeyPoint > * kp
void set_oY(const double oY)
Set the point Y coordinate in the object frame.
Definition: vpPoint.cpp:463
void saveLearningData(const std::string &filename, const bool binaryMode=false, const bool saveTrainingImages=true)
void getTrainPoints(std::vector< cv::Point3f > &points) const
vpHomogeneousMatrix inverse() const
vpFilterMatchingType
Definition: vpKeyPoint.h:233
double getB() const
Definition: vpPlane.h:104
void setRansacFilterFlag(const RANSAC_FILTER_FLAGS &flag)
Definition: vpPose.h:299
bool operator()(size_t i, size_t j) const
bool matchPointAndDetect(const vpImage< unsigned char > &I, vpRect &boundingBox, vpImagePoint &centerOfGravity, const bool isPlanarObject=true, std::vector< vpImagePoint > *imPts1=NULL, std::vector< vpImagePoint > *imPts2=NULL, double *meanDescriptorDistance=NULL, double *detectionScore=NULL, const vpRect &rectangle=vpRect())
std::vector< unsigned int > matchedReferencePoints
double getA() const
Definition: vpPlane.h:102
unsigned int getHeight() const
Definition: vpImage.h:178
Defines a rectangle in the plane.
Definition: vpRect.h:78
std::vector< vpImagePoint > currentImagePointsList
double getC() const
Definition: vpPlane.h:106
bool computePose(const std::vector< cv::Point2f > &imagePoints, const std::vector< cv::Point3f > &objectPoints, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector< int > &inlierIndex, double &elapsedTime, bool(*func)(const vpHomogeneousMatrix &)=NULL)
Definition: vpKeyPoint.cpp:797
MaskPredicate(const cv::Mat &_mask)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Definition: vpImagePoint.h:88
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1)
This class defines the container for a plane geometrical structure.
Definition: vpPlane.h:58
void loadConfigFile(const std::string &configFile)
void reset()
SizePredicate(float _minSize, float _maxSize)
void addPoint(const vpPoint &P)
Definition: vpPose.cpp:137
double getLeft() const
Definition: vpRect.h:156
static void convertFromOpenCV(const cv::KeyPoint &from, vpImagePoint &to)
Definition: vpConvert.cpp:226
Class that consider the case of a translation vector.
Implementation of a rotation vector as axis-angle minimal representation.
bool operator()(const cv::KeyPoint &key_pt) const
std::vector< vpPoint > getRansacInliers() const
Definition: vpPose.h:260
bool operator()(const cv::KeyPoint &kpt) const
void setCovarianceComputation(const bool &flag)
Definition: vpPose.h:268
double getD() const
Definition: vpPlane.h:108
void set_ij(const double ii, const double jj)
Definition: vpImagePoint.h:189
vpMatchingMethodEnum getMatchingMethod() const
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
void initMatcher(const std::string &matcherName)
void extract(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, cv::Mat &descriptors, std::vector< cv::Point3f > *trainPoints=NULL)
void parse(const std::string &filename)
bool detect(const vpImage< unsigned char > &I)