/****************************************************************************
 *
 * This file is part of the ViSP software.
 * Copyright (C) 2005 - 2014 by INRIA. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * ("GPL") version 2 as published by the Free Software Foundation.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact INRIA about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://www.irisa.fr/lagadic/visp/visp.html for more information.
 *
 * This software was developed at:
 * INRIA Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 * http://www.irisa.fr/lagadic
 *
 * If you have questions regarding the use of this file, please contact
 * INRIA at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Key point functionalities.
 *
 * Authors:
 * Souriya Trinh
 *
 *****************************************************************************/

#include <cfloat>  // DBL_MAX
#include <fstream> // std::ifstream
#include <limits>
#include <numeric> // std::accumulate, std::inner_product

#include <visp/vpKeyPoint.h>

#if (VISP_HAVE_OPENCV_VERSION >= 0x020101)

#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
# include <opencv2/calib3d/calib3d.hpp>
#endif


//Specific Type transformation functions
/*!
   Convert a list of cv::DMatch to a cv::DMatch (extract the first cv::DMatch,
   the nearest neighbor).

   \param knnMatches : List of cv::DMatch.
   \return The nearest neighbor.
 */
inline cv::DMatch knnToDMatch(const std::vector<cv::DMatch> &knnMatches) {
  if(knnMatches.size() > 0) {
    return knnMatches[0];
  }

  return cv::DMatch();
}

/*!
   Convert a pair (cv::KeyPoint, cv::Point3f) to a vpImagePoint (extract the
   2D image coordinates of the keypoint).

   \param pair : Pair with a keypoint and its corresponding 3D point.
   \return The keypoint coordinates as a vpImagePoint.
 */
inline vpImagePoint matchRansacToVpImage(const std::pair<cv::KeyPoint, cv::Point3f> &pair) {
  return vpImagePoint(pair.first.pt.y, pair.first.pt.x);
}

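//Note: these helpers are convenient with std::transform; a sketch (assuming
//m_matches has been resized to m_knnMatches.size() beforehand):
//  std::transform(m_knnMatches.begin(), m_knnMatches.end(),
//                 m_matches.begin(), knnToDMatch);
//keeps only the nearest neighbor of each k-nn match list.
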
//TODO: Try to implement a pyramidal feature detection
//#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
//class MaskPredicate
//{
//public:
//  MaskPredicate( const cv::Mat& _mask ) : mask(_mask) {}
//  bool operator() (const cv::KeyPoint& key_pt) const
//  {
//    return mask.at<uchar>( (int)(key_pt.pt.y + 0.5f), (int)(key_pt.pt.x + 0.5f) ) == 0;
//  }
//
//private:
//  const cv::Mat mask;
//  MaskPredicate& operator=(const MaskPredicate&);
//};
//
//void vpKeyPoint::runByPixelsMask( std::vector<cv::KeyPoint>& keypoints, const cv::Mat& mask )
//{
//  if( mask.empty() )
//  {
//    return;
//  }
//
//  keypoints.erase(std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)), keypoints.end());
//}
//
//void vpKeyPoint::pyramidFeatureDetection(cv::Ptr<cv::FeatureDetector> &detector, const cv::Mat& image, std::vector<cv::KeyPoint>& keypoints, const cv::Mat& mask,
//                                         const int maxLevel)
//{
//  cv::Mat src = image;
//  cv::Mat src_mask = mask;
//
//  cv::Mat dilated_mask;
//  if( !mask.empty() )
//  {
//    dilate( mask, dilated_mask, cv::Mat() );
//    cv::Mat mask255( mask.size(), CV_8UC1, cv::Scalar(0) );
//    mask255.setTo( cv::Scalar(255), dilated_mask != 0 );
//    dilated_mask = mask255;
//  }
//
//  for( int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2 )
//  {
//    // Detect on current level of the pyramid
//    std::vector<cv::KeyPoint> new_pts;
//    detector->detect( src, new_pts, src_mask );
//    std::vector<cv::KeyPoint>::iterator it = new_pts.begin(),
//                                        end = new_pts.end();
//    for( ; it != end; ++it)
//    {
//      it->pt.x *= multiplier;
//      it->pt.y *= multiplier;
//      it->size *= multiplier;
//      it->octave = l;
//    }
//    keypoints.insert( keypoints.end(), new_pts.begin(), new_pts.end() );
//
//    // Downsample
//    if( l < maxLevel )
//    {
//      cv::Mat dst;
//      pyrDown( src, dst );
//      src = dst;
//
//      if( !mask.empty() )
//        resize( dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA );
//    }
//  }
//
//  if( !mask.empty() )
//  {
//    //KeyPointsFilter::runByPixelsMask( keypoints, mask );
//  }
//}
//#endif

vpKeyPoint::vpKeyPoint(const std::string &detectorName, const std::string &extractorName,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(),
    m_detectors(), m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
    m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(), m_matcherName(matcherName), m_matches(),
    m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85), m_matchingTime(0.),
    m_matchQueryToTrainKeyPoints(), m_matchRansacKeyPointsToPoints(), m_matchRansacQueryToTrainKeyPoints(),
    m_nbRansacIterations(200), m_nbRansacMinInlierCount(100), m_objectFilteredPoints(),
    m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacInliers(), m_ransacOutliers(), m_ransacReprojectionError(6.0),
    m_ransacThreshold(0.001), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(),
    m_trainVpPoints(), m_useAffineDetection(false), m_useBruteForceCrossCheck(true),
    m_useConsensusPercentage(false), m_useKnn(false), m_useRansacVVS(false)
{
  //Use k-nearest neighbors (knn) to retrieve the two best matches for a keypoint.
  //This is needed only for the ratio-based filtering methods
  //(ratioDistanceThreshold and stdAndRatioDistanceThreshold).
  if(filterType == ratioDistanceThreshold || filterType == stdAndRatioDistanceThreshold) {
    m_useKnn = true;
  }

  m_detectorNames.push_back(detectorName);
  m_extractorNames.push_back(extractorName);

  init();
}

vpKeyPoint::vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
                       const std::string &matcherName, const vpFilterMatchingType &filterType)
  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
    m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
    m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
    m_filterType(filterType), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(), m_matcher(), m_matcherName(matcherName),
    m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85), m_matchingTime(0.),
    m_matchQueryToTrainKeyPoints(), m_matchRansacKeyPointsToPoints(), m_matchRansacQueryToTrainKeyPoints(),
    m_nbRansacIterations(200), m_nbRansacMinInlierCount(100), m_objectFilteredPoints(),
    m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
    m_ransacConsensusPercentage(20.0), m_ransacInliers(), m_ransacOutliers(), m_ransacReprojectionError(6.0),
    m_ransacThreshold(0.001), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(),
    m_trainVpPoints(), m_useAffineDetection(false), m_useBruteForceCrossCheck(true),
    m_useConsensusPercentage(false), m_useKnn(false), m_useRansacVVS(false)
{
  //Use k-nearest neighbors (knn) to retrieve the two best matches for a keypoint.
  //This is needed only for the ratio-based filtering methods
  //(ratioDistanceThreshold and stdAndRatioDistanceThreshold).
  if(filterType == ratioDistanceThreshold || filterType == stdAndRatioDistanceThreshold) {
    m_useKnn = true;
  }

  init();
}
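
//Example usage (a minimal sketch, assuming Iref and Icur are vpImage<unsigned char>
//images already loaded; the detector/extractor/matcher names actually available
//depend on how OpenCV was built):
//  vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
//  keypoint.buildReference(Iref);                      // learn the reference keypoints
//  unsigned int nbMatches = keypoint.matchPoint(Icur); // match against a query image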

void vpKeyPoint::affineSkew(double tilt, double phi, cv::Mat& img,
                            cv::Mat& mask, cv::Mat& Ai) {
  int h = img.rows;
  int w = img.cols;

  mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));

  cv::Mat A = cv::Mat::eye(2, 3, CV_32F);

  //if (phi != 0.0) {
  if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    phi *= M_PI / 180.;
    double s = sin(phi);
    double c = cos(phi);

    A = (cv::Mat_<float>(2, 2) << c, -s, s, c);

    cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
    cv::Mat tcorners = corners * A.t();
    cv::Mat tcorners_x, tcorners_y;
    tcorners.col(0).copyTo(tcorners_x);
    tcorners.col(1).copyTo(tcorners_y);
    std::vector<cv::Mat> channels;
    channels.push_back(tcorners_x);
    channels.push_back(tcorners_y);
    merge(channels, tcorners);

    cv::Rect rect = boundingRect(tcorners);
    A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);

    cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height),
                   cv::INTER_LINEAR, cv::BORDER_REPLICATE);
  }
  //if (tilt != 1.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
    double s = 0.8 * sqrt(tilt * tilt - 1);
    cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
    cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
    A.row(0) = A.row(0) / tilt;
  }
  //if (tilt != 1.0 || phi != 0.0) {
  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() || std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
    h = img.rows;
    w = img.cols;
    cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
  }
  invertAffineTransform(A, Ai);
}
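
//Note: affineSkew() implements the affine view simulation used by ASIFT
//(Morel & Yu, "ASIFT: A New Framework for Fully Affine Invariant Image
//Comparison", 2009): a rotation of angle phi followed by a directional tilt of
//factor t, with an anti-aliasing Gaussian blur of standard deviation
//0.8*sqrt(t*t-1) applied along the subsampled direction.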

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I) {
  return buildReference(I, vpRect());
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I,
                                        const vpImagePoint &iP,
                                        const unsigned int height, const unsigned int width) {

  return buildReference(I, vpRect(iP, width, height));
}

unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I,
                                        const vpRect &rectangle) {
  //Reset the variables used when dealing with 3D models;
  //as no 3D point list is passed, we don't need them
  m_trainPoints.clear();
  m_mapOfImageId.clear();
  m_mapOfImages.clear();
  m_currentImageId = 1;

  if(m_useAffineDetection) {
    std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
    std::vector<cv::Mat> listOfTrainDescriptors;

    //Detect keypoints and extract descriptors on multiple images
    detectExtractAffine(I, listOfTrainKeyPoints, listOfTrainDescriptors);

    //Flatten the different train lists
    m_trainKeyPoints.clear();
    for(std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
        it != listOfTrainKeyPoints.end(); ++it) {
      m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
    }

    bool first = true;
    for(std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end(); ++it) {
      if(first) {
        first = false;
        it->copyTo(m_trainDescriptors);
      } else {
        m_trainDescriptors.push_back(*it);
      }
    }
  } else {
    detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
    extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
  }

  //Save the correspondence keypoint class_id with the training image_id in a map.
  //Used to display the matching with all the training images.
  for(std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  //Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  //Convert OpenCV type to ViSP type for compatibility
  vpConvert::convertFromOpenCV(m_trainKeyPoints, referenceImagePointsList);

  _reference_computed = true;

  return static_cast<unsigned int>(m_trainKeyPoints.size());
}

void vpKeyPoint::buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
                                std::vector<cv::Point3f> &points3f, bool append) {
  cv::Mat trainDescriptors;
  extract(I, trainKeyPoints, trainDescriptors, m_extractionTime);

  buildReference(I, trainKeyPoints, trainDescriptors, points3f, append);
}

void vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
                                const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f, bool append) {
  if(!append) {
    m_currentImageId = 0;
    m_mapOfImageId.clear();
    m_mapOfImages.clear();
    this->m_trainKeyPoints.clear();
    this->m_trainPoints.clear();
  }

  m_currentImageId++;

  //Save the correspondence keypoint class_id with the training image_id in a map.
  //Used to display the matching with all the training images.
  for(std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
    m_mapOfImageId[it->class_id] = m_currentImageId;
  }

  //Save the image in a map at a specific image_id
  m_mapOfImages[m_currentImageId] = I;

  //Append reference lists
  this->m_trainKeyPoints.insert(this->m_trainKeyPoints.end(), trainKeyPoints.begin(), trainKeyPoints.end());
  if(!append) {
    trainDescriptors.copyTo(this->m_trainDescriptors);
  } else {
    this->m_trainDescriptors.push_back(trainDescriptors);
  }
  this->m_trainPoints.insert(this->m_trainPoints.end(), points3f.begin(), points3f.end());


  //Convert OpenCV types to ViSP types for compatibility
  vpConvert::convertFromOpenCV(this->m_trainKeyPoints, referenceImagePointsList);
  vpConvert::convertFromOpenCV(this->m_trainPoints, m_trainVpPoints);

  _reference_computed = true;
}

void vpKeyPoint::compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point) {
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate.pt.x, candidate.pt.y, xc, yc);
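  //Depth from ray/plane intersection: the normalized coordinates (xc, yc)
  //back-project to the camera-frame ray (X, Y, Z) = Z*(xc, yc, 1); substituting
  //into the plane equation A*X + B*Y + C*Z + D = 0 gives Z*(A*xc + B*yc + C) = -D,
  //hence the depth Z computed below.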
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point = cv::Point3f((float) point_obj[0], (float) point_obj[1], (float) point_obj[2]);
}

void vpKeyPoint::compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi,
                           const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, vpPoint &point) {
  /* compute plane equation */
  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
  vpPoint pts[3];
  pts[0] = *it_roi;
  ++it_roi;
  pts[1] = *it_roi;
  ++it_roi;
  pts[2] = *it_roi;
  vpPlane Po(pts[0], pts[1], pts[2]);
  double xc = 0.0, yc = 0.0;
  vpPixelMeterConversion::convertPoint(cam, candidate, xc, yc);
  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
  double X = xc * Z;
  double Y = yc * Z;
  vpColVector point_cam(4);
  point_cam[0] = X;
  point_cam[1] = Y;
  point_cam[2] = Z;
  point_cam[3] = 1;
  vpColVector point_obj(4);
  point_obj = cMo.inverse() * point_cam;
  point.setWorldCoordinates(point_obj);
}

void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
    std::vector<cv::KeyPoint> &candidates, std::vector<vpPolygon> &polygons, std::vector<std::vector<vpPoint> > &roisPt,
    std::vector<cv::Point3f> &points, cv::Mat *descriptors) {

  std::vector<cv::KeyPoint> candidateToCheck = candidates;
  candidates.clear();
  vpImagePoint iPt;
  cv::Point3f pt;
  cv::Mat desc;

  //Keep track of the original index of each remaining candidate so that the
  //descriptor rows stay aligned with the candidates after erasures
  std::vector<size_t> indexToCheck(candidateToCheck.size());
  for(size_t i = 0; i < indexToCheck.size(); i++) {
    indexToCheck[i] = i;
  }

  size_t cpt1 = 0;
  for (std::vector<vpPolygon>::iterator it1 = polygons.begin(); it1 != polygons.end(); ++it1, cpt1++) {
    size_t cpt2 = 0;

    while (cpt2 < candidateToCheck.size()) {
      iPt.set_ij(candidateToCheck[cpt2].pt.y, candidateToCheck[cpt2].pt.x);
      if (it1->isInside(iPt)) {
        candidates.push_back(candidateToCheck[cpt2]);
        vpKeyPoint::compute3D(candidateToCheck[cpt2], roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if(descriptors != NULL) {
          desc.push_back(descriptors->row((int) indexToCheck[cpt2]));
        }

        //Remove the candidate keypoint which is located on the current polygon
        //(erase both the keypoint and its original index)
        candidateToCheck.erase(candidateToCheck.begin() + (int) cpt2);
        indexToCheck.erase(indexToCheck.begin() + (int) cpt2);
      } else {
        cpt2++;
      }
    }
  }

  if(descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}
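
//Note: each candidate keypoint is assigned to the first polygon that contains it,
//and candidates lying outside every polygon are discarded (candidates is
//overwritten with the kept keypoints only).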

void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
    std::vector<vpImagePoint> &candidates, std::vector<vpPolygon> &polygons, std::vector<std::vector<vpPoint> > &roisPt,
    std::vector<vpPoint> &points, cv::Mat *descriptors) {

  std::vector<vpImagePoint> candidateToCheck = candidates;
  candidates.clear();
  vpPoint pt;
  cv::Mat desc;

  //Keep track of the original index of each remaining candidate so that the
  //descriptor rows stay aligned with the candidates after erasures
  std::vector<size_t> indexToCheck(candidateToCheck.size());
  for(size_t i = 0; i < indexToCheck.size(); i++) {
    indexToCheck[i] = i;
  }

  size_t cpt1 = 0;
  for (std::vector<vpPolygon>::iterator it1 = polygons.begin(); it1 != polygons.end(); ++it1, cpt1++) {
    size_t cpt2 = 0;

    while (cpt2 < candidateToCheck.size()) {
      if (it1->isInside(candidateToCheck[cpt2])) {
        candidates.push_back(candidateToCheck[cpt2]);
        vpKeyPoint::compute3D(candidateToCheck[cpt2], roisPt[cpt1], cam, cMo, pt);
        points.push_back(pt);

        if(descriptors != NULL) {
          desc.push_back(descriptors->row((int) indexToCheck[cpt2]));
        }

        //Remove the candidate keypoint which is located on the current polygon
        //(erase both the keypoint and its original index)
        candidateToCheck.erase(candidateToCheck.begin() + (int) cpt2);
        indexToCheck.erase(indexToCheck.begin() + (int) cpt2);
      } else {
        cpt2++;
      }
    }
  }

  if(descriptors != NULL) {
    desc.copyTo(*descriptors);
  }
}

bool vpKeyPoint::computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
                             const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
                             double &elapsedTime, bool (*func)(vpHomogeneousMatrix *)) {
  double t = vpTime::measureTimeMs();

  if(imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    std::cerr << "Not enough points to compute the pose (at least 4 points are needed)." << std::endl;

    return false;
  }

  cv::Mat cameraMatrix =
      (cv::Mat_<double>(3, 3) << cam.get_px(), 0, cam.get_u0(), 0, cam.get_py(), cam.get_v0(), 0, 0, 1);
  cv::Mat rvec, tvec;
  cv::Mat distCoeffs;

  try {
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
    //OpenCV 3.0.0 (2014/12/12)
    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                       rvec, tvec, false, m_nbRansacIterations, (float) m_ransacReprojectionError,
                       0.99, //confidence = 0.99 (default): the probability that the algorithm produces a useful result
                       inlierIndex,
                       cv::SOLVEPNP_ITERATIVE);
    //SOLVEPNP_ITERATIVE (default): iterative method based on Levenberg-Marquardt optimization.
    //In this case the function finds a pose that minimizes the reprojection error, that is the sum of squared distances
    //between the observed projections imagePoints and the projected (using projectPoints()) objectPoints.
    //SOLVEPNP_P3P: method based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang, "Complete Solution Classification
    //for the Perspective-Three-Point Problem". In this case the function requires exactly four object and image points.
    //SOLVEPNP_EPNP: method introduced by F. Moreno-Noguer, V. Lepetit and P. Fua in the paper "EPnP: Efficient
    //Perspective-n-Point Camera Pose Estimation".
    //SOLVEPNP_DLS: method based on the paper of Joel A. Hesch and Stergios I. Roumeliotis, "A Direct Least-Squares (DLS)
    //Method for PnP".
    //SOLVEPNP_UPNP: method based on the paper of A. Penate-Sanchez, J. Andrade-Cetto, F. Moreno-Noguer,
    //"Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation". In this case the function also
    //estimates the parameters f_x and f_y assuming that both have the same value. Then the cameraMatrix is updated with the
    //estimated focal length.
#else
    int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
    if(m_useConsensusPercentage) {
      nbInlierToReachConsensus = (int) (m_ransacConsensusPercentage / 100.0 * (double) m_queryFilteredKeyPoints.size());
    }

    cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
                       rvec, tvec, false, m_nbRansacIterations,
                       (float) m_ransacReprojectionError, nbInlierToReachConsensus,
                       inlierIndex);
#endif
  } catch (cv::Exception &e) {
    std::cerr << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }
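  //OpenCV returns the rotation as a Rodrigues (axis-angle) vector, which is the
  //same parameterization as ViSP's theta-u rotation vector, so the conversion
  //below is direct.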
  vpTranslationVector translationVec(tvec.at<double>(0),
                                     tvec.at<double>(1), tvec.at<double>(2));
  vpThetaUVector thetaUVector(rvec.at<double>(0), rvec.at<double>(1),
                              rvec.at<double>(2));
  cMo = vpHomogeneousMatrix(translationVec, thetaUVector);

  if(func != NULL) {
    //Check the final pose returned by the RANSAC pose estimation, as in some rare
    //cases we can converge toward a final cMo that does not respect the pose
    //criterion even if the minimal set of 4 points picked by RANSAC respects it.
    if(!func(&cMo)) {
      elapsedTime = (vpTime::measureTimeMs() - t);
      return false;
    }
  }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return true;
}

bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
                             std::vector<vpPoint> &inliers, double &elapsedTime, bool (*func)(vpHomogeneousMatrix *)) {
  double t = vpTime::measureTimeMs();

  if(objectVpPoints.size() < 4) {
    elapsedTime = (vpTime::measureTimeMs() - t);
    std::cerr << "Not enough points to compute the pose (at least 4 points are needed)." << std::endl;

    return false;
  }

  vpPose pose;
  pose.setCovarianceComputation(true);

  for(std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
    pose.addPoint(*it);
  }

  unsigned int nbInlierToReachConsensus = (unsigned int) m_nbRansacMinInlierCount;
  if(m_useConsensusPercentage) {
    nbInlierToReachConsensus = (unsigned int) (m_ransacConsensusPercentage / 100.0 *
                               (double) m_queryFilteredKeyPoints.size());
  }

  pose.setRansacNbInliersToReachConsensus(nbInlierToReachConsensus);
  pose.setRansacThreshold(m_ransacThreshold);
  pose.setRansacMaxTrials(m_nbRansacIterations);

  bool isRansacPoseEstimationOk = false;
  try {
    pose.setCovarianceComputation(m_computeCovariance);
    isRansacPoseEstimationOk = pose.computePose(vpPose::RANSAC, cMo, func);
    inliers = pose.getRansacInliers();
    if(m_computeCovariance) {
      m_covarianceMatrix = pose.getCovarianceMatrix();
    }
  } catch(vpException &e) {
    std::cerr << "e=" << e.what() << std::endl;
    elapsedTime = (vpTime::measureTimeMs() - t);
    return false;
  }

  if(func != NULL && isRansacPoseEstimationOk) {
    //Check the final pose returned by the RANSAC VVS pose estimation, as in some
    //rare cases we can converge toward a final cMo that does not respect the pose
    //criterion even if the minimal set of 4 points picked by RANSAC respects it.
    if(!func(&cMo)) {
      elapsedTime = (vpTime::measureTimeMs() - t);
      return false;
    }
  }

  elapsedTime = (vpTime::measureTimeMs() - t);
  return isRansacPoseEstimationOk;
}

double vpKeyPoint::computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
                                              const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est) {
  if(matchKeyPoints.size() == 0) {
    //return std::numeric_limits<double>::max(); // creates an error under Windows unless we add #undef max
    return DBL_MAX;
  }

  std::vector<double> errors(matchKeyPoints.size());
  size_t cpt = 0;
  vpPoint pt;
  for(std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
      it != matchKeyPoints.end(); ++it, cpt++) {
    pt.set_oX(it->second.x);
    pt.set_oY(it->second.y);
    pt.set_oZ(it->second.z);
    pt.project(cMo_est);
    double u = 0.0, v = 0.0;
    vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), u, v);
    errors[cpt] = std::sqrt((u-it->first.pt.x)*(u-it->first.pt.x) + (v-it->first.pt.y)*(v-it->first.pt.y));
  }

  return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
}

void vpKeyPoint::createImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
                                     vpImage<unsigned char> &IMatching) {
  //Image matching side by side
  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
  unsigned int height = (std::max)(IRef.getHeight(), ICurrent.getHeight());

  IMatching = vpImage<unsigned char>(height, width);
}

void vpKeyPoint::createImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching) {
  //Nb images in the training database + the current image we want to detect the object in
  unsigned int nbImg = (unsigned int) (m_mapOfImages.size() + 1);

  if(m_mapOfImages.empty()) {
    return;
  }

  if(nbImg == 2) {
    //Only one training image, so we display them side by side
    createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  } else {
    //Multiple training images, display them as a mosaic image.
    //round(std::sqrt((double) nbImg)); the VC++ compiler does not provide round(), so the next line is used instead.
    //Different implementations of round exist; here we round to the closest integer (not valid for negative numbers).
    unsigned int nbImgSqrt = (unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);

    //Number of columns in the mosaic grid
    unsigned int nbWidth = nbImgSqrt;
    //Number of rows in the mosaic grid
    unsigned int nbHeight = nbImgSqrt;

    //Deal with a non-square mosaic grid when the total number of images is not a perfect square
    if(nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth();
    unsigned int maxH = ICurrent.getHeight();
    for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it) {
      if(maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if(maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    IMatching = vpImage<unsigned char>(maxH * nbHeight, maxW * nbWidth);
  }
}

void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const vpRect &rectangle) {
  cv::Mat matImg;
  vpImageConvert::convert(I, matImg, false);
  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);

  if(rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
    cv::Point leftTop((int) rectangle.getLeft(), (int) rectangle.getTop()), rightBottom((int) rectangle.getRight(),
                      (int) rectangle.getBottom());
    cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
  } else {
    mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U);
  }

  detect(matImg, keyPoints, elapsedTime, mask);
}

void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
                        const cv::Mat &mask) {
  double t = vpTime::measureTimeMs();
  keyPoints.clear();

  for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin(); it != m_detectors.end(); ++it) {
    std::vector<cv::KeyPoint> kp;
    it->second->detect(matImg, kp, mask);
    keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
  }

  elapsedTime = vpTime::measureTimeMs() - t;
}

void vpKeyPoint::display(const vpImage<unsigned char> &IRef,
                         const vpImage<unsigned char> &ICurrent, unsigned int size) {
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);
  std::vector<vpImagePoint> vpTrainImageKeyPoints;
  getTrainKeyPoints(vpTrainImageKeyPoints);

  for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
  }
}

void vpKeyPoint::display(const vpImage<unsigned char> &ICurrent, unsigned int size, const vpColor &color) {
  std::vector<vpImagePoint> vpQueryImageKeyPoints;
  getQueryKeyPoints(vpQueryImageKeyPoints);

  for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching,
                                 unsigned int crossSize, unsigned int lineThickness, const vpColor &color) {
  bool randomColor = (color == vpColor::none);
  srand((unsigned int) time(NULL));
  vpColor currentColor = color;

  std::vector<vpImagePoint> queryImageKeyPoints;
  getQueryKeyPoints(queryImageKeyPoints);
  std::vector<vpImagePoint> trainImageKeyPoints;
  getTrainKeyPoints(trainImageKeyPoints);

  vpImagePoint leftPt, rightPt;
  for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
    if(randomColor) {
      currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
    }

    leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
    rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
                           queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
    vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
    vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
    vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
  }
}

void vpKeyPoint::displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
                                 const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize, unsigned int lineThickness) {
  if(m_mapOfImages.empty()) {
    //No training images, so return
    return;
  }

  //Nb images in the training database + the current image we want to detect the object in
  int nbImg = (int) (m_mapOfImages.size() + 1);

  if(nbImg == 2) {
    //Only one training image, so we display the matching result side-by-side
    displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
  } else {
    //Multiple training images, display them as a mosaic image.
    //round(std::sqrt((double) nbImg)); the VC++ compiler does not provide round(), so the next line is used instead.
    //Different implementations of round exist; here we round to the closest integer (not valid for negative numbers).
    int nbImgSqrt = (int) std::floor(std::sqrt((double) nbImg) + 0.5);
    int nbWidth = nbImgSqrt;
    int nbHeight = nbImgSqrt;

    if(nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    std::map<int, int> mapOfImageIdIndex;
    int cpt = 0;
    unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
    for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it, cpt++) {
      mapOfImageIdIndex[it->first] = cpt;

      if(maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if(maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    //Index of the current image in the grid, chosen so that the current image
    //sits in the center square of the mosaic
    int medianI = nbHeight / 2;
    int medianJ = nbWidth / 2;
    int medianIndex = medianI * nbWidth + medianJ;
    for(std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
      vpImagePoint topLeftCorner;
      int current_class_id_index = 0;
      if(mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
      } else {
        //Shift by one the index of the training images that come after the current image
        current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
      }

      int indexI = current_class_id_index / nbWidth;
      int indexJ = current_class_id_index - (indexI * nbWidth);
      topLeftCorner.set_ij((int)maxH*indexI, (int)maxW*indexJ);

      //Display a cross for the keypoints in the learning database
      vpDisplay::displayCross(IMatching, (int) (it->pt.y + topLeftCorner.get_i()), (int) (it->pt.x + topLeftCorner.get_j()),
                              crossSize, vpColor::red);
    }

    vpImagePoint topLeftCorner((int)maxH*medianI, (int)maxW*medianJ);
    for(std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
      //Display a cross for the keypoints detected in the current image
      vpDisplay::displayCross(IMatching, (int) (it->pt.y + topLeftCorner.get_i()), (int) (it->pt.x + topLeftCorner.get_j()),
                              crossSize, vpColor::red);
    }
    for(std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin();
        it != ransacInliers.end(); ++it) {
      //Display a green circle for the RANSAC inliers
      vpDisplay::displayCircle(IMatching, (int) (it->get_v() + topLeftCorner.get_i()), (int) (it->get_u() +
                               topLeftCorner.get_j()), 4, vpColor::green);
    }
    for(std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
      //Display a red circle for the RANSAC outliers
      vpDisplay::displayCircle(IMatching, (int) (it->get_i() + topLeftCorner.get_i()), (int) (it->get_j() +
                               topLeftCorner.get_j()), 4, vpColor::red);
    }

    for(std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> >::const_iterator it = m_matchQueryToTrainKeyPoints.begin();
        it != m_matchQueryToTrainKeyPoints.end(); ++it) {
      int current_class_id = 0;
      if(mapOfImageIdIndex[m_mapOfImageId[it->second.class_id]] < medianIndex) {
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[it->second.class_id]];
      } else {
        //Shift by one the index of the training images that come after the current image
        current_class_id = mapOfImageIdIndex[m_mapOfImageId[it->second.class_id]] + 1;
      }

      int indexI = current_class_id / nbWidth;
      int indexJ = current_class_id - (indexI * nbWidth);

      vpImagePoint end((int)maxH*indexI + it->second.pt.y, (int)maxW*indexJ + it->second.pt.x);
      vpImagePoint start((int)maxH*medianI + it->first.pt.y, (int)maxW*medianJ + it->first.pt.x);

      //Draw a line between the matched keypoints detected in the current image
      //and those detected in the training images
      vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
    }
  }
}

void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
                         double &elapsedTime) {
  cv::Mat matImg;
  vpImageConvert::convert(I, matImg, false);
  extract(matImg, keyPoints, descriptors, elapsedTime);
}

void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
                         double &elapsedTime) {
  double t = vpTime::measureTimeMs();
  bool first = true;

  for(std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
      it != m_extractors.end(); ++it) {
    if(first) {
      first = false;
      it->second->compute(matImg, keyPoints, descriptors);
    } else {
      cv::Mat desc;
      it->second->compute(matImg, keyPoints, desc);
      if(descriptors.empty()) {
        desc.copyTo(descriptors);
      } else {
        cv::hconcat(descriptors, desc, descriptors);
      }
    }
  }

  if(keyPoints.size() != (size_t) descriptors.rows) {
    std::cerr << "keyPoints.size() != (size_t) descriptors.rows" << std::endl;
  }
  elapsedTime = vpTime::measureTimeMs() - t;
}
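
//Note: with several extractors, the descriptors are concatenated horizontally
//(one row per keypoint). This assumes every extractor keeps the keypoint list
//unchanged; an extractor that removes keypoints will trigger the size warning
//above and misalign the concatenated rows.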

void vpKeyPoint::filterMatches() {
  m_matchQueryToTrainKeyPoints.clear();

  std::vector<cv::KeyPoint> queryKpts;
  std::vector<cv::Point3f> trainPts;
  std::vector<cv::DMatch> m;

  if(m_useKnn) {
    double max_dist = 0;
    //double min_dist = std::numeric_limits<double>::max(); // creates an error under Windows unless we add #undef max
    double min_dist = DBL_MAX;
    double mean = 0.0;
    std::vector<double> distance_vec(m_knnMatches.size());

    if(m_filterType == stdAndRatioDistanceThreshold) {
      for(size_t i = 0; i < m_knnMatches.size(); i++) {
        double dist = m_knnMatches[i][0].distance;
        mean += dist;
        distance_vec[i] = dist;

        if (dist < min_dist) {
          min_dist = dist;
        }
        if (dist > max_dist) {
          max_dist = dist;
        }
      }
      mean /= m_queryDescriptors.rows;
    }

    double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
    double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
    double threshold = min_dist + stdev;

    for(size_t i = 0; i < m_knnMatches.size(); i++) {
      if(m_knnMatches[i].size() >= 2) {
        //Calculate the ratio of the descriptor distances between the two nearest neighbors of the keypoint
        float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
//      float ratio = std::sqrt((vecMatches[i][0].distance * vecMatches[i][0].distance)
//                  / (vecMatches[i][1].distance * vecMatches[i][1].distance));
        double dist = m_knnMatches[i][0].distance;

        if(ratio < m_matchingRatioThreshold || (m_filterType == stdAndRatioDistanceThreshold && dist < threshold)) {
          m.push_back(cv::DMatch((int) queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));

          if(!m_trainPoints.empty()) {
            trainPts.push_back(m_trainPoints[(size_t)m_knnMatches[i][0].trainIdx]);
          }
          queryKpts.push_back(m_queryKeyPoints[(size_t)m_knnMatches[i][0].queryIdx]);

//        //Add the pair with the correspondence between the detected keypoints and the one matched in the train keypoints list
//        m_matchQueryToTrainKeyPoints.push_back(std::pair<cv::KeyPoint, cv::KeyPoint>(
//                                               m_queryKeyPoints[(size_t)m_knnMatches[i][0].queryIdx],
//                                               m_trainKeyPoints[(size_t)m_knnMatches[i][0].trainIdx]));
        }
      }
    }
  } else {
    double max_dist = 0;
    //double min_dist = std::numeric_limits<double>::max(); // creates an error under Windows unless we add #undef max
    double min_dist = DBL_MAX;
    double mean = 0.0;
    std::vector<double> distance_vec(m_matches.size());
    for(size_t i = 0; i < m_matches.size(); i++) {
      double dist = m_matches[i].distance;
      mean += dist;
      distance_vec[i] = dist;

      if (dist < min_dist) {
        min_dist = dist;
      }
      if (dist > max_dist) {
        max_dist = dist;
      }
    }
    mean /= m_queryDescriptors.rows;

    double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
    double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);

    //Define a threshold: we keep all matches whose descriptor distance falls below a factor of the
    //minimum descriptor distance (over all the query keypoints),
    //or below the minimum descriptor distance + the standard deviation (computed over all the query descriptor distances)
    double threshold = m_filterType == constantFactorDistanceThreshold ? m_matchingFactorThreshold * min_dist : min_dist + stdev;

    for (size_t i = 0; i < m_matches.size(); i++) {
      if(m_matches[i].distance <= threshold) {
        m.push_back(cv::DMatch((int) queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));

        if(!m_trainPoints.empty()) {
          trainPts.push_back(m_trainPoints[(size_t)m_matches[i].trainIdx]);
        }
        queryKpts.push_back(m_queryKeyPoints[(size_t)m_matches[i].queryIdx]);

//      //Add the pair with the correspondence between the detected keypoints and the one matched in the train keypoints list
//      m_matchQueryToTrainKeyPoints.push_back(std::pair<cv::KeyPoint, cv::KeyPoint>(
//                                             m_queryKeyPoints[(size_t)m_matches[i].queryIdx], m_trainKeyPoints[(size_t)m_matches[i].trainIdx]));
      }
    }
  }

  //Eliminate matches where multiple query keypoints are matched to the same train keypoint
  std::vector<cv::DMatch> mTmp;
  std::vector<cv::Point3f> trainPtsTmp;
  std::vector<cv::KeyPoint> queryKptsTmp;

  std::map<int, int> mapOfTrainIdx;
  //Count the number of query points matched to the same train point
  for(std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
    mapOfTrainIdx[it->trainIdx]++;
  }

  //Keep matches with only one correspondence
  for(std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
    if(mapOfTrainIdx[it->trainIdx] == 1) {
      mTmp.push_back(cv::DMatch((int) queryKptsTmp.size(), it->trainIdx, it->distance));

      if(!m_trainPoints.empty()) {
        trainPtsTmp.push_back(m_trainPoints[(size_t) it->trainIdx]);
      }
      queryKptsTmp.push_back(queryKpts[(size_t) it->queryIdx]);

      //Add the pair with the correspondence between the detected keypoints and the one matched in the train keypoints list
      m_matchQueryToTrainKeyPoints.push_back(std::pair<cv::KeyPoint, cv::KeyPoint>(
                                             queryKpts[(size_t) it->queryIdx],
                                             m_trainKeyPoints[(size_t) it->trainIdx]));
    }
  }

  m_filteredMatches = mTmp;
  m_objectFilteredPoints = trainPtsTmp;
  m_queryFilteredKeyPoints = queryKptsTmp;
}
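
//Note: the knn ratio criterion above is Lowe's ratio test (D. Lowe, "Distinctive
//Image Features from Scale-Invariant Keypoints", IJCV 2004): a match is kept only
//if the best descriptor distance is clearly smaller than the second-best one,
//which rejects ambiguous correspondences.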

void vpKeyPoint::getObjectPoints(std::vector<cv::Point3f> &objectPoints) const {
  objectPoints = m_objectFilteredPoints;
}

void vpKeyPoint::getObjectPoints(std::vector<vpPoint> &objectPoints) const {
  vpConvert::convertFromOpenCV(m_objectFilteredPoints, objectPoints);
}

void vpKeyPoint::getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const {
  keyPoints = m_queryFilteredKeyPoints;
}

void vpKeyPoint::getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints) const {
  keyPoints = currentImagePointsList;
}

void vpKeyPoint::getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const {
  keyPoints = m_trainKeyPoints;
}

void vpKeyPoint::getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const {
  keyPoints = referenceImagePointsList;
}

void vpKeyPoint::getTrainPoints(std::vector<cv::Point3f> &points) const {
  points = m_trainPoints;
}

void vpKeyPoint::getTrainPoints(std::vector<vpPoint> &points) const {
  points = m_trainVpPoints;
}

void vpKeyPoint::init() {
#if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000) // Requires 2.4.0 <= OpenCV < 3.0.0
  //The following line must be called in order to use SIFT or SURF
  if (!cv::initModule_nonfree()) {
    std::cerr << "Cannot init module non free, SIFT or SURF cannot be used."
              << std::endl;
  }
#endif

  initDetectors(m_detectorNames);
  initExtractors(m_extractorNames);
  initMatcher(m_matcherName);
}

void vpKeyPoint::initDetector(const std::string &detectorName) {
  // TODO: Add a function to process detection in pyramid images with OpenCV 3.0.0
  // since there seems to be no equivalent function (2014/12/11)
  // std::string pyramid = "Pyramid";
  // std::size_t pos = detectorName.find(pyramid);
  // if(pos != std::string::npos) {
  //   std::string sub = detectorName.substr(pos + pyramid.size());
  //   detectors[detectorName] = cv::Ptr<cv::FeatureDetector>(
  //       new cv::PyramidAdaptedFeatureDetector(cv::FeatureDetector::create<cv::FeatureDetector>(sub), 2));
  // } else {
  //   detectors[detectorName] = cv::FeatureDetector::create<cv::FeatureDetector>(detectorName);
  // }
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
  m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);

  if(m_detectors[detectorName] == NULL) {
    std::stringstream ss_msg;
    ss_msg << "Failed to initialize the detector: " << detectorName << ", or it is not available in OpenCV version: "
           << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
    throw vpException(vpException::fatalError, ss_msg.str());
  }
#else
  //TODO: Add a pyramidal feature detection
  std::string detectorNameTmp = detectorName;
  std::string pyramid = "Pyramid";
  std::size_t pos = detectorName.find(pyramid);
  if(pos != std::string::npos) {
    detectorNameTmp = detectorName.substr(pos + pyramid.size());
  }

  if(detectorNameTmp == "SIFT") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_detectors[detectorNameTmp] = cv::xfeatures2d::SIFT::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Failed to initialize the detector: SIFT. OpenCV version "
           << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xfeatures2d module.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
  } else if(detectorNameTmp == "SURF") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_detectors[detectorNameTmp] = cv::xfeatures2d::SURF::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Failed to initialize the detector: SURF. OpenCV version "
           << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xfeatures2d module.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
  } else if(detectorNameTmp == "FAST") {
    m_detectors[detectorNameTmp] = cv::FastFeatureDetector::create();
  } else if(detectorNameTmp == "MSER") {
    m_detectors[detectorNameTmp] = cv::MSER::create();
  } else if(detectorNameTmp == "ORB") {
    m_detectors[detectorNameTmp] = cv::ORB::create();
  } else if(detectorNameTmp == "BRISK") {
    m_detectors[detectorNameTmp] = cv::BRISK::create();
  } else if(detectorNameTmp == "KAZE") {
    m_detectors[detectorNameTmp] = cv::KAZE::create();
  } else if(detectorNameTmp == "AKAZE") {
    m_detectors[detectorNameTmp] = cv::AKAZE::create();
  } else if(detectorNameTmp == "GFTT") {
    m_detectors[detectorNameTmp] = cv::GFTTDetector::create();
  } else if(detectorNameTmp == "SimpleBlob") {
    m_detectors[detectorNameTmp] = cv::SimpleBlobDetector::create();
  } else if(detectorNameTmp == "STAR") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_detectors[detectorNameTmp] = cv::xfeatures2d::StarDetector::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Failed to initialize the detector: STAR. OpenCV version "
           << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xfeatures2d module.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
  } else {
    std::cerr << "The detector: " << detectorNameTmp << " is not available." << std::endl;
  }

  if(m_detectors[detectorNameTmp] == NULL) {
    std::stringstream ss_msg;
    ss_msg << "Failed to initialize the detector: " << detectorNameTmp << ", or it is not available in OpenCV version: "
           << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
    throw vpException(vpException::fatalError, ss_msg.str());
  }
// m_detectors[detectorName] = cv::FeatureDetector::create<cv::FeatureDetector>(detectorName);
#endif
}

void vpKeyPoint::initDetectors(const std::vector<std::string> &detectorNames) {
  for(std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
    initDetector(*it);
  }
}

void vpKeyPoint::initExtractor(const std::string &extractorName) {
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
  m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
#else
  if(extractorName == "SIFT") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Failed to initialize the extractor: SIFT. OpenCV version "
           << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xfeatures2d module.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
  } else if(extractorName == "SURF") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_extractors[extractorName] = cv::xfeatures2d::SURF::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Failed to initialize the extractor: SURF. OpenCV version "
           << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xfeatures2d module.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
  } else if(extractorName == "ORB") {
    m_extractors[extractorName] = cv::ORB::create();
  } else if(extractorName == "BRISK") {
    m_extractors[extractorName] = cv::BRISK::create();
  } else if(extractorName == "FREAK") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Failed to initialize the extractor: FREAK. OpenCV version "
           << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xfeatures2d module.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
  } else if(extractorName == "BRIEF") {
#ifdef VISP_HAVE_OPENCV_XFEATURES2D
    m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
#else
    std::stringstream ss_msg;
    ss_msg << "Failed to initialize the extractor: BRIEF. OpenCV version "
           << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xfeatures2d module.";
    throw vpException(vpException::fatalError, ss_msg.str());
#endif
  } else {
    std::cerr << "The extractor: " << extractorName << " is not available." << std::endl;
  }
// m_extractors[extractorName] = cv::DescriptorExtractor::create<cv::DescriptorExtractor>(extractorName);
#endif

  if(m_extractors[extractorName] == NULL) {
    std::stringstream ss_msg;
    ss_msg << "Failed to initialize the extractor: " << extractorName << ", or it is not available in OpenCV version: "
           << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
    throw vpException(vpException::fatalError, ss_msg.str());
  }
}

void vpKeyPoint::initExtractors(const std::vector<std::string> &extractorNames) {
  for(std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
    initExtractor(*it);
  }
}

void vpKeyPoint::initMatcher(const std::string &matcherName) {
  m_matcher = cv::DescriptorMatcher::create(matcherName);

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400)
  if(m_matcher != NULL && !m_useKnn && matcherName == "BruteForce") {
    m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
  }
#endif

  if(m_matcher == NULL) {
    std::stringstream ss_msg;
    ss_msg << "Failed to initialize the matcher: " << matcherName << ", or it is not available in OpenCV version: "
           << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
    throw vpException(vpException::fatalError, ss_msg.str());
  }
}
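
//Note: OpenCV's brute-force cross-check filtering is only compatible with plain
//(k = 1) matching, which is why crossCheck is set only when knn is not used.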

void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
                                     vpImage<unsigned char> &IMatching) {
  vpImagePoint topLeftCorner(0, 0);
  IMatching.insert(IRef, topLeftCorner);
  topLeftCorner = vpImagePoint(0, IRef.getWidth());
  IMatching.insert(ICurrent, topLeftCorner);
}

void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching) {
  //Nb images in the training database + the current image we want to detect the object in
  int nbImg = (int) (m_mapOfImages.size() + 1);

  if(nbImg == 2) {
    //Only one training image, so we display them side by side
    insertImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
  } else {
    //Multiple training images, display them as a mosaic image.
    //round(std::sqrt((double) nbImg)); the VC++ compiler does not provide round(), so the next line is used instead.
    //Different implementations of round exist; here we round to the closest integer (not valid for negative numbers).
    int nbImgSqrt = (int) std::floor(std::sqrt((double) nbImg) + 0.5);
    int nbWidth = nbImgSqrt;
    int nbHeight = nbImgSqrt;

    if(nbImgSqrt * nbImgSqrt < nbImg) {
      nbWidth++;
    }

    unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
    for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it) {
      if(maxW < it->second.getWidth()) {
        maxW = it->second.getWidth();
      }

      if(maxH < it->second.getHeight()) {
        maxH = it->second.getHeight();
      }
    }

    //Index of the current image in the grid, chosen so that the current image
    //sits in the center square of the mosaic
    int medianI = nbHeight / 2;
    int medianJ = nbWidth / 2;
    int medianIndex = medianI * nbWidth + medianJ;

    int cpt = 0;
    for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it, cpt++) {
      int local_cpt = cpt;
      if(cpt >= medianIndex) {
        //Shift by one the index of the training images that come after the current image
        local_cpt++;
      }
      int indexI = local_cpt / nbWidth;
      int indexJ = local_cpt - (indexI * nbWidth);
      vpImagePoint topLeftCorner((int)maxH*indexI, (int)maxW*indexJ);

      IMatching.insert(it->second, topLeftCorner);
    }

    vpImagePoint topLeftCorner((int)maxH*medianI, (int)maxW*medianJ);
    IMatching.insert(ICurrent, topLeftCorner);
  }
}

#ifdef VISP_HAVE_XML2
void vpKeyPoint::loadConfigFile(const std::string &configFile) {
  vpXmlConfigParserKeyPoint xmlp;

  try {
    //Reset detector and extractor
    m_detectorNames.clear();
    m_extractorNames.clear();
    m_detectors.clear();
    m_extractors.clear();

    std::cout << " *********** Parsing XML for configuration for vpKeyPoint ************ " << std::endl;
    xmlp.parse(configFile);

    m_detectorNames.push_back(xmlp.getDetectorName());
    m_extractorNames.push_back(xmlp.getExtractorName());
    m_matcherName = xmlp.getMatcherName();

    switch(xmlp.getMatchingMethod()) {
    case vpXmlConfigParserKeyPoint::constantFactorDistanceThreshold:
      m_filterType = constantFactorDistanceThreshold;
      break;

    case vpXmlConfigParserKeyPoint::stdDistanceThreshold:
      m_filterType = stdDistanceThreshold;
      break;

    case vpXmlConfigParserKeyPoint::ratioDistanceThreshold:
      m_filterType = ratioDistanceThreshold;
      break;

    case vpXmlConfigParserKeyPoint::stdAndRatioDistanceThreshold:
      m_filterType = stdAndRatioDistanceThreshold;
      break;

    case vpXmlConfigParserKeyPoint::noFilterMatching:
      m_filterType = noFilterMatching;
      break;

    default:
      break;
    }

    m_matchingFactorThreshold = xmlp.getMatchingFactorThreshold();
    m_matchingRatioThreshold = xmlp.getMatchingRatioThreshold();

    m_useRansacVVS = xmlp.getUseRansacVVSPoseEstimation();
    m_useConsensusPercentage = xmlp.getUseRansacConsensusPercentage();
    m_nbRansacIterations = xmlp.getNbRansacIterations();
    m_ransacReprojectionError = xmlp.getRansacReprojectionError();
    m_nbRansacMinInlierCount = xmlp.getNbRansacMinInlierCount();
    m_ransacThreshold = xmlp.getRansacThreshold();
    m_ransacConsensusPercentage = xmlp.getRansacConsensusPercentage();

    if(m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
      m_useKnn = true;
    } else {
      m_useKnn = false;
    }

    init();
  }
  catch(...) {
    throw vpException(vpException::ioError, "Cannot open the XML file \"%s\"", configFile.c_str());
  }
}
#endif

1718 void vpKeyPoint::loadLearningData(const std::string &filename, const bool binaryMode, const bool append) {
1719  int startClassId = 0;
1720  int startImageId = 0;
1721  if(!append) {
1722  m_trainKeyPoints.clear();
1723  m_trainPoints.clear();
1724  m_mapOfImageId.clear();
1725  m_mapOfImages.clear();
1726  } else {
1727  //In append case, find the max index of keypoint class Id
1728  for(std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
1729  if(startClassId < it->first) {
1730  startClassId = it->first;
1731  }
1732  }
1733 
1734  //In append case, find the max index of images Id
1735  for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it) {
1736  if(startImageId < it->first) {
1737  startImageId = it->first;
1738  }
1739  }
1740  }
1741 
1742  //Get parent directory
1743  std::string parent = vpIoTools::getParent(filename);
1744  if(!parent.empty()) {
1745  parent += "/";
1746  }
1747 
1748  if(binaryMode) {
1749  std::ifstream file(filename.c_str(), std::ifstream::binary);
1750  if(!file.is_open()){
1751  throw vpException(vpException::ioError, "Cannot open the file.");
1752  }
1753 
1754  //Read info about training images
1755  int nbImgs = 0;
1756  file.read((char *)(&nbImgs), sizeof(nbImgs));
1757 
1758  for(int i = 0; i < nbImgs; i++) {
1759  //Read image_id
1760  int id = 0;
1761  file.read((char *)(&id), sizeof(id));
1762 
1763  int length = 0;
1764  file.read((char *)(&length), sizeof(length));
1765  //Will contain the path to the training images
1766  char* path = new char[length + 1]; //allocated dynamically, since variable length arrays are not standard C++
1767 
1768  for(int cpt = 0; cpt < length; cpt++) {
1769  char c;
1770  file.read((char *)(&c), sizeof(c));
1771  path[cpt] = c;
1772  }
1773  path[length] = '\0';
1774 
1775  vpImage<unsigned char> I;
1776  if(vpIoTools::isAbsolutePathname(std::string(path))) {
1777  vpImageIo::read(I, path);
1778  } else {
1779  vpImageIo::read(I, parent + path);
1780  }
1781  delete[] path; //free the buffer allocated with new[]
1782  m_mapOfImages[id + startImageId] = I;
1783  }
1784 
1785  //Read whether 3D point information is saved or not
1786  int have3DInfoInt = 0;
1787  file.read((char *)(&have3DInfoInt), sizeof(have3DInfoInt));
1788  bool have3DInfo = have3DInfoInt != 0;
1789 
1790  //Read the number of descriptors
1791  int nRows = 0;
1792  file.read((char *)(&nRows), sizeof(nRows));
1793 
1794  //Read the size of the descriptor
1795  int nCols = 0;
1796  file.read((char *)(&nCols), sizeof(nCols));
1797 
1798  //Read the type of the descriptor
1799  int descriptorType = 5; //CV_32F
1800  file.read((char *)(&descriptorType), sizeof(descriptorType));
1801 
1802  cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
1803  for(int i = 0; i < nRows; i++) {
1804  //Read information about keyPoint
1805  float u, v, size, angle, response;
1806  int octave, class_id, image_id;
1807  file.read((char *)(&u), sizeof(u));
1808  file.read((char *)(&v), sizeof(v));
1809  file.read((char *)(&size), sizeof(size));
1810  file.read((char *)(&angle), sizeof(angle));
1811  file.read((char *)(&response), sizeof(response));
1812  file.read((char *)(&octave), sizeof(octave));
1813  file.read((char *)(&class_id), sizeof(class_id));
1814  file.read((char *)(&image_id), sizeof(image_id));
1815  cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
1816  m_trainKeyPoints.push_back(keyPoint);
1817 
1818  if(image_id != -1) {
1819  //No training images if image_id == -1
1820  m_mapOfImageId[class_id] = image_id + startImageId;
1821  }
1822 
1823  if(have3DInfo) {
1824  //Read oX, oY, oZ
1825  float oX, oY, oZ;
1826  file.read((char *)(&oX), sizeof(oX));
1827  file.read((char *)(&oY), sizeof(oY));
1828  file.read((char *)(&oZ), sizeof(oZ));
1829  m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
1830  }
1831 
1832  for(int j = 0; j < nCols; j++) {
1833  //Read the value of the descriptor
1834  switch(descriptorType) {
1835  case CV_8U:
1836  {
1837  unsigned char value;
1838  file.read((char *)(&value), sizeof(value));
1839  trainDescriptorsTmp.at<unsigned char>(i, j) = value;
1840  }
1841  break;
1842 
1843  case CV_8S:
1844  {
1845  char value;
1846  file.read((char *)(&value), sizeof(value));
1847  trainDescriptorsTmp.at<char>(i, j) = value;
1848  }
1849  break;
1850 
1851  case CV_16U:
1852  {
1853  unsigned short int value;
1854  file.read((char *)(&value), sizeof(value));
1855  trainDescriptorsTmp.at<unsigned short int>(i, j) = value;
1856  }
1857  break;
1858 
1859  case CV_16S:
1860  {
1861  short int value;
1862  file.read((char *)(&value), sizeof(value));
1863  trainDescriptorsTmp.at<short int>(i, j) = value;
1864  }
1865  break;
1866 
1867  case CV_32S:
1868  {
1869  int value;
1870  file.read((char *)(&value), sizeof(value));
1871  trainDescriptorsTmp.at<int>(i, j) = value;
1872  }
1873  break;
1874 
1875  case CV_32F:
1876  {
1877  float value;
1878  file.read((char *)(&value), sizeof(value));
1879  trainDescriptorsTmp.at<float>(i, j) = value;
1880  }
1881  break;
1882 
1883  case CV_64F:
1884  {
1885  double value;
1886  file.read((char *)(&value), sizeof(value));
1887  trainDescriptorsTmp.at<double>(i, j) = value;
1888  }
1889  break;
1890 
1891  default:
1892  {
1893  float value;
1894  file.read((char *)(&value), sizeof(value));
1895  trainDescriptorsTmp.at<float>(i, j) = value;
1896  }
1897  break;
1898  }
1899  }
1900  }
1901 
1902  if(!append || m_trainDescriptors.empty()) {
1903  trainDescriptorsTmp.copyTo(m_trainDescriptors);
1904  } else {
1905  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
1906  }
1907 
1908  file.close();
1909  } else {
1910 #ifdef VISP_HAVE_XML2
1911  xmlDocPtr doc = NULL;
1912  xmlNodePtr root_element = NULL;
1913 
1914  /*
1915  * this initializes the library and checks potential ABI mismatches
1916  * between the version it was compiled for and the actual shared
1917  * library used.
1918  */
1919  LIBXML_TEST_VERSION
1920 
1921  /*parse the file and get the DOM */
1922  doc = xmlReadFile(filename.c_str(), NULL, 0);
1923 
1924  if (doc == NULL) {
1925  throw vpException(vpException::ioError, "Error with file " + filename);
1926  }
1927 
1928  root_element = xmlDocGetRootElement(doc);
1929 
1930  xmlNodePtr first_level_node = NULL;
1931  char *pEnd = NULL;
1932 
1933  int descriptorType = CV_32F; //float
1934  int nRows = 0, nCols = 0;
1935  int i = 0, j = 0;
1936 
1937  cv::Mat trainDescriptorsTmp;
1938 
1939  for (first_level_node = root_element->children; first_level_node;
1940  first_level_node = first_level_node->next) {
1941 
1942  std::string name((char *) first_level_node->name);
1943  if (first_level_node->type == XML_ELEMENT_NODE && name == "TrainingImageInfo") {
1944  xmlNodePtr image_info_node = NULL;
1945 
1946  for (image_info_node = first_level_node->children; image_info_node; image_info_node =
1947  image_info_node->next) {
1948  name = std::string ((char *) image_info_node->name);
1949 
1950  if(name == "trainImg") {
1951  //Read image_id
1952  int id = std::atoi((char *) xmlGetProp(image_info_node, BAD_CAST "image_id"));
1953 
1955  std::string path((char *) image_info_node->children->content);
1956  //Read path to the training images
1957  if(vpIoTools::isAbsolutePathname(std::string(path))) {
1958  vpImageIo::read(I, path);
1959  } else {
1960  vpImageIo::read(I, parent + path);
1961  }
1962 
1963  m_mapOfImages[id + startImageId] = I;
1964  }
1965  }
1966  } else if(first_level_node->type == XML_ELEMENT_NODE && name == "DescriptorsInfo") {
1967  xmlNodePtr descriptors_info_node = NULL;
1968  for (descriptors_info_node = first_level_node->children; descriptors_info_node; descriptors_info_node =
1969  descriptors_info_node->next) {
1970  if (descriptors_info_node->type == XML_ELEMENT_NODE) {
1971  name = std::string ((char *) descriptors_info_node->name);
1972 
1973  if(name == "nrows") {
1974  nRows = std::atoi((char *) descriptors_info_node->children->content);
1975  } else if(name == "ncols") {
1976  nCols = std::atoi((char *) descriptors_info_node->children->content);
1977  } else if(name == "type") {
1978  descriptorType = std::atoi((char *) descriptors_info_node->children->content);
1979  }
1980  }
1981  }
1982 
1983  trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
1984  } else if (first_level_node->type == XML_ELEMENT_NODE && name == "DescriptorInfo") {
1985  xmlNodePtr point_node = NULL;
1986  double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
1987  int octave = 0, class_id = 0, image_id = 0;
1988  double oX = 0.0, oY = 0.0, oZ = 0.0;
1989 
1990  std::stringstream ss;
1991 
1992  for (point_node = first_level_node->children; point_node; point_node =
1993  point_node->next) {
1994  if (point_node->type == XML_ELEMENT_NODE) {
1995  name = std::string ((char *) point_node->name);
1996 
1997  //Read information about keypoints
1998  if(name == "u") {
1999  u = std::strtod((char *) point_node->children->content, &pEnd);
2000  } else if(name == "v") {
2001  v = std::strtod((char *) point_node->children->content, &pEnd);
2002  } else if(name == "size") {
2003  size = std::strtod((char *) point_node->children->content, &pEnd);
2004  } else if(name == "angle") {
2005  angle = std::strtod((char *) point_node->children->content, &pEnd);
2006  } else if(name == "response") {
2007  response = std::strtod((char *) point_node->children->content, &pEnd);
2008  } else if(name == "octave") {
2009  octave = std::atoi((char *) point_node->children->content);
2010  } else if(name == "class_id") {
2011  class_id = std::atoi((char *) point_node->children->content);
2012  cv::KeyPoint keyPoint(cv::Point2f((float) u, (float) v), (float) size,
2013  (float) angle, (float) response, octave, (class_id + startClassId));
2014  m_trainKeyPoints.push_back(keyPoint);
2015  } else if(name == "image_id") {
2016  image_id = std::atoi((char *) point_node->children->content);
2017  if(image_id != -1) {
2018  //No training images if image_id == -1
2019  m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2020  }
2021  } else if (name == "oX") {
2022  oX = std::atof((char *) point_node->children->content);
2023  } else if (name == "oY") {
2024  oY = std::atof((char *) point_node->children->content);
2025  } else if (name == "oZ") {
2026  oZ = std::atof((char *) point_node->children->content);
2027  m_trainPoints.push_back(cv::Point3f((float) oX, (float) oY, (float) oZ));
2028  } else if (name == "desc") {
2029  xmlNodePtr descriptor_value_node = NULL;
2030  j = 0;
2031 
2032  for (descriptor_value_node = point_node->children;
2033  descriptor_value_node; descriptor_value_node =
2034  descriptor_value_node->next) {
2035 
2036  if (descriptor_value_node->type == XML_ELEMENT_NODE) {
2037  //Read descriptors values
2038  std::string parseStr((char *) descriptor_value_node->children->content);
2039  ss.clear();
2040  ss.str(parseStr);
2041 
2042  if(!ss.fail()) {
2043  switch(descriptorType) {
2044  case CV_8U:
2045  {
2046  //Parse the numeric value [0 ; 255] to an int
2047  int parseValue;
2048  ss >> parseValue;
2049  trainDescriptorsTmp.at<unsigned char>(i, j) = (unsigned char) parseValue;
2050  }
2051  break;
2052 
2053  case CV_8S:
2054  //Parse the numeric value [-128 ; 127] to an int
2055  int parseValue;
2056  ss >> parseValue;
2057  trainDescriptorsTmp.at<char>(i, j) = (char) parseValue;
2058  break;
2059 
2060  case CV_16U:
2061  ss >> trainDescriptorsTmp.at<unsigned short int>(i, j);
2062  break;
2063 
2064  case CV_16S:
2065  ss >> trainDescriptorsTmp.at<short int>(i, j);
2066  break;
2067 
2068  case CV_32S:
2069  ss >> trainDescriptorsTmp.at<int>(i, j);
2070  break;
2071 
2072  case CV_32F:
2073  ss >> trainDescriptorsTmp.at<float>(i, j);
2074  break;
2075 
2076  case CV_64F:
2077  ss >> trainDescriptorsTmp.at<double>(i, j);
2078  break;
2079 
2080  default:
2081  ss >> trainDescriptorsTmp.at<float>(i, j);
2082  break;
2083  }
2084  } else {
2085  std::cerr << "Error when converting: " << ss.str() << std::endl;
2086  }
2087 
2088  j++;
2089  }
2090  }
2091  }
2092  }
2093  }
2094  i++;
2095  }
2096  }
2097 
2098  if(!append || m_trainDescriptors.empty()) {
2099  trainDescriptorsTmp.copyTo(m_trainDescriptors);
2100  } else {
2101  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2102  }
2103 
2104  /*free the document */
2105  xmlFreeDoc(doc);
2106 
2107  /*
2108  *Free the global variables that may
2109  *have been allocated by the parser.
2110  */
2111  xmlCleanupParser();
2112 #else
2113  std::cerr << "Error: libxml2 is required!" << std::endl;
2114 #endif
2115  }
2116 
2117  //Convert OpenCV type to ViSP type for compatibility
2118  vpConvert::convertFromOpenCV(m_trainKeyPoints, referenceImagePointsList);
2119  vpConvert::convertFromOpenCV(this->m_trainPoints, m_trainVpPoints);
2120 
2121  //Set _reference_computed to true as we load learning file
2122  _reference_computed = true;
2123 }
2124 
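2125 /*!
2126    Match query descriptors against train descriptors.
2127 
2128    \param trainDescriptors : Train (reference) descriptors.
2129    \param queryDescriptors : Query descriptors.
2130    \param matches : Output list of matches.
2131    \param elapsedTime : Elapsed matching time in ms.
2132  */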
2133 void vpKeyPoint::match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors,
2134  std::vector<cv::DMatch> &matches, double &elapsedTime) {
2135  double t = vpTime::measureTimeMs();
2136 
2137  if(m_useKnn) {
2138  m_knnMatches.clear();
2139  m_matcher->knnMatch(queryDescriptors, trainDescriptors, m_knnMatches, 2);
2140  matches.resize(m_knnMatches.size());
2141  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
2142  } else {
2143  matches.clear();
2144  m_matcher->match(queryDescriptors, trainDescriptors, matches);
2145  }
2146  elapsedTime = vpTime::measureTimeMs() - t;
2147 }
2148 
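2149 /*!
2150    Match keypoints detected in the whole image with the reference keypoints.
2151 
2152    \param I : Input current image.
2153    \return The number of matched keypoints.
2154  */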
2155 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I) {
2156  return matchPoint(I, vpRect());
2157 }
2158 
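2159 /*!
2160    Match keypoints detected in a rectangular region of interest against the
2161    reference keypoints.
2162 
2163    \param I : Input image.
2164    \param iP : Top-left coordinate of the region of interest.
2165    \param height : Height of the region of interest.
2166    \param width : Width of the region of interest.
2167    \return The number of matched keypoints.
2168  */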
2169 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I,
2170  const vpImagePoint &iP,
2171  const unsigned int height, const unsigned int width) {
2172  return matchPoint(I, vpRect(iP, width, height));
2173 }
2174 
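2175 /*!
2176    Match keypoints detected in the image (possibly restricted to a rectangle)
2177    with the reference keypoints.
2178 
2179    \param I : Input image.
2180    \param rectangle : Rectangle defining the region of interest.
2181    \return The number of filtered matched keypoints.
2182  */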
2183 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I,
2184  const vpRect& rectangle) {
2185  if(m_trainDescriptors.empty()) {
2186  std::cerr << "Reference is empty." << std::endl;
2187  if(!_reference_computed) {
2188  std::cerr << "Reference is not computed." << std::endl;
2189  }
2190  std::cerr << "Matching is not possible." << std::endl;
2191 
2192  return 0;
2193  }
2194 
2195  if(m_useAffineDetection) {
2196  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
2197  std::vector<cv::Mat> listOfQueryDescriptors;
2198 
2199  //Detect keypoints and extract descriptors on multiple images
2200  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
2201 
2202  //Flatten the lists of query keypoints detected on each affine view
2203  m_queryKeyPoints.clear();
2204  for(std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
2205  it != listOfQueryKeyPoints.end(); ++it) {
2206  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
2207  }
2208 
2209  bool first = true;
2210  for(std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end(); ++it) {
2211  if(first) {
2212  first = false;
2213  it->copyTo(m_queryDescriptors);
2214  } else {
2215  m_queryDescriptors.push_back(*it);
2216  }
2217  }
2218  } else {
2219  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
2220  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
2221  }
2222 
2223  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
2224 
2225  if(m_filterType != noFilterMatching) {
2226  m_queryFilteredKeyPoints.clear();
2227  m_objectFilteredPoints.clear();
2228  m_filteredMatches.clear();
2229 
2230  filterMatches();
2231  } else {
2232  m_queryFilteredKeyPoints = m_queryKeyPoints;
2233  m_objectFilteredPoints = m_trainPoints;
2234  m_filteredMatches = m_matches;
2235 
2236  m_matchQueryToTrainKeyPoints.clear();
2237  for (size_t i = 0; i < m_matches.size(); i++) {
2238  m_matchQueryToTrainKeyPoints.push_back(std::pair<cv::KeyPoint, cv::KeyPoint>(
2239  m_queryKeyPoints[(size_t) m_matches[i].queryIdx],
2240  m_trainKeyPoints[(size_t) m_matches[i].trainIdx]));
2241  }
2242  }
2243 
2244  //Convert OpenCV type to ViSP type for compatibility
2245  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
2246  vpConvert::convertFromOpenCV(m_filteredMatches, matchedReferencePoints);
2247 
2248  return static_cast<unsigned int>(m_filteredMatches.size());
2249 }
2250 
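2251 /*!
2252    Match keypoints and estimate the pose of the object with a RANSAC scheme,
2253    either with a virtual visual servoing approach or with an OpenCV-based
2254    pose estimation, depending on the configuration.
2255 
2256    \param I : Input image.
2257    \param cam : Camera parameters.
2258    \param cMo : Estimated homogeneous matrix between the object frame and the camera frame.
2259    \param error : Mean reprojection error computed on the inliers.
2260    \param elapsedTime : Total time spent for detection, extraction, matching and pose estimation.
2261    \param func : Optional function pointer used to filter candidate poses during RANSAC.
2262    \return True if the matching and the pose estimation succeed, false otherwise.
2263  */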
2264 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
2265  double &error, double &elapsedTime, bool (*func)(vpHomogeneousMatrix *)) {
2266  //Check if we have training descriptors
2267  if(m_trainDescriptors.empty()) {
2268  std::cerr << "Reference is empty." << std::endl;
2269  if(!_reference_computed) {
2270  std::cerr << "Reference is not computed." << std::endl;
2271  }
2272  std::cerr << "Matching is not possible." << std::endl;
2273 
2274  return false;
2275  }
2276 
2277  if(m_useAffineDetection) {
2278  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
2279  std::vector<cv::Mat> listOfQueryDescriptors;
2280 
2281  //Detect keypoints and extract descriptors on multiple images
2282  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
2283 
2284  //Flatten the lists of query keypoints detected on each affine view
2285  m_queryKeyPoints.clear();
2286  for(std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
2287  it != listOfQueryKeyPoints.end(); ++it) {
2288  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
2289  }
2290 
2291  bool first = true;
2292  for(std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end(); ++it) {
2293  if(first) {
2294  first = false;
2295  it->copyTo(m_queryDescriptors);
2296  } else {
2297  m_queryDescriptors.push_back(*it);
2298  }
2299  }
2300  } else {
2301  detect(I, m_queryKeyPoints, m_detectionTime);
2302  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
2303  }
2304 
2305  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
2306 
2307  elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
2308 
2309  if(m_filterType != noFilterMatching) {
2310  m_queryFilteredKeyPoints.clear();
2311  m_objectFilteredPoints.clear();
2312  m_filteredMatches.clear();
2313 
2314  filterMatches();
2315  } else {
2316  m_queryFilteredKeyPoints = m_queryKeyPoints;
2317  m_objectFilteredPoints = m_trainPoints;
2318  m_filteredMatches = m_matches;
2319 
2320  m_matchQueryToTrainKeyPoints.clear();
2321  for (size_t i = 0; i < m_matches.size(); i++) {
2322  m_matchQueryToTrainKeyPoints.push_back(std::pair<cv::KeyPoint, cv::KeyPoint>(
2323  m_queryKeyPoints[(size_t) m_matches[i].queryIdx],
2324  m_trainKeyPoints[(size_t) m_matches[i].trainIdx]));
2325  }
2326  }
2327 
2328  //Convert OpenCV type to ViSP type for compatibility
2329  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
2330  vpConvert::convertFromOpenCV(m_filteredMatches, matchedReferencePoints);
2331 
2332  //error = std::numeric_limits<double>::max(); // creates a compilation error under Windows (max is defined as a macro); fixing it would require #undef max
2333  error = DBL_MAX;
2334  m_ransacInliers.clear();
2335  m_ransacOutliers.clear();
2336 
2337  if(m_useRansacVVS) {
2338  std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
2339  size_t cpt = 0;
2340  //Create a list of vpPoint with 2D coordinates (current keypoint location) + 3D coordinates (world/object coordinates)
2341  for(std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin(); it != m_objectFilteredPoints.end();
2342  ++it, cpt++) {
2343  vpPoint pt;
2344  pt.setWorldCoordinates(it->x, it->y, it->z);
2345 
2346  vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
2347 
2348  double x = 0.0, y = 0.0;
2349  vpPixelMeterConversion::convertPoint(cam, imP, x, y);
2350  pt.set_x(x);
2351  pt.set_y(y);
2352 
2353  objectVpPoints[cpt] = pt;
2354  }
2355 
2356  std::vector<vpPoint> inliers;
2357  bool res = computePose(objectVpPoints, cMo, inliers, m_poseTime, func);
2358  m_ransacInliers.resize(inliers.size());
2359  for(size_t i = 0; i < m_ransacInliers.size(); i++) {
2360  vpMeterPixelConversion::convertPoint(cam, inliers[i].get_x(), inliers[i].get_y(), m_ransacInliers[i]);
2361  }
2362 
2363  elapsedTime += m_poseTime;
2364 
2365  return res;
2366  } else {
2367  std::vector<cv::Point2f> imageFilteredPoints;
2368  cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
2369  std::vector<int> inlierIndex;
2370  bool res = computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
2371 
2372  std::map<int, bool> mapOfInlierIndex;
2373  m_matchRansacKeyPointsToPoints.clear();
2374  m_matchRansacQueryToTrainKeyPoints.clear();
2375  for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
2376  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(m_queryFilteredKeyPoints[(size_t)(*it)],
2377  m_objectFilteredPoints[(size_t)(*it)]));
2378  m_matchRansacQueryToTrainKeyPoints.push_back(std::pair<cv::KeyPoint, cv::KeyPoint>(m_queryFilteredKeyPoints[(size_t)(*it)],
2379  m_trainKeyPoints[(size_t)m_matches[(size_t)(*it)].trainIdx]));
2380  mapOfInlierIndex[*it] = true;
2381  }
2382 
2383  for(size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
2384  if(mapOfInlierIndex.find((int) i) == mapOfInlierIndex.end()) {
2385  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
2386  }
2387  }
2388 
2389  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
2390 
2391  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
2392  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(), m_ransacInliers.begin(),
2393  matchRansacToVpImage);
2394 
2395  elapsedTime += m_poseTime;
2396 
2397  return res;
2398  }
2399 }
2400 
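2401 /*!
2402    Match keypoints detected in the current image with the reference keypoints
2403    and try to detect the object, either with a RANSAC homography for a planar
2404    object or with a RANSAC fundamental matrix otherwise.
2405 
2406    \param I : Input image.
2407    \param boundingBox : Bounding box of the detected object.
2408    \param centerOfGravity : Center of gravity of the inlier keypoints.
2409    \param isPlanarObject : If true, estimate a homography, otherwise a fundamental matrix.
2410    \param imPts1 : Optional output list of reference inlier points.
2411    \param imPts2 : Optional output list of current inlier points.
2412    \param meanDescriptorDistance : Optional output mean descriptor distance of the filtered matches.
2413    \param detectionScore : Optional output detection score (number of matches divided by the mean descriptor distance).
2414    \return True if the object is considered as detected according to the detection criterion.
2415  */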
2417 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox, vpImagePoint &centerOfGravity,
2418  const bool isPlanarObject, std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
2419  double *meanDescriptorDistance, double *detectionScore) {
2420  if(imPts1 != NULL && imPts2 != NULL) {
2421  imPts1->clear();
2422  imPts2->clear();
2423  }
2424 
2425  matchPoint(I);
2426 
2427  double meanDescriptorDistanceTmp = 0.0;
2428  for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
2429  meanDescriptorDistanceTmp += (double) it->distance;
2430  }
2431 
2432  meanDescriptorDistanceTmp /= (double) m_filteredMatches.size();
2433  double score = (double) m_filteredMatches.size() / meanDescriptorDistanceTmp;
2434 
2435  if(meanDescriptorDistance != NULL) {
2436  *meanDescriptorDistance = meanDescriptorDistanceTmp;
2437  }
2438  if(detectionScore != NULL) {
2439  *detectionScore = score;
2440  }
2441 
2442  if(m_filteredMatches.size() >= 4) {
2443  //Training / Reference 2D points
2444  std::vector<cv::Point2f> points1(m_filteredMatches.size());
2445  //Query / Current 2D points
2446  std::vector<cv::Point2f> points2(m_filteredMatches.size());
2447 
2448  for(size_t i = 0; i < m_filteredMatches.size(); i++) {
2449  points1[i] = cv::Point2f(m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx].pt);
2450  points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx].pt);
2451  }
2452 
2453  std::vector<vpImagePoint> inliers;
2454  if(isPlanarObject) {
2455 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2456  cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
2457 #else
2458  cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
2459 #endif
2460 
2461  for(size_t i = 0; i < m_filteredMatches.size(); i++ ) {
2462  //Compute reprojection error
2463  cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
2464  realPoint.at<double>(0,0) = points1[i].x;
2465  realPoint.at<double>(1,0) = points1[i].y;
2466  realPoint.at<double>(2,0) = 1.0;
2467 
2468  cv::Mat reprojectedPoint = homographyMatrix * realPoint;
2469  double err_x = (reprojectedPoint.at<double>(0,0) / reprojectedPoint.at<double>(2,0)) - points2[i].x;
2470  double err_y = (reprojectedPoint.at<double>(1,0) / reprojectedPoint.at<double>(2,0)) - points2[i].y;
2471  double reprojectionError = std::sqrt(err_x*err_x + err_y*err_y);
2472 
2473  if(reprojectionError < 6.0) {
2474  inliers.push_back(vpImagePoint((double) points2[i].y, (double) points2[i].x));
2475  if(imPts1 != NULL) {
2476  imPts1->push_back(vpImagePoint((double) points1[i].y, (double) points1[i].x));
2477  }
2478 
2479  if(imPts2 != NULL) {
2480  imPts2->push_back(vpImagePoint((double) points2[i].y, (double) points2[i].x));
2481  }
2482  }
2483  }
2484  } else if(m_filteredMatches.size() >= 8) {
2485  cv::Mat fundamentalInliers;
2486  cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
2487 
2488  for(size_t i = 0; i < (size_t) fundamentalInliers.rows; i++) {
2489  if(fundamentalInliers.at<uchar>((int) i, 0)) {
2490  inliers.push_back(vpImagePoint((double) points2[i].y, (double) points2[i].x));
2491 
2492  if(imPts1 != NULL) {
2493  imPts1->push_back(vpImagePoint((double) points1[i].y, (double) points1[i].x));
2494  }
2495 
2496  if(imPts2 != NULL) {
2497  imPts2->push_back(vpImagePoint((double) points2[i].y, (double) points2[i].x));
2498  }
2499  }
2500  }
2501  }
2502 
2503  if(!inliers.empty()) {
2504  //Build a polygon with the list of inlier keypoints detected in the current image to get the bounding box
2505  vpPolygon polygon(inliers);
2506  boundingBox = polygon.getBoundingBox();
2507 
2508  //Compute the center of gravity
2509  double meanU = 0.0, meanV = 0.0;
2510  for(std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
2511  meanU += it->get_u();
2512  meanV += it->get_v();
2513  }
2514 
2515  meanU /= (double) inliers.size();
2516  meanV /= (double) inliers.size();
2517 
2518  centerOfGravity.set_u(meanU);
2519  centerOfGravity.set_v(meanV);
2520  }
2521  } else {
2522  //Too few matches
2523  return false;
2524  }
2525 
2526  if(m_detectionMethod == detectionThreshold) {
2527  return meanDescriptorDistanceTmp < m_detectionThreshold;
2528  } else {
2529  return score > m_detectionScore;
2530  }
2531 }
2532 
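2533 /*!
2534    Match keypoints, estimate the pose of the object and, on success, project
2535    the 3D model points in the image to compute the bounding box and the
2536    center of gravity of the detected object.
2537 
2538    \param I : Input image.
2539    \param cam : Camera parameters.
2540    \param cMo : Estimated homogeneous matrix between the object frame and the camera frame.
2541    \param error : Mean reprojection error computed on the inliers.
2542    \param elapsedTime : Total time spent for detection, extraction, matching and pose estimation.
2543    \param boundingBox : Bounding box of the projected model.
2544    \param centerOfGravity : Center of gravity of the RANSAC inlier keypoints.
2545    \param func : Optional function pointer used to filter candidate poses during RANSAC.
2546    \return True if the matching and the pose estimation succeed, false otherwise.
2547  */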
2550 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
2551  double &error, double &elapsedTime, vpRect &boundingBox, vpImagePoint &centerOfGravity,
2552  bool (*func)(vpHomogeneousMatrix *)) {
2553  bool isMatchOk = matchPoint(I, cam, cMo, error, elapsedTime, func);
2554  if(isMatchOk) {
2555  //Use the pose estimated to project the model points in the image
2556  vpPoint pt;
2557  vpImagePoint imPt;
2558  std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
2559  size_t cpt = 0;
2560  for(std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
2561  pt = *it;
2562  pt.project(cMo);
2563  vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), imPt);
2564  modelImagePoints[cpt] = imPt;
2565  }
2566 
2567  //Build a polygon with the list of model image points to get the bounding box
2568  vpPolygon polygon(modelImagePoints);
2569  boundingBox = polygon.getBoundingBox();
2570 
2571  //Compute the center of gravity of the current inlier keypoints
2572  double meanU = 0.0, meanV = 0.0;
2573  for(std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end();
2574  ++it) {
2575  meanU += it->get_u();
2576  meanV += it->get_v();
2577  }
2578 
2579  meanU /= (double) m_ransacInliers.size();
2580  meanV /= (double) m_ransacInliers.size();
2581 
2582  centerOfGravity.set_u(meanU);
2583  centerOfGravity.set_v(meanV);
2584  }
2585 
2586  return isMatchOk;
2587 }
2588 
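2589 /*!
2590    Detect keypoints and extract descriptors on a set of affine-warped views of
2591    the image (ASIFT-like scheme): for each simulated tilt / rotation, the image
2592    is skewed, keypoints are detected on the warped image and their coordinates
2593    are transformed back to the original image frame.
2594 
2595    \param I : Input image.
2596    \param listOfKeypoints : Output list of keypoints, one vector per affine view.
2597    \param listOfDescriptors : Output list of descriptors, one cv::Mat per affine view.
2598    \param listOfAffineI : Optional output list of the warped images.
2599  */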
2600 void vpKeyPoint::detectExtractAffine(const vpImage<unsigned char> &I, std::vector<std::vector<cv::KeyPoint> >& listOfKeypoints,
2601  std::vector<cv::Mat>& listOfDescriptors, std::vector<vpImage<unsigned char> > *listOfAffineI) {
2602  cv::Mat img;
2603  vpImageConvert::convert(I, img);
2604  listOfKeypoints.clear();
2605  listOfDescriptors.clear();
2606 
2607  for (int tl = 1; tl < 6; tl++) {
2608  double t = pow(2, 0.5 * tl);
2609  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
2610  std::vector<cv::KeyPoint> keypoints;
2611  cv::Mat descriptors;
2612 
2613  cv::Mat timg, mask, Ai;
2614  img.copyTo(timg);
2615 
2616  affineSkew(t, phi, timg, mask, Ai);
2617 
2618 
2619  if(listOfAffineI != NULL) {
2620  cv::Mat img_disp;
2621  bitwise_and(mask, timg, img_disp);
2622  vpImage<unsigned char> tI;
2623  vpImageConvert::convert(img_disp, tI);
2624  listOfAffineI->push_back(tI);
2625  }
2626 #if 0
2627  cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE ); // Create a window for display.
2628  cv::imshow( "Skew", img_disp );
2629  cv::waitKey(0);
2630 #endif
2631 
2632  for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin(); it != m_detectors.end(); ++it) {
2633  std::vector<cv::KeyPoint> kp;
2634  it->second->detect(timg, kp, mask);
2635  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
2636  }
2637 
2638  double elapsedTime;
2639  extract(timg, keypoints, descriptors, elapsedTime);
2640 
2641  for(unsigned int i = 0; i < keypoints.size(); i++) {
2642  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
2643  cv::Mat kpt_t = Ai * cv::Mat(kpt);
2644  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
2645  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
2646  }
2647 
2648  listOfKeypoints.push_back(keypoints);
2649  listOfDescriptors.push_back(descriptors);
2650  }
2651  }
2652 }
2653 
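2654 /*!
2655    Save the learning data (keypoints, descriptors, optional 3D points and training images) on disk, in binary or XML mode.
2656 
2657    \param filename : Path of the learning file.
2658    \param binaryMode : If true, save in binary mode, otherwise in XML mode.
2659    \param saveTrainingImages : If true, also write the training images on disk.
2660  */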
2661 void vpKeyPoint::saveLearningData(const std::string &filename, bool binaryMode, const bool saveTrainingImages) {
2662  std::string parent = vpIoTools::getParent(filename);
2663  if(!parent.empty()) {
2664  vpIoTools::makeDirectory(parent);
2665  }
2666 
2667  std::map<int, std::string> mapOfImgPath;
2668  if(saveTrainingImages) {
2669  //Save the training image files in the same directory
2670  int cpt = 0;
2671 
2672  for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it, cpt++) {
2673  char buffer[8]; //large enough for the "%03d" formatted index, even above 999 images
2674  sprintf(buffer, "%03d", cpt);
2675  std::stringstream ss;
2676  ss << "train_image_" << buffer << ".jpg";
2677  std::string filename_ = ss.str();
2678  mapOfImgPath[it->first] = filename_;
2679  vpImageIo::write(it->second, parent + (!parent.empty() ? "/" : "") + filename_);
2680  }
2681  }
2682 
2683  bool have3DInfo = m_trainPoints.size() > 0;
2684  if(have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
2685  throw vpException(vpException::fatalError, "List of keypoints and list of 3D points have different sizes!");
2686  }
2687 
2688  if(binaryMode) {
2689  std::ofstream file(filename.c_str(), std::ofstream::binary);
2690  if(!file.is_open()) {
2691  throw vpException(vpException::ioError, "Cannot create the file.");
2692  }
2693 
2694  //Write info about training images
2695  int nbImgs = (int) mapOfImgPath.size();
2696  file.write((char *)(&nbImgs), sizeof(nbImgs));
2697 
2698  for(std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
2699  //Write image_id
2700  int id = it->first;
2701  file.write((char *)(&id), sizeof(id));
2702 
2703  //Write image path
2704  std::string path = it->second;
2705  int length = (int) path.length();
2706  file.write((char *)(&length), sizeof(length));
2707 
2708  for(int cpt = 0; cpt < length; cpt++) {
2709  file.write((char *) (&path[(size_t)cpt]), sizeof(path[(size_t)cpt]));
2710  }
2711  }
2712 
2713  //Write if we have 3D point information
2714  int have3DInfoInt = have3DInfo ? 1 : 0;
2715  file.write((char *)(&have3DInfoInt), sizeof(have3DInfoInt));
2716 
2717 
2718  int nRows = m_trainDescriptors.rows,
2719  nCols = m_trainDescriptors.cols;
2720  int descriptorType = m_trainDescriptors.type();
2721 
2722  //Write the number of descriptors
2723  file.write((char *)(&nRows), sizeof(nRows));
2724 
2725  //Write the size of the descriptor
2726  file.write((char *)(&nCols), sizeof(nCols));
2727 
2728  //Write the type of the descriptor
2729  file.write((char *)(&descriptorType), sizeof(descriptorType));
2730 
2731  for (int i = 0; i < nRows; i++) {
2732  unsigned int i_ = (unsigned int) i;
2733  //Write u
2734  float u = m_trainKeyPoints[i_].pt.x;
2735  file.write((char *)(&u), sizeof(u));
2736 
2737  //Write v
2738  float v = m_trainKeyPoints[i_].pt.y;
2739  file.write((char *)(&v), sizeof(v));
2740 
2741  //Write size
2742  float size = m_trainKeyPoints[i_].size;
2743  file.write((char *)(&size), sizeof(size));
2744 
2745  //Write angle
2746  float angle = m_trainKeyPoints[i_].angle;
2747  file.write((char *)(&angle), sizeof(angle));
2748 
2749  //Write response
2750  float response = m_trainKeyPoints[i_].response;
2751  file.write((char *)(&response), sizeof(response));
2752 
2753  //Write octave
2754  int octave = m_trainKeyPoints[i_].octave;
2755  file.write((char *)(&octave), sizeof(octave));
2756 
2757  //Write class_id
2758  int class_id = m_trainKeyPoints[i_].class_id;
2759  file.write((char *)(&class_id), sizeof(class_id));
2760 
2761  //Write image_id
2762  int image_id = (saveTrainingImages && m_mapOfImageId.size() > 0) ? m_mapOfImageId[m_trainKeyPoints[i_].class_id] : -1;
2763  file.write((char *)(&image_id), sizeof(image_id));
2764 
2765  if(have3DInfo) {
2766  float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
2767  //Write oX
2768  file.write((char *)(&oX), sizeof(oX));
2769 
2770  //Write oY
2771  file.write((char *)(&oY), sizeof(oY));
2772 
2773  //Write oZ
2774  file.write((char *)(&oZ), sizeof(oZ));
2775  }
2776 
2777  for (int j = 0; j < nCols; j++) {
2778  //Write the descriptor value
2779  switch(descriptorType) {
2780  case CV_8U:
2781  file.write((char *)(&m_trainDescriptors.at<unsigned char>(i, j)), sizeof(m_trainDescriptors.at<unsigned char>(i, j)));
2782  break;
2783 
2784  case CV_8S:
2785  file.write((char *)(&m_trainDescriptors.at<char>(i, j)), sizeof(m_trainDescriptors.at<char>(i, j)));
2786  break;
2787 
2788  case CV_16U:
2789  file.write((char *)(&m_trainDescriptors.at<unsigned short int>(i, j)), sizeof(m_trainDescriptors.at<unsigned short int>(i, j)));
2790  break;
2791 
2792  case CV_16S:
2793  file.write((char *)(&m_trainDescriptors.at<short int>(i, j)), sizeof(m_trainDescriptors.at<short int>(i, j)));
2794  break;
2795 
2796  case CV_32S:
2797  file.write((char *)(&m_trainDescriptors.at<int>(i, j)), sizeof(m_trainDescriptors.at<int>(i, j)));
2798  break;
2799 
2800  case CV_32F:
2801  file.write((char *)(&m_trainDescriptors.at<float>(i, j)), sizeof(m_trainDescriptors.at<float>(i, j)));
2802  break;
2803 
2804  case CV_64F:
2805  file.write((char *)(&m_trainDescriptors.at<double>(i, j)), sizeof(m_trainDescriptors.at<double>(i, j)));
2806  break;
2807 
2808  default:
2809  file.write((char *)(&m_trainDescriptors.at<float>(i, j)), sizeof(m_trainDescriptors.at<float>(i, j)));
2810  break;
2811  }
2812  }
2813  }
2814 
2815 
2816  file.close();
2817  } else {
2818 #ifdef VISP_HAVE_XML2
2819  xmlDocPtr doc = NULL;
2820  xmlNodePtr root_node = NULL, image_node = NULL, image_info_node = NULL, descriptors_info_node = NULL,
2821  descriptor_node = NULL, desc_node = NULL;
2822 
2823  /*
2824  * this initializes the library and checks potential ABI mismatches
2825  * between the version it was compiled for and the actual shared
2826  * library used.
2827  */
2828  LIBXML_TEST_VERSION
2829 
2830  doc = xmlNewDoc(BAD_CAST "1.0");
2831  if (doc == NULL) {
2832  throw vpException(vpException::ioError, "Error with file " + filename);
2833  }
2834 
2835  root_node = xmlNewNode(NULL, BAD_CAST "LearningData");
2836  xmlDocSetRootElement(doc, root_node);
2837 
2838  std::stringstream ss;
2839 
2840  //Write the training images info
2841  image_node = xmlNewChild(root_node, NULL, BAD_CAST "TrainingImageInfo", NULL);
2842 
2843  for(std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
2844  image_info_node = xmlNewChild(image_node, NULL, BAD_CAST "trainImg",
2845  BAD_CAST it->second.c_str());
2846  ss.str("");
2847  ss << it->first;
2848  xmlNewProp(image_info_node, BAD_CAST "image_id", BAD_CAST ss.str().c_str());
2849  }
2850 
2851  //Write information about descriptors
2852  descriptors_info_node = xmlNewChild(root_node, NULL, BAD_CAST "DescriptorsInfo", NULL);
2853 
2854  int nRows = m_trainDescriptors.rows,
2855  nCols = m_trainDescriptors.cols;
2856  int descriptorType = m_trainDescriptors.type();
2857 
2858  //Write the number of rows
2859  ss.str("");
2860  ss << nRows;
2861  xmlNewChild(descriptors_info_node, NULL, BAD_CAST "nrows", BAD_CAST ss.str().c_str());
2862 
2863  //Write the number of cols
2864  ss.str("");
2865  ss << nCols;
2866  xmlNewChild(descriptors_info_node, NULL, BAD_CAST "ncols", BAD_CAST ss.str().c_str());
2867 
2868  //Write the descriptors type
2869  ss.str("");
2870  ss << descriptorType;
2871  xmlNewChild(descriptors_info_node, NULL, BAD_CAST "type", BAD_CAST ss.str().c_str());
2872 
2873  for (int i = 0; i < nRows; i++) {
2874  unsigned int i_ = (unsigned int) i;
2875  descriptor_node = xmlNewChild(root_node, NULL, BAD_CAST "DescriptorInfo",
2876  NULL);
2877 
2878  ss.str("");
2879  ss << m_trainKeyPoints[i_].pt.x;
2880  xmlNewChild(descriptor_node, NULL, BAD_CAST "u",
2881  BAD_CAST ss.str().c_str());
2882 
2883  ss.str("");
2884  ss << m_trainKeyPoints[i_].pt.y;
2885  xmlNewChild(descriptor_node, NULL, BAD_CAST "v",
2886  BAD_CAST ss.str().c_str());
2887 
2888  ss.str("");
2889  ss << m_trainKeyPoints[i_].size;
2890  xmlNewChild(descriptor_node, NULL, BAD_CAST "size",
2891  BAD_CAST ss.str().c_str());
2892 
2893  ss.str("");
2894  ss << m_trainKeyPoints[i_].angle;
2895  xmlNewChild(descriptor_node, NULL, BAD_CAST "angle",
2896  BAD_CAST ss.str().c_str());
2897 
2898  ss.str("");
2899  ss << m_trainKeyPoints[i_].response;
2900  xmlNewChild(descriptor_node, NULL, BAD_CAST "response",
2901  BAD_CAST ss.str().c_str());
2902 
2903  ss.str("");
2904  ss << m_trainKeyPoints[i_].octave;
2905  xmlNewChild(descriptor_node, NULL, BAD_CAST "octave",
2906  BAD_CAST ss.str().c_str());
2907 
2908  ss.str("");
2909  ss << m_trainKeyPoints[i_].class_id;
2910  xmlNewChild(descriptor_node, NULL, BAD_CAST "class_id",
2911  BAD_CAST ss.str().c_str());
2912 
2913  ss.str("");
2914  ss << ((saveTrainingImages && m_mapOfImageId.size() > 0) ? m_mapOfImageId[m_trainKeyPoints[i_].class_id] : -1);
2915  xmlNewChild(descriptor_node, NULL, BAD_CAST "image_id",
2916  BAD_CAST ss.str().c_str());
2917 
2918  if (have3DInfo) {
2919  ss.str("");
2920  ss << m_trainPoints[i_].x;
2921  xmlNewChild(descriptor_node, NULL, BAD_CAST "oX",
2922  BAD_CAST ss.str().c_str());
2923 
2924  ss.str("");
2925  ss << m_trainPoints[i_].y;
2926  xmlNewChild(descriptor_node, NULL, BAD_CAST "oY",
2927  BAD_CAST ss.str().c_str());
2928 
2929  ss.str("");
2930  ss << m_trainPoints[i_].z;
2931  xmlNewChild(descriptor_node, NULL, BAD_CAST "oZ",
2932  BAD_CAST ss.str().c_str());
2933  }
2934 
2935  desc_node = xmlNewChild(descriptor_node, NULL, BAD_CAST "desc", NULL);
2936 
2937  for (int j = 0; j < nCols; j++) {
2938  ss.str("");
2939 
2940  switch(descriptorType) {
2941  case CV_8U:
2942  {
2943  //Promote an unsigned char to an int
2944  //val_tmp holds the numeric value that will be written
2945  //We save the value in numeric form otherwise libxml2 will not be able to parse
2946  //A better solution could be possible
2947  int val_tmp = m_trainDescriptors.at<unsigned char>(i, j);
2948  ss << val_tmp;
2949  }
2950  break;
2951 
2952  case CV_8S:
2953  {
2954  //Promote a char to an int
2955  //val_tmp holds the numeric value that will be written
2956  //We save the value in numeric form otherwise libxml2 will not be able to parse
2957  //A better solution could be possible
2958  int val_tmp = m_trainDescriptors.at<char>(i, j);
2959  ss << val_tmp;
2960  }
2961  break;
2962 
2963  case CV_16U:
2964  ss << m_trainDescriptors.at<unsigned short int>(i, j);
2965  break;
2966 
2967  case CV_16S:
2968  ss << m_trainDescriptors.at<short int>(i, j);
2969  break;
2970 
2971  case CV_32S:
2972  ss << m_trainDescriptors.at<int>(i, j);
2973  break;
2974 
2975  case CV_32F:
2976  ss << m_trainDescriptors.at<float>(i, j);
2977  break;
2978 
2979  case CV_64F:
2980  ss << m_trainDescriptors.at<double>(i, j);
2981  break;
2982 
2983  default:
2984  ss << m_trainDescriptors.at<float>(i, j);
2985  break;
2986  }
2987  xmlNewChild(desc_node, NULL, BAD_CAST "val",
2988  BAD_CAST ss.str().c_str());
2989  }
2990  }
2991 
2992  xmlSaveFormatFileEnc(filename.c_str(), doc, "UTF-8", 1);
2993 
2994  /*free the document */
2995  xmlFreeDoc(doc);
2996 
2997  /*
2998  *Free the global variables that may
2999  *have been allocated by the parser.
3000  */
3001  xmlCleanupParser();
3002 #else
3003  std::cerr << "Error: libxml2 is required !" << std::endl;
3004 #endif
3005  }
3006 }
3007 
3008 #endif