Visual Servoing Platform version 3.2.0 under development (2019-01-22)
vpKeyPoint.h
1 /****************************************************************************
2  *
3  * ViSP, open source Visual Servoing Platform software.
4  * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
5  *
6  * This software is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  * See the file LICENSE.txt at the root directory of this source
11  * distribution for additional information about the GNU GPL.
12  *
13  * For using ViSP with software that can not be combined with the GNU
14  * GPL, please contact Inria about acquiring a ViSP Professional
15  * Edition License.
16  *
17  * See http://visp.inria.fr for more information.
18  *
19  * This software was developed at:
20  * Inria Rennes - Bretagne Atlantique
21  * Campus Universitaire de Beaulieu
22  * 35042 Rennes Cedex
23  * France
24  *
25  * If you have questions regarding the use of this file, please contact
26  * Inria at visp@inria.fr
27  *
28  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
29  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
30  *
31  * Description:
32  * Key point functionalities.
33  *
34  * Authors:
35  * Souriya Trinh
36  *
37  *****************************************************************************/
38 #ifndef _vpKeyPoint_h_
39 #define _vpKeyPoint_h_
40 
41 #include <algorithm> // std::transform
42 #include <float.h> // DBL_MAX
43 #include <fstream> // std::ofstream
44 #include <limits>
45 #include <map> // std::map
46 #include <numeric> // std::accumulate
47 #include <stdlib.h> // srand, rand
48 #include <time.h> // time
49 #include <vector> // std::vector
50 
51 #include <visp3/core/vpConfig.h>
52 #include <visp3/core/vpDisplay.h>
53 #include <visp3/core/vpImageConvert.h>
54 #include <visp3/core/vpPixelMeterConversion.h>
55 #include <visp3/core/vpPlane.h>
56 #include <visp3/core/vpPoint.h>
57 #include <visp3/vision/vpBasicKeyPoint.h>
58 #include <visp3/vision/vpPose.h>
59 #ifdef VISP_HAVE_MODULE_IO
60 # include <visp3/io/vpImageIo.h>
61 #endif
62 #include <visp3/core/vpConvert.h>
63 #include <visp3/core/vpCylinder.h>
64 #include <visp3/core/vpMeterPixelConversion.h>
65 #include <visp3/core/vpPolygon.h>
66 #include <visp3/vision/vpXmlConfigParserKeyPoint.h>
67 
68 // Require at least OpenCV >= 2.1.1
69 #if (VISP_HAVE_OPENCV_VERSION >= 0x020101)
70 
71 # include <opencv2/calib3d/calib3d.hpp>
72 # include <opencv2/features2d/features2d.hpp>
73 # include <opencv2/imgproc/imgproc.hpp>
74 
75 # if (VISP_HAVE_OPENCV_VERSION >= 0x040000) // Require opencv >= 4.0.0
76 # include <opencv2/imgproc/imgproc_c.h>
77 # include <opencv2/imgproc.hpp>
78 # endif
79 
80 # if defined(VISP_HAVE_OPENCV_XFEATURES2D) // OpenCV >= 3.0.0
81 # include <opencv2/xfeatures2d.hpp>
82 # elif defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && \
83  (VISP_HAVE_OPENCV_VERSION < 0x030000)
84 # include <opencv2/nonfree/nonfree.hpp>
85 # endif
86 
87 # ifdef VISP_HAVE_XML2
88 # include <libxml/xmlwriter.h>
89 # endif
90 
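  /*
   * Typical usage sketch, assuming Iref and Icur are two grayscale images
   * provided by the application (only methods declared below are used):
   *
   *   vpImage<unsigned char> Iref, Icur;                    // filled by the application
   *   vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
   *   keypoint.buildReference(Iref);                        // learn the reference keypoints
   *   unsigned int nbMatches = keypoint.matchPoint(Icur);   // detect, extract and match
   *   std::cout << "Number of matches: " << nbMatches << std::endl;
   */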
228 class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint
229 {
230 
231 public:
233  enum vpFilterMatchingType {
234  constantFactorDistanceThreshold,
236  stdDistanceThreshold,
238  ratioDistanceThreshold,
241  stdAndRatioDistanceThreshold,
243  noFilterMatching
244  };
245 
247  enum vpDetectionMethodType {
248  detectionThreshold,
250  detectionScore
253  };
254 
256  typedef enum {
260  pgmImageFormat
261  } vpImageFormatType;
262 
264  enum vpFeatureDetectorType {
265 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
272 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
273  DETECTOR_STAR,
274 #endif
275 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
278 #endif
279 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
283 #endif
284 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
285  DETECTOR_MSD,
286 #endif
287 #endif
288  DETECTOR_TYPE_SIZE
289  };
290 
292  enum vpFeatureDescriptorType {
293 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
296 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
297  DESCRIPTOR_FREAK,
298  DESCRIPTOR_BRIEF,
299 #endif
300 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
303 #endif
304 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
307 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
308  DESCRIPTOR_DAISY,
309  DESCRIPTOR_LATCH,
310 #endif
311 #endif
312 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
313  DESCRIPTOR_VGG,
314  DESCRIPTOR_BoostDesc,
315 #endif
316 #endif
317  DESCRIPTOR_TYPE_SIZE
318  };
319 
320  vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
321  const std::string &matcherName, const vpFilterMatchingType &filterType = ratioDistanceThreshold);
322  vpKeyPoint(const std::string &detectorName = "ORB", const std::string &extractorName = "ORB",
323  const std::string &matcherName = "BruteForce-Hamming",
324  const vpFilterMatchingType &filterType = ratioDistanceThreshold);
325  vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
326  const std::string &matcherName = "BruteForce",
327  const vpFilterMatchingType &filterType = ratioDistanceThreshold);
328 
329  unsigned int buildReference(const vpImage<unsigned char> &I);
330  unsigned int buildReference(const vpImage<unsigned char> &I, const vpImagePoint &iP, const unsigned int height,
331  const unsigned int width);
332  unsigned int buildReference(const vpImage<unsigned char> &I, const vpRect &rectangle);
333 
334  void buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoint,
335  std::vector<cv::Point3f> &points3f, const bool append = false, const int class_id = -1);
336  void buildReference(const vpImage<unsigned char> &I, const std::vector<cv::KeyPoint> &trainKeyPoint,
337  const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
338  const bool append = false, const int class_id = -1);
339 
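  /*
   * Sketch restricting the learning stage to a region of interest, assuming I
   * is a grayscale training image (the rectangle coordinates are arbitrary
   * example values):
   *
   *   vpKeyPoint keypoint;
   *   vpRect roi(50, 50, 200, 150);   // left, top, width, height (example values)
   *   unsigned int nbRefPoints = keypoint.buildReference(I, roi);
   */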
340  static void compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi, const vpCameraParameters &cam,
341  const vpHomogeneousMatrix &cMo, cv::Point3f &point);
342 
343  static void compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi, const vpCameraParameters &cam,
344  const vpHomogeneousMatrix &cMo, vpPoint &point);
345 
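  /*
   * Sketch of the static compute3D() helper, assuming the pose cMo, the camera
   * parameters cam and the polygon vertices (roi) of the planar face on which
   * the keypoint lies are available:
   *
   *   cv::KeyPoint candidate;       // keypoint detected on the planar face
   *   std::vector<vpPoint> roi;     // 3D vertices of the face, with their projection set
   *   cv::Point3f point3d;
   *   vpKeyPoint::compute3D(candidate, roi, cam, cMo, point3d);
   */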
346  static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
347  std::vector<cv::KeyPoint> &candidates,
348  const std::vector<vpPolygon> &polygons,
349  const std::vector<std::vector<vpPoint> > &roisPt,
350  std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);
351 
352  static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
353  std::vector<vpImagePoint> &candidates,
354  const std::vector<vpPolygon> &polygons,
355  const std::vector<std::vector<vpPoint> > &roisPt,
356  std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);
357 
358  static void
359  compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
360  std::vector<cv::KeyPoint> &candidates, const std::vector<vpCylinder> &cylinders,
361  const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
362  std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);
363 
364  static void
365  compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
366  std::vector<vpImagePoint> &candidates, const std::vector<vpCylinder> &cylinders,
367  const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
368  std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);
369 
370  bool computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
371  const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
372  double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL);
373 
374  bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
375  double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL);
376 
377  bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
378  std::vector<unsigned int> &inlierIndex, double &elapsedTime,
379  bool (*func)(const vpHomogeneousMatrix &) = NULL);
380 
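  /*
   * Pose estimation sketch from 3D-2D correspondences already stored in vpPoint
   * objects (each point carrying its 3D object coordinates and its normalized
   * image coordinates); the RANSAC settings are example values:
   *
   *   std::vector<vpPoint> objectVpPoints;   // filled with the 3D-2D correspondences
   *   std::vector<vpPoint> inliers;
   *   vpHomogeneousMatrix cMo;
   *   double elapsedTime = 0.;
   *   vpKeyPoint keypoint;
   *   keypoint.setRansacIteration(200);
   *   keypoint.setRansacThreshold(0.001);
   *   bool success = keypoint.computePose(objectVpPoints, cMo, inliers, elapsedTime);
   */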
381  void createImageMatching(vpImage<unsigned char> &IRef, vpImage<unsigned char> &ICurrent,
382  vpImage<unsigned char> &IMatching);
383  void createImageMatching(vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching);
384 
385  void detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints,
386  const vpRect &rectangle = vpRect());
387  void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask = cv::Mat());
388  void detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
389  const vpRect &rectangle = vpRect());
390  void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
391  const cv::Mat &mask = cv::Mat());
392 
393  void detectExtractAffine(const vpImage<unsigned char> &I, std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
394  std::vector<cv::Mat> &listOfDescriptors,
395  std::vector<vpImage<unsigned char> > *listOfAffineI = NULL);
396 
397  void display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size = 3);
398  void display(const vpImage<unsigned char> &ICurrent, unsigned int size = 3, const vpColor &color = vpColor::green);
399 
400  void displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching, unsigned int crossSize,
401  unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
402  void displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
403  const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
404  unsigned int crossSize = 3, unsigned int lineThickness = 1);
405 
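  /*
   * Visualization sketch, assuming a reference was built and matchPoint() has
   * just been called; a display must be attached to IMatching for the drawing
   * to be visible:
   *
   *   vpImage<unsigned char> IMatching;
   *   keypoint.createImageMatching(Iref, Icur, IMatching);  // allocate the side-by-side canvas
   *   keypoint.insertImageMatching(Iref, Icur, IMatching);  // copy both images into it
   *   keypoint.displayMatching(Iref, IMatching, 3);         // draw the matches (cross size = 3)
   */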
406  void extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
407  std::vector<cv::Point3f> *trainPoints = NULL);
408  void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
409  std::vector<cv::Point3f> *trainPoints = NULL);
410  void extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
411  double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
412  void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors, double &elapsedTime,
413  std::vector<cv::Point3f> *trainPoints = NULL);
414 
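  /*
   * Sketch combining the detect(), extract() and match() member functions
   * explicitly, assuming Iref and Icur are grayscale images:
   *
   *   vpKeyPoint keypoint;
   *   std::vector<cv::KeyPoint> refKeyPoints, curKeyPoints;
   *   cv::Mat refDescriptors, curDescriptors;
   *   std::vector<cv::DMatch> matches;
   *   double elapsedTime = 0.;
   *
   *   keypoint.detect(Iref, refKeyPoints);
   *   keypoint.extract(Iref, refKeyPoints, refDescriptors);
   *   keypoint.detect(Icur, curKeyPoints);
   *   keypoint.extract(Icur, curKeyPoints, curDescriptors);
   *   keypoint.match(refDescriptors, curDescriptors, matches, elapsedTime);
   */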
424  inline vpMatrix getCovarianceMatrix() const
425  {
426  if (!m_computeCovariance) {
427  std::cout << "Warning: The covariance matrix has not been computed. "
428  "See setCovarianceComputation() to do it."
429  << std::endl;
430  return vpMatrix();
431  }
432 
433  if (m_computeCovariance && !m_useRansacVVS) {
434  std::cout << "Warning: The covariance matrix can only be computed "
435  "with a Virtual Visual Servoing approach."
436  << std::endl
437  << "Use setUseRansacVVS(true) to switch to a pose "
438  "estimation method based on a Virtual Visual Servoing "
439  "approach."
440  << std::endl;
441  return vpMatrix();
442  }
443 
444  return m_covarianceMatrix;
445  }
446 
452  inline double getDetectionTime() const { return m_detectionTime; }
453 
461  inline cv::Ptr<cv::FeatureDetector> getDetector(const vpFeatureDetectorType &type) const
462  {
463  std::map<vpFeatureDetectorType, std::string>::const_iterator it_name = m_mapOfDetectorNames.find(type);
464  if (it_name == m_mapOfDetectorNames.end()) {
465  std::cerr << "Internal problem with the feature type and the "
466  "corresponding name!"
467  << std::endl;
468  }
469 
470  std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector =
471  m_detectors.find(it_name->second);
472  if (findDetector != m_detectors.end()) {
473  return findDetector->second;
474  }
475 
476  std::cerr << "Cannot find: " << it_name->second << std::endl;
477  return cv::Ptr<cv::FeatureDetector>();
478  }
479 
487  inline cv::Ptr<cv::FeatureDetector> getDetector(const std::string &name) const
488  {
489  std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector = m_detectors.find(name);
490  if (findDetector != m_detectors.end()) {
491  return findDetector->second;
492  }
493 
494  std::cerr << "Cannot find: " << name << std::endl;
495  return cv::Ptr<cv::FeatureDetector>();
496  }
497 
501  inline std::map<vpFeatureDetectorType, std::string> getDetectorNames() const { return m_mapOfDetectorNames; }
502 
508  inline double getExtractionTime() const { return m_extractionTime; }
509 
517  inline cv::Ptr<cv::DescriptorExtractor> getExtractor(const vpFeatureDescriptorType &type) const
518  {
519  std::map<vpFeatureDescriptorType, std::string>::const_iterator it_name = m_mapOfDescriptorNames.find(type);
520  if (it_name == m_mapOfDescriptorNames.end()) {
521  std::cerr << "Internal problem with the feature type and the "
522  "corresponding name!"
523  << std::endl;
524  }
525 
526  std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor =
527  m_extractors.find(it_name->second);
528  if (findExtractor != m_extractors.end()) {
529  return findExtractor->second;
530  }
531 
532  std::cerr << "Cannot find: " << it_name->second << std::endl;
533  return cv::Ptr<cv::DescriptorExtractor>();
534  }
535 
543  inline cv::Ptr<cv::DescriptorExtractor> getExtractor(const std::string &name) const
544  {
545  std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor = m_extractors.find(name);
546  if (findExtractor != m_extractors.end()) {
547  return findExtractor->second;
548  }
549 
550  std::cerr << "Cannot find: " << name << std::endl;
551  return cv::Ptr<cv::DescriptorExtractor>();
552  }
553 
557  inline std::map<vpFeatureDescriptorType, std::string> getExtractorNames() const { return m_mapOfDescriptorNames; }
558 
564  inline vpImageFormatType getImageFormat() const { return m_imageFormat; }
565 
571  inline double getMatchingTime() const { return m_matchingTime; }
572 
578  inline cv::Ptr<cv::DescriptorMatcher> getMatcher() const { return m_matcher; }
579 
586  inline std::vector<cv::DMatch> getMatches() const { return m_filteredMatches; }
587 
595  inline std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > getMatchQueryToTrainKeyPoints() const
596  {
597  std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > matchQueryToTrainKeyPoints(m_filteredMatches.size());
598  for (size_t i = 0; i < m_filteredMatches.size(); i++) {
599  matchQueryToTrainKeyPoints[i] =
600  std::pair<cv::KeyPoint, cv::KeyPoint>(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx],
601  m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx]);
602  }
603  return matchQueryToTrainKeyPoints;
604  }
605 
611  inline unsigned int getNbImages() const { return static_cast<unsigned int>(m_mapOfImages.size()); }
612 
613  void getObjectPoints(std::vector<cv::Point3f> &objectPoints) const;
614  void getObjectPoints(std::vector<vpPoint> &objectPoints) const;
615 
621  inline double getPoseTime() const { return m_poseTime; }
622 
629  inline cv::Mat getQueryDescriptors() const { return m_queryDescriptors; }
630 
631  void getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const;
632  void getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints) const;
633 
639  inline std::vector<vpImagePoint> getRansacInliers() const { return m_ransacInliers; }
640 
646  inline std::vector<vpImagePoint> getRansacOutliers() const { return m_ransacOutliers; }
647 
654  inline cv::Mat getTrainDescriptors() const { return m_trainDescriptors; }
655 
656  void getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const;
657  void getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const;
658 
659  void getTrainPoints(std::vector<cv::Point3f> &points) const;
660  void getTrainPoints(std::vector<vpPoint> &points) const;
661 
662  void initMatcher(const std::string &matcherName);
663 
664  void insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
665  vpImage<unsigned char> &IMatching);
666  void insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching);
667 
668 #ifdef VISP_HAVE_XML2
669  void loadConfigFile(const std::string &configFile);
670 #endif
671 
672  void loadLearningData(const std::string &filename, const bool binaryMode = false, const bool append = false);
673 
674  void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector<cv::DMatch> &matches,
675  double &elapsedTime);
676 
677  unsigned int matchPoint(const vpImage<unsigned char> &I);
678  unsigned int matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP, const unsigned int height,
679  const unsigned int width);
680  unsigned int matchPoint(const vpImage<unsigned char> &I, const vpRect &rectangle);
681 
682  bool matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
683  bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect());
684  bool matchPoint(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
685  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL,
686  const vpRect &rectangle = vpRect());
687 
688  bool matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox, vpImagePoint &centerOfGravity,
689  const bool isPlanarObject = true, std::vector<vpImagePoint> *imPts1 = NULL,
690  std::vector<vpImagePoint> *imPts2 = NULL, double *meanDescriptorDistance = NULL,
691  double *detectionScore = NULL, const vpRect &rectangle = vpRect());
692 
693  bool matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
694  double &error, double &elapsedTime, vpRect &boundingBox, vpImagePoint &centerOfGravity,
695  bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect());
696 
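  /*
   * Object detection sketch, assuming the reference was learned beforehand and
   * Icur is the current grayscale image:
   *
   *   vpRect boundingBox;
   *   vpImagePoint centerOfGravity;
   *   bool present = keypoint.matchPointAndDetect(Icur, boundingBox, centerOfGravity);
   */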
697  void reset();
698 
699  void saveLearningData(const std::string &filename, const bool binaryMode = false,
700  const bool saveTrainingImages = true);
701 
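  /*
   * Persistence sketch (the file name is an arbitrary example): save the
   * learned reference database and reload it later, possibly in another
   * program:
   *
   *   keypoint.buildReference(Iref);
   *   keypoint.saveLearningData("learning_data.bin", true);   // binary mode
   *
   *   // later / elsewhere
   *   vpKeyPoint keypoint2;
   *   keypoint2.loadLearningData("learning_data.bin", true);
   *   keypoint2.matchPoint(Icur);
   */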
708  inline void setCovarianceComputation(const bool &flag)
709  {
710  m_computeCovariance = flag;
711  if (!m_useRansacVVS) {
712  std::cout << "Warning: The covariance matrix can only be computed "
713  "with a Virtual Visual Servoing approach."
714  << std::endl
715  << "Use setUseRansacVVS(true) to switch to a pose "
716  "estimation method based on a Virtual "
717  "Visual Servoing approach."
718  << std::endl;
719  }
720  }
721 
727  inline void setDetectionMethod(const vpDetectionMethodType &method) { m_detectionMethod = method; }
728 
734  inline void setDetector(const vpFeatureDetectorType &detectorType)
735  {
736  m_detectorNames.clear();
737  m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
738  m_detectors.clear();
739  initDetector(m_mapOfDetectorNames[detectorType]);
740  }
741 
747  inline void setDetector(const std::string &detectorName)
748  {
749  m_detectorNames.clear();
750  m_detectorNames.push_back(detectorName);
751  m_detectors.clear();
752  initDetector(detectorName);
753  }
754 
755 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
756 
764  template <typename T1, typename T2, typename T3>
765  inline void setDetectorParameter(const T1 detectorName, const T2 parameterName, const T3 value)
766  {
767  if (m_detectors.find(detectorName) != m_detectors.end()) {
768  m_detectors[detectorName]->set(parameterName, value);
769  }
770  }
771 #endif
772 
779  inline void setDetectors(const std::vector<std::string> &detectorNames)
780  {
781  m_detectorNames.clear();
782  m_detectors.clear();
783  m_detectorNames = detectorNames;
784  initDetectors(m_detectorNames);
785  }
786 
792  inline void setExtractor(const vpFeatureDescriptorType &extractorType)
793  {
794  m_extractorNames.clear();
795  m_extractorNames.push_back(m_mapOfDescriptorNames[extractorType]);
796  m_extractors.clear();
797  initExtractor(m_mapOfDescriptorNames[extractorType]);
798  }
799 
806  inline void setExtractor(const std::string &extractorName)
807  {
808  m_extractorNames.clear();
809  m_extractorNames.push_back(extractorName);
810  m_extractors.clear();
811  initExtractor(extractorName);
812  }
813 
814 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
815 
823  template <typename T1, typename T2, typename T3>
824  inline void setExtractorParameter(const T1 extractorName, const T2 parameterName, const T3 value)
825  {
826  if (m_extractors.find(extractorName) != m_extractors.end()) {
827  m_extractors[extractorName]->set(parameterName, value);
828  }
829  }
830 #endif
831 
838  inline void setExtractors(const std::vector<std::string> &extractorNames)
839  {
840  m_extractorNames.clear();
841  m_extractorNames = extractorNames;
842  m_extractors.clear();
843  initExtractors(m_extractorNames);
844  }
845 
851  inline void setImageFormat(const vpImageFormatType &imageFormat) { m_imageFormat = imageFormat; }
852 
868  inline void setMatcher(const std::string &matcherName)
869  {
870  m_matcherName = matcherName;
871  initMatcher(m_matcherName);
872  }
873 
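  /*
   * Configuration sketch using the setters declared in this class (detector,
   * extractor and matcher names follow the OpenCV naming convention):
   *
   *   vpKeyPoint keypoint;
   *   keypoint.setDetector("FAST");
   *   keypoint.setExtractor("ORB");
   *   keypoint.setMatcher("BruteForce-Hamming");
   *   keypoint.setFilterMatchingType(vpKeyPoint::ratioDistanceThreshold);
   */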
889  inline void setFilterMatchingType(const vpFilterMatchingType &filterType)
890  {
891  m_filterType = filterType;
892 
893  // Use k-nearest neighbors (knn) to retrieve the two best matches for a keypoint.
894  // This is needed only by the ratio-based filters (ratioDistanceThreshold, stdAndRatioDistanceThreshold).
895  if (filterType == ratioDistanceThreshold || filterType == stdAndRatioDistanceThreshold) {
896  m_useKnn = true;
897 
898 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
899  if (m_matcher != NULL && m_matcherName == "BruteForce") {
900  // if a matcher is already initialized, disable the crossCheck
901  // because it will not work with knnMatch
902  m_matcher->set("crossCheck", false);
903  }
904 #endif
905  } else {
906  m_useKnn = false;
907 
908 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
909  if (m_matcher != NULL && m_matcherName == "BruteForce") {
910  // if a matcher is already initialized, set the crossCheck mode if
911  // necessary
912  m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
913  }
914 #endif
915  }
916  }
917 
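  /*
   * Simplified sketch of the ratio test behind the ratioDistanceThreshold
   * filter (not the actual internal implementation; 0.85 is an example ratio):
   *
   *   // knnMatches[i] holds the two best candidates for query descriptor i
   *   std::vector<cv::DMatch> goodMatches;
   *   for (size_t i = 0; i < knnMatches.size(); i++) {
   *     if (knnMatches[i].size() >= 2 &&
   *         knnMatches[i][0].distance < 0.85f * knnMatches[i][1].distance) {
   *       goodMatches.push_back(knnMatches[i][0]);
   *     }
   *   }
   */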
924  inline void setMatchingFactorThreshold(const double factor)
925  {
926  if (factor > 0.0) {
927  m_matchingFactorThreshold = factor;
928  } else {
929  throw vpException(vpException::badValue, "The factor must be positive.");
930  }
931  }
932 
938  inline void setMatchingRatioThreshold(const double ratio)
939  {
940  if (ratio > 0.0 && (ratio < 1.0 || std::fabs(ratio - 1.0) < std::numeric_limits<double>::epsilon())) {
941  m_matchingRatioThreshold = ratio;
942  } else {
943  throw vpException(vpException::badValue, "The ratio must be in the interval ]0 ; 1].");
944  }
945  }
946 
953  inline void setRansacConsensusPercentage(const double percentage)
954  {
955  if (percentage > 0.0 &&
956  (percentage < 100.0 || std::fabs(percentage - 100.0) < std::numeric_limits<double>::epsilon())) {
957  m_ransacConsensusPercentage = percentage;
958  } else {
959  throw vpException(vpException::badValue, "The percentage must be in the interval ]0 ; 100].");
960  }
961  }
962 
966  inline void setRansacFilterFlag(const vpPose::RANSAC_FILTER_FLAGS &flag)
967  {
968  m_ransacFilterFlag = flag;
969  }
970 
977  inline void setRansacIteration(const int nbIter)
978  {
979  if (nbIter > 0) {
980  m_nbRansacIterations = nbIter;
981  } else {
982  throw vpException(vpException::badValue, "The number of iterations must be greater than zero.");
983  }
984  }
985 
991  inline void setRansacParallel(const bool parallel)
992  {
993  m_ransacParallel = parallel;
994  }
995 
1002  inline void setRansacParallelNbThreads(const unsigned int nthreads)
1003  {
1004  m_ransacParallelNbThreads = nthreads;
1005  }
1006 
1014  inline void setRansacReprojectionError(const double reprojectionError)
1015  {
1016  if (reprojectionError > 0.0) {
1017  m_ransacReprojectionError = reprojectionError;
1018  } else {
1019  throw vpException(vpException::badValue, "The Ransac reprojection "
1020  "threshold must be positive "
1021  "as we deal with distance.");
1022  }
1023  }
1024 
1030  inline void setRansacMinInlierCount(const int minCount)
1031  {
1032  if (minCount > 0) {
1033  m_nbRansacMinInlierCount = minCount;
1034  } else {
1035  throw vpException(vpException::badValue, "The minimum number of inliers must be greater than zero.");
1036  }
1037  }
1038 
1045  inline void setRansacThreshold(const double threshold)
1046  {
1047  if (threshold > 0.0) {
1048  m_ransacThreshold = threshold;
1049  } else {
1050  throw vpException(vpException::badValue, "The Ransac threshold must be positive as we deal with distance.");
1051  }
1052  }
1053 
1061  inline void setUseAffineDetection(const bool useAffine) { m_useAffineDetection = useAffine; }
1062 
1063 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1064 
1070  inline void setUseBruteForceCrossCheck(const bool useCrossCheck)
1071  {
1072  // Only available with BruteForce and with k=1 (i.e. not used with a
1073  // ratioDistanceThreshold method)
1074  if (m_matcher != NULL && !m_useKnn && m_matcherName == "BruteForce") {
1075  m_matcher->set("crossCheck", useCrossCheck);
1076  } else if (m_matcher != NULL && m_useKnn && m_matcherName == "BruteForce") {
1077  std::cout << "Warning, you are trying to set the crossCheck parameter with a "
1078  "BruteForce matcher but knn is enabled";
1079  std::cout << " (the filtering method uses a ratio constraint)" << std::endl;
1080  }
1081  }
1082 #endif
1083 
1090  inline void setUseMatchTrainToQuery(const bool useMatchTrainToQuery)
1091  {
1092  m_useMatchTrainToQuery = useMatchTrainToQuery;
1093  }
1094 
1102  inline void setUseRansacConsensusPercentage(const bool usePercentage) { m_useConsensusPercentage = usePercentage; }
1103 
1111  inline void setUseRansacVVS(const bool ransacVVS) { m_useRansacVVS = ransacVVS; }
1112 
1119  inline void setUseSingleMatchFilter(const bool singleMatchFilter) { m_useSingleMatchFilter = singleMatchFilter; }
1120 
1121 private:
1124  bool m_computeCovariance;
1126  vpMatrix m_covarianceMatrix;
1128  int m_currentImageId;
1131  vpDetectionMethodType m_detectionMethod;
1133  double m_detectionScore;
1136  double m_detectionThreshold;
1138  double m_detectionTime;
1140  std::vector<std::string> m_detectorNames;
1143  // with a key based upon the detector name.
1144  std::map<std::string, cv::Ptr<cv::FeatureDetector> > m_detectors;
1146  double m_extractionTime;
1148  std::vector<std::string> m_extractorNames;
1151  // with a key based upon the extractor name.
1152  std::map<std::string, cv::Ptr<cv::DescriptorExtractor> > m_extractors;
1154  std::vector<cv::DMatch> m_filteredMatches;
1156  vpFilterMatchingType m_filterType;
1158  vpImageFormatType m_imageFormat;
1161  std::vector<std::vector<cv::DMatch> > m_knnMatches;
1163  std::map<vpFeatureDescriptorType, std::string> m_mapOfDescriptorNames;
1165  std::map<vpFeatureDetectorType, std::string> m_mapOfDetectorNames;
1168  std::map<int, int> m_mapOfImageId;
1171  std::map<int, vpImage<unsigned char> > m_mapOfImages;
1174  cv::Ptr<cv::DescriptorMatcher> m_matcher;
1176  std::string m_matcherName;
1178  std::vector<cv::DMatch> m_matches;
1180  double m_matchingFactorThreshold;
1182  double m_matchingRatioThreshold;
1184  double m_matchingTime;
1186  std::vector<std::pair<cv::KeyPoint, cv::Point3f> > m_matchRansacKeyPointsToPoints;
1188  int m_nbRansacIterations;
1190  int m_nbRansacMinInlierCount;
1193  std::vector<cv::Point3f> m_objectFilteredPoints;
1195  double m_poseTime;
1198  cv::Mat m_queryDescriptors;
1200  std::vector<cv::KeyPoint> m_queryFilteredKeyPoints;
1202  std::vector<cv::KeyPoint> m_queryKeyPoints;
1205  double m_ransacConsensusPercentage;
1207  vpPose::RANSAC_FILTER_FLAGS m_ransacFilterFlag;
1209  std::vector<vpImagePoint> m_ransacInliers;
1211  std::vector<vpImagePoint> m_ransacOutliers;
1213  bool m_ransacParallel;
1215  unsigned int m_ransacParallelNbThreads;
1218  double m_ransacReprojectionError;
1221  double m_ransacThreshold;
1224  // detected in the train images).
1225  cv::Mat m_trainDescriptors;
1227  std::vector<cv::KeyPoint> m_trainKeyPoints;
1230  std::vector<cv::Point3f> m_trainPoints;
1233  std::vector<vpPoint> m_trainVpPoints;
1236  bool m_useAffineDetection;
1237 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
1238  bool m_useBruteForceCrossCheck;
1242 #endif
1243  bool m_useConsensusPercentage;
1247  bool m_useKnn;
1252  bool m_useMatchTrainToQuery;
1254  bool m_useRansacVVS;
1257  bool m_useSingleMatchFilter;
1258 
1259  void affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai);
1260 
1261  double computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
1262  const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est);
1263 
1264  void filterMatches();
1265 
1266  void init();
1267  void initDetector(const std::string &detectorNames);
1268  void initDetectors(const std::vector<std::string> &detectorNames);
1269 
1270  void initExtractor(const std::string &extractorName);
1271  void initExtractors(const std::vector<std::string> &extractorNames);
1272 
1273  void initFeatureNames();
1274 
1275  inline size_t myKeypointHash(const cv::KeyPoint &kp)
1276  {
1277  size_t _Val = 2166136261U, scale = 16777619U;
1278  Cv32suf u;
1279  u.f = kp.pt.x;
1280  _Val = (scale * _Val) ^ u.u;
1281  u.f = kp.pt.y;
1282  _Val = (scale * _Val) ^ u.u;
1283  u.f = kp.size;
1284  _Val = (scale * _Val) ^ u.u;
1285  // As, for certain keypoint types, the angle is only computed when
1286  // extracting the corresponding descriptor, the angle field is not
1287  // taken into account for the hash
1288  // u.f = kp.angle; _Val = (scale * _Val) ^ u.u;
1289  u.f = kp.response;
1290  _Val = (scale * _Val) ^ u.u;
1291  _Val = (scale * _Val) ^ ((size_t)kp.octave);
1292  _Val = (scale * _Val) ^ ((size_t)kp.class_id);
1293  return _Val;
1294  }
1295 
1296 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
1297  /*
1298  * Adapts a detector to detect points over multiple levels of a Gaussian
1299  * pyramid. Useful for detectors that are not inherently scale-invariant.
1300  * From OpenCV 2.4.11 source code.
1301  */
1302  class PyramidAdaptedFeatureDetector : public cv::FeatureDetector
1303  {
1304  public:
1305  // maxLevel - The 0-based index of the last pyramid layer
1306  PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &detector, int maxLevel = 2);
1307 
1308  // TODO implement read/write
1309  virtual bool empty() const;
1310 
1311  protected:
1312  virtual void detect(cv::InputArray image, CV_OUT std::vector<cv::KeyPoint> &keypoints,
1313  cv::InputArray mask = cv::noArray());
1314  virtual void detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
1315  const cv::Mat &mask = cv::Mat()) const;
1316 
1317  cv::Ptr<cv::FeatureDetector> detector;
1318  int maxLevel;
1319  };
1320 
1321  /*
1322  * A class to filter a vector of keypoints.
1323  * Since it is difficult to provide a convenient interface covering all
1324  * usage scenarios of a keypoints filter class, it only contains the
1325  * static methods that are currently needed.
1326  */
1327  class KeyPointsFilter
1328  {
1329  public:
1330  KeyPointsFilter() {}
1331 
1332  /*
1333  * Remove keypoints within borderSize pixels of the image border.
1334  */
1335  static void runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize, int borderSize);
1336  /*
1337  * Remove keypoints of sizes out of range.
1338  */
1339  static void runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize = FLT_MAX);
1340  /*
1341  * Remove keypoints located where the corresponding mask pixel is zero.
1342  */
1343  static void runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask);
1344  /*
1345  * Remove duplicated keypoints.
1346  */
1347  static void removeDuplicated(std::vector<cv::KeyPoint> &keypoints);
1348 
1349  /*
1350  * Retain the specified number of the best keypoints (according to the
1351  * response)
1352  */
1353  static void retainBest(std::vector<cv::KeyPoint> &keypoints, int npoints);
1354  };
1355 
1356 #endif
1357 };
1358 
1359 #endif
1360 #endif