Visual Servoing Platform  version 3.5.1 under development (2022-10-02)
vpKeyPoint.h
1 /****************************************************************************
2  *
3  * ViSP, open source Visual Servoing Platform software.
4  * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
5  *
6  * This software is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  * See the file LICENSE.txt at the root directory of this source
11  * distribution for additional information about the GNU GPL.
12  *
13  * For using ViSP with software that can not be combined with the GNU
14  * GPL, please contact Inria about acquiring a ViSP Professional
15  * Edition License.
16  *
17  * See http://visp.inria.fr for more information.
18  *
19  * This software was developed at:
20  * Inria Rennes - Bretagne Atlantique
21  * Campus Universitaire de Beaulieu
22  * 35042 Rennes Cedex
23  * France
24  *
25  * If you have questions regarding the use of this file, please contact
26  * Inria at visp@inria.fr
27  *
28  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
29  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
30  *
31  * Description:
32  * Key point functionalities.
33  *
34  * Authors:
35  * Souriya Trinh
36  *
37  *****************************************************************************/
38 #ifndef _vpKeyPoint_h_
39 #define _vpKeyPoint_h_
40 
41 #include <algorithm> // std::transform
42 #include <float.h> // DBL_MAX
43 #include <fstream> // std::ofstream
44 #include <limits>
45 #include <map> // std::map
46 #include <numeric> // std::accumulate
47 #include <stdlib.h> // srand, rand
48 #include <time.h> // time
49 #include <vector> // std::vector
50 
51 #include <visp3/core/vpConfig.h>
52 #include <visp3/core/vpDisplay.h>
53 #include <visp3/core/vpImageConvert.h>
54 #include <visp3/core/vpPixelMeterConversion.h>
55 #include <visp3/core/vpPlane.h>
56 #include <visp3/core/vpPoint.h>
57 #include <visp3/vision/vpBasicKeyPoint.h>
58 #include <visp3/vision/vpPose.h>
59 #ifdef VISP_HAVE_MODULE_IO
60 #include <visp3/io/vpImageIo.h>
61 #endif
62 #include <visp3/core/vpConvert.h>
63 #include <visp3/core/vpCylinder.h>
64 #include <visp3/core/vpMeterPixelConversion.h>
65 #include <visp3/core/vpPolygon.h>
66 #include <visp3/vision/vpXmlConfigParserKeyPoint.h>
67 
68 // Require at least OpenCV >= 2.1.1
69 #if (VISP_HAVE_OPENCV_VERSION >= 0x020101)
70 
71 #include <opencv2/calib3d/calib3d.hpp>
72 #include <opencv2/features2d/features2d.hpp>
73 #include <opencv2/imgproc/imgproc.hpp>
74 
75 #if (VISP_HAVE_OPENCV_VERSION >= 0x040000) // Require opencv >= 4.0.0
76 #include <opencv2/imgproc.hpp>
77 #include <opencv2/imgproc/imgproc_c.h>
78 #endif
79 
80 #if defined(VISP_HAVE_OPENCV_XFEATURES2D) // OpenCV >= 3.0.0
81 #include <opencv2/xfeatures2d.hpp>
82 #elif defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && \
83  (VISP_HAVE_OPENCV_VERSION < 0x030000)
84 #include <opencv2/nonfree/nonfree.hpp>
85 #endif
86 
222 class VISP_EXPORT vpKeyPoint : public vpBasicKeyPoint
223 {
224 
225 public:
228  constantFactorDistanceThreshold,
230  stdDistanceThreshold,
232  ratioDistanceThreshold,
235  stdAndRatioDistanceThreshold,
237  noFilterMatching
238  };
239 
242  detectionThreshold,
244  detectionScore
247  };
248 
250  typedef enum {
254  pgmImageFormat
255  } vpImageFormatType;
256 
259 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
266 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
267  DETECTOR_STAR,
268 #endif
269 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
270  (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
272 #endif
273 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
275 #endif
276 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
280 #endif
281 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
282  DETECTOR_MSD,
283 #endif
284 #endif
285  DETECTOR_TYPE_SIZE
286  };
287 
290 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
293 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined(VISP_HAVE_OPENCV_XFEATURES2D))
294  DESCRIPTOR_FREAK,
295  DESCRIPTOR_BRIEF,
296 #endif
297 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D) || \
298  (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
300 #endif
301 #if defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)
303 #endif
304 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
307 #if defined(VISP_HAVE_OPENCV_XFEATURES2D)
308  DESCRIPTOR_DAISY,
309  DESCRIPTOR_LATCH,
310 #endif
311 #endif
312 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined(VISP_HAVE_OPENCV_XFEATURES2D)
313  DESCRIPTOR_VGG,
314  DESCRIPTOR_BoostDesc,
315 #endif
316 #endif
317  DESCRIPTOR_TYPE_SIZE
318  };
319 
320  vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
321  const std::string &matcherName, const vpFilterMatchingType &filterType = ratioDistanceThreshold);
322  vpKeyPoint(const std::string &detectorName = "ORB", const std::string &extractorName = "ORB",
323  const std::string &matcherName = "BruteForce-Hamming",
324  const vpFilterMatchingType &filterType = ratioDistanceThreshold);
325  vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
326  const std::string &matcherName = "BruteForce",
327  const vpFilterMatchingType &filterType = ratioDistanceThreshold);
328 
329  unsigned int buildReference(const vpImage<unsigned char> &I);
330  unsigned int buildReference(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
331  unsigned int width);
332  unsigned int buildReference(const vpImage<unsigned char> &I, const vpRect &rectangle);
333 
334  unsigned int buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
335  std::vector<cv::Point3f> &points3f, bool append = false, int class_id = -1);
336  unsigned int buildReference(const vpImage<unsigned char> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
337  const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
338  bool append = false, int class_id = -1);
339 
340  unsigned int buildReference(const vpImage<vpRGBa> &I_color);
341  unsigned int buildReference(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
342  unsigned int width);
343  unsigned int buildReference(const vpImage<vpRGBa> &I_color, const vpRect &rectangle);
344 
345  unsigned int buildReference(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &trainKeyPoints,
346  std::vector<cv::Point3f> &points3f, bool append = false, int class_id = -1);
347  unsigned int buildReference(const vpImage<vpRGBa> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
348  const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
349  bool append = false, int class_id = -1);
350 
351  static void compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi, const vpCameraParameters &cam,
352  const vpHomogeneousMatrix &cMo, cv::Point3f &point);
353 
354  static void compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi, const vpCameraParameters &cam,
355  const vpHomogeneousMatrix &cMo, vpPoint &point);
356 
357  static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
358  std::vector<cv::KeyPoint> &candidates,
359  const std::vector<vpPolygon> &polygons,
360  const std::vector<std::vector<vpPoint> > &roisPt,
361  std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);
362 
363  static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
364  std::vector<vpImagePoint> &candidates,
365  const std::vector<vpPolygon> &polygons,
366  const std::vector<std::vector<vpPoint> > &roisPt,
367  std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);
368 
369  static void
370  compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
371  std::vector<cv::KeyPoint> &candidates, const std::vector<vpCylinder> &cylinders,
372  const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
373  std::vector<cv::Point3f> &points, cv::Mat *descriptors = NULL);
374 
375  static void
376  compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
377  std::vector<vpImagePoint> &candidates, const std::vector<vpCylinder> &cylinders,
378  const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
379  std::vector<vpPoint> &points, cv::Mat *descriptors = NULL);
380 
381  bool computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
382  const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
383  double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL);
384 
385  bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
386  double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL);
387 
388  bool computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo, std::vector<vpPoint> &inliers,
389  std::vector<unsigned int> &inlierIndex, double &elapsedTime,
390  bool (*func)(const vpHomogeneousMatrix &) = NULL);
391 
392  void createImageMatching(vpImage<unsigned char> &IRef, vpImage<unsigned char> &ICurrent,
393  vpImage<unsigned char> &IMatching);
394  void createImageMatching(vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching);
395 
396  void createImageMatching(vpImage<unsigned char> &IRef, vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);
397  void createImageMatching(vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);
398 
399  void detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints,
400  const vpRect &rectangle = vpRect());
401  void detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle = vpRect());
402  void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask = cv::Mat());
403  void detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
404  const vpRect &rectangle = vpRect());
405  void detect(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
406  const vpRect &rectangle = vpRect());
407  void detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
408  const cv::Mat &mask = cv::Mat());
409 
410  void detectExtractAffine(const vpImage<unsigned char> &I, std::vector<std::vector<cv::KeyPoint> > &listOfKeypoints,
411  std::vector<cv::Mat> &listOfDescriptors,
412  std::vector<vpImage<unsigned char> > *listOfAffineI = NULL);
413 
414  void display(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent, unsigned int size = 3);
415  void display(const vpImage<unsigned char> &ICurrent, unsigned int size = 3, const vpColor &color = vpColor::green);
416  void display(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, unsigned int size = 3);
417  void display(const vpImage<vpRGBa> &ICurrent, unsigned int size = 3, const vpColor &color = vpColor::green);
418 
419  void displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching, unsigned int crossSize,
420  unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
421  void displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
422  const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
423  unsigned int crossSize = 3, unsigned int lineThickness = 1);
424  void displayMatching(const vpImage<unsigned char> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
425  unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
426  void displayMatching(const vpImage<vpRGBa> &IRef, vpImage<vpRGBa> &IMatching, unsigned int crossSize,
427  unsigned int lineThickness = 1, const vpColor &color = vpColor::green);
428  void displayMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching,
429  const std::vector<vpImagePoint> &ransacInliers = std::vector<vpImagePoint>(),
430  unsigned int crossSize = 3, unsigned int lineThickness = 1);
431 
432  void extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
433  std::vector<cv::Point3f> *trainPoints = NULL);
434  void extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
435  std::vector<cv::Point3f> *trainPoints = NULL);
436  void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
437  std::vector<cv::Point3f> *trainPoints = NULL);
438  void extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
439  double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
440  void extract(const vpImage<vpRGBa> &I_color, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
441  double &elapsedTime, std::vector<cv::Point3f> *trainPoints = NULL);
442  void extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors, double &elapsedTime,
443  std::vector<cv::Point3f> *trainPoints = NULL);
444 
455  {
456  if (!m_computeCovariance) {
457  std::cout << "Warning : The covariance matrix has not been computed. "
458  "See setCovarianceComputation() to do it."
459  << std::endl;
460  return vpMatrix();
461  }
462 
463  if (m_computeCovariance && !m_useRansacVVS) {
464  std::cout << "Warning : The covariance matrix can only be computed "
465  "with a Virtual Visual Servoing approach."
466  << std::endl
467  << "Use setUseRansacVVS(true) to choose to use a pose "
468  "estimation method based on a Virtual Visual Servoing "
469  "approach."
470  << std::endl;
471  return vpMatrix();
472  }
473 
474  return m_covarianceMatrix;
475  }
476 
  /*! \return The detection time (value of m_detectionTime). */
  inline double getDetectionTime() const { return m_detectionTime; }
483 
491  inline cv::Ptr<cv::FeatureDetector> getDetector(const vpFeatureDetectorType &type) const
492  {
493  std::map<vpFeatureDetectorType, std::string>::const_iterator it_name = m_mapOfDetectorNames.find(type);
494  if (it_name == m_mapOfDetectorNames.end()) {
495  std::cerr << "Internal problem with the feature type and the "
496  "corresponding name!"
497  << std::endl;
498  }
499 
500  std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector =
501  m_detectors.find(it_name->second);
502  if (findDetector != m_detectors.end()) {
503  return findDetector->second;
504  }
505 
506  std::cerr << "Cannot find: " << it_name->second << std::endl;
507  return cv::Ptr<cv::FeatureDetector>();
508  }
509 
517  inline cv::Ptr<cv::FeatureDetector> getDetector(const std::string &name) const
518  {
519  std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator findDetector = m_detectors.find(name);
520  if (findDetector != m_detectors.end()) {
521  return findDetector->second;
522  }
523 
524  std::cerr << "Cannot find: " << name << std::endl;
525  return cv::Ptr<cv::FeatureDetector>();
526  }
527 
  /*! \return The map associating a feature detector type to its name (m_mapOfDetectorNames). */
  inline std::map<vpFeatureDetectorType, std::string> getDetectorNames() const { return m_mapOfDetectorNames; }
532 
  /*! \return The descriptor extraction time (value of m_extractionTime). */
  inline double getExtractionTime() const { return m_extractionTime; }
539 
547  inline cv::Ptr<cv::DescriptorExtractor> getExtractor(const vpFeatureDescriptorType &type) const
548  {
549  std::map<vpFeatureDescriptorType, std::string>::const_iterator it_name = m_mapOfDescriptorNames.find(type);
550  if (it_name == m_mapOfDescriptorNames.end()) {
551  std::cerr << "Internal problem with the feature type and the "
552  "corresponding name!"
553  << std::endl;
554  }
555 
556  std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor =
557  m_extractors.find(it_name->second);
558  if (findExtractor != m_extractors.end()) {
559  return findExtractor->second;
560  }
561 
562  std::cerr << "Cannot find: " << it_name->second << std::endl;
563  return cv::Ptr<cv::DescriptorExtractor>();
564  }
565 
573  inline cv::Ptr<cv::DescriptorExtractor> getExtractor(const std::string &name) const
574  {
575  std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator findExtractor = m_extractors.find(name);
576  if (findExtractor != m_extractors.end()) {
577  return findExtractor->second;
578  }
579 
580  std::cerr << "Cannot find: " << name << std::endl;
581  return cv::Ptr<cv::DescriptorExtractor>();
582  }
583 
  /*! \return The map associating a feature descriptor type to its name (m_mapOfDescriptorNames). */
  inline std::map<vpFeatureDescriptorType, std::string> getExtractorNames() const { return m_mapOfDescriptorNames; }
588 
  /*! \return The configured image format (value of m_imageFormat). */
  inline vpImageFormatType getImageFormat() const { return m_imageFormat; }
595 
  /*! \return The matching time (value of m_matchingTime). */
  inline double getMatchingTime() const { return m_matchingTime; }
602 
  /*! \return The descriptor matcher (m_matcher). */
  inline cv::Ptr<cv::DescriptorMatcher> getMatcher() const { return m_matcher; }
609 
  /*! \return A copy of the filtered list of matches (m_filteredMatches). */
  inline std::vector<cv::DMatch> getMatches() const { return m_filteredMatches; }
617 
625  inline std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > getMatchQueryToTrainKeyPoints() const
626  {
627  std::vector<std::pair<cv::KeyPoint, cv::KeyPoint> > matchQueryToTrainKeyPoints(m_filteredMatches.size());
628  for (size_t i = 0; i < m_filteredMatches.size(); i++) {
629  matchQueryToTrainKeyPoints.push_back(
630  std::pair<cv::KeyPoint, cv::KeyPoint>(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx],
631  m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx]));
632  }
633  return matchQueryToTrainKeyPoints;
634  }
635 
  /*! \return The number of stored train images (size of m_mapOfImages). */
  inline unsigned int getNbImages() const { return static_cast<unsigned int>(m_mapOfImages.size()); }
642 
643  void getObjectPoints(std::vector<cv::Point3f> &objectPoints) const;
644  void getObjectPoints(std::vector<vpPoint> &objectPoints) const;
645 
  /*! \return The pose computation time (value of m_poseTime). */
  inline double getPoseTime() const { return m_poseTime; }
652 
  /*! \return The descriptors matrix for the query keypoints (m_queryDescriptors). */
  inline cv::Mat getQueryDescriptors() const { return m_queryDescriptors; }
660 
661  void getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints, bool matches = true) const;
662  void getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints, bool matches = true) const;
663 
  /*! \return A copy of the RANSAC inliers (m_ransacInliers). */
  inline std::vector<vpImagePoint> getRansacInliers() const { return m_ransacInliers; }
670 
  /*! \return A copy of the RANSAC outliers (m_ransacOutliers). */
  inline std::vector<vpImagePoint> getRansacOutliers() const { return m_ransacOutliers; }
677 
  /*! \return The descriptors matrix for the train keypoints (m_trainDescriptors). */
  inline cv::Mat getTrainDescriptors() const { return m_trainDescriptors; }
685 
686  void getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const;
687  void getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const;
688 
689  void getTrainPoints(std::vector<cv::Point3f> &points) const;
690  void getTrainPoints(std::vector<vpPoint> &points) const;
691 
692  void initMatcher(const std::string &matcherName);
693 
694  void insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
695  vpImage<unsigned char> &IMatching);
696  void insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching);
697 
698  void insertImageMatching(const vpImage<vpRGBa> &IRef, const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);
699  void insertImageMatching(const vpImage<vpRGBa> &ICurrent, vpImage<vpRGBa> &IMatching);
700 
701  void loadConfigFile(const std::string &configFile);
702 
703  void loadLearningData(const std::string &filename, bool binaryMode = false, bool append = false);
704 
705  void match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors, std::vector<cv::DMatch> &matches,
706  double &elapsedTime);
707 
708  unsigned int matchPoint(const vpImage<unsigned char> &I);
709  unsigned int matchPoint(const vpImage<unsigned char> &I, const vpImagePoint &iP, unsigned int height,
710  unsigned int width);
711  unsigned int matchPoint(const vpImage<unsigned char> &I, const vpRect &rectangle);
712 
713  unsigned int matchPoint(const std::vector<cv::KeyPoint> &queryKeyPoints, const cv::Mat &queryDescriptors);
715  bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect());
717  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL,
718  const vpRect &rectangle = vpRect());
719 
720  bool matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox, vpImagePoint &centerOfGravity,
721  const bool isPlanarObject = true, std::vector<vpImagePoint> *imPts1 = NULL,
722  std::vector<vpImagePoint> *imPts2 = NULL, double *meanDescriptorDistance = NULL,
723  double *detectionScore = NULL, const vpRect &rectangle = vpRect());
724 
725  bool matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
726  double &error, double &elapsedTime, vpRect &boundingBox, vpImagePoint &centerOfGravity,
727  bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect());
728 
729  unsigned int matchPoint(const vpImage<vpRGBa> &I_color);
730  unsigned int matchPoint(const vpImage<vpRGBa> &I_color, const vpImagePoint &iP, unsigned int height,
731  unsigned int width);
732  unsigned int matchPoint(const vpImage<vpRGBa> &I_color, const vpRect &rectangle);
733 
734  bool matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
735  bool (*func)(const vpHomogeneousMatrix &) = NULL, const vpRect &rectangle = vpRect());
736  bool matchPoint(const vpImage<vpRGBa> &I_color, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
737  double &error, double &elapsedTime, bool (*func)(const vpHomogeneousMatrix &) = NULL,
738  const vpRect &rectangle = vpRect());
739 
740  void reset();
741 
742  void saveLearningData(const std::string &filename, bool binaryMode = false, bool saveTrainingImages = true);
743 
750  inline void setCovarianceComputation(const bool &flag)
751  {
752  m_computeCovariance = flag;
753  if (!m_useRansacVVS) {
754  std::cout << "Warning : The covariance matrix can only be computed "
755  "with a Virtual Visual Servoing approach."
756  << std::endl
757  << "Use setUseRansacVVS(true) to choose to use a pose "
758  "estimation method based on a Virtual "
759  "Visual Servoing approach."
760  << std::endl;
761  }
762  }
763 
  /*! Set the object detection method. \param method : New value stored in m_detectionMethod. */
  inline void setDetectionMethod(const vpDetectionMethodType &method) { m_detectionMethod = method; }
770 
776  inline void setDetector(const vpFeatureDetectorType &detectorType)
777  {
778  m_detectorNames.clear();
779  m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
780  m_detectors.clear();
781  initDetector(m_mapOfDetectorNames[detectorType]);
782  }
783 
789  inline void setDetector(const std::string &detectorName)
790  {
791  m_detectorNames.clear();
792  m_detectorNames.push_back(detectorName);
793  m_detectors.clear();
794  initDetector(detectorName);
795  }
796 
797 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  /*!
    Set a parameter on an already-initialized detector (OpenCV 2.4.x generic
    parameter interface).

    \param detectorName : Name of the detector (key in m_detectors).
    \param parameterName : Name of the parameter to set.
    \param value : New parameter value.

    Silently does nothing if no detector is registered under that name.
  */
  template <typename T1, typename T2, typename T3>
  inline void setDetectorParameter(const T1 detectorName, const T2 parameterName, const T3 value)
  {
    if (m_detectors.find(detectorName) != m_detectors.end()) {
      m_detectors[detectorName]->set(parameterName, value);
    }
  }
813 #endif
814 
821  inline void setDetectors(const std::vector<std::string> &detectorNames)
822  {
823  m_detectorNames.clear();
824  m_detectors.clear();
825  m_detectorNames = detectorNames;
826  initDetectors(m_detectorNames);
827  }
828 
834  inline void setExtractor(const vpFeatureDescriptorType &extractorType)
835  {
836  m_extractorNames.clear();
837  m_extractorNames.push_back(m_mapOfDescriptorNames[extractorType]);
838  m_extractors.clear();
839  initExtractor(m_mapOfDescriptorNames[extractorType]);
840  }
841 
848  inline void setExtractor(const std::string &extractorName)
849  {
850  m_extractorNames.clear();
851  m_extractorNames.push_back(extractorName);
852  m_extractors.clear();
853  initExtractor(extractorName);
854  }
855 
856 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  /*!
    Set a parameter on an already-initialized extractor (OpenCV 2.4.x generic
    parameter interface).

    \param extractorName : Name of the extractor (key in m_extractors).
    \param parameterName : Name of the parameter to set.
    \param value : New parameter value.

    Silently does nothing if no extractor is registered under that name.
  */
  template <typename T1, typename T2, typename T3>
  inline void setExtractorParameter(const T1 extractorName, const T2 parameterName, const T3 value)
  {
    if (m_extractors.find(extractorName) != m_extractors.end()) {
      m_extractors[extractorName]->set(parameterName, value);
    }
  }
872 #endif
873 
880  inline void setExtractors(const std::vector<std::string> &extractorNames)
881  {
882  m_extractorNames.clear();
883  m_extractorNames = extractorNames;
884  m_extractors.clear();
885  initExtractors(m_extractorNames);
886  }
887 
  /*! Set the image format. \param imageFormat : New value stored in m_imageFormat. */
  inline void setImageFormat(const vpImageFormatType &imageFormat) { m_imageFormat = imageFormat; }
894 
910  inline void setMatcher(const std::string &matcherName)
911  {
912  m_matcherName = matcherName;
913  initMatcher(m_matcherName);
914  }
915 
  /*! Set the maximum number of features. \param maxFeatures : New value stored in m_maxFeatures. */
  void setMaxFeatures(int maxFeatures) { m_maxFeatures = maxFeatures; }
922 
  /*!
    Set the filtering method used to discard bad keypoint matches.

    \param filterType : Filtering method.

    Ratio-based methods need the two best matches per keypoint, so knn
    matching is enabled for them; with an OpenCV 2.4.x BruteForce matcher the
    crossCheck parameter is toggled accordingly, because crossCheck does not
    work together with knnMatch.
  */
  inline void setFilterMatchingType(const vpFilterMatchingType &filterType)
  {
    m_filterType = filterType;

    // Use k-nearest neighbors (knn) to retrieve the two best matches for a
    // keypoint, so this is useful only for the ratio-based methods.
    if (filterType == ratioDistanceThreshold || filterType == stdAndRatioDistanceThreshold) {
      m_useKnn = true;

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
      if (m_matcher != NULL && m_matcherName == "BruteForce") {
        // If a matcher is already initialized, disable the crossCheck
        // because it will not work with knnMatch.
        m_matcher->set("crossCheck", false);
      }
#endif
    } else {
      m_useKnn = false;

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
      if (m_matcher != NULL && m_matcherName == "BruteForce") {
        // If a matcher is already initialized, restore the configured
        // crossCheck mode if necessary.
        m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
      }
#endif
    }
  }
966 
973  inline void setMatchingFactorThreshold(const double factor)
974  {
975  if (factor > 0.0) {
976  m_matchingFactorThreshold = factor;
977  } else {
978  throw vpException(vpException::badValue, "The factor must be positive.");
979  }
980  }
981 
987  inline void setMatchingRatioThreshold(double ratio)
988  {
989  if (ratio > 0.0 && (ratio < 1.0 || std::fabs(ratio - 1.0) < std::numeric_limits<double>::epsilon())) {
990  m_matchingRatioThreshold = ratio;
991  } else {
992  throw vpException(vpException::badValue, "The ratio must be in the interval ]0 ; 1].");
993  }
994  }
995 
1002  inline void setRansacConsensusPercentage(double percentage)
1003  {
1004  if (percentage > 0.0 &&
1005  (percentage < 100.0 || std::fabs(percentage - 100.0) < std::numeric_limits<double>::epsilon())) {
1006  m_ransacConsensusPercentage = percentage;
1007  } else {
1008  throw vpException(vpException::badValue, "The percentage must be in the interval ]0 ; 100].");
1009  }
1010  }
1011 
  /*! Set the RANSAC filter flag. \param flag : New value stored in m_ransacFilterFlag. */
  inline void setRansacFilterFlag(const vpPose::RANSAC_FILTER_FLAGS &flag) { m_ransacFilterFlag = flag; }
1016 
1023  inline void setRansacIteration(int nbIter)
1024  {
1025  if (nbIter > 0) {
1026  m_nbRansacIterations = nbIter;
1027  } else {
1028  throw vpException(vpException::badValue, "The number of iterations must be greater than zero.");
1029  }
1030  }
1031 
  /*! Enable/disable the parallel RANSAC version. \param parallel : New value stored in m_ransacParallel. */
  inline void setRansacParallel(bool parallel) { m_ransacParallel = parallel; }
1038 
  /*! Set the number of threads for the parallel RANSAC. \param nthreads : New value stored in m_ransacParallelNbThreads. */
  inline void setRansacParallelNbThreads(unsigned int nthreads) { m_ransacParallelNbThreads = nthreads; }
1046 
1054  inline void setRansacReprojectionError(double reprojectionError)
1055  {
1056  if (reprojectionError > 0.0) {
1057  m_ransacReprojectionError = reprojectionError;
1058  } else {
1059  throw vpException(vpException::badValue, "The Ransac reprojection "
1060  "threshold must be positive "
1061  "as we deal with distance.");
1062  }
1063  }
1064 
1070  inline void setRansacMinInlierCount(int minCount)
1071  {
1072  if (minCount > 0) {
1073  m_nbRansacMinInlierCount = minCount;
1074  } else {
1075  throw vpException(vpException::badValue, "The minimum number of inliers must be greater than zero.");
1076  }
1077  }
1078 
1085  inline void setRansacThreshold(double threshold)
1086  {
1087  if (threshold > 0.0) {
1088  m_ransacThreshold = threshold;
1089  } else {
1090  throw vpException(vpException::badValue, "The Ransac threshold must be positive as we deal with distance.");
1091  }
1092  }
1093 
  /*! Enable/disable the affine detection mode. \param useAffine : New value stored in m_useAffineDetection. */
  inline void setUseAffineDetection(bool useAffine) { m_useAffineDetection = useAffine; }
1102 
1103 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  /*!
    Enable/disable the cross-check of a BruteForce matcher (OpenCV 2.4.x only).

    \param useCrossCheck : True to enable the cross-check, false otherwise.

    The parameter is applied only when a BruteForce matcher is initialized and
    knn matching is disabled; otherwise a warning is printed, since cross-check
    is incompatible with the ratio-based (knn) filtering.
  */
  inline void setUseBruteForceCrossCheck(bool useCrossCheck)
  {
    // Only available with BruteForce and with k=1 (i.e not used with a
    // ratioDistanceThreshold method)
    if (m_matcher != NULL && !m_useKnn && m_matcherName == "BruteForce") {
      m_matcher->set("crossCheck", useCrossCheck);
    } else if (m_matcher != NULL && m_useKnn && m_matcherName == "BruteForce") {
      std::cout << "Warning, you try to set the crossCheck parameter with a "
                   "BruteForce matcher but knn is enabled";
      std::cout << " (the filtering method uses a ratio constraint)" << std::endl;
    }
  }
1122 #endif
1123 
  /*! Enable/disable matching train descriptors to query descriptors. \param useMatchTrainToQuery : New value stored in m_useMatchTrainToQuery. */
  inline void setUseMatchTrainToQuery(bool useMatchTrainToQuery) { m_useMatchTrainToQuery = useMatchTrainToQuery; }
1131 
  /*! Enable/disable the percentage-based RANSAC consensus. \param usePercentage : New value stored in m_useConsensusPercentage. */
  inline void setUseRansacConsensusPercentage(bool usePercentage) { m_useConsensusPercentage = usePercentage; }
1140 
  /*! Enable/disable the Virtual Visual Servoing pose estimation. \param ransacVVS : New value stored in m_useRansacVVS. */
  inline void setUseRansacVVS(bool ransacVVS) { m_useRansacVVS = ransacVVS; }
1149 
  /*! Enable/disable the single-match filter. \param singleMatchFilter : New value stored in m_useSingleMatchFilter. */
  inline void setUseSingleMatchFilter(bool singleMatchFilter) { m_useSingleMatchFilter = singleMatchFilter; }
1157 
private:
  //! If true, compute the covariance matrix of the pose estimation
  bool m_computeCovariance;
  //! Covariance matrix of the pose estimation (filled only when m_computeCovariance is true)
  vpMatrix m_covarianceMatrix;
  //! Id of the current image (used when several train images are processed)
  int m_currentImageId;
  //! Method used to decide if the object is detected
  vpDetectionMethodType m_detectionMethod;
  //! Detection score — NOTE(review): exact semantics depend on the detection method; confirm in vpKeyPoint.cpp
  double m_detectionScore;
  //! Detection threshold — NOTE(review): presumably used by the threshold-based detection method; confirm
  double m_detectionThreshold;
  //! Elapsed time to detect the keypoints (returned by getDetectionTime())
  double m_detectionTime;
  //! List of the detector names
  std::vector<std::string> m_detectorNames;
  //! Map of OpenCV detector smart pointers,
  // with a key based upon the detector name.
  std::map<std::string, cv::Ptr<cv::FeatureDetector> > m_detectors;
  //! Elapsed time to extract the descriptors (returned by getExtractionTime())
  double m_extractionTime;
  //! List of the extractor names
  std::vector<std::string> m_extractorNames;
  //! Map of OpenCV descriptor-extractor smart pointers,
  // with a key based upon the extractor name.
  std::map<std::string, cv::Ptr<cv::DescriptorExtractor> > m_extractors;
  //! List of the matches kept after filtering
  std::vector<cv::DMatch> m_filteredMatches;
  //! Type of the matching filter (set with setFilterMatchingType())
  vpFilterMatchingType m_filterType;
  //! Image format (jpg / png / ppm, see vpImageFormatType)
  vpImageFormatType m_imageFormat;
  //! List of k-nearest-neighbor matches (used when m_useKnn is true)
  std::vector<std::vector<cv::DMatch> > m_knnMatches;
  //! Map from a vpFeatureDescriptorType to the corresponding OpenCV descriptor name
  std::map<vpFeatureDescriptorType, std::string> m_mapOfDescriptorNames;
  //! Map from a vpFeatureDetectorType to the corresponding OpenCV detector name
  std::map<vpFeatureDetectorType, std::string> m_mapOfDetectorNames;
  //! Map of image ids — NOTE(review): presumably maps a train index to the id of its image; confirm
  std::map<int, int> m_mapOfImageId;
  //! Map of the train images, indexed by image id
  std::map<int, vpImage<unsigned char> > m_mapOfImages;
  //! OpenCV descriptor matcher (returned by getMatcher())
  cv::Ptr<cv::DescriptorMatcher> m_matcher;
  //! Name of the matcher (set with setMatcher())
  std::string m_matcherName;
  //! List of the raw matches (returned by getMatches())
  std::vector<cv::DMatch> m_matches;
  //! Factor used by the constant-factor distance filtering (set with setMatchingFactorThreshold())
  double m_matchingFactorThreshold;
  //! Ratio used by the ratio-test filtering (set with setMatchingRatioThreshold())
  double m_matchingRatioThreshold;
  //! Elapsed time to match the keypoints (returned by getMatchingTime())
  double m_matchingTime;
  //! Pairs of matched keypoint / 3D object point used by the RANSAC pose estimation
  std::vector<std::pair<cv::KeyPoint, cv::Point3f> > m_matchRansacKeyPointsToPoints;
  //! Maximum number of RANSAC iterations (set with setRansacIteration())
  int m_nbRansacIterations;
  //! Minimum number of inliers for the RANSAC pose (set with setRansacMinInlierCount())
  int m_nbRansacMinInlierCount;
  //! 3D object points kept after the matching filter
  std::vector<cv::Point3f> m_objectFilteredPoints;
  //! Elapsed time to compute the pose (returned by getPoseTime())
  double m_poseTime;
  //! Descriptors of the query (current) keypoints (returned by getQueryDescriptors())
  cv::Mat m_queryDescriptors;
  //! Query keypoints kept after the matching filter
  std::vector<cv::KeyPoint> m_queryFilteredKeyPoints;
  //! Keypoints detected in the query (current) image
  std::vector<cv::KeyPoint> m_queryKeyPoints;
  //! Consensus percentage used when m_useConsensusPercentage is true (set with setRansacConsensusPercentage())
  double m_ransacConsensusPercentage;
  //! RANSAC flag used to filter degenerate configurations (see vpPose::RANSAC_FILTER_FLAGS)
  vpPose::RANSAC_FILTER_FLAGS m_ransacFilterFlag;
  //! List of the RANSAC inliers (returned by getRansacInliers())
  std::vector<vpImagePoint> m_ransacInliers;
  //! List of the RANSAC outliers (returned by getRansacOutliers())
  std::vector<vpImagePoint> m_ransacOutliers;
  //! If true, use a parallel version of the RANSAC (set with setRansacParallel())
  bool m_ransacParallel;
  //! Number of threads used by the parallel RANSAC (set with setRansacParallelNbThreads())
  unsigned int m_ransacParallelNbThreads;
  //! Maximum reprojection error to consider a point as a RANSAC inlier (set with setRansacReprojectionError())
  double m_ransacReprojectionError;
  //! RANSAC inlier threshold (set with setRansacThreshold())
  double m_ransacThreshold;
  //! Train descriptors (descriptors of the keypoints
  // detected in the train images).
  cv::Mat m_trainDescriptors;
  //! Keypoints detected in the train images
  std::vector<cv::KeyPoint> m_trainKeyPoints;
  //! 3D object points corresponding to the train keypoints
  std::vector<cv::Point3f> m_trainPoints;
  //! Train 3D points expressed as vpPoint
  std::vector<vpPoint> m_trainVpPoints;
  //! If true, use the affine (ASIFT-like) detection (set with setUseAffineDetection())
  bool m_useAffineDetection;
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  //! If true, enable the cross-check option of the OpenCV brute-force matcher (OpenCV 2.4.x only)
  bool m_useBruteForceCrossCheck;
#endif
  //! If true, use m_ransacConsensusPercentage instead of m_nbRansacMinInlierCount
  bool m_useConsensusPercentage;
  //! If true, use knn matching instead of a simple match
  bool m_useKnn;
  //! If true, match from the train set to the query set (set with setUseMatchTrainToQuery())
  bool m_useMatchTrainToQuery;
  //! If true, estimate the pose with the VVS approach (set with setUseRansacVVS())
  bool m_useRansacVVS;
  //! If true, keep only single matches (set with setUseSingleMatchFilter())
  bool m_useSingleMatchFilter;
  //! Maximum number of features to keep (set with setMaxFeatures())
  int m_maxFeatures;
1299 
  //! Warp img with an affine transform defined by tilt and phi (ASIFT-like
  //! view simulation) and output the corresponding mask and inverse affine
  //! matrix Ai — NOTE(review): semantics inferred from the parameter names;
  //! confirm against the implementation.
  void affineSkew(double tilt, double phi, cv::Mat &img, cv::Mat &mask, cv::Mat &Ai);

  //! Compute the error of the matched keypoints reprojected with the
  //! estimated pose cMo_est, given the camera parameters cam.
  double computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
                                    const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est);

  //! Filter the raw matches according to the configured filter type (m_filterType).
  void filterMatches();

  //! Initialize the class members to their default values.
  void init();
  //! Initialize one detector from its name and store it in m_detectors.
  void initDetector(const std::string &detectorNames);
  //! Initialize all the detectors whose names are given.
  void initDetectors(const std::vector<std::string> &detectorNames);

  //! Initialize one extractor from its name and store it in m_extractors.
  void initExtractor(const std::string &extractorName);
  //! Initialize all the extractors whose names are given.
  void initExtractors(const std::vector<std::string> &extractorNames);

  //! Fill m_mapOfDetectorNames and m_mapOfDescriptorNames.
  void initFeatureNames();
1315 
1316  inline size_t myKeypointHash(const cv::KeyPoint &kp)
1317  {
1318  size_t _Val = 2166136261U, scale = 16777619U;
1319  Cv32suf u;
1320  u.f = kp.pt.x;
1321  _Val = (scale * _Val) ^ u.u;
1322  u.f = kp.pt.y;
1323  _Val = (scale * _Val) ^ u.u;
1324  u.f = kp.size;
1325  _Val = (scale * _Val) ^ u.u;
1326  // As the keypoint angle can be computed for certain type of keypoint only
1327  // when extracting the corresponding descriptor, the angle field is not
1328  // taking into account for the hash
1329  // u.f = kp.angle; _Val = (scale * _Val) ^ u.u;
1330  u.f = kp.response;
1331  _Val = (scale * _Val) ^ u.u;
1332  _Val = (scale * _Val) ^ ((size_t)kp.octave);
1333  _Val = (scale * _Val) ^ ((size_t)kp.class_id);
1334  return _Val;
1335  }
1336 
1337 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
1338  /*
1339  * Adapts a detector to detect points over multiple levels of a Gaussian
1340  * pyramid. Useful for detectors that are not inherently scaled.
1341  * From OpenCV 2.4.11 source code.
1342  */
  class PyramidAdaptedFeatureDetector : public cv::FeatureDetector
  {
  public:
    // maxLevel - The 0-based index of the last pyramid layer
    PyramidAdaptedFeatureDetector(const cv::Ptr<cv::FeatureDetector> &detector, int maxLevel = 2);

    // TODO implement read/write
    //! \return true when there is no wrapped detector — NOTE(review):
    //! implementation not visible here; confirm.
    virtual bool empty() const;

  protected:
    //! OpenCV 3.x detection entry point (implementation defined elsewhere).
    virtual void detect(cv::InputArray image, CV_OUT std::vector<cv::KeyPoint> &keypoints,
                        cv::InputArray mask = cv::noArray());
    //! OpenCV 2.4-style implementation hook kept from the original 2.4.11 code.
    virtual void detectImpl(const cv::Mat &image, std::vector<cv::KeyPoint> &keypoints,
                            const cv::Mat &mask = cv::Mat()) const;

    //! Wrapped (adapted) detector.
    cv::Ptr<cv::FeatureDetector> detector;
    //! 0-based index of the last pyramid layer.
    int maxLevel;
  };
1361 
1362  /*
1363  * A class filters a vector of keypoints.
1364  * Because now it is difficult to provide a convenient interface for all
1365  * usage scenarios of the keypoints filter class, it has only several needed
1366  * by now static methods.
1367  */
  class KeyPointsFilter
  {
  public:
    //! Default constructor (the class only exposes static filtering methods).
    KeyPointsFilter() {}

    /*
     * Remove keypoints within borderPixels of an image edge.
     */
    static void runByImageBorder(std::vector<cv::KeyPoint> &keypoints, cv::Size imageSize, int borderSize);
    /*
     * Remove keypoints of sizes out of range.
     */
    static void runByKeypointSize(std::vector<cv::KeyPoint> &keypoints, float minSize, float maxSize = FLT_MAX);
    /*
     * Remove keypoints from some image by mask for pixels of this image.
     */
    static void runByPixelsMask(std::vector<cv::KeyPoint> &keypoints, const cv::Mat &mask);
    /*
     * Remove duplicated keypoints.
     */
    static void removeDuplicated(std::vector<cv::KeyPoint> &keypoints);

    /*
     * Retain the specified number of the best keypoints (according to the
     * response)
     */
    static void retainBest(std::vector<cv::KeyPoint> &keypoints, int npoints);
  };
1396 
1397 #endif
1398 };
1399 
1400 #endif
1401 #endif
class that defines what is a Keypoint. This class provides all the basic elements to implement classe...
virtual unsigned int buildReference(const vpImage< unsigned char > &I)=0
virtual unsigned int matchPoint(const vpImage< unsigned char > &I)=0
virtual void display(const vpImage< unsigned char > &Iref, const vpImage< unsigned char > &Icurrent, unsigned int size=3)=0
Generic class defining intrinsic camera parameters.
Class to define RGB colors available for display functionalities.
Definition: vpColor.h:158
static const vpColor green
Definition: vpColor.h:220
error that can be emitted by ViSP classes.
Definition: vpException.h:72
@ badValue
Used to indicate that a value is not in the allowed range.
Definition: vpException.h:97
Implementation of an homogeneous matrix and operations on such kind of matrices.
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Definition: vpImagePoint.h:89
Class that allows keypoints detection (and descriptors extraction) and matching thanks to OpenCV libr...
Definition: vpKeyPoint.h:223
double getDetectionTime() const
Definition: vpKeyPoint.h:482
std::vector< vpImagePoint > getRansacInliers() const
Definition: vpKeyPoint.h:669
void setMatchingFactorThreshold(const double factor)
Definition: vpKeyPoint.h:973
void setRansacConsensusPercentage(double percentage)
Definition: vpKeyPoint.h:1002
cv::Ptr< cv::DescriptorMatcher > getMatcher() const
Definition: vpKeyPoint.h:608
vpDetectionMethodType
Definition: vpKeyPoint.h:241
void setRansacParallel(bool parallel)
Definition: vpKeyPoint.h:1037
void setRansacReprojectionError(double reprojectionError)
Definition: vpKeyPoint.h:1054
void setExtractor(const std::string &extractorName)
Definition: vpKeyPoint.h:848
void setUseSingleMatchFilter(bool singleMatchFilter)
Definition: vpKeyPoint.h:1156
void setFilterMatchingType(const vpFilterMatchingType &filterType)
Definition: vpKeyPoint.h:938
void setRansacParallelNbThreads(unsigned int nthreads)
Definition: vpKeyPoint.h:1045
double getExtractionTime() const
Definition: vpKeyPoint.h:538
void setUseRansacVVS(bool ransacVVS)
Definition: vpKeyPoint.h:1148
void setDetectors(const std::vector< std::string > &detectorNames)
Definition: vpKeyPoint.h:821
void setExtractors(const std::vector< std::string > &extractorNames)
Definition: vpKeyPoint.h:880
cv::Mat getTrainDescriptors() const
Definition: vpKeyPoint.h:684
vpFeatureDetectorType
Definition: vpKeyPoint.h:258
@ DETECTOR_BRISK
Definition: vpKeyPoint.h:263
@ DETECTOR_AKAZE
Definition: vpKeyPoint.h:278
@ DETECTOR_AGAST
Definition: vpKeyPoint.h:279
@ DETECTOR_SimpleBlob
Definition: vpKeyPoint.h:265
void setExtractor(const vpFeatureDescriptorType &extractorType)
Definition: vpKeyPoint.h:834
void setImageFormat(const vpImageFormatType &imageFormat)
Definition: vpKeyPoint.h:893
void setRansacThreshold(double threshold)
Definition: vpKeyPoint.h:1085
void setRansacMinInlierCount(int minCount)
Definition: vpKeyPoint.h:1070
void setRansacFilterFlag(const vpPose::RANSAC_FILTER_FLAGS &flag)
Definition: vpKeyPoint.h:1015
double getPoseTime() const
Definition: vpKeyPoint.h:651
std::map< vpFeatureDetectorType, std::string > getDetectorNames() const
Definition: vpKeyPoint.h:531
unsigned int getNbImages() const
Definition: vpKeyPoint.h:641
double getMatchingTime() const
Definition: vpKeyPoint.h:601
std::vector< vpImagePoint > getRansacOutliers() const
Definition: vpKeyPoint.h:676
vpFeatureDescriptorType
Definition: vpKeyPoint.h:289
@ DESCRIPTOR_AKAZE
Definition: vpKeyPoint.h:306
@ DESCRIPTOR_ORB
Definition: vpKeyPoint.h:291
@ DESCRIPTOR_KAZE
Definition: vpKeyPoint.h:305
@ DESCRIPTOR_SURF
Definition: vpKeyPoint.h:302
@ DESCRIPTOR_BRISK
Definition: vpKeyPoint.h:292
@ DESCRIPTOR_SIFT
Definition: vpKeyPoint.h:299
std::vector< cv::DMatch > getMatches() const
Definition: vpKeyPoint.h:616
std::map< vpFeatureDescriptorType, std::string > getExtractorNames() const
Definition: vpKeyPoint.h:587
void setMatcher(const std::string &matcherName)
Definition: vpKeyPoint.h:910
vpImageFormatType getImageFormat() const
Definition: vpKeyPoint.h:594
cv::Ptr< cv::DescriptorExtractor > getExtractor(const std::string &name) const
Definition: vpKeyPoint.h:573
void setUseAffineDetection(bool useAffine)
Definition: vpKeyPoint.h:1101
void setUseRansacConsensusPercentage(bool usePercentage)
Definition: vpKeyPoint.h:1139
void setMatchingRatioThreshold(double ratio)
Definition: vpKeyPoint.h:987
@ ppmImageFormat
Definition: vpKeyPoint.h:253
@ jpgImageFormat
Definition: vpKeyPoint.h:251
@ pngImageFormat
Definition: vpKeyPoint.h:252
vpFilterMatchingType
Definition: vpKeyPoint.h:227
void setCovarianceComputation(const bool &flag)
Definition: vpKeyPoint.h:750
void setDetector(const vpFeatureDetectorType &detectorType)
Definition: vpKeyPoint.h:776
cv::Ptr< cv::DescriptorExtractor > getExtractor(const vpFeatureDescriptorType &type) const
Definition: vpKeyPoint.h:547
void setUseMatchTrainToQuery(bool useMatchTrainToQuery)
Definition: vpKeyPoint.h:1130
vpMatrix getCovarianceMatrix() const
Definition: vpKeyPoint.h:454
void setDetectionMethod(const vpDetectionMethodType &method)
Definition: vpKeyPoint.h:769
cv::Ptr< cv::FeatureDetector > getDetector(const std::string &name) const
Definition: vpKeyPoint.h:517
void setDetector(const std::string &detectorName)
Definition: vpKeyPoint.h:789
void setMaxFeatures(int maxFeatures)
Definition: vpKeyPoint.h:921
std::vector< std::pair< cv::KeyPoint, cv::KeyPoint > > getMatchQueryToTrainKeyPoints() const
Definition: vpKeyPoint.h:625
cv::Ptr< cv::FeatureDetector > getDetector(const vpFeatureDetectorType &type) const
Definition: vpKeyPoint.h:491
void setRansacIteration(int nbIter)
Definition: vpKeyPoint.h:1023
cv::Mat getQueryDescriptors() const
Definition: vpKeyPoint.h:659
Implementation of a matrix and operations on matrices.
Definition: vpMatrix.h:154
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
Definition: vpPoint.h:82
RANSAC_FILTER_FLAGS
Definition: vpPose.h:112
Defines a rectangle in the plane.
Definition: vpRect.h:80