Visual Servoing Platform version 3.0.1
vpKeyPoint.cpp
1 /****************************************************************************
2  *
3  * This file is part of the ViSP software.
4  * Copyright (C) 2005 - 2017 by Inria. All rights reserved.
5  *
6  * This software is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU General Public License
8  * ("GPL") version 2 as published by the Free Software Foundation.
9  * See the file LICENSE.txt at the root directory of this source
10  * distribution for additional information about the GNU GPL.
11  *
12  * For using ViSP with software that can not be combined with the GNU
13  * GPL, please contact Inria about acquiring a ViSP Professional
14  * Edition License.
15  *
16  * See http://visp.inria.fr for more information.
17  *
18  * This software was developed at:
19  * Inria Rennes - Bretagne Atlantique
20  * Campus Universitaire de Beaulieu
21  * 35042 Rennes Cedex
22  * France
23  *
24  * If you have questions regarding the use of this file, please contact
25  * Inria at visp@inria.fr
26  *
27  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29  *
30  * Description:
31  * Key point functionalities.
32  *
33  * Authors:
34  * Souriya Trinh
35  *
36  *****************************************************************************/
37 
38 #include <limits>
39 #include <iomanip>
40 #include <stdint.h> //uint32_t ; also available with VS2010 and later (_MSC_VER >= 1600)
41 
42 #include <visp3/vision/vpKeyPoint.h>
43 #include <visp3/core/vpIoTools.h>
44 
45 #if (VISP_HAVE_OPENCV_VERSION >= 0x020101)
46 
47 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
48 # include <opencv2/calib3d/calib3d.hpp>
49 #endif
50 
51 //Detect endianness of the host machine
52 //Reference: http://www.boost.org/doc/libs/1_36_0/boost/detail/endian.hpp
53 #if defined (__GLIBC__)
54 # include <endian.h>
55 # if (__BYTE_ORDER == __LITTLE_ENDIAN)
56 # define VISP_LITTLE_ENDIAN
57 # elif (__BYTE_ORDER == __BIG_ENDIAN)
58 # define VISP_BIG_ENDIAN
59 # elif (__BYTE_ORDER == __PDP_ENDIAN)
60  //Currently not supported when reading / writing binary files
61 # define VISP_PDP_ENDIAN
62 # else
63 # error Unknown machine endianness detected.
64 # endif
65 #elif defined(_BIG_ENDIAN) && !defined(_LITTLE_ENDIAN) || defined(__BIG_ENDIAN__) && !defined(__LITTLE_ENDIAN__)
66 # define VISP_BIG_ENDIAN
67 #elif defined(_LITTLE_ENDIAN) && !defined(_BIG_ENDIAN) || defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__)
68 # define VISP_LITTLE_ENDIAN
69 #elif defined(__sparc) || defined(__sparc__) \
70  || defined(_POWER) || defined(__powerpc__) \
71  || defined(__ppc__) || defined(__hpux) \
72  || defined(_MIPSEB) || defined(__s390__)
74 
75 # define VISP_BIG_ENDIAN
76 #elif defined(__i386__) || defined(__alpha__) \
77  || defined(__ia64) || defined(__ia64__) \
78  || defined(_M_IX86) || defined(_M_IA64) \
79  || defined(_M_ALPHA) || defined(__amd64) \
80  || defined(__amd64__) || defined(_M_AMD64) \
81  || defined(__x86_64) || defined(__x86_64__) \
82  || defined(_M_X64)
83 
84 # define VISP_LITTLE_ENDIAN
85 #else
86 # error Cannot detect host machine endianness.
87 #endif
88 
89 
90 namespace {
91  //Specific Type transformation functions
92  /*!
93     Convert a list of cv::DMatch to a cv::DMatch (extract the first cv::DMatch, the nearest neighbor).
94 
95     \param knnMatches : List of cv::DMatch.
96     \return The nearest neighbor.
97   */
98  inline cv::DMatch knnToDMatch(const std::vector<cv::DMatch> &knnMatches) {
99  if(knnMatches.size() > 0) {
100  return knnMatches[0];
101  }
102 
103  return cv::DMatch();
104  }
105 
106  /*!
107     Convert a pair of a cv::KeyPoint and a cv::Point3f to a vpImagePoint (extract the keypoint image coordinates).
108 
109     \param pair : Pair made of the detected cv::KeyPoint and its corresponding 3D cv::Point3f.
110     \return The keypoint image coordinates as a vpImagePoint.
111   */
112  inline vpImagePoint matchRansacToVpImage(const std::pair<cv::KeyPoint, cv::Point3f> &pair) {
113  return vpImagePoint(pair.first.pt.y, pair.first.pt.x);
114  }
115 
116  //This function is kept to show how to detect a big endian host at runtime
117  //bool isBigEndian() {
118  // union {
119  // uint32_t i;
120  // char c[4];
121  // } bint = { 0x01020304 };
122  //
123  // return bint.c[0] == 1;
124  //}
125 
126 #ifdef VISP_BIG_ENDIAN
127  //Swap the two bytes of a 16-bit value (reverse the byte order)
128  uint16_t swap16bits(const uint16_t val) {
129  return ( ((val >> 8) & 0x00FF) | ((val << 8) & 0xFF00) );
130  }
131 
132  //Reverse the byte order of a 32-bit value (swap the four bytes)
133  uint32_t swap32bits(const uint32_t val) {
134  return ( ((val >> 24) & 0x000000FF) | ((val >> 8) & 0x0000FF00) |
135  ((val << 8) & 0x00FF0000) | ((val << 24) & 0xFF000000) );
136  }
137 
138  //Swap the bytes of a float; the union is needed to access the raw IEEE 754 byte representation
139  float swapFloat(const float f) {
140  union {
141  float f;
142  unsigned char b[4];
143  } dat1, dat2;
144 
145  dat1.f = f;
146  dat2.b[0] = dat1.b[3];
147  dat2.b[1] = dat1.b[2];
148  dat2.b[2] = dat1.b[1];
149  dat2.b[3] = dat1.b[0];
150  return dat2.f;
151  }
152 
153  //Swap the bytes of a double; the union is needed to access the raw IEEE 754 byte representation
154  double swapDouble(const double d) {
155  union {
156  double d;
157  unsigned char b[8];
158  } dat1, dat2;
159 
160  dat1.d = d;
161  dat2.b[0] = dat1.b[7];
162  dat2.b[1] = dat1.b[6];
163  dat2.b[2] = dat1.b[5];
164  dat2.b[3] = dat1.b[4];
165  dat2.b[4] = dat1.b[3];
166  dat2.b[5] = dat1.b[2];
167  dat2.b[6] = dat1.b[1];
168  dat2.b[7] = dat1.b[0];
169  return dat2.d;
170  }
171 #endif
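 //Illustrative self-test sketch (not part of the original file), in the spirit of the
 //commented isBigEndian() example above: each swap helper reverses the byte order, so
 //applying it twice must return the input. Assumes <cassert> is included and a big
 //endian host (the helpers are only compiled in that case):
 //void testByteSwaps() {
 //  uint16_t u16 = 0x1234;
 //  assert(swap16bits(u16) == 0x3412 && swap16bits(swap16bits(u16)) == u16);
 //  uint32_t u32 = 0x01020304u;
 //  assert(swap32bits(u32) == 0x04030201u && swap32bits(swap32bits(u32)) == u32);
 //  float f = 1.5f; //exactly representable, so the double swap is lossless
 //  assert(swapFloat(swapFloat(f)) == f);
 //}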
172 
173  //Read an unsigned short int stored in little endian
174  void readBinaryUShortLE(std::ifstream &file, unsigned short &ushort_value) {
175  //Read
176  file.read((char *)(&ushort_value), sizeof(ushort_value));
177 
178  #ifdef VISP_BIG_ENDIAN
179  //Swap bytes order from little endian to big endian
180  ushort_value = swap16bits(ushort_value);
181  #endif
182  }
183 
184  //Read a short int stored in little endian
185  void readBinaryShortLE(std::ifstream &file, short &short_value) {
186  //Read
187  file.read((char *)(&short_value), sizeof(short_value));
188 
189  #ifdef VISP_BIG_ENDIAN
190  //Swap bytes order from little endian to big endian
191  short_value = (short) swap16bits((uint16_t) short_value);
192  #endif
193  }
194 
195 // //Read an unsigned int stored in little endian
196 // void readBinaryUIntLE(std::ifstream &file, unsigned int &uint_value) {
197 // //Read
198 // file.read((char *)(&uint_value), sizeof(uint_value));
199 
200 // #ifdef VISP_BIG_ENDIAN
201 // //Swap bytes order from little endian to big endian
202 // if(sizeof(uint_value) == 4) {
203 // uint_value = swap32bits(uint_value);
204 // } else {
205 // uint_value = swap16bits(uint_value);
206 // }
207 // #endif
208 // }
209 
210  //Read an int stored in little endian
211  void readBinaryIntLE(std::ifstream &file, int &int_value) {
212  //Read
213  file.read((char *)(&int_value), sizeof(int_value));
214 
215  #ifdef VISP_BIG_ENDIAN
216  //Swap bytes order from little endian to big endian
217  if(sizeof(int_value) == 4) {
218  int_value = (int) swap32bits((uint32_t) int_value);
219  } else {
220  int_value = swap16bits((uint16_t) int_value);
221  }
222  #endif
223  }
224 
225  //Read a float stored in little endian
226  void readBinaryFloatLE(std::ifstream &file, float &float_value) {
227  //Read
228  file.read((char *)(&float_value), sizeof(float_value));
229 
230  #ifdef VISP_BIG_ENDIAN
231  //Swap bytes order from little endian to big endian
232  float_value = swapFloat(float_value);
233  #endif
234  }
235 
236  //Read a double stored in little endian
237  void readBinaryDoubleLE(std::ifstream &file, double &double_value) {
238  //Read
239  file.read((char *)(&double_value), sizeof(double_value));
240 
241  #ifdef VISP_BIG_ENDIAN
242  //Swap bytes order from little endian to big endian
243  double_value = swapDouble(double_value);
244  #endif
245  }
246 
247  //Write an unsigned short in little endian
248  void writeBinaryUShortLE(std::ofstream &file, const unsigned short ushort_value) {
249  #ifdef VISP_BIG_ENDIAN
250  //Swap bytes order to little endian
251  uint16_t swap_ushort = swap16bits(ushort_value);
252  file.write((char *)(&swap_ushort), sizeof(swap_ushort));
253  #else
254  file.write((char *)(&ushort_value), sizeof(ushort_value));
255  #endif
256  }
257 
258  //Write a short in little endian
259  void writeBinaryShortLE(std::ofstream &file, const short short_value) {
260  #ifdef VISP_BIG_ENDIAN
261  //Swap bytes order to little endian
262  uint16_t swap_short = swap16bits((uint16_t) short_value);
263  file.write((char *)(&swap_short), sizeof(swap_short));
264  #else
265  file.write((char *)(&short_value), sizeof(short_value));
266  #endif
267  }
268 
269  //Write an unsigned int in little endian
270 // void writeBinaryUIntLE(std::ofstream &file, const unsigned int uint_value) {
271 // #ifdef VISP_BIG_ENDIAN
272 // //Swap bytes order to little endian
273 // //More info on data type: http://en.cppreference.com/w/cpp/language/types
274 // if(sizeof(uint_value) == 4) {
275 // uint32_t swap_uint = swap32bits(uint_value);
276 // file.write((char *)(&swap_uint), sizeof(swap_uint));
277 // } else {
278 // uint16_t swap_uint = swap16bits(uint_value);
279 // file.write((char *)(&swap_uint), sizeof(swap_uint));
280 // }
281 // #else
282 // file.write((char *)(&uint_value), sizeof(uint_value));
283 // #endif
284 // }
285 
286  //Write an int in little endian
287  void writeBinaryIntLE(std::ofstream &file, const int int_value) {
288  #ifdef VISP_BIG_ENDIAN
289  //Swap bytes order to little endian
290  //More info on data type: http://en.cppreference.com/w/cpp/language/types
291  if(sizeof(int_value) == 4) {
292  uint32_t swap_int = swap32bits((uint32_t) int_value);
293  file.write((char *)(&swap_int), sizeof(swap_int));
294  } else {
295  uint16_t swap_int = swap16bits((uint16_t) int_value);
296  file.write((char *)(&swap_int), sizeof(swap_int));
297  }
298  #else
299  file.write((char *)(&int_value), sizeof(int_value));
300  #endif
301  }
302 
303  //Write a float in little endian
304  void writeBinaryFloatLE(std::ofstream &file, const float float_value) {
305  #ifdef VISP_BIG_ENDIAN
306  //Swap bytes order to little endian
307  float swap_float = swapFloat(float_value);
308  file.write((char *)(&swap_float), sizeof(swap_float));
309  #else
310  file.write((char *)(&float_value), sizeof(float_value));
311  #endif
312  }
313 
314  //Write a double in little endian
315  void writeBinaryDoubleLE(std::ofstream &file, const double double_value) {
316  #ifdef VISP_BIG_ENDIAN
317  //Swap bytes order to little endian
318  double swap_double = swapDouble(double_value);
319  file.write((char *)(&swap_double), sizeof(swap_double));
320  #else
321  file.write((char *)(&double_value), sizeof(double_value));
322  #endif
323  }
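 //Illustrative round-trip sketch (not part of the original file): data is always
 //stored little endian on disk, so a file written on one host reads back identically
 //on any other. Assumes <fstream> is available and the streams open successfully:
 //void roundTripLE() {
 //  {
 //    std::ofstream ofs("data.bin", std::ios::binary);
 //    writeBinaryIntLE(ofs, 42);
 //    writeBinaryDoubleLE(ofs, 3.5);
 //  }
 //  std::ifstream ifs("data.bin", std::ios::binary);
 //  int i = 0; double d = 0.0;
 //  readBinaryIntLE(ifs, i);    //i == 42, regardless of the host endianness
 //  readBinaryDoubleLE(ifs, d); //d == 3.5
 //}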
324 }
325 
334 vpKeyPoint::vpKeyPoint(const vpFeatureDetectorType &detectorType, const vpFeatureDescriptorType &descriptorType,
335  const std::string &matcherName, const vpFilterMatchingType &filterType)
336  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
337  m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(),
338  m_detectors(), m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
339  m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
340  m_matcher(), m_matcherName(matcherName),
341  m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85), m_matchingTime(0.),
342  m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100), m_objectFilteredPoints(),
343  m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
344  m_ransacConsensusPercentage(20.0), m_ransacInliers(), m_ransacOutliers(), m_ransacReprojectionError(6.0),
345  m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(),
346  m_trainVpPoints(), m_useAffineDetection(false),
347 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
348  m_useBruteForceCrossCheck(true),
349 #endif
350  m_useConsensusPercentage(false),
351  m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true), m_useSingleMatchFilter(true)
352 {
353  initFeatureNames();
354 
355  m_detectorNames.push_back(m_mapOfDetectorNames[detectorType]);
356  m_extractorNames.push_back(m_mapOfDescriptorNames[descriptorType]);
357 
358  init();
359 }
360 
369 vpKeyPoint::vpKeyPoint(const std::string &detectorName, const std::string &extractorName,
370  const std::string &matcherName, const vpFilterMatchingType &filterType)
371  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
372  m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(),
373  m_detectors(), m_extractionTime(0.), m_extractorNames(), m_extractors(), m_filteredMatches(), m_filterType(filterType),
374  m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
375  m_matcher(), m_matcherName(matcherName),
376  m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85), m_matchingTime(0.),
377  m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100), m_objectFilteredPoints(),
378  m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
379  m_ransacConsensusPercentage(20.0), m_ransacInliers(), m_ransacOutliers(), m_ransacReprojectionError(6.0),
380  m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(),
381  m_trainVpPoints(), m_useAffineDetection(false),
382 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
383  m_useBruteForceCrossCheck(true),
384 #endif
385  m_useConsensusPercentage(false),
386  m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true), m_useSingleMatchFilter(true)
387 {
388  initFeatureNames();
389 
390  m_detectorNames.push_back(detectorName);
391  m_extractorNames.push_back(extractorName);
392 
393  init();
394 }
395 
404 vpKeyPoint::vpKeyPoint(const std::vector<std::string> &detectorNames, const std::vector<std::string> &extractorNames,
405  const std::string &matcherName, const vpFilterMatchingType &filterType)
406  : m_computeCovariance(false), m_covarianceMatrix(), m_currentImageId(0), m_detectionMethod(detectionScore),
407  m_detectionScore(0.15), m_detectionThreshold(100.0), m_detectionTime(0.), m_detectorNames(detectorNames),
408  m_detectors(), m_extractionTime(0.), m_extractorNames(extractorNames), m_extractors(), m_filteredMatches(),
409  m_filterType(filterType), m_imageFormat(jpgImageFormat), m_knnMatches(), m_mapOfImageId(), m_mapOfImages(),
410  m_matcher(),
411  m_matcherName(matcherName), m_matches(), m_matchingFactorThreshold(2.0), m_matchingRatioThreshold(0.85), m_matchingTime(0.),
412  m_matchRansacKeyPointsToPoints(), m_nbRansacIterations(200), m_nbRansacMinInlierCount(100), m_objectFilteredPoints(),
413  m_poseTime(0.), m_queryDescriptors(), m_queryFilteredKeyPoints(), m_queryKeyPoints(),
414  m_ransacConsensusPercentage(20.0), m_ransacInliers(), m_ransacOutliers(), m_ransacReprojectionError(6.0),
415  m_ransacThreshold(0.01), m_trainDescriptors(), m_trainKeyPoints(), m_trainPoints(),
416  m_trainVpPoints(), m_useAffineDetection(false),
417 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
418  m_useBruteForceCrossCheck(true),
419 #endif
420  m_useConsensusPercentage(false),
421  m_useKnn(false), m_useMatchTrainToQuery(false), m_useRansacVVS(true), m_useSingleMatchFilter(true)
422 {
423  initFeatureNames();
424  init();
425 }
426 
435 void vpKeyPoint::affineSkew(double tilt, double phi, cv::Mat& img,
436  cv::Mat& mask, cv::Mat& Ai) {
437  int h = img.rows;
438  int w = img.cols;
439 
440  mask = cv::Mat(h, w, CV_8UC1, cv::Scalar(255));
441 
442  cv::Mat A = cv::Mat::eye(2, 3, CV_32F);
443 
444  //if (phi != 0.0) {
445  if (std::fabs(phi) > std::numeric_limits<double>::epsilon()) {
446  phi *= M_PI / 180.;
447  double s = sin(phi);
448  double c = cos(phi);
449 
450  A = (cv::Mat_<float>(2, 2) << c, -s, s, c);
451 
452  cv::Mat corners = (cv::Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
453  cv::Mat tcorners = corners * A.t();
454  cv::Mat tcorners_x, tcorners_y;
455  tcorners.col(0).copyTo(tcorners_x);
456  tcorners.col(1).copyTo(tcorners_y);
457  std::vector<cv::Mat> channels;
458  channels.push_back(tcorners_x);
459  channels.push_back(tcorners_y);
460  cv::merge(channels, tcorners);
461 
462  cv::Rect rect = cv::boundingRect(tcorners);
463  A = (cv::Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
464 
465  cv::warpAffine(img, img, A, cv::Size(rect.width, rect.height),
466  cv::INTER_LINEAR, cv::BORDER_REPLICATE);
467  }
468  //if (tilt != 1.0) {
469  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon()) {
470  double s = 0.8 * sqrt(tilt * tilt - 1);
471  cv::GaussianBlur(img, img, cv::Size(0, 0), s, 0.01);
472  cv::resize(img, img, cv::Size(0, 0), 1.0 / tilt, 1.0, cv::INTER_NEAREST);
473  A.row(0) = A.row(0) / tilt;
474  }
475  //if (tilt != 1.0 || phi != 0.0) {
476  if (std::fabs(tilt - 1.0) > std::numeric_limits<double>::epsilon() || std::fabs(phi) > std::numeric_limits<double>::epsilon() ) {
477  h = img.rows;
478  w = img.cols;
479  cv::warpAffine(mask, mask, A, cv::Size(w, h), cv::INTER_NEAREST);
480  }
481  cv::invertAffineTransform(A, Ai);
482 }
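 //Illustrative sketch (not part of the original file): affineSkew() simulates one
 //viewpoint per (tilt, phi) pair for the ASIFT-style detection; Ai is the 2x3 inverse
 //transform (CV_32F here), so a keypoint detected in the warped image can be mapped
 //back to the original image like this:
 //cv::KeyPoint warpBack(cv::KeyPoint kp, const cv::Mat &Ai) {
 //  cv::Mat pt = Ai * cv::Mat(cv::Point3f(kp.pt.x, kp.pt.y, 1.f)); //2x3 * 3x1 = 2x1
 //  kp.pt.x = pt.at<float>(0, 0);
 //  kp.pt.y = pt.at<float>(1, 0);
 //  return kp;
 //}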
483 
490 unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I) {
491  return buildReference(I, vpRect());
492 }
493 
503 unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I,
504  const vpImagePoint &iP,
505  const unsigned int height, const unsigned int width) {
506 
507  return buildReference(I, vpRect(iP, width, height));
508 }
509 
517 unsigned int vpKeyPoint::buildReference(const vpImage<unsigned char> &I,
518  const vpRect &rectangle) {
519  //Reset variables used when dealing with 3D models
520  //Since no 3D point list is passed, we don't need these variables
521  m_trainPoints.clear();
522  m_mapOfImageId.clear();
523  m_mapOfImages.clear();
524  m_currentImageId = 1;
525 
526  if(m_useAffineDetection) {
527  std::vector<std::vector<cv::KeyPoint> > listOfTrainKeyPoints;
528  std::vector<cv::Mat> listOfTrainDescriptors;
529 
530  //Detect keypoints and extract descriptors on multiple images
531  detectExtractAffine(I, listOfTrainKeyPoints, listOfTrainDescriptors);
532 
533  //Flatten the different train lists
534  m_trainKeyPoints.clear();
535  for(std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfTrainKeyPoints.begin();
536  it != listOfTrainKeyPoints.end(); ++it) {
537  m_trainKeyPoints.insert(m_trainKeyPoints.end(), it->begin(), it->end());
538  }
539 
540  bool first = true;
541  for(std::vector<cv::Mat>::const_iterator it = listOfTrainDescriptors.begin(); it != listOfTrainDescriptors.end(); ++it) {
542  if(first) {
543  first = false;
544  it->copyTo(m_trainDescriptors);
545  } else {
546  m_trainDescriptors.push_back(*it);
547  }
548  }
549  } else {
550  detect(I, m_trainKeyPoints, m_detectionTime, rectangle);
551  extract(I, m_trainKeyPoints, m_trainDescriptors, m_extractionTime);
552  }
553 
554  //Save the correspondence between the keypoint class_id and the training image_id in a map
555  //Used to display the matching with all the training images
556  for(std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
557  m_mapOfImageId[it->class_id] = m_currentImageId;
558  }
559 
560  //Save the image in a map at a specific image_id
561  m_mapOfImages[m_currentImageId] = I;
562 
563  //Convert OpenCV type to ViSP type for compatibility
565 
566  _reference_computed = true;
567 
568  //Add train descriptors in matcher object
569  m_matcher->clear();
570  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
571 
572  return static_cast<unsigned int>(m_trainKeyPoints.size());
573 }
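 //Illustrative usage sketch (not part of the original file), assuming an image I is
 //already loaded; the detector/extractor/matcher names follow the string constructor
 //above, and the default filter type is used:
 //vpKeyPoint keypoint("ORB", "ORB", "BruteForce-Hamming");
 //unsigned int nbRefWhole = keypoint.buildReference(I); //whole image
 //unsigned int nbRefRoi = keypoint.buildReference(I, vpRect(vpImagePoint(50, 50), 200, 100)); //ROI only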
574 
584 void vpKeyPoint::buildReference(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &trainKeyPoints,
585  std::vector<cv::Point3f> &points3f, const bool append, const int class_id) {
586  cv::Mat trainDescriptors;
587  //Copy the input list of keypoints
588  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
589 
590  extract(I, trainKeyPoints, trainDescriptors, m_extractionTime, &points3f);
591 
592  if(trainKeyPoints.size() != trainKeyPoints_tmp.size()) {
593  //Keypoints have been removed
594  //Store the hash of a keypoint as the key and the index of the keypoint as the value
595  std::map<size_t, size_t> mapOfKeypointHashes;
596  size_t cpt = 0;
597  for(std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it, cpt++) {
598  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
599  }
600 
601  std::vector<cv::Point3f> trainPoints_tmp;
602  for(std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
603  if(mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
604  trainPoints_tmp.push_back(points3f[mapOfKeypointHashes[myKeypointHash(*it)]]);
605  }
606  }
607 
608  //Copy trainPoints_tmp to points3f
609  points3f = trainPoints_tmp;
610  }
611 
612  buildReference(I, trainKeyPoints, trainDescriptors, points3f, append, class_id);
613 }
614 
625 void vpKeyPoint::buildReference(const vpImage<unsigned char> &I, const std::vector<cv::KeyPoint> &trainKeyPoints,
626  const cv::Mat &trainDescriptors, const std::vector<cv::Point3f> &points3f,
627  const bool append, const int class_id) {
628  if(!append) {
629  m_currentImageId = 0;
630  m_mapOfImageId.clear();
631  m_mapOfImages.clear();
632  this->m_trainKeyPoints.clear();
633  this->m_trainPoints.clear();
634  }
635 
636  m_currentImageId++;
637 
638  std::vector<cv::KeyPoint> trainKeyPoints_tmp = trainKeyPoints;
639  //Set class_id if != -1
640  if(class_id != -1) {
641  for(std::vector<cv::KeyPoint>::iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
642  it->class_id = class_id;
643  }
644  }
645 
646  //Save the correspondence between the keypoint class_id and the training image_id in a map
647  //Used to display the matching with all the training images
648  for(std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints_tmp.begin(); it != trainKeyPoints_tmp.end(); ++it) {
649  m_mapOfImageId[it->class_id] = m_currentImageId;
650  }
651 
652  //Save the image in a map at a specific image_id
653  m_mapOfImages[m_currentImageId] = I;
654 
655  //Append reference lists
656  this->m_trainKeyPoints.insert(this->m_trainKeyPoints.end(), trainKeyPoints_tmp.begin(), trainKeyPoints_tmp.end());
657  if(!append) {
658  trainDescriptors.copyTo(this->m_trainDescriptors);
659  } else {
660  this->m_trainDescriptors.push_back(trainDescriptors);
661  }
662  this->m_trainPoints.insert(this->m_trainPoints.end(), points3f.begin(), points3f.end());
663 
664 
665  //Convert OpenCV type to ViSP type for compatibility
666  vpConvert::convertFromOpenCV(this->m_trainKeyPoints, referenceImagePointsList);
667  vpConvert::convertFromOpenCV(this->m_trainPoints, m_trainVpPoints);
668 
669  //Add train descriptors in matcher object
670  m_matcher->clear();
671  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
672 
673  _reference_computed = true;
674 }
675 
688 void vpKeyPoint::compute3D(const cv::KeyPoint &candidate, const std::vector<vpPoint> &roi,
689  const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, cv::Point3f &point) {
690  /* compute plane equation */
691  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
692  vpPoint pts[3];
693  pts[0] = *it_roi;
694  ++it_roi;
695  pts[1] = *it_roi;
696  ++it_roi;
697  pts[2] = *it_roi;
698  vpPlane Po(pts[0], pts[1], pts[2]);
699  double xc = 0.0, yc = 0.0;
700  vpPixelMeterConversion::convertPoint(cam, candidate.pt.x, candidate.pt.y, xc, yc);
701  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
702  double X = xc * Z;
703  double Y = yc * Z;
704  vpColVector point_cam(4);
705  point_cam[0] = X;
706  point_cam[1] = Y;
707  point_cam[2] = Z;
708  point_cam[3] = 1;
709  vpColVector point_obj(4);
710  point_obj = cMo.inverse() * point_cam;
711  point = cv::Point3f((float) point_obj[0], (float) point_obj[1], (float) point_obj[2]);
712 }
713 
726 void vpKeyPoint::compute3D(const vpImagePoint &candidate, const std::vector<vpPoint> &roi,
727  const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo, vpPoint &point) {
728  /* compute plane equation */
729  std::vector<vpPoint>::const_iterator it_roi = roi.begin();
730  vpPoint pts[3];
731  pts[0] = *it_roi;
732  ++it_roi;
733  pts[1] = *it_roi;
734  ++it_roi;
735  pts[2] = *it_roi;
736  vpPlane Po(pts[0], pts[1], pts[2]);
737  double xc = 0.0, yc = 0.0;
738  vpPixelMeterConversion::convertPoint(cam, candidate, xc, yc);
739  double Z = -Po.getD() / (Po.getA() * xc + Po.getB() * yc + Po.getC());
740  double X = xc * Z;
741  double Y = yc * Z;
742  vpColVector point_cam(4);
743  point_cam[0] = X;
744  point_cam[1] = Y;
745  point_cam[2] = Z;
746  point_cam[3] = 1;
747  vpColVector point_obj(4);
748  point_obj = cMo.inverse() * point_cam;
749  point.setWorldCoordinates(point_obj);
750 }
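 //Derivation note (added for clarity) for both compute3D() overloads above: the pixel
 //is first converted to normalized coordinates (xc, yc), which define the viewing ray
 //(X, Y, Z) = Z * (xc, yc, 1) in the camera frame. Substituting into the plane
 //equation A*X + B*Y + C*Z + D = 0 gives Z * (A*xc + B*yc + C) = -D, hence
 //Z = -D / (A*xc + B*yc + C), the formula used above; the 3D point is then moved
 //from the camera frame to the object frame with cMo.inverse().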
751 
765 void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
766  std::vector<cv::KeyPoint> &candidates, const std::vector<vpPolygon> &polygons,
767  const std::vector<std::vector<vpPoint> > &roisPt, std::vector<cv::Point3f> &points, cv::Mat *descriptors) {
768 
769  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
770  candidates.clear();
771  points.clear();
772  vpImagePoint imPt;
773  cv::Point3f pt;
774  cv::Mat desc;
775 
776  std::vector<std::pair<cv::KeyPoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
777  for(size_t i = 0; i < candidatesToCheck.size(); i++) {
778  pairOfCandidatesToCheck[i] = std::pair<cv::KeyPoint, size_t>(candidatesToCheck[i], i);
779  }
780 
781  size_t cpt1 = 0;
782  std::vector<vpPolygon> polygons_tmp = polygons;
783  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
784  std::vector<std::pair<cv::KeyPoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
785 
786  while(it2 != pairOfCandidatesToCheck.end()) {
787  imPt.set_ij(it2->first.pt.y, it2->first.pt.x);
788  if (it1->isInside(imPt)) {
789  candidates.push_back(it2->first);
790  vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
791  points.push_back(pt);
792 
793  if(descriptors != NULL) {
794  desc.push_back(descriptors->row((int) it2->second));
795  }
796 
797  //Remove candidate keypoint which is located on the current polygon
798  it2 = pairOfCandidatesToCheck.erase(it2);
799  } else {
800  ++it2;
801  }
802  }
803  }
804 
805  if(descriptors != NULL) {
806  desc.copyTo(*descriptors);
807  }
808 }
809 
823 void vpKeyPoint::compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
824  std::vector<vpImagePoint> &candidates, const std::vector<vpPolygon> &polygons,
825  const std::vector<std::vector<vpPoint> > &roisPt, std::vector<vpPoint> &points, cv::Mat *descriptors) {
826 
827  std::vector<vpImagePoint> candidatesToCheck = candidates;
828  candidates.clear();
829  points.clear();
830  vpPoint pt;
831  cv::Mat desc;
832 
833  std::vector<std::pair<vpImagePoint, size_t> > pairOfCandidatesToCheck(candidatesToCheck.size());
834  for(size_t i = 0; i < candidatesToCheck.size(); i++) {
835  pairOfCandidatesToCheck[i] = std::pair<vpImagePoint, size_t>(candidatesToCheck[i], i);
836  }
837 
838  size_t cpt1 = 0;
839  std::vector<vpPolygon> polygons_tmp = polygons;
840  for (std::vector<vpPolygon>::iterator it1 = polygons_tmp.begin(); it1 != polygons_tmp.end(); ++it1, cpt1++) {
841  std::vector<std::pair<vpImagePoint, size_t> >::iterator it2 = pairOfCandidatesToCheck.begin();
842 
843  while(it2 != pairOfCandidatesToCheck.end()) {
844  if (it1->isInside(it2->first)) {
845  candidates.push_back(it2->first);
846  vpKeyPoint::compute3D(it2->first, roisPt[cpt1], cam, cMo, pt);
847  points.push_back(pt);
848 
849  if(descriptors != NULL) {
850  desc.push_back(descriptors->row((int) it2->second));
851  }
852 
853  //Remove candidate keypoint which is located on the current polygon
854  it2 = pairOfCandidatesToCheck.erase(it2);
855  } else {
856  ++it2;
857  }
858  }
859  }
860 }
861 
874 void vpKeyPoint::compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
875  std::vector<cv::KeyPoint> &candidates, const std::vector<vpCylinder> &cylinders,
876  const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
877  std::vector<cv::Point3f> &points, cv::Mat *descriptors) {
878  std::vector<cv::KeyPoint> candidatesToCheck = candidates;
879  candidates.clear();
880  points.clear();
881  cv::Mat desc;
882 
883  //Keep only keypoints on cylinders
884  size_t cpt_keypoint = 0;
885  for(std::vector<cv::KeyPoint>::const_iterator it1 = candidatesToCheck.begin();
886  it1 != candidatesToCheck.end(); ++it1, cpt_keypoint++) {
887  size_t cpt_cylinder = 0;
888 
889  //Iterate through the list of vpCylinders
890  for(std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
891  it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
892  //Iterate through the list of the bounding boxes of the current vpCylinder
893  for(std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
894  if (vpPolygon::isInside(*it3, it1->pt.y, it1->pt.x)) {
895  candidates.push_back(*it1);
896 
897  //Calculate the 3D coordinates for each keypoint located on cylinders
898  double xm = 0.0, ym = 0.0;
899  vpPixelMeterConversion::convertPoint(cam, it1->pt.x, it1->pt.y, xm, ym);
900  double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
901 
902  if(!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
903  vpColVector point_cam(4);
904  point_cam[0] = xm * Z;
905  point_cam[1] = ym * Z;
906  point_cam[2] = Z;
907  point_cam[3] = 1;
908  vpColVector point_obj(4);
909  point_obj = cMo.inverse() * point_cam;
910  vpPoint pt;
911  pt.setWorldCoordinates(point_obj);
912  points.push_back(cv::Point3f((float) pt.get_oX(), (float) pt.get_oY(), (float) pt.get_oZ()));
913 
914  if(descriptors != NULL) {
915  desc.push_back(descriptors->row((int) cpt_keypoint));
916  }
917 
918  break;
919  }
920  }
921  }
922  }
923  }
924 
925  if(descriptors != NULL) {
926  desc.copyTo(*descriptors);
927  }
928 }
929 
942 void vpKeyPoint::compute3DForPointsOnCylinders(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam,
943  std::vector<vpImagePoint> &candidates, const std::vector<vpCylinder> &cylinders,
944  const std::vector<std::vector<std::vector<vpImagePoint> > > &vectorOfCylinderRois,
945  std::vector<vpPoint> &points, cv::Mat *descriptors) {
946  std::vector<vpImagePoint> candidatesToCheck = candidates;
947  candidates.clear();
948  points.clear();
949  cv::Mat desc;
950 
951  //Keep only keypoints on cylinders
952  size_t cpt_keypoint = 0;
953  for(std::vector<vpImagePoint>::const_iterator it1 = candidatesToCheck.begin();
954  it1 != candidatesToCheck.end(); ++it1, cpt_keypoint++) {
955  size_t cpt_cylinder = 0;
956 
957  //Iterate through the list of vpCylinders
958  for(std::vector<std::vector<std::vector<vpImagePoint> > >::const_iterator it2 = vectorOfCylinderRois.begin();
959  it2 != vectorOfCylinderRois.end(); ++it2, cpt_cylinder++) {
960  //Iterate through the list of the bounding boxes of the current vpCylinder
961  for(std::vector<std::vector<vpImagePoint> >::const_iterator it3 = it2->begin(); it3 != it2->end(); ++it3) {
962  if (vpPolygon::isInside(*it3, it1->get_i(), it1->get_j())) {
963  candidates.push_back(*it1);
964 
965  //Calculate the 3D coordinates for each keypoint located on cylinders
966  double xm = 0.0, ym = 0.0;
967  vpPixelMeterConversion::convertPoint(cam, it1->get_u(), it1->get_v(), xm, ym);
968  double Z = cylinders[cpt_cylinder].computeZ(xm, ym);
969 
970  if(!vpMath::isNaN(Z) && Z > std::numeric_limits<double>::epsilon()) {
971  vpColVector point_cam(4);
972  point_cam[0] = xm * Z;
973  point_cam[1] = ym * Z;
974  point_cam[2] = Z;
975  point_cam[3] = 1;
976  vpColVector point_obj(4);
977  point_obj = cMo.inverse() * point_cam;
978  vpPoint pt;
979  pt.setWorldCoordinates(point_obj);
980  points.push_back(pt);
981 
982  if(descriptors != NULL) {
983  desc.push_back(descriptors->row((int) cpt_keypoint));
984  }
985 
986  break;
987  }
988  }
989  }
990  }
991  }
992 
993  if(descriptors != NULL) {
994  desc.copyTo(*descriptors);
995  }
996 }
997 
1010 bool vpKeyPoint::computePose(const std::vector<cv::Point2f> &imagePoints, const std::vector<cv::Point3f> &objectPoints,
1011  const vpCameraParameters &cam, vpHomogeneousMatrix &cMo, std::vector<int> &inlierIndex,
1012  double &elapsedTime, bool (*func)(vpHomogeneousMatrix *)) {
1013  double t = vpTime::measureTimeMs();
1014 
1015  if(imagePoints.size() < 4 || objectPoints.size() < 4 || imagePoints.size() != objectPoints.size()) {
1016  elapsedTime = (vpTime::measureTimeMs() - t);
1017  std::cerr << "Not enough points to compute the pose (at least 4 points are needed)." << std::endl;
1018 
1019  return false;
1020  }
1021 
1022  cv::Mat cameraMatrix =
1023  (cv::Mat_<double>(3, 3) << cam.get_px(), 0, cam.get_u0(), 0, cam.get_py(), cam.get_v0(), 0, 0, 1);
1024  cv::Mat rvec, tvec;
1025 
1026 
1027  //Bug with OpenCV < 2.4.0 when zero distortion is provided as an empty array.
1028  //http://code.opencv.org/issues/1700 ; http://code.opencv.org/issues/1718
1029  //what(): Distortion coefficients must be 1x4, 4x1, 1x5, 5x1, 1x8 or 8x1 floating-point vector in function cvProjectPoints2
1030  //Fixed in OpenCV 2.4.0 (r7558)
1031 // cv::Mat distCoeffs;
1032  cv::Mat distCoeffs = cv::Mat::zeros(1, 5, CV_64F);
1033 
1034  try {
1035 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
1036  //OpenCV 3.0.0 (2014/12/12)
1037  cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
1038  rvec, tvec, false, m_nbRansacIterations, (float) m_ransacReprojectionError,
1039  0.99,//confidence=0.99 (default) – The probability that the algorithm produces a useful result.
1040  inlierIndex,
1041  cv::SOLVEPNP_ITERATIVE);
1042  //SOLVEPNP_ITERATIVE (default): Iterative method is based on Levenberg-Marquardt optimization.
1043  //In this case the function finds such a pose that minimizes reprojection error, that is the sum of squared distances
1044  //between the observed projections imagePoints and the projected (using projectPoints() ) objectPoints .
1045  //SOLVEPNP_P3P: Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang “Complete Solution Classification
1046  //for the Perspective-Three-Point Problem”. In this case the function requires exactly four object and image points.
1047  //SOLVEPNP_EPNP: Method has been introduced by F.Moreno-Noguer, V.Lepetit and P.Fua in the paper “EPnP: Efficient
1048  //Perspective-n-Point Camera Pose Estimation”.
1049  //SOLVEPNP_DLS: Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis. “A Direct Least-Squares (DLS)
1050  //Method for PnP”.
1051  //SOLVEPNP_UPNP Method is based on the paper of A.Penate-Sanchez, J.Andrade-Cetto, F.Moreno-Noguer.
1052  //“Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation”. In this case the function also
1053  //estimates the parameters f_x and f_y assuming that both have the same value. Then the cameraMatrix is updated with the
1054  //estimated focal length.
1055 #else
1056  int nbInlierToReachConsensus = m_nbRansacMinInlierCount;
1057  if(m_useConsensusPercentage) {
1058  nbInlierToReachConsensus = (int) (m_ransacConsensusPercentage / 100.0 * (double) m_queryFilteredKeyPoints.size());
1059  }
1060 
1061  cv::solvePnPRansac(objectPoints, imagePoints, cameraMatrix, distCoeffs,
1062  rvec, tvec, false, m_nbRansacIterations,
1063  (float) m_ransacReprojectionError, nbInlierToReachConsensus,
1064  inlierIndex);
1065 #endif
1066  } catch (cv::Exception &e) {
1067  std::cerr << e.what() << std::endl;
1068  elapsedTime = (vpTime::measureTimeMs() - t);
1069  return false;
1070  }
1071  vpTranslationVector translationVec(tvec.at<double>(0),
1072  tvec.at<double>(1), tvec.at<double>(2));
1073  vpThetaUVector thetaUVector(rvec.at<double>(0), rvec.at<double>(1),
1074  rvec.at<double>(2));
1075  cMo = vpHomogeneousMatrix(translationVec, thetaUVector);
1076 
1077  if(func != NULL) {
1078  //Check the final pose returned by solvePnPRansac to discard
1079  //solutions which do not respect the pose criterion.
1080  if(!func(&cMo)) {
1081  elapsedTime = (vpTime::measureTimeMs() - t);
1082  return false;
1083  }
1084  }
1085 
1086  elapsedTime = (vpTime::measureTimeMs() - t);
1087  return true;
1088 }
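 //Illustrative usage sketch (not part of the original file); keypoint, imagePoints,
 //objectPoints and cam are placeholders for a vpKeyPoint instance, the matched 2D
 //points, their 3D counterparts and the camera parameters:
 //vpHomogeneousMatrix cMo;
 //std::vector<int> inlierIndex;
 //double elapsedTime = 0.0;
 //bool ok = keypoint.computePose(imagePoints, objectPoints, cam, cMo,
 //                               inlierIndex, elapsedTime, NULL); //NULL: no extra pose criterion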
1089 
1101 bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
1102  std::vector<vpPoint> &inliers, double &elapsedTime, bool (*func)(vpHomogeneousMatrix *)) {
1103  std::vector<unsigned int> inlierIndex;
1104  return computePose(objectVpPoints, cMo, inliers, inlierIndex, elapsedTime, func);
1105 }
1106 
1119 bool vpKeyPoint::computePose(const std::vector<vpPoint> &objectVpPoints, vpHomogeneousMatrix &cMo,
1120  std::vector<vpPoint> &inliers, std::vector<unsigned int> &inlierIndex,
1121  double &elapsedTime, bool (*func)(vpHomogeneousMatrix *)) {
1122  double t = vpTime::measureTimeMs();
1123 
1124  if(objectVpPoints.size() < 4) {
1125  elapsedTime = (vpTime::measureTimeMs() - t);
1126 // std::cerr << "Not enough points to compute the pose (at least 4 points are needed)." << std::endl;
1127 
1128  return false;
1129  }
1130 
1131  vpPose pose;
1132 
1133  for(std::vector<vpPoint>::const_iterator it = objectVpPoints.begin(); it != objectVpPoints.end(); ++it) {
1134  pose.addPoint(*it);
1135  }
1136 
1137  unsigned int nbInlierToReachConsensus = (unsigned int) m_nbRansacMinInlierCount;
1138  if(m_useConsensusPercentage) {
1139  nbInlierToReachConsensus = (unsigned int) (m_ransacConsensusPercentage / 100.0 *
1140  (double) m_queryFilteredKeyPoints.size());
1141  }
1142 
1143  pose.setRansacNbInliersToReachConsensus(nbInlierToReachConsensus);
1144  pose.setRansacThreshold(m_ransacThreshold);
1145  pose.setRansacMaxTrials(m_nbRansacIterations);
1146 
1147  bool isRansacPoseEstimationOk = false;
1148  try {
1149  pose.setCovarianceComputation(m_computeCovariance);
1150  isRansacPoseEstimationOk = pose.computePose(vpPose::RANSAC, cMo, func);
1151  inliers = pose.getRansacInliers();
1152  inlierIndex = pose.getRansacInlierIndex();
1153 
1154  if(m_computeCovariance) {
1155  m_covarianceMatrix = pose.getCovarianceMatrix();
1156  }
1157  } catch(vpException &e) {
1158  std::cerr << "e=" << e.what() << std::endl;
1159  elapsedTime = (vpTime::measureTimeMs() - t);
1160  return false;
1161  }
1162 
1163 // if(func != NULL && isRansacPoseEstimationOk) {
1164 // //Check the final pose returned by the Ransac VVS pose estimation as in some rare cases
1165 // //we can converge toward a final cMo that does not respect the pose criterion even
1166 // //if the 4 minimal points picked do respect it.
1167 // if(!func(&cMo)) {
1168 // elapsedTime = (vpTime::measureTimeMs() - t);
1169 // return false;
1170 // }
1171 // }
1172 
1173  elapsedTime = (vpTime::measureTimeMs() - t);
1174  return isRansacPoseEstimationOk;
1175 }
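 //Illustrative standalone sketch (not part of the original file) of the ViSP RANSAC
 //path used above; objectVpPoints is a placeholder list of vpPoint with both world
 //coordinates and normalized image coordinates set:
 //vpPose pose;
 //for (size_t i = 0; i < objectVpPoints.size(); i++)
 //  pose.addPoint(objectVpPoints[i]);
 //pose.setRansacNbInliersToReachConsensus(12);
 //pose.setRansacThreshold(0.001);
 //pose.setRansacMaxTrials(200);
 //vpHomogeneousMatrix cMo;
 //bool ok = pose.computePose(vpPose::RANSAC, cMo);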
1176 
1188 double vpKeyPoint::computePoseEstimationError(const std::vector<std::pair<cv::KeyPoint, cv::Point3f> > &matchKeyPoints,
1189  const vpCameraParameters &cam, const vpHomogeneousMatrix &cMo_est) {
1190  if(matchKeyPoints.size() == 0) {
1191  //return std::numeric_limits<double>::max(); // create an error under Windows. To fix it we have to add #undef max
1192  return DBL_MAX;
1193  }
1194 
1195  std::vector<double> errors(matchKeyPoints.size());
1196  size_t cpt = 0;
1197  vpPoint pt;
1198  for(std::vector<std::pair<cv::KeyPoint, cv::Point3f> >::const_iterator it = matchKeyPoints.begin();
1199  it != matchKeyPoints.end(); ++it, cpt++) {
1200  pt.set_oX(it->second.x);
1201  pt.set_oY(it->second.y);
1202  pt.set_oZ(it->second.z);
1203  pt.project(cMo_est);
1204  double u = 0.0, v = 0.0;
1205  vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), u, v);
1206  errors[cpt] = std::sqrt((u-it->first.pt.x)*(u-it->first.pt.x) + (v-it->first.pt.y)*(v-it->first.pt.y));
1207  }
1208 
1209  return std::accumulate(errors.begin(), errors.end(), 0.0) / errors.size();
1210 }
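 //Note (added for clarity): the value returned above is the mean Euclidean
 //reprojection error in pixels over all matches; e.g. residuals of 1, 2 and 3 pixels
 //give an error of 2.0.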
1211 
1219 void vpKeyPoint::createImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
1220  vpImage<unsigned char> &IMatching) {
1221  //Image matching side by side
1222  unsigned int width = IRef.getWidth() + ICurrent.getWidth();
1223  unsigned int height = (std::max)(IRef.getHeight(), ICurrent.getHeight());
1224 
1225  IMatching = vpImage<unsigned char>(height, width);
1226 }
1227 
1236 void vpKeyPoint::createImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching) {
1237  //Nb images in the training database + the current image in which we want to detect the object
1238  unsigned int nbImg = (unsigned int) (m_mapOfImages.size() + 1);
1239 
1240  if(m_mapOfImages.empty()) {
1241  std::cerr << "There is no training image loaded !" << std::endl;
1242  return;
1243  }
1244 
1245  if(nbImg == 2) {
1246  //Only one training image, so we display them side by side
1247  createImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
1248  } else {
1249  //Multiple training images, display them as a mosaic image
1250  //(unsigned int) std::floor(std::sqrt((double) nbImg) + 0.5);
1251  unsigned int nbImgSqrt = (unsigned int) vpMath::round(std::sqrt((double) nbImg));
1252 
1253  //Number of columns in the mosaic grid
1254  unsigned int nbWidth = nbImgSqrt;
1255  //Number of rows in the mosaic grid
1256  unsigned int nbHeight = nbImgSqrt;
1257 
1258  //Deal with a non-square mosaic grid when the total number of images is not a perfect square
1259  if(nbImgSqrt * nbImgSqrt < nbImg) {
1260  nbWidth++;
1261  }
1262 
1263  unsigned int maxW = ICurrent.getWidth();
1264  unsigned int maxH = ICurrent.getHeight();
1265  for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it) {
1266  if(maxW < it->second.getWidth()) {
1267  maxW = it->second.getWidth();
1268  }
1269 
1270  if(maxH < it->second.getHeight()) {
1271  maxH = it->second.getHeight();
1272  }
1273  }
1274 
1275  IMatching = vpImage<unsigned char>(maxH * nbHeight, maxW * nbWidth);
1276  }
1277 }
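 //Worked example (added for clarity) of the grid sizing above: with 5 training images,
 //nbImg = 6 and nbImgSqrt = round(sqrt(6)) = 2; since 2 * 2 < 6, nbWidth becomes 3,
 //giving a 3 x 2 mosaic of 6 cells for the 5 training images plus the current one.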
1278 
1286 void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, const vpRect &rectangle) {
1287  double elapsedTime;
1288  detect(I, keyPoints, elapsedTime, rectangle);
1289 }
1290 
1298 void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, const cv::Mat &mask) {
1299  double elapsedTime;
1300  detect(matImg, keyPoints, elapsedTime, mask);
1301 }
1302 
1311 void vpKeyPoint::detect(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
1312  const vpRect &rectangle) {
1313  cv::Mat matImg;
1314  vpImageConvert::convert(I, matImg, false);
1315  cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
1316 
1317  if(rectangle.getWidth() > 0 && rectangle.getHeight() > 0) {
1318  cv::Point leftTop((int) rectangle.getLeft(), (int) rectangle.getTop()), rightBottom((int) rectangle.getRight(),
1319  (int) rectangle.getBottom());
1320  cv::rectangle(mask, leftTop, rightBottom, cv::Scalar(255), CV_FILLED);
1321  } else {
1322  mask = cv::Mat::ones(matImg.rows, matImg.cols, CV_8U) * 255;
1323  }
1324 
1325  detect(matImg, keyPoints, elapsedTime, mask);
1326 }
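 //Illustrative sketch (not part of the original file) of the mask built above: a
 //white rectangle on a black image restricts detection to the ROI, equivalent to:
 //cv::Mat mask = cv::Mat::zeros(matImg.rows, matImg.cols, CV_8U);
 //mask(cv::Rect(50, 50, 200, 100)).setTo(cv::Scalar(255)); //x, y, width, height of the ROI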
1327 
1336 void vpKeyPoint::detect(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, double &elapsedTime,
1337  const cv::Mat &mask) {
1338  double t = vpTime::measureTimeMs();
1339  keyPoints.clear();
1340 
1341  for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin(); it != m_detectors.end(); ++it) {
1342  std::vector<cv::KeyPoint> kp;
1343  it->second->detect(matImg, kp, mask);
1344  keyPoints.insert(keyPoints.end(), kp.begin(), kp.end());
1345  }
1346 
1347  elapsedTime = vpTime::measureTimeMs() - t;
1348 }
1349 
1357 void vpKeyPoint::display(const vpImage<unsigned char> &IRef,
1358  const vpImage<unsigned char> &ICurrent, unsigned int size) {
1359  std::vector<vpImagePoint> vpQueryImageKeyPoints;
1360  getQueryKeyPoints(vpQueryImageKeyPoints);
1361  std::vector<vpImagePoint> vpTrainImageKeyPoints;
1362  getTrainKeyPoints(vpTrainImageKeyPoints);
1363 
1364  for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1365  vpDisplay::displayCross(IRef, vpTrainImageKeyPoints[(size_t)(it->trainIdx)], size, vpColor::red);
1366  vpDisplay::displayCross(ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, vpColor::green);
1367  }
1368 }
1369 
1377 void vpKeyPoint::display(const vpImage<unsigned char> &ICurrent, unsigned int size, const vpColor &color) {
1378  std::vector<vpImagePoint> vpQueryImageKeyPoints;
1379  getQueryKeyPoints(vpQueryImageKeyPoints);
1380 
1381  for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1382  vpDisplay::displayCross (ICurrent, vpQueryImageKeyPoints[(size_t)(it->queryIdx)], size, color);
1383  }
1384 }
1385 
1395 void vpKeyPoint::displayMatching(const vpImage<unsigned char> &IRef, vpImage<unsigned char> &IMatching,
1396  unsigned int crossSize, unsigned int lineThickness, const vpColor &color) {
1397  bool randomColor = (color == vpColor::none);
1398  srand((unsigned int) time(NULL));
1399  vpColor currentColor = color;
1400 
1401  std::vector<vpImagePoint> queryImageKeyPoints;
1402  getQueryKeyPoints(queryImageKeyPoints);
1403  std::vector<vpImagePoint> trainImageKeyPoints;
1404  getTrainKeyPoints(trainImageKeyPoints);
1405 
1406  vpImagePoint leftPt, rightPt;
1407  for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1408  if(randomColor) {
1409  currentColor = vpColor((rand() % 256), (rand() % 256), (rand() % 256));
1410  }
1411 
1412  leftPt = trainImageKeyPoints[(size_t)(it->trainIdx)];
1413  rightPt = vpImagePoint(queryImageKeyPoints[(size_t)(it->queryIdx)].get_i(),
1414  queryImageKeyPoints[(size_t)it->queryIdx].get_j() + IRef.getWidth());
1415  vpDisplay::displayCross(IMatching, leftPt, crossSize, currentColor);
1416  vpDisplay::displayCross(IMatching, rightPt, crossSize, currentColor);
1417  vpDisplay::displayLine(IMatching, leftPt, rightPt, currentColor, lineThickness);
1418  }
1419 }
1420 
1431 void vpKeyPoint::displayMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching,
1432  const std::vector<vpImagePoint> &ransacInliers, unsigned int crossSize, unsigned int lineThickness) {
1433  if(m_mapOfImages.empty() || m_mapOfImageId.empty()) {
1434  //No training images so return
1435  std::cerr << "There is no training image loaded !" << std::endl;
1436  return;
1437  }
1438 
1439  //Nb images in the training database + the current image in which we want to detect the object
1440  int nbImg = (int) (m_mapOfImages.size() + 1);
1441 
1442  if(nbImg == 2) {
1443  //Only one training image, so we display the matching result side-by-side
1444  displayMatching(m_mapOfImages.begin()->second, IMatching, crossSize);
1445  } else {
1446  //Multiple training images, display them as a mosaic image
1447  int nbImgSqrt = vpMath::round(std::sqrt((double) nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
1448  int nbWidth = nbImgSqrt;
1449  int nbHeight = nbImgSqrt;
1450 
1451  if(nbImgSqrt * nbImgSqrt < nbImg) {
1452  nbWidth++;
1453  }
1454 
1455  std::map<int, int> mapOfImageIdIndex;
1456  int cpt = 0;
1457  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
1458  for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it, cpt++) {
1459  mapOfImageIdIndex[it->first] = cpt;
1460 
1461  if(maxW < it->second.getWidth()) {
1462  maxW = it->second.getWidth();
1463  }
1464 
1465  if(maxH < it->second.getHeight()) {
1466  maxH = it->second.getHeight();
1467  }
1468  }
1469 
1470  //Index of the current image in the grid, chosen so that the current image lies in the center square of the mosaic grid
1471  int medianI = nbHeight / 2;
1472  int medianJ = nbWidth / 2;
1473  int medianIndex = medianI * nbWidth + medianJ;
1474  for(std::vector<cv::KeyPoint>::const_iterator it = m_trainKeyPoints.begin(); it != m_trainKeyPoints.end(); ++it) {
1475  vpImagePoint topLeftCorner;
1476  int current_class_id_index = 0;
1477  if(mapOfImageIdIndex[m_mapOfImageId[it->class_id]] < medianIndex) {
1478  current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]];
1479  } else {
1480  //Shift by one the index of the training images that come after the current image
1481  current_class_id_index = mapOfImageIdIndex[m_mapOfImageId[it->class_id]] + 1;
1482  }
1483 
1484  int indexI = current_class_id_index / nbWidth;
1485  int indexJ = current_class_id_index - (indexI * nbWidth);
1486  topLeftCorner.set_ij((int)maxH*indexI, (int)maxW*indexJ);
1487 
1488  //Display cross for keypoints in the learning database
1489  vpDisplay::displayCross(IMatching, (int) (it->pt.y + topLeftCorner.get_i()), (int) (it->pt.x + topLeftCorner.get_j()),
1490  crossSize, vpColor::red);
1491  }
1492 
1493  vpImagePoint topLeftCorner((int)maxH*medianI, (int)maxW*medianJ);
1494  for(std::vector<cv::KeyPoint>::const_iterator it = m_queryKeyPoints.begin(); it != m_queryKeyPoints.end(); ++it) {
1495  //Display cross for keypoints detected in the current image
1496  vpDisplay::displayCross(IMatching, (int) (it->pt.y + topLeftCorner.get_i()), (int) (it->pt.x + topLeftCorner.get_j()),
1497  crossSize, vpColor::red);
1498  }
1499  for(std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin();
1500  it != ransacInliers.end(); ++it) {
1501  //Display green circle for RANSAC inliers
1502  vpDisplay::displayCircle(IMatching, (int) (it->get_v() + topLeftCorner.get_i()), (int) (it->get_u() +
1503  topLeftCorner.get_j()), 4, vpColor::green);
1504  }
1505  for(std::vector<vpImagePoint>::const_iterator it = m_ransacOutliers.begin(); it != m_ransacOutliers.end(); ++it) {
1506  //Display red circle for RANSAC outliers
1507  vpDisplay::displayCircle(IMatching, (int) (it->get_i() + topLeftCorner.get_i()), (int) (it->get_j() +
1508  topLeftCorner.get_j()), 4, vpColor::red);
1509  }
1510 
1511  for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
1512  int current_class_id = 0;
1513  if(mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t) it->trainIdx].class_id]] < medianIndex) {
1514  current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t) it->trainIdx].class_id]];
1515  } else {
1516  //Shift by one the index of the training images that come after the current image
1517  current_class_id = mapOfImageIdIndex[m_mapOfImageId[m_trainKeyPoints[(size_t) it->trainIdx].class_id]] + 1;
1518  }
1519 
1520  int indexI = current_class_id / nbWidth;
1521  int indexJ = current_class_id - (indexI * nbWidth);
1522 
1523  vpImagePoint end((int)maxH*indexI + m_trainKeyPoints[(size_t) it->trainIdx].pt.y,
1524  (int)maxW*indexJ + m_trainKeyPoints[(size_t) it->trainIdx].pt.x);
1525  vpImagePoint start((int)maxH*medianI + m_queryFilteredKeyPoints[(size_t) it->queryIdx].pt.y,
1526  (int)maxW*medianJ + m_queryFilteredKeyPoints[(size_t) it->queryIdx].pt.x);
1527 
1528  //Draw line for matching keypoints detected in the current image and those detected
1529  //in the training images
1530  vpDisplay::displayLine(IMatching, start, end, vpColor::green, lineThickness);
1531  }
1532  }
1533 }
1534 
1544 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1545  std::vector<cv::Point3f> *trainPoints) {
1546  double elapsedTime;
1547  extract(I, keyPoints, descriptors, elapsedTime, trainPoints);
1548 }
1549 
1559 void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1560  std::vector<cv::Point3f> *trainPoints) {
1561  double elapsedTime;
1562  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1563 }
1564 
1575 void vpKeyPoint::extract(const vpImage<unsigned char> &I, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1576  double &elapsedTime, std::vector<cv::Point3f> *trainPoints) {
1577  cv::Mat matImg;
1578  vpImageConvert::convert(I, matImg, false);
1579  extract(matImg, keyPoints, descriptors, elapsedTime, trainPoints);
1580 }
1581 
1592 void vpKeyPoint::extract(const cv::Mat &matImg, std::vector<cv::KeyPoint> &keyPoints, cv::Mat &descriptors,
1593  double &elapsedTime, std::vector<cv::Point3f> *trainPoints) {
1594  double t = vpTime::measureTimeMs();
1595  bool first = true;
1596 
1597  for(std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator itd = m_extractors.begin();
1598  itd != m_extractors.end(); ++itd) {
1599  if(first) {
1600  first = false;
1601  //Check if we have 3D object points information
1602  if(trainPoints != NULL && !trainPoints->empty()) {
1603  //Copy the input list of keypoints, keypoints that cannot be computed are removed in the function compute
1604  std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1605 
1606  //Extract descriptors for the given list of keypoints
1607  itd->second->compute(matImg, keyPoints, descriptors);
1608 
1609  if(keyPoints.size() != keyPoints_tmp.size()) {
1610  //Keypoints have been removed
1611  //Store the hash of a keypoint as the key and the index of the keypoint as the value
1612  std::map<size_t, size_t> mapOfKeypointHashes;
1613  size_t cpt = 0;
1614  for(std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end(); ++it, cpt++) {
1615  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1616  }
1617 
1618  std::vector<cv::Point3f> trainPoints_tmp;
1619  for(std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1620  if(mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1621  trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1622  }
1623  }
1624 
1625  //Copy trainPoints_tmp to m_trainPoints
1626  *trainPoints = trainPoints_tmp;
1627  }
1628  } else {
1629  //Extract descriptors for the given list of keypoints
1630  itd->second->compute(matImg, keyPoints, descriptors);
1631  }
1632  } else {
1633  //Copy the input list of keypoints, keypoints that cannot be computed are removed in the function compute
1634  std::vector<cv::KeyPoint> keyPoints_tmp = keyPoints;
1635 
1636  cv::Mat desc;
1637  //Extract descriptors for the given list of keypoints
1638  itd->second->compute(matImg, keyPoints, desc);
1639 
1640  if(keyPoints.size() != keyPoints_tmp.size()) {
1641  //Keypoints have been removed
1642  //Store the hash of a keypoint as the key and the index of the keypoint as the value
1643  std::map<size_t, size_t> mapOfKeypointHashes;
1644  size_t cpt = 0;
1645  for(std::vector<cv::KeyPoint>::const_iterator it = keyPoints_tmp.begin(); it != keyPoints_tmp.end(); ++it, cpt++) {
1646  mapOfKeypointHashes[myKeypointHash(*it)] = cpt;
1647  }
1648 
1649  std::vector<cv::Point3f> trainPoints_tmp;
1650  cv::Mat descriptors_tmp;
1651  for(std::vector<cv::KeyPoint>::const_iterator it = keyPoints.begin(); it != keyPoints.end(); ++it) {
1652  if(mapOfKeypointHashes.find(myKeypointHash(*it)) != mapOfKeypointHashes.end()) {
1653  if(trainPoints != NULL && !trainPoints->empty()) {
1654  trainPoints_tmp.push_back((*trainPoints)[mapOfKeypointHashes[myKeypointHash(*it)]]);
1655  }
1656 
1657  if(!descriptors.empty()) {
1658  descriptors_tmp.push_back(descriptors.row((int) mapOfKeypointHashes[myKeypointHash(*it)]));
1659  }
1660  }
1661  }
1662 
1663  if(trainPoints != NULL) {
1664  //Copy trainPoints_tmp to m_trainPoints
1665  *trainPoints = trainPoints_tmp;
1666  }
1667  //Copy descriptors_tmp to descriptors
1668  descriptors_tmp.copyTo(descriptors);
1669  }
1670 
1671  //Merge descriptors horizontally
1672  if(descriptors.empty()) {
1673  desc.copyTo(descriptors);
1674  } else {
1675  cv::hconcat(descriptors, desc, descriptors);
1676  }
1677  }
1678  }
1679 
1680  if(keyPoints.size() != (size_t) descriptors.rows) {
1681  std::cerr << "keyPoints.size() != (size_t) descriptors.rows" << std::endl;
1682  }
1683  elapsedTime = vpTime::measureTimeMs() - t;
1684 }
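 //Note (added for clarity) on the horizontal merge above: with two extractors that
 //produce, say, 64-D and 128-D descriptors of the same depth for the same keypoints,
 //cv::hconcat() yields a single 192-D descriptor per keypoint; both matrices must
 //have the same number of rows (one per keypoint) and the same type.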
1685 
1689 void vpKeyPoint::filterMatches() {
1690  std::vector<cv::KeyPoint> queryKpts;
1691  std::vector<cv::Point3f> trainPts;
1692  std::vector<cv::DMatch> m;
1693 
1694  if(m_useKnn) {
1695  double max_dist = 0;
1696  //double min_dist = std::numeric_limits<double>::max(); // create an error under Windows. To fix it we have to add #undef max
1697  double min_dist = DBL_MAX;
1698  double mean = 0.0;
1699  std::vector<double> distance_vec(m_knnMatches.size());
1700 
1701  if(m_filterType == stdAndRatioDistanceThreshold) {
1702  for(size_t i = 0; i < m_knnMatches.size(); i++) {
1703  double dist = m_knnMatches[i][0].distance;
1704  mean += dist;
1705  distance_vec[i] = dist;
1706 
1707  if (dist < min_dist) {
1708  min_dist = dist;
1709  }
1710  if (dist > max_dist) {
1711  max_dist = dist;
1712  }
1713  }
1714  mean /= m_queryDescriptors.rows;
1715  }
1716 
  //Note: distance_vec is only filled for stdAndRatioDistanceThreshold, so the threshold computed below is meaningful only in that case
 1717  double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1718  double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1719  double threshold = min_dist + stdev;
1720 
1721  for(size_t i = 0; i < m_knnMatches.size(); i++) {
1722  if(m_knnMatches[i].size() >= 2) {
1723  //Calculate ratio of the descriptor distance between the two nearest neighbors of the keypoint
1724  float ratio = m_knnMatches[i][0].distance / m_knnMatches[i][1].distance;
1725 // float ratio = std::sqrt((vecMatches[i][0].distance * vecMatches[i][0].distance)
1726 // / (vecMatches[i][1].distance * vecMatches[i][1].distance));
1727  double dist = m_knnMatches[i][0].distance;
1728 
1729  if(ratio < m_matchingRatioThreshold || (m_filterType == stdAndRatioDistanceThreshold && dist < threshold)) {
1730  m.push_back(cv::DMatch((int) queryKpts.size(), m_knnMatches[i][0].trainIdx, m_knnMatches[i][0].distance));
1731 
1732  if(!m_trainPoints.empty()) {
1733  trainPts.push_back(m_trainPoints[(size_t)m_knnMatches[i][0].trainIdx]);
1734  }
1735  queryKpts.push_back(m_queryKeyPoints[(size_t)m_knnMatches[i][0].queryIdx]);
1736  }
1737  }
1738  }
1739  } else {
1740  double max_dist = 0;
1741  //std::numeric_limits<double>::max() causes an error under Windows (windows.h defines a max macro); fixing it would require #undef max
1742  //double min_dist = std::numeric_limits<double>::max();
1743  double min_dist = DBL_MAX;
1744  double mean = 0.0;
1745  std::vector<double> distance_vec(m_matches.size());
1746  for(size_t i = 0; i < m_matches.size(); i++) {
1747  double dist = m_matches[i].distance;
1748  mean += dist;
1749  distance_vec[i] = dist;
1750 
1751  if (dist < min_dist) {
1752  min_dist = dist;
1753  }
1754  if (dist > max_dist) {
1755  max_dist = dist;
1756  }
1757  }
1758  mean /= m_queryDescriptors.rows;
1759 
1760  double sq_sum = std::inner_product(distance_vec.begin(), distance_vec.end(), distance_vec.begin(), 0.0);
1761  double stdev = std::sqrt(sq_sum / distance_vec.size() - mean * mean);
1762 
1763  //Define a threshold so that we keep all matches whose descriptor distance falls below a factor of the
1764  //minimum descriptor distance (over all the query keypoints),
1765  //or below the minimum descriptor distance plus the standard deviation (computed over all the query descriptor distances)
1766  double threshold = m_filterType == constantFactorDistanceThreshold ? m_matchingFactorThreshold * min_dist : min_dist + stdev;
1767 
1768  for (size_t i = 0; i < m_matches.size(); i++) {
1769  if(m_matches[i].distance <= threshold) {
1770  m.push_back(cv::DMatch((int) queryKpts.size(), m_matches[i].trainIdx, m_matches[i].distance));
1771 
1772  if(!m_trainPoints.empty()) {
1773  trainPts.push_back(m_trainPoints[(size_t)m_matches[i].trainIdx]);
1774  }
1775  queryKpts.push_back(m_queryKeyPoints[(size_t)m_matches[i].queryIdx]);
1776  }
1777  }
1778  }
1779 
1780  if(m_useSingleMatchFilter) {
1781  //Eliminate matches where multiple query keypoints are matched to the same train keypoint
1782  std::vector<cv::DMatch> mTmp;
1783  std::vector<cv::Point3f> trainPtsTmp;
1784  std::vector<cv::KeyPoint> queryKptsTmp;
1785 
1786  std::map<int, int> mapOfTrainIdx;
1787  //Count the number of query points matched to the same train point
1788  for(std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1789  mapOfTrainIdx[it->trainIdx]++;
1790  }
1791 
1792  //Keep matches with only one correspondence
1793  for(std::vector<cv::DMatch>::const_iterator it = m.begin(); it != m.end(); ++it) {
1794  if(mapOfTrainIdx[it->trainIdx] == 1) {
1795  mTmp.push_back(cv::DMatch((int) queryKptsTmp.size(), it->trainIdx, it->distance));
1796 
1797  if(!m_trainPoints.empty()) {
1798  trainPtsTmp.push_back(m_trainPoints[(size_t) it->trainIdx]);
1799  }
1800  queryKptsTmp.push_back(queryKpts[(size_t) it->queryIdx]);
1801  }
1802  }
1803 
1804  m_filteredMatches = mTmp;
1805  m_objectFilteredPoints = trainPtsTmp;
1806  m_queryFilteredKeyPoints = queryKptsTmp;
1807  } else {
1808  m_filteredMatches = m;
1809  m_objectFilteredPoints = trainPts;
1810  m_queryFilteredKeyPoints = queryKpts;
1811  }
1812 }
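/*
 * Illustrative sketch, not part of the original file: the nearest-neighbor
 * ratio test used above, applied directly to OpenCV knnMatch() output. The
 * matcher is assumed to already hold the train descriptors, and
 * ratioThreshold (e.g. 0.85) is an assumed parameter:
 *
 *   std::vector<std::vector<cv::DMatch> > knnMatches;
 *   matcher->knnMatch(queryDescriptors, knnMatches, 2);
 *   std::vector<cv::DMatch> goodMatches;
 *   for(size_t i = 0; i < knnMatches.size(); i++) {
 *     if(knnMatches[i].size() >= 2 &&
 *        knnMatches[i][0].distance / knnMatches[i][1].distance < ratioThreshold) {
 *       goodMatches.push_back(knnMatches[i][0]); //keep only unambiguous matches
 *     }
 *   }
 */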
1813 
1820 void vpKeyPoint::getObjectPoints(std::vector<cv::Point3f> &objectPoints) const {
1821  objectPoints = m_objectFilteredPoints;
1822 }
1823 
1830 void vpKeyPoint::getObjectPoints(std::vector<vpPoint> &objectPoints) const {
1831  vpConvert::convertFromOpenCV(m_objectFilteredPoints, objectPoints);
1832 }
1833 
1839 void vpKeyPoint::getQueryKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const {
1840  keyPoints = m_queryFilteredKeyPoints;
1841 }
1842 
1848 void vpKeyPoint::getQueryKeyPoints(std::vector<vpImagePoint> &keyPoints) const {
1849  keyPoints = currentImagePointsList;
1850 }
1851 
1857 void vpKeyPoint::getTrainKeyPoints(std::vector<cv::KeyPoint> &keyPoints) const {
1858  keyPoints = m_trainKeyPoints;
1859 }
1860 
1866 void vpKeyPoint::getTrainKeyPoints(std::vector<vpImagePoint> &keyPoints) const {
1867  keyPoints = referenceImagePointsList;
1868 }
1869 
1875 void vpKeyPoint::getTrainPoints(std::vector<cv::Point3f> &points) const {
1876  points = m_trainPoints;
1877 }
1878 
1884 void vpKeyPoint::getTrainPoints(std::vector<vpPoint> &points) const {
1885  points = m_trainVpPoints;
1886 }
1887 
1891 void vpKeyPoint::init() {
1892  // Require 2.4.0 <= opencv < 3.0.0
1893 #if defined(VISP_HAVE_OPENCV_NONFREE) && (VISP_HAVE_OPENCV_VERSION >= 0x020400) && (VISP_HAVE_OPENCV_VERSION < 0x030000)
1894  //The following line must be called in order to use SIFT or SURF
1895  if (!cv::initModule_nonfree()) {
1896  std::cerr << "Cannot init the nonfree module; SIFT and SURF cannot be used."
1897  << std::endl;
1898  }
1899 #endif
1900 
1901  //Use k-nearest neighbors (knn) to retrieve the two best matches for a keypoint
1902  //This is needed only for the ratio-based filtering methods
1903  if(m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
1904  m_useKnn = true;
1905  }
1906 
1907  initDetectors(m_detectorNames);
1908  initExtractors(m_extractorNames);
1909  initMatcher(m_matcherName);
1910 }
1911 
1917 void vpKeyPoint::initDetector(const std::string &detectorName) {
1918 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
1919  m_detectors[detectorName] = cv::FeatureDetector::create(detectorName);
1920 
1921  if(m_detectors[detectorName] == NULL) {
1922  std::stringstream ss_msg;
1923  ss_msg << "Failed to initialize the detector: " << detectorName << " or it is not available in OpenCV version: "
1924  << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
1925  throw vpException(vpException::fatalError, ss_msg.str());
1926  }
1927 #else
1928  std::string detectorNameTmp = detectorName;
1929  std::string pyramid = "Pyramid";
1930  std::size_t pos = detectorName.find(pyramid);
1931  bool usePyramid = false;
1932  if (pos != std::string::npos) {
1933  detectorNameTmp = detectorName.substr(pos + pyramid.size());
1934  usePyramid = true;
1935  }
1936 
1937  if(detectorNameTmp == "SIFT") {
1938 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1939  cv::Ptr<cv::FeatureDetector> siftDetector = cv::xfeatures2d::SIFT::create();
1940  if (!usePyramid) {
1941  m_detectors[detectorNameTmp] = siftDetector;
1942  } else {
1943  std::cerr << "You should not use SIFT with Pyramid feature detection!" << std::endl;
1944  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(siftDetector);
1945  }
1946 #else
1947  std::stringstream ss_msg;
1948  ss_msg << "Failed to initialize the detector: SIFT. OpenCV version "
1949  << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
1950  throw vpException(vpException::fatalError, ss_msg.str());
1951 #endif
1952  } else if (detectorNameTmp == "SURF") {
1953 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
1954  cv::Ptr<cv::FeatureDetector> surfDetector = cv::xfeatures2d::SURF::create();
1955  if (!usePyramid) {
1956  m_detectors[detectorNameTmp] = surfDetector;
1957  } else {
1958  std::cerr << "You should not use SURF with Pyramid feature detection!" << std::endl;
1959  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(surfDetector);
1960  }
1961 #else
1962  std::stringstream ss_msg;
1963  ss_msg << "Failed to initialize the detector: SURF. OpenCV version "
1964  << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
1965  throw vpException(vpException::fatalError, ss_msg.str());
1966 #endif
1967  } else if (detectorNameTmp == "FAST") {
1968  cv::Ptr<cv::FeatureDetector> fastDetector = cv::FastFeatureDetector::create();
1969  if (!usePyramid) {
1970  m_detectors[detectorNameTmp] = fastDetector;
1971  } else {
1972  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(fastDetector);
1973  }
1974  } else if (detectorNameTmp == "MSER") {
1975  cv::Ptr<cv::FeatureDetector> mserDetector = cv::MSER::create();
1976  if (!usePyramid) {
1977  m_detectors[detectorNameTmp] = mserDetector;
1978  } else {
1979  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(mserDetector);
1980  }
1981  } else if (detectorNameTmp == "ORB") {
1982  cv::Ptr<cv::FeatureDetector> orbDetector = cv::ORB::create();
1983  if (!usePyramid) {
1984  m_detectors[detectorNameTmp] = orbDetector;
1985  } else {
1986  std::cerr << "You should not use ORB with Pyramid feature detection!" << std::endl;
1987  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(orbDetector);
1988  }
1989  } else if (detectorNameTmp == "BRISK") {
1990  cv::Ptr<cv::FeatureDetector> briskDetector = cv::BRISK::create();
1991  if (!usePyramid) {
1992  m_detectors[detectorNameTmp] = briskDetector;
1993  } else {
1994  std::cerr << "You should not use BRISK with Pyramid feature detection!" << std::endl;
1995  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(briskDetector);
1996  }
1997  } else if (detectorNameTmp == "KAZE") {
1998  cv::Ptr<cv::FeatureDetector> kazeDetector = cv::KAZE::create();
1999  if (!usePyramid) {
2000  m_detectors[detectorNameTmp] = kazeDetector;
2001  } else {
2002  std::cerr << "You should not use KAZE with Pyramid feature detection!" << std::endl;
2003  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(kazeDetector);
2004  }
2005  } else if (detectorNameTmp == "AKAZE") {
2006  cv::Ptr<cv::FeatureDetector> akazeDetector = cv::AKAZE::create();
2007  if (!usePyramid) {
2008  m_detectors[detectorNameTmp] = akazeDetector;
2009  } else {
2010  std::cerr << "You should not use AKAZE with Pyramid feature detection!" << std::endl;
2011  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(akazeDetector);
2012  }
2013  } else if (detectorNameTmp == "GFTT") {
2014  cv::Ptr<cv::FeatureDetector> gfttDetector = cv::GFTTDetector::create();
2015  if (!usePyramid) {
2016  m_detectors[detectorNameTmp] = gfttDetector;
2017  } else {
2018  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(gfttDetector);
2019  }
2020  } else if (detectorNameTmp == "SimpleBlob") {
2021  cv::Ptr<cv::FeatureDetector> simpleBlobDetector = cv::SimpleBlobDetector::create();
2022  if (!usePyramid) {
2023  m_detectors[detectorNameTmp] = simpleBlobDetector;
2024  } else {
2025  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(simpleBlobDetector);
2026  }
2027  } else if (detectorNameTmp == "STAR") {
2028 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2029  cv::Ptr<cv::FeatureDetector> starDetector = cv::xfeatures2d::StarDetector::create();
2030  if (!usePyramid) {
2031  m_detectors[detectorNameTmp] = starDetector;
2032  } else {
2033  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(starDetector);
2034  }
2035 #else
2036  std::stringstream ss_msg;
2037  ss_msg << "Failed to initialize the detector: STAR. OpenCV version "
2038  << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2039  throw vpException(vpException::fatalError, ss_msg.str());
2040 #endif
2041  } else if (detectorNameTmp == "AGAST") {
2042  cv::Ptr<cv::FeatureDetector> agastDetector = cv::AgastFeatureDetector::create();
2043  if(!usePyramid) {
2044  m_detectors[detectorNameTmp] = agastDetector;
2045  } else {
2046  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(agastDetector);
2047  }
2048  } else if (detectorNameTmp == "MSD") {
2049 #if (VISP_HAVE_OPENCV_VERSION >= 0x030100)
2050  #if defined (VISP_HAVE_OPENCV_XFEATURES2D)
2051  cv::Ptr<cv::FeatureDetector> msdDetector = cv::xfeatures2d::MSDDetector::create();
2052  if(!usePyramid) {
2053  m_detectors[detectorNameTmp] = msdDetector;
2054  } else {
2055  std::cerr << "You should not use MSD with Pyramid feature detection!" << std::endl;
2056  m_detectors[detectorName] = cv::makePtr<PyramidAdaptedFeatureDetector>(msdDetector);
2057  }
2058  #else
2059  std::stringstream ss_msg;
2060  ss_msg << "Failed to initialize the detector: MSD. OpenCV version "
2061  << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2062  throw vpException(vpException::fatalError, ss_msg.str());
2063  #endif
2064 #else
2065  std::stringstream ss_msg;
2066  ss_msg << "Feature " << detectorName << " is not available in OpenCV version: "
2067  << std::hex << VISP_HAVE_OPENCV_VERSION << " (requires OpenCV >= 3.1).";
  throw vpException(vpException::fatalError, ss_msg.str());
2068 #endif
2069  }
2070  else {
2071  std::cerr << "The detector: " << detectorNameTmp << " is not available." << std::endl;
2072  }
2073 
2074  bool detectorInitialized = false;
2075  if(!usePyramid) {
2076  detectorInitialized = (m_detectors[detectorNameTmp] != NULL);
2077  } else {
2078  detectorInitialized = (m_detectors[detectorName] != NULL);
2079  }
2080 
2081  if(!detectorInitialized) {
2082  std::stringstream ss_msg;
2083  ss_msg << "Failed to initialize the detector: " << detectorNameTmp << " or it is not available in OpenCV version: "
2084  << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2085  throw vpException(vpException::fatalError, ss_msg.str());
2086  }
2087 
2088 #endif
2089 }
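/*
 * Illustrative sketch, not part of the original file: the "Pyramid" prefix
 * handled above selects a detector wrapped in PyramidAdaptedFeatureDetector,
 * e.g. "PyramidFAST" instead of "FAST". The plain OpenCV 3.x equivalent of
 * the unwrapped case, with "image" an assumed cv::Mat input:
 *
 *   cv::Ptr<cv::FeatureDetector> fast = cv::FastFeatureDetector::create();
 *   std::vector<cv::KeyPoint> keyPoints;
 *   fast->detect(image, keyPoints);
 */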
2090 
2096 void vpKeyPoint::initDetectors(const std::vector<std::string> &detectorNames) {
2097  for(std::vector<std::string>::const_iterator it = detectorNames.begin(); it != detectorNames.end(); ++it) {
2098  initDetector(*it);
2099  }
2100 }
2101 
2107 void vpKeyPoint::initExtractor(const std::string &extractorName) {
2108 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
2109  m_extractors[extractorName] = cv::DescriptorExtractor::create(extractorName);
2110 #else
2111  if(extractorName == "SIFT") {
2112 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2113  m_extractors[extractorName] = cv::xfeatures2d::SIFT::create();
2114 #else
2115  std::stringstream ss_msg;
2116  ss_msg << "Failed to initialize the extractor: SIFT. OpenCV version "
2117  << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2118  throw vpException(vpException::fatalError, ss_msg.str());
2119 #endif
2120  } else if(extractorName == "SURF") {
2121 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2122  //Use extended set of SURF descriptors (128 instead of 64)
2123  m_extractors[extractorName] = cv::xfeatures2d::SURF::create(100, 4, 3, true);
2124 #else
2125  std::stringstream ss_msg;
2126  ss_msg << "Failed to initialize the extractor: SURF. OpenCV version "
2127  << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2128  throw vpException(vpException::fatalError, ss_msg.str());
2129 #endif
2130  } else if(extractorName == "ORB") {
2131  m_extractors[extractorName] = cv::ORB::create();
2132  } else if(extractorName == "BRISK") {
2133  m_extractors[extractorName] = cv::BRISK::create();
2134  } else if(extractorName == "FREAK") {
2135 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2136  m_extractors[extractorName] = cv::xfeatures2d::FREAK::create();
2137 #else
2138  std::stringstream ss_msg;
2139  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version "
2140  << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2141  throw vpException(vpException::fatalError, ss_msg.str());
2142 #endif
2143  } else if(extractorName == "BRIEF") {
2144 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2145  m_extractors[extractorName] = cv::xfeatures2d::BriefDescriptorExtractor::create();
2146 #else
2147  std::stringstream ss_msg;
2148  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version "
2149  << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2150  throw vpException(vpException::fatalError, ss_msg.str());
2151 #endif
2152  } else if(extractorName == "KAZE") {
2153  m_extractors[extractorName] = cv::KAZE::create();
2154  } else if(extractorName == "AKAZE") {
2155  m_extractors[extractorName] = cv::AKAZE::create();
2156  } else if(extractorName == "DAISY") {
2157 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2158  m_extractors[extractorName] = cv::xfeatures2d::DAISY::create();
2159 #else
2160  std::stringstream ss_msg;
2161  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version "
2162  << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2163  throw vpException(vpException::fatalError, ss_msg.str());
2164 #endif
2165  } else if(extractorName == "LATCH") {
2166 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2167  m_extractors[extractorName] = cv::xfeatures2d::LATCH::create();
2168 #else
2169  std::stringstream ss_msg;
2170  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version "
2171  << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2172  throw vpException(vpException::fatalError, ss_msg.str());
2173 #endif
2174  } else if(extractorName == "LUCID") {
2175 #ifdef VISP_HAVE_OPENCV_XFEATURES2D
2176 // m_extractors[extractorName] = cv::xfeatures2d::LUCID::create(1, 2);
2177  //Not possible currently, need a color image
2178  throw vpException(vpException::badValue, "Not possible currently as it needs a color image.");
2179 #else
2180  std::stringstream ss_msg;
2181  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version "
2182  << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2183  throw vpException(vpException::fatalError, ss_msg.str());
2184 #endif
2185  } else if (extractorName == "VGG") {
2186 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2187  #if defined (VISP_HAVE_OPENCV_XFEATURES2D)
2188  m_extractors[extractorName] = cv::xfeatures2d::VGG::create();
2189  #else
2190  std::stringstream ss_msg;
2191  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version "
2192  << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2193  throw vpException(vpException::fatalError, ss_msg.str());
2194  #endif
2195 #else
2196  std::stringstream ss_msg;
2197  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version "
2198  << std::hex << VISP_HAVE_OPENCV_VERSION << " is lower than the required OpenCV 3.2.";
2199  throw vpException(vpException::fatalError, ss_msg.str());
2200 #endif
2201  } else if (extractorName == "BoostDesc") {
2202 #if (VISP_HAVE_OPENCV_VERSION >= 0x030200)
2203  #if defined (VISP_HAVE_OPENCV_XFEATURES2D)
2204  m_extractors[extractorName] = cv::xfeatures2d::BoostDesc::create();
2205  #else
2206  std::stringstream ss_msg;
2207  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version "
2208  << std::hex << VISP_HAVE_OPENCV_VERSION << " was not built with the xFeatures2d module.";
2209  throw vpException(vpException::fatalError, ss_msg.str());
2210  #endif
2211 #else
2212  std::stringstream ss_msg;
2213  ss_msg << "Failed to initialize the extractor: " << extractorName << ". OpenCV version "
2214  << std::hex << VISP_HAVE_OPENCV_VERSION << " is lower than the required OpenCV 3.2.";
2215  throw vpException(vpException::fatalError, ss_msg.str());
2216 #endif
2217  }
2218  else {
2219  std::cerr << "The extractor: " << extractorName << " is not available." << std::endl;
2220  }
2221 #endif
2222 
2223  if(m_extractors[extractorName] == NULL) {
2224  std::stringstream ss_msg;
2225  ss_msg << "Failed to initialize the extractor: " << extractorName << " or it is not available in OpenCV version: "
2226  << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2227  throw vpException(vpException::fatalError, ss_msg.str());
2228  }
2229 
2230 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2231  if(extractorName == "SURF") {
2232  //Use extended set of SURF descriptors (128 instead of 64)
2233  m_extractors[extractorName]->set("extended", 1);
2234  }
2235 #endif
2236 }
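/*
 * Illustrative sketch, not part of the original file: the SURF "extended"
 * flag passed above doubles the descriptor length from 64 to 128 floats,
 * which can be checked with descriptorSize() (xfeatures2d module required):
 *
 *   cv::Ptr<cv::xfeatures2d::SURF> surf64 = cv::xfeatures2d::SURF::create(100, 4, 3, false);
 *   cv::Ptr<cv::xfeatures2d::SURF> surf128 = cv::xfeatures2d::SURF::create(100, 4, 3, true);
 *   std::cout << surf64->descriptorSize() << " vs " << surf128->descriptorSize() << std::endl; //64 vs 128
 */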
2237 
2243 void vpKeyPoint::initExtractors(const std::vector<std::string> &extractorNames) {
2244  for(std::vector<std::string>::const_iterator it = extractorNames.begin(); it != extractorNames.end(); ++it) {
2245  initExtractor(*it);
2246  }
2247 
2248  int descriptorType = CV_32F;
2249  bool firstIteration = true;
2250  for(std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2251  it != m_extractors.end(); ++it) {
2252  if(firstIteration) {
2253  firstIteration = false;
2254  descriptorType = it->second->descriptorType();
2255  } else {
2256  if(descriptorType != it->second->descriptorType()) {
2257  throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2258  }
2259  }
2260  }
2261 }
2262 
2263 void vpKeyPoint::initFeatureNames() {
2264  //Create map enum to string
2265 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2266  m_mapOfDetectorNames[DETECTOR_FAST] = "FAST";
2267  m_mapOfDetectorNames[DETECTOR_MSER] = "MSER";
2268  m_mapOfDetectorNames[DETECTOR_ORB] = "ORB";
2269  m_mapOfDetectorNames[DETECTOR_BRISK] = "BRISK";
2270  m_mapOfDetectorNames[DETECTOR_GFTT] = "GFTT";
2271  m_mapOfDetectorNames[DETECTOR_SimpleBlob] = "SimpleBlob";
2272  #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined (VISP_HAVE_OPENCV_XFEATURES2D))
2273  m_mapOfDetectorNames[DETECTOR_STAR] = "STAR";
2274  #endif
2275  #if defined(VISP_HAVE_OPENCV_NONFREE) || defined (VISP_HAVE_OPENCV_XFEATURES2D)
2276  m_mapOfDetectorNames[DETECTOR_SIFT] = "SIFT";
2277  m_mapOfDetectorNames[DETECTOR_SURF] = "SURF";
2278  #endif
2279  #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2280  m_mapOfDetectorNames[DETECTOR_KAZE] = "KAZE";
2281  m_mapOfDetectorNames[DETECTOR_AKAZE] = "AKAZE";
2282  m_mapOfDetectorNames[DETECTOR_AGAST] = "AGAST";
2283  #endif
2284  #if (VISP_HAVE_OPENCV_VERSION >= 0x030100) && defined (VISP_HAVE_OPENCV_XFEATURES2D)
2285  m_mapOfDetectorNames[DETECTOR_MSD] = "MSD";
2286  #endif
2287 #endif
2288 
2289 #if (VISP_HAVE_OPENCV_VERSION >= 0x020403)
2290  m_mapOfDescriptorNames[DESCRIPTOR_ORB] = "ORB";
2291  m_mapOfDescriptorNames[DESCRIPTOR_BRISK] = "BRISK";
2292  #if (VISP_HAVE_OPENCV_VERSION < 0x030000) || (defined (VISP_HAVE_OPENCV_XFEATURES2D))
2293  m_mapOfDescriptorNames[DESCRIPTOR_FREAK] = "FREAK";
2294  m_mapOfDescriptorNames[DESCRIPTOR_BRIEF] = "BRIEF";
2295 #endif
2296  #if defined(VISP_HAVE_OPENCV_NONFREE) || defined (VISP_HAVE_OPENCV_XFEATURES2D)
2297  m_mapOfDescriptorNames[DESCRIPTOR_SIFT] = "SIFT";
2298  m_mapOfDescriptorNames[DESCRIPTOR_SURF] = "SURF";
2299  #endif
2300  #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2301  m_mapOfDescriptorNames[DESCRIPTOR_KAZE] = "KAZE";
2302  m_mapOfDescriptorNames[DESCRIPTOR_AKAZE] = "AKAZE";
2303  #if defined (VISP_HAVE_OPENCV_XFEATURES2D)
2304  m_mapOfDescriptorNames[DESCRIPTOR_DAISY] = "DAISY";
2305  m_mapOfDescriptorNames[DESCRIPTOR_LATCH] = "LATCH";
2306  #endif
2307  #endif
2308  #if (VISP_HAVE_OPENCV_VERSION >= 0x030200) && defined (VISP_HAVE_OPENCV_XFEATURES2D)
2309  m_mapOfDescriptorNames[DESCRIPTOR_VGG] = "VGG";
2310  m_mapOfDescriptorNames[DESCRIPTOR_BoostDesc] = "BoostDesc";
2311  #endif
2312 #endif
2313 }
2314 
2320 void vpKeyPoint::initMatcher(const std::string &matcherName) {
2321  int descriptorType = CV_32F;
2322  bool firstIteration = true;
2323  for(std::map<std::string, cv::Ptr<cv::DescriptorExtractor> >::const_iterator it = m_extractors.begin();
2324  it != m_extractors.end(); ++it) {
2325  if(firstIteration) {
2326  firstIteration = false;
2327  descriptorType = it->second->descriptorType();
2328  } else {
2329  if(descriptorType != it->second->descriptorType()) {
2330  throw vpException(vpException::fatalError, "All the descriptors must have the same type!");
2331  }
2332  }
2333  }
2334 
2335  if(matcherName == "FlannBased") {
2336  if(m_extractors.empty()) {
2337  std::cout << "Warning: No extractor initialized; using floating-point (CV_32F) "
2338  "descriptor type by default!" << std::endl;
2339  }
2340 
2341  if(descriptorType == CV_8U) {
2342 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2343  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
2344 #else
2345  m_matcher = new cv::FlannBasedMatcher(new cv::flann::LshIndexParams(12, 20, 2));
2346 #endif
2347  } else {
2348 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
2349  m_matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
2350 #else
2351  m_matcher = new cv::FlannBasedMatcher(new cv::flann::KDTreeIndexParams());
2352 #endif
2353  }
2354  } else {
2355  m_matcher = cv::DescriptorMatcher::create(matcherName);
2356  }
2357 
2358 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
2359  if(m_matcher != NULL && !m_useKnn && matcherName == "BruteForce") {
2360  m_matcher->set("crossCheck", m_useBruteForceCrossCheck);
2361  }
2362 #endif
2363 
2364  if(m_matcher == NULL) {
2365  std::stringstream ss_msg;
2366  ss_msg << "Failed to initialize the matcher: " << matcherName << " or it is not available in OpenCV version: "
2367  << std::hex << VISP_HAVE_OPENCV_VERSION << ".";
2368  throw vpException(vpException::fatalError, ss_msg.str());
2369  }
2370 }
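/*
 * Illustrative sketch, not part of the original file: the rule implemented
 * above, in isolation. Binary descriptors (CV_8U, e.g. ORB, BRISK, BRIEF)
 * require an LSH index, while floating-point descriptors (CV_32F, e.g. SIFT,
 * SURF) use a KD-tree; "extractor" is an assumed cv::Ptr<cv::DescriptorExtractor>:
 *
 *   cv::Ptr<cv::DescriptorMatcher> matcher;
 *   if(extractor->descriptorType() == CV_8U) {
 *     matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::LshIndexParams>(12, 20, 2));
 *   } else {
 *     matcher = cv::makePtr<cv::FlannBasedMatcher>(cv::makePtr<cv::flann::KDTreeIndexParams>());
 *   }
 */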
2371 
2380 void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &IRef, const vpImage<unsigned char> &ICurrent,
2381  vpImage<unsigned char> &IMatching) {
2382  vpImagePoint topLeftCorner(0, 0);
2383  IMatching.insert(IRef, topLeftCorner);
2384  topLeftCorner = vpImagePoint(0, IRef.getWidth());
2385  IMatching.insert(ICurrent, topLeftCorner);
2386 }
2387 
2395 void vpKeyPoint::insertImageMatching(const vpImage<unsigned char> &ICurrent, vpImage<unsigned char> &IMatching) {
2396  //Number of images in the training database + the current image in which we want to detect the object
2397  int nbImg = (int) (m_mapOfImages.size() + 1);
2398 
2399  if(m_mapOfImages.empty()) {
2400  std::cerr << "There is no training image loaded!" << std::endl;
2401  return;
2402  }
2403 
2404  if(nbImg == 2) {
2405  //Only one training image, so we display them side by side
2406  insertImageMatching(m_mapOfImages.begin()->second, ICurrent, IMatching);
2407  } else {
2408  //Multiple training images, display them as a mosaic image
2409  int nbImgSqrt = vpMath::round(std::sqrt((double) nbImg)); //(int) std::floor(std::sqrt((double) nbImg) + 0.5);
2410  int nbWidth = nbImgSqrt;
2411  int nbHeight = nbImgSqrt;
2412 
2413  if(nbImgSqrt * nbImgSqrt < nbImg) {
2414  nbWidth++;
2415  }
2416 
2417  unsigned int maxW = ICurrent.getWidth(), maxH = ICurrent.getHeight();
2418  for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it) {
2419  if(maxW < it->second.getWidth()) {
2420  maxW = it->second.getWidth();
2421  }
2422 
2423  if(maxH < it->second.getHeight()) {
2424  maxH = it->second.getHeight();
2425  }
2426  }
2427 
2428  //Index of the current image in the grid, computed so that the current image lies in the center square of the mosaic
2429  int medianI = nbHeight / 2;
2430  int medianJ = nbWidth / 2;
2431  int medianIndex = medianI * nbWidth + medianJ;
2432 
2433  int cpt = 0;
2434  for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it, cpt++) {
2435  int local_cpt = cpt;
2436  if(cpt >= medianIndex) {
2437  //Shift by one the index of the training images that come after the current image
2438  local_cpt++;
2439  }
2440  int indexI = local_cpt / nbWidth;
2441  int indexJ = local_cpt - (indexI * nbWidth);
2442  vpImagePoint topLeftCorner((int)maxH*indexI, (int)maxW*indexJ);
2443 
2444  IMatching.insert(it->second, topLeftCorner);
2445  }
2446 
2447  vpImagePoint topLeftCorner((int)maxH*medianI, (int)maxW*medianJ);
2448  IMatching.insert(ICurrent, topLeftCorner);
2449  }
2450 }
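/*
 * Worked example of the mosaic layout above: with 5 training images,
 * nbImg = 6 and nbImgSqrt = round(sqrt(6)) = 2, so the grid is enlarged to
 * nbWidth = 3 by nbHeight = 2 (since 2 * 2 < 6). The current image occupies
 * the center cell medianIndex = (2 / 2) * 3 + 3 / 2 = 4, and the training
 * images with cpt >= 4 are shifted by one cell to keep that cell free.
 */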
2451 
2452 #ifdef VISP_HAVE_XML2
2453 
2458 void vpKeyPoint::loadConfigFile(const std::string &configFile) {
2459  vpXmlConfigParserKeyPoint xmlp;
2460 
2461  try {
2462  //Reset detector and extractor
2463  m_detectorNames.clear();
2464  m_extractorNames.clear();
2465  m_detectors.clear();
2466  m_extractors.clear();
2467 
2468  std::cout << " *********** Parsing XML for configuration for vpKeyPoint ************ " << std::endl;
2469  xmlp.parse(configFile);
2470 
2471  m_detectorNames.push_back(xmlp.getDetectorName());
2472  m_extractorNames.push_back(xmlp.getExtractorName());
2473  m_matcherName = xmlp.getMatcherName();
2474 
2475  switch(xmlp.getMatchingMethod()) {
2476  case vpXmlConfigParserKeyPoint::constantFactorDistanceThreshold:
2477  m_filterType = constantFactorDistanceThreshold;
2478  break;
2479 
2480  case vpXmlConfigParserKeyPoint::stdDistanceThreshold:
2481  m_filterType = stdDistanceThreshold;
2482  break;
2483 
2484  case vpXmlConfigParserKeyPoint::ratioDistanceThreshold:
2485  m_filterType = ratioDistanceThreshold;
2486  break;
2487 
2488  case vpXmlConfigParserKeyPoint::stdAndRatioDistanceThreshold:
2489  m_filterType = stdAndRatioDistanceThreshold;
2490  break;
2491 
2492  case vpXmlConfigParserKeyPoint::noFilterMatching:
2493  m_filterType = noFilterMatching;
2494  break;
2495 
2496  default:
2497  break;
2498  }
2499 
2500  m_matchingFactorThreshold = xmlp.getMatchingFactorThreshold();
2501  m_matchingRatioThreshold = xmlp.getMatchingRatioThreshold();
2502 
2503  m_useRansacVVS = xmlp.getUseRansacVVSPoseEstimation();
2504  m_useConsensusPercentage = xmlp.getUseRansacConsensusPercentage();
2505  m_nbRansacIterations = xmlp.getNbRansacIterations();
2506  m_ransacReprojectionError = xmlp.getRansacReprojectionError();
2507  m_nbRansacMinInlierCount = xmlp.getNbRansacMinInlierCount();
2508  m_ransacThreshold = xmlp.getRansacThreshold();
2509  m_ransacConsensusPercentage = xmlp.getRansacConsensusPercentage();
2510 
2511  if(m_filterType == ratioDistanceThreshold || m_filterType == stdAndRatioDistanceThreshold) {
2512  m_useKnn = true;
2513  } else {
2514  m_useKnn = false;
2515  }
2516 
2517  init();
2518  }
2519  catch(...) {
2520  throw vpException(vpException::ioError, "Can't open XML file \"%s\"\n ", configFile.c_str());
2521  }
2522 }
2523 #endif
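/*
 * Illustrative usage sketch, not part of the original file: assuming ViSP
 * was built with libxml2 and that "config.xml" follows the schema expected
 * by vpXmlConfigParserKeyPoint:
 *
 *   vpKeyPoint keypoint;
 *   keypoint.loadConfigFile("config.xml"); //resets the detectors/extractors and re-runs init()
 */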
2524 
2532 void vpKeyPoint::loadLearningData(const std::string &filename, const bool binaryMode, const bool append) {
2533  int startClassId = 0;
2534  int startImageId = 0;
2535  if(!append) {
2536  m_trainKeyPoints.clear();
2537  m_trainPoints.clear();
2538  m_mapOfImageId.clear();
2539  m_mapOfImages.clear();
2540  } else {
2541  //In append case, find the maximum keypoint class id
2542  for(std::map<int, int>::const_iterator it = m_mapOfImageId.begin(); it != m_mapOfImageId.end(); ++it) {
2543  if(startClassId < it->first) {
2544  startClassId = it->first;
2545  }
2546  }
2547 
2548  //In append case, find the maximum image id
2549  for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it) {
2550  if(startImageId < it->first) {
2551  startImageId = it->first;
2552  }
2553  }
2554  }
2555 
2556  //Get parent directory
2557  std::string parent = vpIoTools::getParent(filename);
2558  if(!parent.empty()) {
2559  parent += "/";
2560  }
2561 
2562  if(binaryMode) {
2563  std::ifstream file(filename.c_str(), std::ifstream::binary);
2564  if(!file.is_open()){
2565  throw vpException(vpException::ioError, "Cannot open the file.");
2566  }
2567 
2568  //Read info about training images
2569  int nbImgs = 0;
2570 // file.read((char *)(&nbImgs), sizeof(nbImgs));
2571  readBinaryIntLE(file, nbImgs);
2572 
2573 #if !defined(VISP_HAVE_MODULE_IO)
2574  if(nbImgs > 0) {
2575  std::cout << "Warning: The learning file contains image data that will not be loaded as the visp_io module "
2576  "is not available!" << std::endl;
2577  }
2578 #endif
2579 
2580  for(int i = 0; i < nbImgs; i++) {
2581  //Read image_id
2582  int id = 0;
2583 // file.read((char *)(&id), sizeof(id));
2584  readBinaryIntLE(file, id);
2585 
2586  int length = 0;
2587 // file.read((char *)(&length), sizeof(length));
2588  readBinaryIntLE(file, length);
2589  //Will contain the path to the training images
2590  char* path = new char[length + 1];//char path[length + 1];
2591 
2592  for(int cpt = 0; cpt < length; cpt++) {
2593  char c;
2594  file.read((char *)(&c), sizeof(c));
2595  path[cpt] = c;
2596  }
2597  path[length] = '\0';
2598 
2599  vpImage<unsigned char> I;
2600 #ifdef VISP_HAVE_MODULE_IO
2601  if(vpIoTools::isAbsolutePathname(std::string(path))) {
2602  vpImageIo::read(I, path);
2603  } else {
2604  vpImageIo::read(I, parent + path);
2605  }
2606 
2607  //Add the image previously loaded only if VISP_HAVE_MODULE_IO
2608  m_mapOfImages[id + startImageId] = I;
2609 #endif
2610 
2611  //Delete path
2612  delete[] path;
2613  }
2614 
2615  //Read whether the 3D point information is saved or not
2616  int have3DInfoInt = 0;
2617 // file.read((char *)(&have3DInfoInt), sizeof(have3DInfoInt));
2618  readBinaryIntLE(file, have3DInfoInt);
2619  bool have3DInfo = have3DInfoInt != 0;
2620 
2621  //Read the number of descriptors
2622  int nRows = 0;
2623 // file.read((char *)(&nRows), sizeof(nRows));
2624  readBinaryIntLE(file, nRows);
2625 
2626  //Read the size of the descriptor
2627  int nCols = 0;
2628 // file.read((char *)(&nCols), sizeof(nCols));
2629  readBinaryIntLE(file, nCols);
2630 
2631  //Read the type of the descriptor
2632  int descriptorType = 5; //CV_32F
2633 // file.read((char *)(&descriptorType), sizeof(descriptorType));
2634  readBinaryIntLE(file, descriptorType);
2635 
2636  cv::Mat trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2637  for(int i = 0; i < nRows; i++) {
2638  //Read information about keyPoint
2639  float u, v, size, angle, response;
2640  int octave, class_id, image_id;
2641 // file.read((char *)(&u), sizeof(u));
2642  readBinaryFloatLE(file, u);
2643 // file.read((char *)(&v), sizeof(v));
2644  readBinaryFloatLE(file, v);
2645 // file.read((char *)(&size), sizeof(size));
2646  readBinaryFloatLE(file, size);
2647 // file.read((char *)(&angle), sizeof(angle));
2648  readBinaryFloatLE(file, angle);
2649 // file.read((char *)(&response), sizeof(response));
2650  readBinaryFloatLE(file, response);
2651 // file.read((char *)(&octave), sizeof(octave));
2652  readBinaryIntLE(file, octave);
2653 // file.read((char *)(&class_id), sizeof(class_id));
2654  readBinaryIntLE(file, class_id);
2655 // file.read((char *)(&image_id), sizeof(image_id));
2656  readBinaryIntLE(file, image_id);
2657  cv::KeyPoint keyPoint(cv::Point2f(u, v), size, angle, response, octave, (class_id + startClassId));
2658  m_trainKeyPoints.push_back(keyPoint);
2659 
2660  if(image_id != -1) {
2661 #ifdef VISP_HAVE_MODULE_IO
2662  //No training images if image_id == -1
2663  m_mapOfImageId[class_id] = image_id + startImageId;
2664 #endif
2665  }
2666 
2667  if(have3DInfo) {
2668  //Read oX, oY, oZ
2669  float oX, oY, oZ;
2670 // file.read((char *)(&oX), sizeof(oX));
2671  readBinaryFloatLE(file, oX);
2672 // file.read((char *)(&oY), sizeof(oY));
2673  readBinaryFloatLE(file, oY);
2674 // file.read((char *)(&oZ), sizeof(oZ));
2675  readBinaryFloatLE(file, oZ);
2676  m_trainPoints.push_back(cv::Point3f(oX, oY, oZ));
2677  }
2678 
2679  for(int j = 0; j < nCols; j++) {
2680  //Read the value of the descriptor
2681  switch(descriptorType) {
2682  case CV_8U:
2683  {
2684  unsigned char value;
2685  file.read((char *)(&value), sizeof(value));
2686  trainDescriptorsTmp.at<unsigned char>(i, j) = value;
2687  }
2688  break;
2689 
2690  case CV_8S:
2691  {
2692  char value;
2693  file.read((char *)(&value), sizeof(value));
2694  trainDescriptorsTmp.at<char>(i, j) = value;
2695  }
2696  break;
2697 
2698  case CV_16U:
2699  {
2700  unsigned short int value;
2701 // file.read((char *)(&value), sizeof(value));
2702  readBinaryUShortLE(file, value);
2703  trainDescriptorsTmp.at<unsigned short int>(i, j) = value;
2704  }
2705  break;
2706 
2707  case CV_16S:
2708  {
2709  short int value;
2710 // file.read((char *)(&value), sizeof(value));
2711  readBinaryShortLE(file, value);
2712  trainDescriptorsTmp.at<short int>(i, j) = value;
2713  }
2714  break;
2715 
2716  case CV_32S:
2717  {
2718  int value;
2719 // file.read((char *)(&value), sizeof(value));
2720  readBinaryIntLE(file, value);
2721  trainDescriptorsTmp.at<int>(i, j) = value;
2722  }
2723  break;
2724 
2725  case CV_32F:
2726  {
2727  float value;
2728 // file.read((char *)(&value), sizeof(value));
2729  readBinaryFloatLE(file, value);
2730  trainDescriptorsTmp.at<float>(i, j) = value;
2731  }
2732  break;
2733 
2734  case CV_64F:
2735  {
2736  double value;
2737 // file.read((char *)(&value), sizeof(value));
2738  readBinaryDoubleLE(file, value);
2739  trainDescriptorsTmp.at<double>(i, j) = value;
2740  }
2741  break;
2742 
2743  default:
2744  {
2745  float value;
2746 // file.read((char *)(&value), sizeof(value));
2747  readBinaryFloatLE(file, value);
2748  trainDescriptorsTmp.at<float>(i, j) = value;
2749  }
2750  break;
2751  }
2752  }
2753  }
2754 
2755  if(!append || m_trainDescriptors.empty()) {
2756  trainDescriptorsTmp.copyTo(m_trainDescriptors);
2757  } else {
2758  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2759  }
2760 
2761  file.close();
2762  } else {
2763 #ifdef VISP_HAVE_XML2
2764  xmlDocPtr doc = NULL;
2765  xmlNodePtr root_element = NULL;
2766 
2767  /*
2768  * this initializes the library and checks potential ABI mismatches
2769  * between the version it was compiled for and the actual shared
2770  * library used.
2771  */
2772  LIBXML_TEST_VERSION
2773 
2774  /*parse the file and get the DOM */
2775  doc = xmlReadFile(filename.c_str(), NULL, 0);
2776 
2777  if (doc == NULL) {
2778  throw vpException(vpException::ioError, "Error with file " + filename);
2779  }
2780 
2781  root_element = xmlDocGetRootElement(doc);
2782 
2783  xmlNodePtr first_level_node = NULL;
2784  char *pEnd = NULL;
2785 
2786  int descriptorType = CV_32F; //float
2787  int nRows = 0, nCols = 0;
2788  int i = 0, j = 0;
2789 
2790  cv::Mat trainDescriptorsTmp;
2791 
2792  for (first_level_node = root_element->children; first_level_node;
2793  first_level_node = first_level_node->next) {
2794 
2795  std::string name((char *) first_level_node->name);
2796  if (first_level_node->type == XML_ELEMENT_NODE && name == "TrainingImageInfo") {
2797  xmlNodePtr image_info_node = NULL;
2798 
2799  for (image_info_node = first_level_node->children; image_info_node; image_info_node =
2800  image_info_node->next) {
2801  name = std::string ((char *) image_info_node->name);
2802 
2803  if(name == "trainImg") {
2804  //Read image_id
2805  xmlChar *image_id_property = xmlGetProp(image_info_node, BAD_CAST "image_id");
2806  int id = 0;
2807  if(image_id_property) {
2808  id = std::atoi((char *) image_id_property);
2809  }
2810  xmlFree(image_id_property);
2811 
2812  vpImage<unsigned char> I;
2813 #ifdef VISP_HAVE_MODULE_IO
2814  std::string path((char *) image_info_node->children->content);
2815  //Read path to the training images
2816  if(vpIoTools::isAbsolutePathname(std::string(path))) {
2817  vpImageIo::read(I, path);
2818  } else {
2819  vpImageIo::read(I, parent + path);
2820  }
2821 
2822  //Add the image previously loaded only if VISP_HAVE_MODULE_IO
2823  m_mapOfImages[id + startImageId] = I;
2824 #endif
2825  }
2826  }
2827  } else if(first_level_node->type == XML_ELEMENT_NODE && name == "DescriptorsInfo") {
2828  xmlNodePtr descriptors_info_node = NULL;
2829  for (descriptors_info_node = first_level_node->children; descriptors_info_node; descriptors_info_node =
2830  descriptors_info_node->next) {
2831  if (descriptors_info_node->type == XML_ELEMENT_NODE) {
2832  name = std::string ((char *) descriptors_info_node->name);
2833 
2834  if(name == "nrows") {
2835  nRows = std::atoi((char *) descriptors_info_node->children->content);
2836  } else if(name == "ncols") {
2837  nCols = std::atoi((char *) descriptors_info_node->children->content);
2838  } else if(name == "type") {
2839  descriptorType = std::atoi((char *) descriptors_info_node->children->content);
2840  }
2841  }
2842  }
2843 
2844  trainDescriptorsTmp = cv::Mat(nRows, nCols, descriptorType);
2845  } else if (first_level_node->type == XML_ELEMENT_NODE && name == "DescriptorInfo") {
2846  xmlNodePtr point_node = NULL;
2847  double u = 0.0, v = 0.0, size = 0.0, angle = 0.0, response = 0.0;
2848  int octave = 0, class_id = 0, image_id = 0;
2849  double oX = 0.0, oY = 0.0, oZ = 0.0;
2850 
2851  std::stringstream ss;
2852 
2853  for (point_node = first_level_node->children; point_node; point_node =
2854  point_node->next) {
2855  if (point_node->type == XML_ELEMENT_NODE) {
2856  name = std::string ((char *) point_node->name);
2857 
2858  //Read information about keypoints
2859  if(name == "u") {
2860  u = std::strtod((char *) point_node->children->content, &pEnd);
2861  } else if(name == "v") {
2862  v = std::strtod((char *) point_node->children->content, &pEnd);
2863  } else if(name == "size") {
2864  size = std::strtod((char *) point_node->children->content, &pEnd);
2865  } else if(name == "angle") {
2866  angle = std::strtod((char *) point_node->children->content, &pEnd);
2867  } else if(name == "response") {
2868  response = std::strtod((char *) point_node->children->content, &pEnd);
2869  } else if(name == "octave") {
2870  octave = std::atoi((char *) point_node->children->content);
2871  } else if(name == "class_id") {
2872  class_id = std::atoi((char *) point_node->children->content);
2873  cv::KeyPoint keyPoint(cv::Point2f((float) u, (float) v), (float) size,
2874  (float) angle, (float) response, octave, (class_id + startClassId));
2875  m_trainKeyPoints.push_back(keyPoint);
2876  } else if(name == "image_id") {
2877  image_id = std::atoi((char *) point_node->children->content);
2878  if(image_id != -1) {
2879 #ifdef VISP_HAVE_MODULE_IO
2880  //No training images if image_id == -1
2881  m_mapOfImageId[m_trainKeyPoints.back().class_id] = image_id + startImageId;
2882 #endif
2883  }
2884  } else if (name == "oX") {
2885  oX = std::atof((char *) point_node->children->content);
2886  } else if (name == "oY") {
2887  oY = std::atof((char *) point_node->children->content);
2888  } else if (name == "oZ") {
2889  oZ = std::atof((char *) point_node->children->content);
2890  m_trainPoints.push_back(cv::Point3f((float) oX, (float) oY, (float) oZ));
2891  } else if (name == "desc") {
2892  xmlNodePtr descriptor_value_node = NULL;
2893  j = 0;
2894 
2895  for (descriptor_value_node = point_node->children;
2896  descriptor_value_node; descriptor_value_node =
2897  descriptor_value_node->next) {
2898 
2899  if (descriptor_value_node->type == XML_ELEMENT_NODE) {
2900  //Read descriptors values
2901  std::string parseStr((char *) descriptor_value_node->children->content);
2902  ss.clear();
2903  ss.str(parseStr);
2904 
2905  if(!ss.fail()) {
2906  switch(descriptorType) {
2907  case CV_8U:
2908  {
2909  //Parse the numeric value [0 ; 255] to an int
2910  int parseValue;
2911  ss >> parseValue;
2912  trainDescriptorsTmp.at<unsigned char>(i, j) = (unsigned char) parseValue;
2913  }
2914  break;
2915 
2916  case CV_8S:
2917  //Parse the numeric value [-128 ; 127] to an int
2918  int parseValue;
2919  ss >> parseValue;
2920  trainDescriptorsTmp.at<char>(i, j) = (char) parseValue;
2921  break;
2922 
2923  case CV_16U:
2924  ss >> trainDescriptorsTmp.at<unsigned short int>(i, j);
2925  break;
2926 
2927  case CV_16S:
2928  ss >> trainDescriptorsTmp.at<short int>(i, j);
2929  break;
2930 
2931  case CV_32S:
2932  ss >> trainDescriptorsTmp.at<int>(i, j);
2933  break;
2934 
2935  case CV_32F:
2936  ss >> trainDescriptorsTmp.at<float>(i, j);
2937  break;
2938 
2939  case CV_64F:
2940  ss >> trainDescriptorsTmp.at<double>(i, j);
2941  break;
2942 
2943  default:
2944  ss >> trainDescriptorsTmp.at<float>(i, j);
2945  break;
2946  }
2947  } else {
2948  std::cerr << "Error when converting: " << ss.str() << std::endl;
2949  }
2950 
2951  j++;
2952  }
2953  }
2954  }
2955  }
2956  }
2957  i++;
2958  }
2959  }
2960 
2961  if(!append || m_trainDescriptors.empty()) {
2962  trainDescriptorsTmp.copyTo(m_trainDescriptors);
2963  } else {
2964  cv::vconcat(m_trainDescriptors, trainDescriptorsTmp, m_trainDescriptors);
2965  }
2966 
2967  /*free the document */
2968  xmlFreeDoc(doc);
2969 
2970  /*
2971  *Free the global variables that may
2972  *have been allocated by the parser.
2973  */
2974  xmlCleanupParser();
2975 #else
2976  std::cout << "Error: libxml2 is required!" << std::endl;
2977 #endif
2978  }
2979 
2980  //Convert OpenCV type to ViSP type for compatibility
2981  vpConvert::convertFromOpenCV(m_trainKeyPoints, referenceImagePointsList);
2982  vpConvert::convertFromOpenCV(this->m_trainPoints, m_trainVpPoints);
2983 
2984  //Add train descriptors in matcher object
2985  m_matcher->clear();
2986  m_matcher->add(std::vector<cv::Mat>(1, m_trainDescriptors));
2987 
2988  //Set _reference_computed to true since a learning file has been loaded
2989  _reference_computed = true;
2990 }
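/*
 * Illustrative sketch, not part of the original file: what a little-endian
 * reader such as readBinaryIntLE() has to guarantee so that learning files
 * stay portable across hosts; the byte swap shown here is an assumption
 * about the implementation, guarded by the endianness macro defined at the
 * top of this file:
 *
 *   void readInt32LE(std::ifstream &file, int32_t &value) {
 *     file.read(reinterpret_cast<char *>(&value), sizeof(value));
 *   #ifdef VISP_BIG_ENDIAN
 *     uint32_t v = (uint32_t) value;
 *     value = (int32_t) ((v >> 24) | ((v >> 8) & 0x0000FF00u) | ((v << 8) & 0x00FF0000u) | (v << 24));
 *   #endif
 *   }
 */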
2991 
3000 void vpKeyPoint::match(const cv::Mat &trainDescriptors, const cv::Mat &queryDescriptors,
3001  std::vector<cv::DMatch> &matches, double &elapsedTime) {
3002  double t = vpTime::measureTimeMs();
3003 
3004  if(m_useKnn) {
3005  m_knnMatches.clear();
3006 
3007  if(m_useMatchTrainToQuery) {
3008  std::vector<std::vector<cv::DMatch> > knnMatchesTmp;
3009 
3010  //Match train descriptors to query descriptors
3011  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
3012  matcherTmp->knnMatch(trainDescriptors, queryDescriptors, knnMatchesTmp, 2);
3013 
3014  for(std::vector<std::vector<cv::DMatch> >::const_iterator it1 = knnMatchesTmp.begin(); it1 != knnMatchesTmp.end(); ++it1) {
3015  std::vector<cv::DMatch> tmp;
3016  for(std::vector<cv::DMatch>::const_iterator it2 = it1->begin(); it2 != it1->end(); ++it2) {
3017  tmp.push_back(cv::DMatch(it2->trainIdx, it2->queryIdx, it2->distance));
3018  }
3019  m_knnMatches.push_back(tmp);
3020  }
3021 
3022  matches.resize(m_knnMatches.size());
3023  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3024  } else {
3025  //Match query descriptors to train descriptors
3026  m_matcher->knnMatch(queryDescriptors, m_knnMatches, 2);
3027  matches.resize(m_knnMatches.size());
3028  std::transform(m_knnMatches.begin(), m_knnMatches.end(), matches.begin(), knnToDMatch);
3029  }
3030  } else {
3031  matches.clear();
3032 
3033  if(m_useMatchTrainToQuery) {
3034  std::vector<cv::DMatch> matchesTmp;
3035  //Match train descriptors to query descriptors
3036  cv::Ptr<cv::DescriptorMatcher> matcherTmp = m_matcher->clone(true);
3037  matcherTmp->match(trainDescriptors, queryDescriptors, matchesTmp);
3038 
3039  for(std::vector<cv::DMatch>::const_iterator it = matchesTmp.begin(); it != matchesTmp.end(); ++it) {
3040  matches.push_back(cv::DMatch(it->trainIdx, it->queryIdx, it->distance));
3041  }
3042  } else {
3043  //Match query descriptors to train descriptors
3044  m_matcher->match(queryDescriptors, matches);
3045  }
3046  }
3047  elapsedTime = vpTime::measureTimeMs() - t;
3048 }
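/*
 * Illustrative sketch, not part of the original file: when descriptors are
 * matched from train to query (m_useMatchTrainToQuery), queryIdx and
 * trainIdx in the resulting cv::DMatch refer to the opposite sets, hence the
 * index swap performed above. In isolation, with "matcherClone" an assumed
 * cv::Ptr<cv::DescriptorMatcher>:
 *
 *   std::vector<cv::DMatch> matchesTmp, matchesSwapped;
 *   matcherClone->match(trainDescriptors, queryDescriptors, matchesTmp);
 *   for(size_t i = 0; i < matchesTmp.size(); i++) {
 *     matchesSwapped.push_back(cv::DMatch(matchesTmp[i].trainIdx, matchesTmp[i].queryIdx, matchesTmp[i].distance));
 *   }
 */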
3049 
3056 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I) {
3057  return matchPoint(I, vpRect());
3058 }
3059 
3070 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I,
3071  const vpImagePoint &iP,
3072  const unsigned int height, const unsigned int width) {
3073  return matchPoint(I, vpRect(iP, width, height));
3074 }
3075 
3084 unsigned int vpKeyPoint::matchPoint(const vpImage<unsigned char> &I,
3085  const vpRect& rectangle) {
3086  if(m_trainDescriptors.empty()) {
3087  std::cerr << "Reference is empty." << std::endl;
3088  if(!_reference_computed) {
3089  std::cerr << "Reference is not computed." << std::endl;
3090  }
3091  std::cerr << "Matching is not possible." << std::endl;
3092 
3093  return 0;
3094  }
3095 
3096  if(m_useAffineDetection) {
3097  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3098  std::vector<cv::Mat> listOfQueryDescriptors;
3099 
3100  //Detect keypoints and extract descriptors on multiple images
3101  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3102 
3103  //Flatten the different train lists
3104  m_queryKeyPoints.clear();
3105  for(std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3106  it != listOfQueryKeyPoints.end(); ++it) {
3107  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3108  }
3109 
3110  bool first = true;
3111  for(std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end(); ++it) {
3112  if(first) {
3113  first = false;
3114  it->copyTo(m_queryDescriptors);
3115  } else {
3116  m_queryDescriptors.push_back(*it);
3117  }
3118  }
3119  } else {
3120  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3121  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3122  }
3123 
3124  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3125 
3126  if(m_filterType != noFilterMatching) {
3127  m_queryFilteredKeyPoints.clear();
3128  m_objectFilteredPoints.clear();
3129  m_filteredMatches.clear();
3130 
3131  filterMatches();
3132  } else {
3133  if(m_useMatchTrainToQuery) {
3134  //Add only the query keypoints matched with a train keypoint
3135  m_queryFilteredKeyPoints.clear();
3136  m_filteredMatches.clear();
3137  for(std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3138  m_filteredMatches.push_back(cv::DMatch((int) m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3139  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t) it->queryIdx]);
3140  }
3141  } else {
3142  m_queryFilteredKeyPoints = m_queryKeyPoints;
3143  m_filteredMatches = m_matches;
3144  }
3145 
3146  if(!m_trainPoints.empty()) {
3147  m_objectFilteredPoints.clear();
3148  //Add 3D object points so that the same index in m_queryFilteredKeyPoints and in m_objectFilteredPoints
3149  // refers to the same matched train object point
3150  for(std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3151  //m_matches is normally ordered following the queryDescriptor index
3152  m_objectFilteredPoints.push_back(m_trainPoints[(size_t) it->trainIdx]);
3153  }
3154  }
3155  }
3156 
3157  //Convert OpenCV type to ViSP type for compatibility
3158  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
3159  vpConvert::convertFromOpenCV(m_filteredMatches, matchedReferencePoints);
3160 
3161  return static_cast<unsigned int>(m_filteredMatches.size());
3162 }
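/*
 * Illustrative usage sketch, not part of the original file: typical 2D
 * matching flow, assuming "Iref" and "Icur" are loaded vpImage<unsigned char>
 * images:
 *
 *   vpKeyPoint keypoint;
 *   keypoint.buildReference(Iref); //detect + extract on the reference image and train the matcher
 *   unsigned int nbMatches = keypoint.matchPoint(Icur); //number of filtered matches
 */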
3163 
3174 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I,
3175  const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3176  bool (*func)(vpHomogeneousMatrix *), const vpRect& rectangle) {
3177  double error, elapsedTime;
3178  return matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3179 }
3180 
3194 bool vpKeyPoint::matchPoint(const vpImage<unsigned char> &I,
3195  const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3196  double &error, double &elapsedTime, bool (*func)(vpHomogeneousMatrix *), const vpRect& rectangle) {
3197  //Check if we have training descriptors
3198  if(m_trainDescriptors.empty()) {
3199  std::cerr << "Reference is empty." << std::endl;
3200  if(!_reference_computed) {
3201  std::cerr << "Reference is not computed." << std::endl;
3202  }
3203  std::cerr << "Matching is not possible." << std::endl;
3204 
3205  return false;
3206  }
3207 
3208  if(m_useAffineDetection) {
3209  std::vector<std::vector<cv::KeyPoint> > listOfQueryKeyPoints;
3210  std::vector<cv::Mat> listOfQueryDescriptors;
3211 
3212  //Detect keypoints and extract descriptors on multiple images
3213  detectExtractAffine(I, listOfQueryKeyPoints, listOfQueryDescriptors);
3214 
3215  //Flatten the different train lists
3216  m_queryKeyPoints.clear();
3217  for(std::vector<std::vector<cv::KeyPoint> >::const_iterator it = listOfQueryKeyPoints.begin();
3218  it != listOfQueryKeyPoints.end(); ++it) {
3219  m_queryKeyPoints.insert(m_queryKeyPoints.end(), it->begin(), it->end());
3220  }
3221 
3222  bool first = true;
3223  for(std::vector<cv::Mat>::const_iterator it = listOfQueryDescriptors.begin(); it != listOfQueryDescriptors.end(); ++it) {
3224  if(first) {
3225  first = false;
3226  it->copyTo(m_queryDescriptors);
3227  } else {
3228  m_queryDescriptors.push_back(*it);
3229  }
3230  }
3231  } else {
3232  detect(I, m_queryKeyPoints, m_detectionTime, rectangle);
3233  extract(I, m_queryKeyPoints, m_queryDescriptors, m_extractionTime);
3234  }
3235 
3236  match(m_trainDescriptors, m_queryDescriptors, m_matches, m_matchingTime);
3237 
3238  elapsedTime = m_detectionTime + m_extractionTime + m_matchingTime;
3239 
3240  if(m_filterType != noFilterMatching) {
3241  m_queryFilteredKeyPoints.clear();
3242  m_objectFilteredPoints.clear();
3243  m_filteredMatches.clear();
3244 
3245  filterMatches();
3246  } else {
3247  if(m_useMatchTrainToQuery) {
3248  //Add only the query keypoints matched with a train keypoint
3249  m_queryFilteredKeyPoints.clear();
3250  m_filteredMatches.clear();
3251  for(std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3252  m_filteredMatches.push_back(cv::DMatch((int) m_queryFilteredKeyPoints.size(), it->trainIdx, it->distance));
3253  m_queryFilteredKeyPoints.push_back(m_queryKeyPoints[(size_t) it->queryIdx]);
3254  }
3255  } else {
3256  m_queryFilteredKeyPoints = m_queryKeyPoints;
3257  m_filteredMatches = m_matches;
3258  }
3259 
3260  if(!m_trainPoints.empty()) {
3261  m_objectFilteredPoints.clear();
3262  //Add 3D object points so that the same index in m_queryFilteredKeyPoints and in m_objectFilteredPoints
3263  // refers to the same matched train object point
3264  for(std::vector<cv::DMatch>::const_iterator it = m_matches.begin(); it != m_matches.end(); ++it) {
3265  //m_matches is normally ordered following the queryDescriptor index
3266  m_objectFilteredPoints.push_back(m_trainPoints[(size_t) it->trainIdx]);
3267  }
3268  }
3269  }
3270 
3271  //Convert OpenCV type to ViSP type for compatibility
3272  vpConvert::convertFromOpenCV(m_queryFilteredKeyPoints, currentImagePointsList);
3273  vpConvert::convertFromOpenCV(m_filteredMatches, matchedReferencePoints);
3274 
3275  //error = std::numeric_limits<double>::max(); // create an error under Windows. To fix it we have to add #undef max
3276  error = DBL_MAX;
3277  m_ransacInliers.clear();
3278  m_ransacOutliers.clear();
3279 
3280  if(m_useRansacVVS) {
3281  std::vector<vpPoint> objectVpPoints(m_objectFilteredPoints.size());
3282  size_t cpt = 0;
3283  //Create a list of vpPoint with 2D coordinates (current keypoint location) + 3D coordinates (world/object coordinates)
3284  for(std::vector<cv::Point3f>::const_iterator it = m_objectFilteredPoints.begin(); it != m_objectFilteredPoints.end();
3285  ++it, cpt++) {
3286  vpPoint pt;
3287  pt.setWorldCoordinates(it->x, it->y, it->z);
3288 
3289  vpImagePoint imP(m_queryFilteredKeyPoints[cpt].pt.y, m_queryFilteredKeyPoints[cpt].pt.x);
3290 
3291  double x = 0.0, y = 0.0;
3292  vpPixelMeterConversion::convertPoint(cam, imP, x, y);
3293  pt.set_x(x);
3294  pt.set_y(y);
3295 
3296  objectVpPoints[cpt] = pt;
3297  }
3298 
3299  std::vector<vpPoint> inliers;
3300  std::vector<unsigned int> inlierIndex;
3301 
3302  bool res = computePose(objectVpPoints, cMo, inliers, inlierIndex, m_poseTime, func);
3303 
3304  std::map<unsigned int, bool> mapOfInlierIndex;
3305  m_matchRansacKeyPointsToPoints.clear();
3306 
3307  for (std::vector<unsigned int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3308  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(m_queryFilteredKeyPoints[(size_t)(*it)],
3309  m_objectFilteredPoints[(size_t)(*it)]));
3310  mapOfInlierIndex[*it] = true;
3311  }
3312 
3313  for(size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3314  if(mapOfInlierIndex.find((unsigned int) i) == mapOfInlierIndex.end()) {
3315  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3316  }
3317  }
3318 
3319  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3320 
3321  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3322  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(), m_ransacInliers.begin(),
3323  matchRansacToVpImage);
3324 
3325  elapsedTime += m_poseTime;
3326 
3327  return res;
3328  } else {
3329  std::vector<cv::Point2f> imageFilteredPoints;
3330  cv::KeyPoint::convert(m_queryFilteredKeyPoints, imageFilteredPoints);
3331  std::vector<int> inlierIndex;
3332  bool res = computePose(imageFilteredPoints, m_objectFilteredPoints, cam, cMo, inlierIndex, m_poseTime);
3333 
3334  std::map<int, bool> mapOfInlierIndex;
3335  m_matchRansacKeyPointsToPoints.clear();
3336 
3337  for (std::vector<int>::const_iterator it = inlierIndex.begin(); it != inlierIndex.end(); ++it) {
3338  m_matchRansacKeyPointsToPoints.push_back(std::pair<cv::KeyPoint, cv::Point3f>(m_queryFilteredKeyPoints[(size_t)(*it)],
3339  m_objectFilteredPoints[(size_t)(*it)]));
3340  mapOfInlierIndex[*it] = true;
3341  }
3342 
3343  for(size_t i = 0; i < m_queryFilteredKeyPoints.size(); i++) {
3344  if(mapOfInlierIndex.find((int) i) == mapOfInlierIndex.end()) {
3345  m_ransacOutliers.push_back(vpImagePoint(m_queryFilteredKeyPoints[i].pt.y, m_queryFilteredKeyPoints[i].pt.x));
3346  }
3347  }
3348 
3349  error = computePoseEstimationError(m_matchRansacKeyPointsToPoints, cam, cMo);
3350 
3351  m_ransacInliers.resize(m_matchRansacKeyPointsToPoints.size());
3352  std::transform(m_matchRansacKeyPointsToPoints.begin(), m_matchRansacKeyPointsToPoints.end(), m_ransacInliers.begin(),
3353  matchRansacToVpImage);
3354 
3355  elapsedTime += m_poseTime;
3356 
3357  return res;
3358  }
3359 }
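For orientation, a minimal, hypothetical calling sequence for the pose-estimation overload that ends above. This is a sketch only: the file names, the camera intrinsics and the prior existence of learning data containing 3D points are assumptions, not part of the original source.

// Hedged usage sketch for vpKeyPoint::matchPoint() with pose estimation.
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/vision/vpKeyPoint.h>

int main() {
  vpImage<unsigned char> I;
  vpImageIo::read(I, "current.pgm");                    // placeholder image path

  vpKeyPoint keypoint;
  // Assumption: "learning.bin" was produced by saveLearningData() and holds
  // keypoints together with their 3D object coordinates.
  keypoint.loadLearningData("learning.bin", true);

  vpCameraParameters cam(600.0, 600.0, 320.0, 240.0);   // assumed intrinsics
  vpHomogeneousMatrix cMo;
  double error = 0.0, elapsedTime = 0.0;
  bool found = keypoint.matchPoint(I, cam, cMo, error, elapsedTime);
  return found ? 0 : 1;
}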
3360 
3378 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, vpRect &boundingBox, vpImagePoint &centerOfGravity,
3379  const bool isPlanarObject, std::vector<vpImagePoint> *imPts1, std::vector<vpImagePoint> *imPts2,
3380  double *meanDescriptorDistance, double *detection_score, const vpRect& rectangle) {
3381  if(imPts1 != NULL && imPts2 != NULL) {
3382  imPts1->clear();
3383  imPts2->clear();
3384  }
3385 
3386  matchPoint(I, rectangle);
3387 
3388  double meanDescriptorDistanceTmp = 0.0;
3389  for(std::vector<cv::DMatch>::const_iterator it = m_filteredMatches.begin(); it != m_filteredMatches.end(); ++it) {
3390  meanDescriptorDistanceTmp += (double) it->distance;
3391  }
3392 
3393  meanDescriptorDistanceTmp /= (double) m_filteredMatches.size();
3394  double score = (double) m_filteredMatches.size() / meanDescriptorDistanceTmp;
3395 
3396  if(meanDescriptorDistance != NULL) {
3397  *meanDescriptorDistance = meanDescriptorDistanceTmp;
3398  }
3399  if(detection_score != NULL) {
3400  *detection_score = score;
3401  }
3402 
3403  if(m_filteredMatches.size() >= 4) {
3404  //Training / Reference 2D points
3405  std::vector<cv::Point2f> points1(m_filteredMatches.size());
3406  //Query / Current 2D points
3407  std::vector<cv::Point2f> points2(m_filteredMatches.size());
3408 
3409  for(size_t i = 0; i < m_filteredMatches.size(); i++) {
3410  points1[i] = cv::Point2f(m_trainKeyPoints[(size_t)m_filteredMatches[i].trainIdx].pt);
3411  points2[i] = cv::Point2f(m_queryFilteredKeyPoints[(size_t)m_filteredMatches[i].queryIdx].pt);
3412  }
3413 
3414  std::vector<vpImagePoint> inliers;
3415  if(isPlanarObject) {
3416 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
3417  cv::Mat homographyMatrix = cv::findHomography(points1, points2, CV_RANSAC);
3418 #else
3419  cv::Mat homographyMatrix = cv::findHomography(points1, points2, cv::RANSAC);
3420 #endif
3421 
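  // Each reference point p1 = (x1, y1, 1)^T is mapped through the estimated
  // homography, p2' = H * p1, then normalized by its third (homogeneous)
  // coordinate; the reprojection error is the Euclidean distance between
  // p2'/p2'_z and the matched current point p2, with a 6-pixel inlier gate.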
3422  for(size_t i = 0; i < m_filteredMatches.size(); i++ ) {
3423  //Compute reprojection error
3424  cv::Mat realPoint = cv::Mat(3, 1, CV_64F);
3425  realPoint.at<double>(0,0) = points1[i].x;
3426  realPoint.at<double>(1,0) = points1[i].y;
3427  realPoint.at<double>(2,0) = 1.f;
3428 
3429  cv::Mat reprojectedPoint = homographyMatrix * realPoint;
3430  double err_x = (reprojectedPoint.at<double>(0,0) / reprojectedPoint.at<double>(2,0)) - points2[i].x;
3431  double err_y = (reprojectedPoint.at<double>(1,0) / reprojectedPoint.at<double>(2,0)) - points2[i].y;
3432  double reprojectionError = std::sqrt(err_x*err_x + err_y*err_y);
3433 
3434  if(reprojectionError < 6.0) {
3435  inliers.push_back(vpImagePoint((double) points2[i].y, (double) points2[i].x));
3436  if(imPts1 != NULL) {
3437  imPts1->push_back(vpImagePoint((double) points1[i].y, (double) points1[i].x));
3438  }
3439 
3440  if(imPts2 != NULL) {
3441  imPts2->push_back(vpImagePoint((double) points2[i].y, (double) points2[i].x));
3442  }
3443  }
3444  }
3445  } else if(m_filteredMatches.size() >= 8) {
3446  cv::Mat fundamentalInliers;
3447  cv::Mat fundamentalMatrix = cv::findFundamentalMat(points1, points2, cv::FM_RANSAC, 3, 0.99, fundamentalInliers);
3448 
3449  for(size_t i = 0; i < (size_t) fundamentalInliers.rows; i++) {
3450  if(fundamentalInliers.at<uchar>((int) i, 0)) {
3451  inliers.push_back(vpImagePoint((double) points2[i].y, (double) points2[i].x));
3452 
3453  if(imPts1 != NULL) {
3454  imPts1->push_back(vpImagePoint((double) points1[i].y, (double) points1[i].x));
3455  }
3456 
3457  if(imPts2 != NULL) {
3458  imPts2->push_back(vpImagePoint((double) points2[i].y, (double) points2[i].x));
3459  }
3460  }
3461  }
3462  }
3463 
3464  if(!inliers.empty()) {
3465  //Build a polygon with the list of inlier keypoints detected in the current image to get the bounding box
3466  vpPolygon polygon(inliers);
3467  boundingBox = polygon.getBoundingBox();
3468 
3469  //Compute the center of gravity
3470  double meanU = 0.0, meanV = 0.0;
3471  for(std::vector<vpImagePoint>::const_iterator it = inliers.begin(); it != inliers.end(); ++it) {
3472  meanU += it->get_u();
3473  meanV += it->get_v();
3474  }
3475 
3476  meanU /= (double) inliers.size();
3477  meanV /= (double) inliers.size();
3478 
3479  centerOfGravity.set_u(meanU);
3480  centerOfGravity.set_v(meanV);
3481  }
3482  } else {
3483  //Too few matches
3484  return false;
3485  }
3486 
3487  if(m_detectionMethod == detectionThreshold) {
3488  return meanDescriptorDistanceTmp < m_detectionThreshold;
3489  } else {
3490  return score > m_detectionScore;
3491  }
3492 }
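A hedged usage sketch for the detection method above (the prior reference construction and all names are illustrative assumptions):

// Detect a planar object whose appearance was learned beforehand.
vpImage<unsigned char> I;                 // current image, acquired elsewhere
vpKeyPoint keypoint;
// Assumption: keypoint.buildReference(Iref) was called on a reference view.
vpRect boundingBox;
vpImagePoint centerOfGravity;
double meanDescriptorDistance = 0.0, detectionScore = 0.0;
bool detected = keypoint.matchPointAndDetect(I, boundingBox, centerOfGravity,
                                             true /*isPlanarObject*/, NULL, NULL,
                                             &meanDescriptorDistance, &detectionScore);
if (detected) {
  // boundingBox encloses the inlier matches; centerOfGravity is their centroid.
}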
3493 
3512 bool vpKeyPoint::matchPointAndDetect(const vpImage<unsigned char> &I, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo,
3513  double &error, double &elapsedTime, vpRect &boundingBox, vpImagePoint &centerOfGravity,
3514  bool (*func)(vpHomogeneousMatrix *), const vpRect& rectangle) {
3515  bool isMatchOk = matchPoint(I, cam, cMo, error, elapsedTime, func, rectangle);
3516  if(isMatchOk) {
3517  //Use the pose estimated to project the model points in the image
3518  vpPoint pt;
3519  vpImagePoint imPt;
3520  std::vector<vpImagePoint> modelImagePoints(m_trainVpPoints.size());
3521  size_t cpt = 0;
3522  for(std::vector<vpPoint>::const_iterator it = m_trainVpPoints.begin(); it != m_trainVpPoints.end(); ++it, cpt++) {
3523  pt = *it;
3524  pt.project(cMo);
3525  vpMeterPixelConversion::convertPoint(cam, pt.get_x(), pt.get_y(), imPt);
3526  modelImagePoints[cpt] = imPt;
3527  }
3528 
3529  //Build a polygon with the list of model image points to get the bounding box
3530  vpPolygon polygon(modelImagePoints);
3531  boundingBox = polygon.getBoundingBox();
3532 
3533  //Compute the center of gravity of the current inlier keypoints
3534  double meanU = 0.0, meanV = 0.0;
3535  for(std::vector<vpImagePoint>::const_iterator it = m_ransacInliers.begin(); it != m_ransacInliers.end();
3536  ++it) {
3537  meanU += it->get_u();
3538  meanV += it->get_v();
3539  }
3540 
3541  meanU /= (double) m_ransacInliers.size();
3542  meanV /= (double) m_ransacInliers.size();
3543 
3544  centerOfGravity.set_u(meanU);
3545  centerOfGravity.set_v(meanV);
3546  }
3547 
3548  return isMatchOk;
3549 }
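A corresponding hypothetical call for this pose-plus-detection overload (keypoint, I and cam as in the earlier sketches; learning data with 3D points must have been loaded beforehand):

vpHomogeneousMatrix cMo;
vpRect boundingBox;
vpImagePoint centerOfGravity;
double error = 0.0, elapsedTime = 0.0;
bool ok = keypoint.matchPointAndDetect(I, cam, cMo, error, elapsedTime,
                                       boundingBox, centerOfGravity);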
3550 
3562 void vpKeyPoint::detectExtractAffine(const vpImage<unsigned char> &I,std::vector<std::vector<cv::KeyPoint> >& listOfKeypoints,
3563  std::vector<cv::Mat>& listOfDescriptors, std::vector<vpImage<unsigned char> > *listOfAffineI) {
3564 #if 0
3565  cv::Mat img;
3566  vpImageConvert::convert(I, img);
3567  listOfKeypoints.clear();
3568  listOfDescriptors.clear();
3569 
3570  for (int tl = 1; tl < 6; tl++) {
3571  double t = pow(2, 0.5 * tl);
3572  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3573  std::vector<cv::KeyPoint> keypoints;
3574  cv::Mat descriptors;
3575 
3576  cv::Mat timg, mask, Ai;
3577  img.copyTo(timg);
3578 
3579  affineSkew(t, phi, timg, mask, Ai);
3580 
3581 
3582  if(listOfAffineI != NULL) {
3583  cv::Mat img_disp;
3584  bitwise_and(mask, timg, img_disp);
3585  vpImage<unsigned char> tI;
3586  vpImageConvert::convert(img_disp, tI);
3587  listOfAffineI->push_back(tI);
3588  }
3589 #if 0
3590  cv::Mat img_disp;
3591  cv::bitwise_and(mask, timg, img_disp);
3592  cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE ); // Create a window for display.
3593  cv::imshow( "Skew", img_disp );
3594  cv::waitKey(0);
3595 #endif
3596 
3597  for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3598  it != m_detectors.end(); ++it) {
3599  std::vector<cv::KeyPoint> kp;
3600  it->second->detect(timg, kp, mask);
3601  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3602  }
3603 
3604  double elapsedTime;
3605  extract(timg, keypoints, descriptors, elapsedTime);
3606 
3607  for(unsigned int i = 0; i < keypoints.size(); i++) {
3608  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3609  cv::Mat kpt_t = Ai * cv::Mat(kpt);
3610  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
3611  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
3612  }
3613 
3614  listOfKeypoints.push_back(keypoints);
3615  listOfDescriptors.push_back(descriptors);
3616  }
3617  }
3618 
3619 #else
3620  cv::Mat img;
3621  vpImageConvert::convert(I, img);
3622 
3623  //Create a vector for storing the affine skew parameters
3624  std::vector<std::pair<double, int> > listOfAffineParams;
3625  for (int tl = 1; tl < 6; tl++) {
3626  double t = pow(2, 0.5 * tl);
3627  for (int phi = 0; phi < 180; phi += (int)(72.0 / t)) {
3628  listOfAffineParams.push_back(std::pair<double, int>(t, phi));
3629  }
3630  }
3631 
3632  listOfKeypoints.resize(listOfAffineParams.size());
3633  listOfDescriptors.resize(listOfAffineParams.size());
3634 
3635  if(listOfAffineI != NULL) {
3636  listOfAffineI->resize(listOfAffineParams.size());
3637  }
3638 
3639 #ifdef VISP_HAVE_OPENMP
3640  #pragma omp parallel for
3641 #endif
3642  for(int cpt = 0; cpt < static_cast<int>(listOfAffineParams.size()); cpt++) {
3643  std::vector<cv::KeyPoint> keypoints;
3644  cv::Mat descriptors;
3645 
3646  cv::Mat timg, mask, Ai;
3647  img.copyTo(timg);
3648 
3649  affineSkew(listOfAffineParams[(size_t) cpt].first, listOfAffineParams[(size_t) cpt].second, timg, mask, Ai);
3650 
3651 
3652  if(listOfAffineI != NULL) {
3653  cv::Mat img_disp;
3654  bitwise_and(mask, timg, img_disp);
3655  vpImage<unsigned char> tI;
3656  vpImageConvert::convert(img_disp, tI);
3657  (*listOfAffineI)[(size_t) cpt] = tI;
3658  }
3659 
3660 #if 0
3661  cv::Mat img_disp;
3662  cv::bitwise_and(mask, timg, img_disp);
3663  cv::namedWindow( "Skew", cv::WINDOW_AUTOSIZE ); // Create a window for display.
3664  cv::imshow( "Skew", img_disp );
3665  cv::waitKey(0);
3666 #endif
3667 
3668  for(std::map<std::string, cv::Ptr<cv::FeatureDetector> >::const_iterator it = m_detectors.begin();
3669  it != m_detectors.end(); ++it) {
3670  std::vector<cv::KeyPoint> kp;
3671  it->second->detect(timg, kp, mask);
3672  keypoints.insert(keypoints.end(), kp.begin(), kp.end());
3673  }
3674 
3675  double elapsedTime;
3676  extract(timg, keypoints, descriptors, elapsedTime);
3677 
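  // Ai, computed by affineSkew(), is the affine transform mapping coordinates
  // of the simulated (tilted/rotated) view back to the original image frame;
  // applying it to (x, y, 1)^T restores each keypoint position in the input image.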
3678  for(size_t i = 0; i < keypoints.size(); i++) {
3679  cv::Point3f kpt(keypoints[i].pt.x, keypoints[i].pt.y, 1.f);
3680  cv::Mat kpt_t = Ai * cv::Mat(kpt);
3681  keypoints[i].pt.x = kpt_t.at<float>(0, 0);
3682  keypoints[i].pt.y = kpt_t.at<float>(1, 0);
3683  }
3684 
3685  listOfKeypoints[(size_t) cpt] = keypoints;
3686  listOfDescriptors[(size_t) cpt] = descriptors;
3687  }
3688 #endif
3689 }
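The loop above samples tilts t = 2^(tl/2) for tl = 1..5 and in-plane rotations phi in steps of 72/t degrees, following the ASIFT view-simulation scheme. A hypothetical call (the image I is assumed to be loaded already):

vpImage<unsigned char> I;                 // input image, acquired elsewhere
std::vector<std::vector<cv::KeyPoint> > listOfKeypoints;
std::vector<cv::Mat> listOfDescriptors;
vpKeyPoint keypoint;
keypoint.detectExtractAffine(I, listOfKeypoints, listOfDescriptors);
// Entry k corresponds to one simulated view; keypoint coordinates are already
// mapped back to the original image frame.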
3690 
3694 void vpKeyPoint::reset() {
3695  //vpBasicKeyPoint class
3696  referenceImagePointsList.clear(); currentImagePointsList.clear(); matchedReferencePoints.clear(); _reference_computed = false;
3697 
3698 
3699  m_computeCovariance = false; m_covarianceMatrix = vpMatrix(); m_currentImageId = 0; m_detectionMethod = detectionScore;
3700  m_detectionScore = 0.15; m_detectionThreshold = 100.0; m_detectionTime = 0.0; m_detectorNames.clear();
3701  m_detectors.clear(); m_extractionTime = 0.0; m_extractorNames.clear(); m_extractors.clear(); m_filteredMatches.clear();
3702  m_filterType = ratioDistanceThreshold;
3703  m_imageFormat = jpgImageFormat; m_knnMatches.clear(); m_mapOfImageId.clear(); m_mapOfImages.clear();
3704  m_matcher = cv::Ptr<cv::DescriptorMatcher>(); m_matcherName = "BruteForce-Hamming";
3705  m_matches.clear(); m_matchingFactorThreshold = 2.0; m_matchingRatioThreshold = 0.85; m_matchingTime = 0.0;
3706  m_matchRansacKeyPointsToPoints.clear(); m_nbRansacIterations = 200; m_nbRansacMinInlierCount = 100;
3707  m_objectFilteredPoints.clear();
3708  m_poseTime = 0.0; m_queryDescriptors = cv::Mat(); m_queryFilteredKeyPoints.clear(); m_queryKeyPoints.clear();
3709  m_ransacConsensusPercentage = 20.0; m_ransacInliers.clear(); m_ransacOutliers.clear(); m_ransacReprojectionError = 6.0;
3710  m_ransacThreshold = 0.01; m_trainDescriptors = cv::Mat(); m_trainKeyPoints.clear(); m_trainPoints.clear();
3711  m_trainVpPoints.clear(); m_useAffineDetection = false;
3712 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
3713  m_useBruteForceCrossCheck = true;
3714 #endif
3715  m_useConsensusPercentage = false;
3716  m_useKnn = true; //as m_filterType == ratioDistanceThreshold
3717  m_useMatchTrainToQuery = false; m_useRansacVVS = true; m_useSingleMatchFilter = true;
3718 
3719  m_detectorNames.push_back("ORB");
3720  m_extractorNames.push_back("ORB");
3721 
3722  init();
3723 }
3724 
3732 void vpKeyPoint::saveLearningData(const std::string &filename, const bool binaryMode, const bool saveTrainingImages) {
3733  std::string parent = vpIoTools::getParent(filename);
3734  if(!parent.empty()) {
3735  vpIoTools::makeDirectory(parent);
3736  }
3737 
3738  std::map<int, std::string> mapOfImgPath;
3739  if(saveTrainingImages) {
3740 #ifdef VISP_HAVE_MODULE_IO
3741  //Save the training image files in the same directory
3742  int cpt = 0;
3743 
3744  for(std::map<int, vpImage<unsigned char> >::const_iterator it = m_mapOfImages.begin(); it != m_mapOfImages.end(); ++it, cpt++) {
3745  if(cpt > 999) {
3746  throw vpException(vpException::fatalError, "The number of training images to save is too big !");
3747  }
3748 
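  // cpt is bounded by the check above (cpt <= 999), so the three digits
  // produced by "%03d" plus the terminating null fit exactly in buffer[4].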
3749  char buffer[4];
3750  sprintf(buffer, "%03d", cpt);
3751  std::stringstream ss;
3752  ss << "train_image_" << buffer;
3753 
3754  switch(m_imageFormat) {
3755  case jpgImageFormat:
3756  ss << ".jpg";
3757  break;
3758 
3759  case pngImageFormat:
3760  ss << ".png";
3761  break;
3762 
3763  case ppmImageFormat:
3764  ss << ".ppm";
3765  break;
3766 
3767  case pgmImageFormat:
3768  ss << ".pgm";
3769  break;
3770 
3771  default:
3772  ss << ".png";
3773  break;
3774  }
3775 
3776  std::string imgFilename = ss.str();
3777  mapOfImgPath[it->first] = imgFilename;
3778  vpImageIo::write(it->second, parent + (!parent.empty() ? "/" : "") + imgFilename);
3779  }
3780 #else
3781  std::cout << "Warning: in vpKeyPoint::saveLearningData() training images are not saved because "
3782  "visp_io module is not available !" << std::endl;
3783 #endif
3784  }
3785 
3786  bool have3DInfo = m_trainPoints.size() > 0;
3787  if(have3DInfo && m_trainPoints.size() != m_trainKeyPoints.size()) {
3788  throw vpException(vpException::fatalError, "List of keypoints and list of 3D points have different size !");
3789  }
3790 
3791  if(binaryMode) {
3792  //Save the learning data into little endian binary file.
3793  std::ofstream file(filename.c_str(), std::ofstream::binary);
3794  if(!file.is_open()) {
3795  throw vpException(vpException::ioError, "Cannot create the file.");
3796  }
3797 
3798  //Write info about training images
3799  int nbImgs = (int) mapOfImgPath.size();
3800 // file.write((char *)(&nbImgs), sizeof(nbImgs));
3801  writeBinaryIntLE(file, nbImgs);
3802 
3803 #ifdef VISP_HAVE_MODULE_IO
3804  for(std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3805  //Write image_id
3806  int id = it->first;
3807 // file.write((char *)(&id), sizeof(id));
3808  writeBinaryIntLE(file, id);
3809 
3810  //Write image path
3811  std::string path = it->second;
3812  int length = (int) path.length();
3813 // file.write((char *)(&length), sizeof(length));
3814  writeBinaryIntLE(file, length);
3815 
3816  for(int cpt = 0; cpt < length; cpt++) {
3817  file.write((char *) (&path[(size_t)cpt]), sizeof(path[(size_t)cpt]));
3818  }
3819  }
3820 #endif
3821 
3822  //Write if we have 3D point information
3823  int have3DInfoInt = have3DInfo ? 1 : 0;
3824 // file.write((char *)(&have3DInfoInt), sizeof(have3DInfoInt));
3825  writeBinaryIntLE(file, have3DInfoInt);
3826 
3827 
3828  int nRows = m_trainDescriptors.rows,
3829  nCols = m_trainDescriptors.cols;
3830  int descriptorType = m_trainDescriptors.type();
3831 
3832  //Write the number of descriptors
3833 // file.write((char *)(&nRows), sizeof(nRows));
3834  writeBinaryIntLE(file, nRows);
3835 
3836  //Write the size of the descriptor
3837 // file.write((char *)(&nCols), sizeof(nCols));
3838  writeBinaryIntLE(file, nCols);
3839 
3840  //Write the type of the descriptor
3841 // file.write((char *)(&descriptorType), sizeof(descriptorType));
3842  writeBinaryIntLE(file, descriptorType);
3843 
3844  for (int i = 0; i < nRows; i++) {
3845  unsigned int i_ = (unsigned int) i;
3846  //Write u
3847  float u = m_trainKeyPoints[i_].pt.x;
3848 // file.write((char *)(&u), sizeof(u));
3849  writeBinaryFloatLE(file, u);
3850 
3851  //Write v
3852  float v = m_trainKeyPoints[i_].pt.y;
3853 // file.write((char *)(&v), sizeof(v));
3854  writeBinaryFloatLE(file, v);
3855 
3856  //Write size
3857  float size = m_trainKeyPoints[i_].size;
3858 // file.write((char *)(&size), sizeof(size));
3859  writeBinaryFloatLE(file, size);
3860 
3861  //Write angle
3862  float angle = m_trainKeyPoints[i_].angle;
3863 // file.write((char *)(&angle), sizeof(angle));
3864  writeBinaryFloatLE(file, angle);
3865 
3866  //Write response
3867  float response = m_trainKeyPoints[i_].response;
3868 // file.write((char *)(&response), sizeof(response));
3869  writeBinaryFloatLE(file, response);
3870 
3871  //Write octave
3872  int octave = m_trainKeyPoints[i_].octave;
3873 // file.write((char *)(&octave), sizeof(octave));
3874  writeBinaryIntLE(file, octave);
3875 
3876  //Write class_id
3877  int class_id = m_trainKeyPoints[i_].class_id;
3878 // file.write((char *)(&class_id), sizeof(class_id));
3879  writeBinaryIntLE(file, class_id);
3880 
3881  //Write image_id
3882 #ifdef VISP_HAVE_MODULE_IO
3883  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
3884  int image_id = (saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1;
3885 // file.write((char *)(&image_id), sizeof(image_id));
3886  writeBinaryIntLE(file, image_id);
3887 #else
3888  int image_id = -1;
3889 // file.write((char *)(&image_id), sizeof(image_id));
3890  writeBinaryIntLE(file, image_id);
3891 #endif
3892 
3893  if(have3DInfo) {
3894  float oX = m_trainPoints[i_].x, oY = m_trainPoints[i_].y, oZ = m_trainPoints[i_].z;
3895  //Write oX
3896 // file.write((char *)(&oX), sizeof(oX));
3897  writeBinaryFloatLE(file, oX);
3898 
3899  //Write oY
3900 // file.write((char *)(&oY), sizeof(oY));
3901  writeBinaryFloatLE(file, oY);
3902 
3903  //Write oZ
3904 // file.write((char *)(&oZ), sizeof(oZ));
3905  writeBinaryFloatLE(file, oZ);
3906  }
3907 
3908  for (int j = 0; j < nCols; j++) {
3909  //Write the descriptor value
3910  switch(descriptorType) {
3911  case CV_8U:
3912  file.write((char *)(&m_trainDescriptors.at<unsigned char>(i, j)), sizeof(m_trainDescriptors.at<unsigned char>(i, j)));
3913  break;
3914 
3915  case CV_8S:
3916  file.write((char *)(&m_trainDescriptors.at<char>(i, j)), sizeof(m_trainDescriptors.at<char>(i, j)));
3917  break;
3918 
3919  case CV_16U:
3920 // file.write((char *)(&m_trainDescriptors.at<unsigned short int>(i, j)), sizeof(m_trainDescriptors.at<unsigned short int>(i, j)));
3921  writeBinaryUShortLE(file, m_trainDescriptors.at<unsigned short int>(i, j));
3922  break;
3923 
3924  case CV_16S:
3925 // file.write((char *)(&m_trainDescriptors.at<short int>(i, j)), sizeof(m_trainDescriptors.at<short int>(i, j)));
3926  writeBinaryShortLE(file, m_trainDescriptors.at<short int>(i, j));
3927  break;
3928 
3929  case CV_32S:
3930 // file.write((char *)(&m_trainDescriptors.at<int>(i, j)), sizeof(m_trainDescriptors.at<int>(i, j)));
3931  writeBinaryIntLE(file, m_trainDescriptors.at<int>(i, j));
3932  break;
3933 
3934  case CV_32F:
3935 // file.write((char *)(&m_trainDescriptors.at<float>(i, j)), sizeof(m_trainDescriptors.at<float>(i, j)));
3936  writeBinaryFloatLE(file, m_trainDescriptors.at<float>(i, j));
3937  break;
3938 
3939  case CV_64F:
3940 // file.write((char *)(&m_trainDescriptors.at<double>(i, j)), sizeof(m_trainDescriptors.at<double>(i, j)));
3941  writeBinaryDoubleLE(file, m_trainDescriptors.at<double>(i, j));
3942  break;
3943 
3944  default:
3945  throw vpException(vpException::fatalError, "Problem with the data type of descriptors !");
3946  break;
3947  }
3948  }
3949  }
3950 
3951 
3952  file.close();
3953  } else {
3954 #ifdef VISP_HAVE_XML2
3955  xmlDocPtr doc = NULL;
3956  xmlNodePtr root_node = NULL, image_node = NULL, image_info_node = NULL, descriptors_info_node = NULL,
3957  descriptor_node = NULL, desc_node = NULL;
3958 
3959  /*
3960  * this initializes the library and checks potential ABI mismatches
3961  * between the version it was compiled for and the actual shared
3962  * library used.
3963  */
3964  LIBXML_TEST_VERSION
3965 
3966  doc = xmlNewDoc(BAD_CAST "1.0");
3967  if (doc == NULL) {
3968  throw vpException(vpException::ioError, "Error with file " + filename);
3969  }
3970 
3971  root_node = xmlNewNode(NULL, BAD_CAST "LearningData");
3972  xmlDocSetRootElement(doc, root_node);
3973 
3974  std::stringstream ss;
3975 
3976  //Write the training images info
3977  image_node = xmlNewChild(root_node, NULL, BAD_CAST "TrainingImageInfo", NULL);
3978 
3979 #ifdef VISP_HAVE_MODULE_IO
3980  for(std::map<int, std::string>::const_iterator it = mapOfImgPath.begin(); it != mapOfImgPath.end(); ++it) {
3981  image_info_node = xmlNewChild(image_node, NULL, BAD_CAST "trainImg",
3982  BAD_CAST it->second.c_str());
3983  ss.str("");
3984  ss << it->first;
3985  xmlNewProp(image_info_node, BAD_CAST "image_id", BAD_CAST ss.str().c_str());
3986  }
3987 #endif
3988 
3989  //Write information about descriptors
3990  descriptors_info_node = xmlNewChild(root_node, NULL, BAD_CAST "DescriptorsInfo", NULL);
3991 
3992  int nRows = m_trainDescriptors.rows,
3993  nCols = m_trainDescriptors.cols;
3994  int descriptorType = m_trainDescriptors.type();
3995 
3996  //Write the number of rows
3997  ss.str("");
3998  ss << nRows;
3999  xmlNewChild(descriptors_info_node, NULL, BAD_CAST "nrows", BAD_CAST ss.str().c_str());
4000 
4001  //Write the number of cols
4002  ss.str("");
4003  ss << nCols;
4004  xmlNewChild(descriptors_info_node, NULL, BAD_CAST "ncols", BAD_CAST ss.str().c_str());
4005 
4006  //Write the descriptors type
4007  ss.str("");
4008  ss << descriptorType;
4009  xmlNewChild(descriptors_info_node, NULL, BAD_CAST "type", BAD_CAST ss.str().c_str());
4010 
4011  for (int i = 0; i < nRows; i++) {
4012  unsigned int i_ = (unsigned int) i;
4013  descriptor_node = xmlNewChild(root_node, NULL, BAD_CAST "DescriptorInfo",
4014  NULL);
4015 
4016  ss.str("");
4017  //max_digits10 == 9 for float
4018  ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].pt.x;
4019  xmlNewChild(descriptor_node, NULL, BAD_CAST "u",
4020  BAD_CAST ss.str().c_str());
4021 
4022  ss.str("");
4023  //max_digits10 == 9 for float
4024  ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].pt.y;
4025  xmlNewChild(descriptor_node, NULL, BAD_CAST "v",
4026  BAD_CAST ss.str().c_str());
4027 
4028  ss.str("");
4029  //max_digits10 == 9 for float
4030  ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].size;
4031  xmlNewChild(descriptor_node, NULL, BAD_CAST "size",
4032  BAD_CAST ss.str().c_str());
4033 
4034  ss.str("");
4035  //max_digits10 == 9 for float
4036  ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].angle;
4037  xmlNewChild(descriptor_node, NULL, BAD_CAST "angle",
4038  BAD_CAST ss.str().c_str());
4039 
4040  ss.str("");
4041  //max_digits10 == 9 for float
4042  ss << std::fixed << std::setprecision(9) << m_trainKeyPoints[i_].response;
4043  xmlNewChild(descriptor_node, NULL, BAD_CAST "response",
4044  BAD_CAST ss.str().c_str());
4045 
4046  ss.str("");
4047  ss << m_trainKeyPoints[i_].octave;
4048  xmlNewChild(descriptor_node, NULL, BAD_CAST "octave",
4049  BAD_CAST ss.str().c_str());
4050 
4051  ss.str("");
4052  ss << m_trainKeyPoints[i_].class_id;
4053  xmlNewChild(descriptor_node, NULL, BAD_CAST "class_id",
4054  BAD_CAST ss.str().c_str());
4055 
4056  ss.str("");
4057 #ifdef VISP_HAVE_MODULE_IO
4058  std::map<int, int>::const_iterator it_findImgId = m_mapOfImageId.find(m_trainKeyPoints[i_].class_id);
4059  ss << ((saveTrainingImages && it_findImgId != m_mapOfImageId.end()) ? it_findImgId->second : -1);
4060  xmlNewChild(descriptor_node, NULL, BAD_CAST "image_id",
4061  BAD_CAST ss.str().c_str());
4062 #else
4063  ss << -1;
4064  xmlNewChild(descriptor_node, NULL, BAD_CAST "image_id",
4065  BAD_CAST ss.str().c_str());
4066 #endif
4067 
4068  if (have3DInfo) {
4069  ss.str("");
4070  //max_digits10 == 9 for float
4071  ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].x;
4072  xmlNewChild(descriptor_node, NULL, BAD_CAST "oX",
4073  BAD_CAST ss.str().c_str());
4074 
4075  ss.str("");
4076  //max_digits10 == 9 for float
4077  ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].y;
4078  xmlNewChild(descriptor_node, NULL, BAD_CAST "oY",
4079  BAD_CAST ss.str().c_str());
4080 
4081  ss.str("");
4082  //max_digits10 == 9 for float
4083  ss << std::fixed << std::setprecision(9) << m_trainPoints[i_].z;
4084  xmlNewChild(descriptor_node, NULL, BAD_CAST "oZ",
4085  BAD_CAST ss.str().c_str());
4086  }
4087 
4088  desc_node = xmlNewChild(descriptor_node, NULL, BAD_CAST "desc", NULL);
4089 
4090  for (int j = 0; j < nCols; j++) {
4091  ss.str("");
4092 
4093  switch(descriptorType) {
4094  case CV_8U:
4095  {
4096  //Promote an unsigned char to an int
4097  //val_tmp holds the numeric value that will be written
4098  //We save the value in numeric form otherwise libxml2 will not be able to parse
4099  //A better solution could be possible
4100  int val_tmp = m_trainDescriptors.at<unsigned char>(i, j);
4101  ss << val_tmp;
4102  }
4103  break;
4104 
4105  case CV_8S:
4106  {
4107  //Promote a char to an int
4108  //val_tmp holds the numeric value that will be written
4109  //We save the value in numeric form otherwise libxml2 will not be able to parse
4110  //A better solution could be possible
4111  int val_tmp = m_trainDescriptors.at<char>(i, j);
4112  ss << val_tmp;
4113  }
4114  break;
4115 
4116  case CV_16U:
4117  ss << m_trainDescriptors.at<unsigned short int>(i, j);
4118  break;
4119 
4120  case CV_16S:
4121  ss << m_trainDescriptors.at<short int>(i, j);
4122  break;
4123 
4124  case CV_32S:
4125  ss << m_trainDescriptors.at<int>(i, j);
4126  break;
4127 
4128  case CV_32F:
4129  //max_digits10 == 9 for float
4130  ss << std::fixed << std::setprecision(9) << m_trainDescriptors.at<float>(i, j);
4131  break;
4132 
4133  case CV_64F:
4134  //max_digits10 == 17 for double
4135  ss << std::fixed << std::setprecision(17) << m_trainDescriptors.at<double>(i, j);
4136  break;
4137 
4138  default:
4139  throw vpException(vpException::fatalError, "Problem with the data type of descriptors !");
4140  break;
4141  }
4142  xmlNewChild(desc_node, NULL, BAD_CAST "val",
4143  BAD_CAST ss.str().c_str());
4144  }
4145  }
4146 
4147  xmlSaveFormatFileEnc(filename.c_str(), doc, "UTF-8", 1);
4148 
4149  /*free the document */
4150  xmlFreeDoc(doc);
4151 
4152  /*
4153  *Free the global variables that may
4154  *have been allocated by the parser.
4155  */
4156  xmlCleanupParser();
4157 #else
4158  std::cerr << "Error: libxml2 is required !" << std::endl;
4159 #endif
4160  }
4161 }
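A hypothetical save/load round trip built on this method (the file name and the prior reference construction are assumptions):

vpKeyPoint keypoint;
// Assumption: keypoint.buildReference(...) was called, possibly with 3D points.
keypoint.saveLearningData("learning.bin", true /*binaryMode*/, true /*saveTrainingImages*/);

vpKeyPoint keypoint2;
keypoint2.loadLearningData("learning.bin", true /*binaryMode*/);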
4162 
4163 #if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x030000)
4164 //From OpenCV 2.4.11 source code.
4165 struct KeypointResponseGreaterThanThreshold {
4166  KeypointResponseGreaterThanThreshold(float _value) :
4167  value(_value) {
4168  }
4169  inline bool operator()(const cv::KeyPoint& kpt) const {
4170  return kpt.response >= value;
4171  }
4172  float value;
4173 };
4174 
4175 struct KeypointResponseGreater {
4176  inline bool operator()(const cv::KeyPoint& kp1,
4177  const cv::KeyPoint& kp2) const {
4178  return kp1.response > kp2.response;
4179  }
4180 };
4181 
4182 // takes keypoints and culls them by the response
4183 void vpKeyPoint::KeyPointsFilter::retainBest(
4184  std::vector<cv::KeyPoint>& keypoints, int n_points) {
4185  //this is only necessary if the keypoints size is greater than the number of desired points.
4186  if (n_points >= 0 && keypoints.size() > (size_t) n_points) {
4187  if (n_points == 0) {
4188  keypoints.clear();
4189  return;
4190  }
4191  //first use nth element to partition the keypoints into the best and worst.
4192  std::nth_element(keypoints.begin(), keypoints.begin() + n_points,
4193  keypoints.end(), KeypointResponseGreater());
4194  //this is the boundary response, and in the case of FAST may be ambiguous
4195  float ambiguous_response = keypoints[(size_t) (n_points - 1)].response;
4196  //use std::partition to grab all of the keypoints with the boundary response.
4197  std::vector<cv::KeyPoint>::const_iterator new_end = std::partition(
4198  keypoints.begin() + n_points, keypoints.end(),
4199  KeypointResponseGreaterThanThreshold(ambiguous_response));
4200  //resize the keypoints, given this new end point. nth_element and partition reordered the points in place
4201  keypoints.resize((size_t) (new_end - keypoints.begin()));
4202  }
4203 }
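Note that all keypoints tied at the boundary response survive the std::partition step, so slightly more than n_points can remain. A small illustrative call (assuming the nested KeyPointsFilter helper is publicly accessible):

std::vector<cv::KeyPoint> keypoints;      // filled by a detector beforehand
vpKeyPoint::KeyPointsFilter::retainBest(keypoints, 500);
// keypoints.size() may slightly exceed 500 when several keypoints share the
// boundary response value.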
4204 
4205 struct RoiPredicate {
4206  RoiPredicate(const cv::Rect& _r) :
4207  r(_r) {
4208  }
4209 
4210  bool operator()(const cv::KeyPoint& keyPt) const {
4211  return !r.contains(keyPt.pt);
4212  }
4213 
4214  cv::Rect r;
4215 };
4216 
4217 void vpKeyPoint::KeyPointsFilter::runByImageBorder(
4218  std::vector<cv::KeyPoint>& keypoints, cv::Size imageSize, int borderSize) {
4219  if (borderSize > 0) {
4220  if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
4221  keypoints.clear();
4222  else
4223  keypoints.erase(
4224  std::remove_if(keypoints.begin(), keypoints.end(),
4225  RoiPredicate(
4226  cv::Rect(cv::Point(borderSize, borderSize),
4227  cv::Point(imageSize.width - borderSize,
4228  imageSize.height - borderSize)))), keypoints.end());
4229  }
4230 }
4231 
4232 struct SizePredicate {
4233  SizePredicate(float _minSize, float _maxSize) :
4234  minSize(_minSize), maxSize(_maxSize) {
4235  }
4236 
4237  bool operator()(const cv::KeyPoint& keyPt) const {
4238  float size = keyPt.size;
4239  return (size < minSize) || (size > maxSize);
4240  }
4241 
4242  float minSize, maxSize;
4243 };
4244 
4245 void vpKeyPoint::KeyPointsFilter::runByKeypointSize(
4246  std::vector<cv::KeyPoint>& keypoints, float minSize, float maxSize) {
4247  CV_Assert(minSize >= 0);
4248  CV_Assert(maxSize >= 0);
4249  CV_Assert(minSize <= maxSize);
4250 
4251  keypoints.erase(
4252  std::remove_if(keypoints.begin(), keypoints.end(),
4253  SizePredicate(minSize, maxSize)), keypoints.end());
4254 }
4255 
4256 class MaskPredicate {
4257 public:
4258  MaskPredicate(const cv::Mat& _mask) :
4259  mask(_mask) {
4260  }
4261  bool operator()(const cv::KeyPoint& key_pt) const {
4262  return mask.at<uchar>((int) (key_pt.pt.y + 0.5f),
4263  (int) (key_pt.pt.x + 0.5f)) == 0;
4264  }
4265 
4266 private:
4267  const cv::Mat mask;
4268  MaskPredicate& operator=(const MaskPredicate&);
4269 };
4270 
4271 void vpKeyPoint::KeyPointsFilter::runByPixelsMask(
4272  std::vector<cv::KeyPoint>& keypoints, const cv::Mat& mask) {
4273  if (mask.empty())
4274  return;
4275 
4276  keypoints.erase(
4277  std::remove_if(keypoints.begin(), keypoints.end(), MaskPredicate(mask)),
4278  keypoints.end());
4279 }
4280 
4281 struct KeyPoint_LessThan {
4282  KeyPoint_LessThan(const std::vector<cv::KeyPoint>& _kp) :
4283  kp(&_kp) {
4284  }
4285  bool operator()(/*int i, int j*/ size_t i, size_t j) const {
4286  const cv::KeyPoint& kp1 = (*kp)[/*(size_t)*/ i];
4287  const cv::KeyPoint& kp2 = (*kp)[/*(size_t)*/ j];
4288  if (!vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon())) { //if (kp1.pt.x != kp2.pt.x) {
4289  return kp1.pt.x < kp2.pt.x;
4290  }
4291 
4292  if (!vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon())) { //if (kp1.pt.y != kp2.pt.y) {
4293  return kp1.pt.y < kp2.pt.y;
4294  }
4295 
4296  if (!vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon())) { //if (kp1.size != kp2.size) {
4297  return kp1.size > kp2.size;
4298  }
4299 
4300  if (!vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) { //if (kp1.angle != kp2.angle) {
4301  return kp1.angle < kp2.angle;
4302  }
4303 
4304  if (!vpMath::equal(kp1.response, kp2.response, std::numeric_limits<float>::epsilon())) { //if (kp1.response != kp2.response) {
4305  return kp1.response > kp2.response;
4306  }
4307 
4308  if (kp1.octave != kp2.octave) {
4309  return kp1.octave > kp2.octave;
4310  }
4311 
4312  if (kp1.class_id != kp2.class_id) {
4313  return kp1.class_id > kp2.class_id;
4314  }
4315 
4316  return i < j;
4317  }
4318  const std::vector<cv::KeyPoint>* kp;
4319 };
4320 
4321 void vpKeyPoint::KeyPointsFilter::removeDuplicated(
4322  std::vector<cv::KeyPoint>& keypoints) {
4323  size_t i, j, n = keypoints.size();
4324  std::vector<size_t> kpidx(n);
4325  std::vector<uchar> mask(n, (uchar) 1);
4326 
4327  for (i = 0; i < n; i++) {
4328  kpidx[i] = i;
4329  }
4330  std::sort(kpidx.begin(), kpidx.end(), KeyPoint_LessThan(keypoints));
4331  for (i = 1, j = 0; i < n; i++) {
4332  cv::KeyPoint& kp1 = keypoints[kpidx[i]];
4333  cv::KeyPoint& kp2 = keypoints[kpidx[j]];
4334 // if (kp1.pt.x != kp2.pt.x || kp1.pt.y != kp2.pt.y || kp1.size != kp2.size || kp1.angle != kp2.angle) {
4335  if (!vpMath::equal(kp1.pt.x, kp2.pt.x, std::numeric_limits<float>::epsilon()) ||
4336  !vpMath::equal(kp1.pt.y, kp2.pt.y, std::numeric_limits<float>::epsilon()) ||
4337  !vpMath::equal(kp1.size, kp2.size, std::numeric_limits<float>::epsilon()) ||
4338  !vpMath::equal(kp1.angle, kp2.angle, std::numeric_limits<float>::epsilon())) {
4339  j = i;
4340  } else {
4341  mask[kpidx[i]] = 0;
4342  }
4343  }
4344 
4345  for (i = j = 0; i < n; i++) {
4346  if (mask[i]) {
4347  if (i != j) {
4348  keypoints[j] = keypoints[i];
4349  }
4350  j++;
4351  }
4352  }
4353  keypoints.resize(j);
4354 }
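A tiny illustration of the duplicate-removal semantics (hypothetical values; assumes the nested helper is publicly accessible):

std::vector<cv::KeyPoint> kpts;
kpts.push_back(cv::KeyPoint(10.f, 20.f, 7.f));
kpts.push_back(cv::KeyPoint(10.f, 20.f, 7.f));   // exact duplicate
vpKeyPoint::KeyPointsFilter::removeDuplicated(kpts);
// kpts.size() == 1: keypoints equal in position, size and angle are merged.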
4355 
4356 /*
4357  * PyramidAdaptedFeatureDetector
4358  */
4359 vpKeyPoint::PyramidAdaptedFeatureDetector::PyramidAdaptedFeatureDetector(
4360  const cv::Ptr<cv::FeatureDetector>& _detector, int _maxLevel) :
4361  detector(_detector), maxLevel(_maxLevel) {
4362 }
4363 
4364 bool vpKeyPoint::PyramidAdaptedFeatureDetector::empty() const {
4365  return detector.empty() || (cv::FeatureDetector*) detector->empty();
4366 }
4367 
4368 void vpKeyPoint::PyramidAdaptedFeatureDetector::detect( cv::InputArray image, CV_OUT std::vector<cv::KeyPoint>& keypoints,
4369  cv::InputArray mask ) {
4370  detectImpl(image.getMat(), keypoints, mask.getMat());
4371 }
4372 
4373 void vpKeyPoint::PyramidAdaptedFeatureDetector::detectImpl(const cv::Mat& image,
4374  std::vector<cv::KeyPoint>& keypoints, const cv::Mat& mask) const {
4375  cv::Mat src = image;
4376  cv::Mat src_mask = mask;
4377 
4378  cv::Mat dilated_mask;
4379  if (!mask.empty()) {
4380  cv::dilate(mask, dilated_mask, cv::Mat());
4381  cv::Mat mask255(mask.size(), CV_8UC1, cv::Scalar(0));
4382  mask255.setTo(cv::Scalar(255), dilated_mask != 0);
4383  dilated_mask = mask255;
4384  }
4385 
4386  for (int l = 0, multiplier = 1; l <= maxLevel; ++l, multiplier *= 2) {
4387  // Detect on current level of the pyramid
4388  std::vector<cv::KeyPoint> new_pts;
4389  detector->detect(src, new_pts, src_mask);
4390  std::vector<cv::KeyPoint>::iterator it = new_pts.begin(), end =
4391  new_pts.end();
4392  for (; it != end; ++it) {
4393  it->pt.x *= multiplier;
4394  it->pt.y *= multiplier;
4395  it->size *= multiplier;
4396  it->octave = l;
4397  }
4398  keypoints.insert(keypoints.end(), new_pts.begin(), new_pts.end());
4399 
4400  // Downsample
4401  if (l < maxLevel) {
4402  cv::Mat dst;
4403  pyrDown(src, dst);
4404  src = dst;
4405 
4406  if (!mask.empty())
4407  resize(dilated_mask, src_mask, src.size(), 0, 0, CV_INTER_AREA);
4408  }
4409  }
4410 
4411  if (!mask.empty())
4412  vpKeyPoint::KeyPointsFilter::runByPixelsMask(keypoints, mask);
4413 }
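A hedged sketch of wrapping an OpenCV 3 detector with this pyramid adapter (the FAST detector choice, the input image and the public accessibility of the nested class are assumptions):

cv::Mat img;                              // grayscale image acquired elsewhere
cv::Ptr<cv::FeatureDetector> fast = cv::FastFeatureDetector::create();
cv::Ptr<cv::FeatureDetector> pyramid(
    new vpKeyPoint::PyramidAdaptedFeatureDetector(fast, 3 /*maxLevel*/));
std::vector<cv::KeyPoint> keypoints;
pyramid->detect(img, keypoints, cv::noArray());
// A keypoint found on pyramid level l has pt and size multiplied by 2^l and
// its octave field set to l.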
4414 #endif
4415 
4416 #elif !defined(VISP_BUILD_SHARED_LIBS)
4417 // Workaround to avoid warning: libvisp_vision.a(vpKeyPoint.cpp.o) has no symbols
4418 void dummy_vpKeyPoint() {};
4419 #endif