Visual Servoing Platform  version 3.6.1 under development (2024-11-15)
testGenericTracker.cpp
1 /*
2  * ViSP, open source Visual Servoing Platform software.
3  * Copyright (C) 2005 - 2024 by Inria. All rights reserved.
4  *
5  * This software is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  * See the file LICENSE.txt at the root directory of this source
10  * distribution for additional information about the GNU GPL.
11  *
12  * For using ViSP with software that can not be combined with the GNU
13  * GPL, please contact Inria about acquiring a ViSP Professional
14  * Edition License.
15  *
16  * See https://visp.inria.fr for more information.
17  *
18  * This software was developed at:
19  * Inria Rennes - Bretagne Atlantique
20  * Campus Universitaire de Beaulieu
21  * 35042 Rennes Cedex
22  * France
23  *
24  * If you have questions regarding the use of this file, please contact
25  * Inria at visp@inria.fr
26  *
27  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29  *
30  * Description:
31  * Regression test for MBT.
32  */
33 
40 #include <cstdlib>
41 #include <iostream>
42 #include <visp3/core/vpConfig.h>
43 
44 #if defined(VISP_HAVE_MODULE_MBT) && \
45  (defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV))
46 
47 #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
48 #include <type_traits>
49 #endif
50 
51 #include <visp3/core/vpFont.h>
52 #include <visp3/core/vpImageDraw.h>
53 #include <visp3/core/vpIoTools.h>
54 #include <visp3/gui/vpDisplayD3D.h>
55 #include <visp3/gui/vpDisplayGDI.h>
56 #include <visp3/gui/vpDisplayGTK.h>
57 #include <visp3/gui/vpDisplayOpenCV.h>
58 #include <visp3/gui/vpDisplayX.h>
59 #include <visp3/io/vpImageIo.h>
60 #include <visp3/io/vpParseArgv.h>
61 #include <visp3/mbt/vpMbGenericTracker.h>
62 
63 #define GETOPTARGS "i:dsclt:e:DmCh"
64 
65 #ifdef ENABLE_VISP_NAMESPACE
66 using namespace VISP_NAMESPACE_NAME;
67 #endif
68 
69 namespace
70 {
/*!
  Print the command line help for this regression test.

  \param name : Program name (typically argv[0]), substituted in the synopsis.
  \param badparam : Offending command line parameter to report, or nullptr when
  the help is requested explicitly (-h).
*/
void usage(const char *name, const char *badparam)
{
  fprintf(stdout, "\n\
Regression test for vpGenericTracker.\n\
\n\
SYNOPSIS\n\
  %s [-i <test image path>] [-c] [-d] [-s] [-h] [-l] \n\
     [-t <tracker type>] [-e <last frame index>] [-D] [-m] [-C]\n",
          name);

  fprintf(stdout, "\n\
OPTIONS: \n\
  -i <input image path> \n\
     Set image input path.\n\
     These images come from ViSP-images-x.y.z.tar.gz available \n\
     on the ViSP website.\n\
     Setting the VISP_INPUT_IMAGE_PATH environment\n\
     variable produces the same behavior than using\n\
     this option.\n\
\n\
  -d \n\
     Turn off the display.\n\
\n\
  -s \n\
     If display is turn off, tracking results are saved in a video folder.\n\
\n\
  -c\n\
     Disable the mouse click. Useful to automate the \n\
     execution of this program without human intervention.\n\
\n\
  -t <tracker type>\n\
     Set tracker type (<1 (Edge)>, <2 (KLT)>, <3 (both)>) for color sensor.\n\
\n\
  -l\n\
     Use the scanline for visibility tests.\n\
\n\
  -e <last frame index>\n\
     Specify the index of the last frame. Once reached, the tracking is stopped.\n\
\n\
  -D \n\
     Use depth.\n\
\n\
  -m \n\
     Set a tracking mask.\n\
\n\
  -C \n\
     Use color images.\n\
\n\
  -h \n\
     Print the help.\n\n");

  if (badparam)
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}
125 
126 bool getOptions(int argc, const char **argv, std::string &ipath, bool &click_allowed, bool &display, bool &save,
127  bool &useScanline, int &trackerType, int &lastFrame, bool &use_depth, bool &use_mask,
128  bool &use_color_image)
129 {
130  const char *optarg_;
131  int c;
132  while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {
133 
134  switch (c) {
135  case 'i':
136  ipath = optarg_;
137  break;
138  case 'c':
139  click_allowed = false;
140  break;
141  case 'd':
142  display = false;
143  break;
144  case 's':
145  save = true;
146  break;
147  case 'l':
148  useScanline = true;
149  break;
150  case 't':
151  trackerType = atoi(optarg_);
152  break;
153  case 'e':
154  lastFrame = atoi(optarg_);
155  break;
156  case 'D':
157  use_depth = true;
158  break;
159  case 'm':
160  use_mask = true;
161  break;
162  case 'C':
163  use_color_image = true;
164  break;
165  case 'h':
166  usage(argv[0], nullptr);
167  return false;
168  break;
169 
170  default:
171  usage(argv[0], optarg_);
172  return false;
173  break;
174  }
175  }
176 
177  if ((c == 1) || (c == -1)) {
178  // standalone param or error
179  usage(argv[0], nullptr);
180  std::cerr << "ERROR: " << std::endl;
181  std::cerr << " Bad argument " << optarg_ << std::endl << std::endl;
182  return false;
183  }
184 
185  return true;
186 }
187 
188 template <typename Type>
189 bool read_data(const std::string &input_directory, int cpt, const vpCameraParameters &cam_depth, vpImage<Type> &I,
190  vpImage<uint16_t> &I_depth, std::vector<vpColVector> &pointcloud, vpHomogeneousMatrix &cMo)
191 {
192 #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
193  static_assert(std::is_same<Type, unsigned char>::value || std::is_same<Type, vpRGBa>::value,
194  "Template function supports only unsigned char and vpRGBa images!");
195 #endif
196 #if VISP_HAVE_DATASET_VERSION >= 0x030600
197  std::string ext("png");
198 #else
199  std::string ext("pgm");
200 #endif
201  char buffer[FILENAME_MAX];
202  snprintf(buffer, FILENAME_MAX, std::string(input_directory + "/Images/Image_%04d." + ext).c_str(), cpt);
203  std::string image_filename = buffer;
204 
205  snprintf(buffer, FILENAME_MAX, std::string(input_directory + "/Depth/Depth_%04d.bin").c_str(), cpt);
206  std::string depth_filename = buffer;
207 
208  snprintf(buffer, FILENAME_MAX, std::string(input_directory + "/CameraPose/Camera_%03d.txt").c_str(), cpt);
209  std::string pose_filename = buffer;
210 
211  if (!vpIoTools::checkFilename(image_filename) || !vpIoTools::checkFilename(depth_filename) ||
212  !vpIoTools::checkFilename(pose_filename))
213  return false;
214 
215  vpImageIo::read(I, image_filename);
216 
217  unsigned int depth_width = 0, depth_height = 0;
218  std::ifstream file_depth(depth_filename.c_str(), std::ios::in | std::ios::binary);
219  if (!file_depth.is_open())
220  return false;
221 
222  vpIoTools::readBinaryValueLE(file_depth, depth_height);
223  vpIoTools::readBinaryValueLE(file_depth, depth_width);
224  I_depth.resize(depth_height, depth_width);
225  pointcloud.resize(depth_height * depth_width);
226 
227  const float depth_scale = 0.000030518f;
228  for (unsigned int i = 0; i < I_depth.getHeight(); i++) {
229  for (unsigned int j = 0; j < I_depth.getWidth(); j++) {
230  vpIoTools::readBinaryValueLE(file_depth, I_depth[i][j]);
231  double x = 0.0, y = 0.0, Z = I_depth[i][j] * depth_scale;
232  vpPixelMeterConversion::convertPoint(cam_depth, j, i, x, y);
233  vpColVector pt3d(4, 1.0);
234  pt3d[0] = x * Z;
235  pt3d[1] = y * Z;
236  pt3d[2] = Z;
237  pointcloud[i * I_depth.getWidth() + j] = pt3d;
238  }
239  }
240 
241  std::ifstream file_pose(pose_filename.c_str());
242  if (!file_pose.is_open()) {
243  return false;
244  }
245 
246  for (unsigned int i = 0; i < 4; i++) {
247  for (unsigned int j = 0; j < 4; j++) {
248  file_pose >> cMo[i][j];
249  }
250  }
251 
252  return true;
253 }
254 
255 void convert(const vpImage<vpRGBa> &src, vpImage<vpRGBa> &dst) { dst = src; }
256 
257 void convert(const vpImage<unsigned char> &src, vpImage<vpRGBa> &dst) { vpImageConvert::convert(src, dst); }
258 
259 template <typename Type>
260 bool run(const std::string &input_directory, bool opt_click_allowed, bool opt_display, bool useScanline,
261  int trackerType_image, int opt_lastFrame, bool use_depth, bool use_mask, bool save)
262 {
263 #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
264  static_assert(std::is_same<Type, unsigned char>::value || std::is_same<Type, vpRGBa>::value,
265  "Template function supports only unsigned char and vpRGBa images!");
266 #endif
267  // Initialise a display
268 #if defined(VISP_HAVE_X11)
269  vpDisplayX display1, display2;
270 #elif defined(VISP_HAVE_GDI)
271  vpDisplayGDI display1, display2;
272 #elif defined(HAVE_OPENCV_HIGHGUI)
273  vpDisplayOpenCV display1, display2;
274 #elif defined(VISP_HAVE_D3D9)
275  vpDisplayD3D display1, display2;
276 #elif defined(VISP_HAVE_GTK)
277  vpDisplayGTK display1, display2;
278 #else
279  opt_display = false;
280 #endif
281 
282  std::vector<int> tracker_type(2);
283  tracker_type[0] = trackerType_image;
284  tracker_type[1] = vpMbGenericTracker::DEPTH_DENSE_TRACKER;
285  vpMbGenericTracker tracker(tracker_type);
286 
287 #if defined(VISP_HAVE_PUGIXML)
288  std::string configFileCam1 = input_directory + std::string("/Config/chateau.xml");
289  std::string configFileCam2 = input_directory + std::string("/Config/chateau_depth.xml");
290  std::cout << "Load config file for camera 1: " << configFileCam1 << std::endl;
291  std::cout << "Load config file for camera 2: " << configFileCam2 << std::endl;
292  tracker.loadConfigFile(configFileCam1, configFileCam2);
293 #else
294  // Corresponding parameters manually set to have an example code
295  {
296  vpCameraParameters cam_color, cam_depth;
297  cam_color.initPersProjWithoutDistortion(700.0, 700.0, 320.0, 240.0);
298  cam_depth.initPersProjWithoutDistortion(700.0, 700.0, 320.0, 240.0);
299  tracker.setCameraParameters(cam_color, cam_depth);
300  }
301 
302  // Edge
303  vpMe me;
304  me.setMaskSize(5);
305  me.setMaskNumber(180);
306  me.setRange(8);
308  me.setThreshold(5);
309  me.setMu1(0.5);
310  me.setMu2(0.5);
311  me.setSampleStep(5);
312  tracker.setMovingEdge(me);
313 
314  // Klt
315 #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
316  vpKltOpencv klt;
317  tracker.setKltMaskBorder(5);
318  klt.setMaxFeatures(10000);
319  klt.setWindowSize(5);
320  klt.setQuality(0.01);
321  klt.setMinDistance(5);
322  klt.setHarrisFreeParameter(0.02);
323  klt.setBlockSize(3);
324  klt.setPyramidLevels(3);
325 
326  tracker.setKltOpencv(klt);
327 #endif
328 
329  // Depth
330  tracker.setDepthNormalFeatureEstimationMethod(vpMbtFaceDepthNormal::ROBUST_FEATURE_ESTIMATION);
331  tracker.setDepthNormalPclPlaneEstimationMethod(2);
332  tracker.setDepthNormalPclPlaneEstimationRansacMaxIter(200);
333  tracker.setDepthNormalPclPlaneEstimationRansacThreshold(0.001);
334  tracker.setDepthNormalSamplingStep(2, 2);
335 
336  tracker.setDepthDenseSamplingStep(4, 4);
337 
338  tracker.setAngleAppear(vpMath::rad(85.0));
339  tracker.setAngleDisappear(vpMath::rad(89.0));
340  tracker.setNearClippingDistance(0.01);
341  tracker.setFarClippingDistance(2.0);
342  tracker.setClipping(tracker.getClipping() | vpMbtPolygon::FOV_CLIPPING);
343 #endif
344 
345 #ifdef VISP_HAVE_COIN3D
346  tracker.loadModel(input_directory + "/Models/chateau.wrl", input_directory + "/Models/chateau.cao");
347 #else
348  tracker.loadModel(input_directory + "/Models/chateau.cao", input_directory + "/Models/chateau.cao");
349 #endif
351  T[0][0] = -1;
352  T[0][3] = -0.2;
353  T[1][1] = 0;
354  T[1][2] = 1;
355  T[1][3] = 0.12;
356  T[2][1] = 1;
357  T[2][2] = 0;
358  T[2][3] = -0.15;
359  tracker.loadModel(input_directory + "/Models/cube.cao", false, T);
360  vpCameraParameters cam_color, cam_depth;
361  tracker.getCameraParameters(cam_color, cam_depth);
362  tracker.setDisplayFeatures(true);
363  tracker.setScanLineVisibilityTest(useScanline);
364 
365  std::map<int, std::pair<double, double> > map_thresh;
366  // Take the highest thresholds between all CI machines
367 #ifdef VISP_HAVE_COIN3D
369  useScanline ? std::pair<double, double>(0.007, 6.) : std::pair<double, double>(0.008, 3.9);
370 #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
371  map_thresh[vpMbGenericTracker::KLT_TRACKER] =
372  useScanline ? std::pair<double, double>(0.007, 1.9) : std::pair<double, double>(0.007, 1.8);
373  map_thresh[vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER] =
374  useScanline ? std::pair<double, double>(0.005, 3.7) : std::pair<double, double>(0.007, 3.4);
375 #endif
377  useScanline ? std::pair<double, double>(0.003, 1.7) : std::pair<double, double>(0.002, 0.8);
378 #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
379  map_thresh[vpMbGenericTracker::KLT_TRACKER | vpMbGenericTracker::DEPTH_DENSE_TRACKER] =
380  std::pair<double, double>(0.002, 0.3);
381  map_thresh[vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER |
383  useScanline ? std::pair<double, double>(0.002, 1.8) : std::pair<double, double>(0.002, 0.7);
384 #endif
385 #else
387  useScanline ? std::pair<double, double>(0.015, 3.0) : std::pair<double, double>(0.009, 4.0);
388 #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
389  map_thresh[vpMbGenericTracker::KLT_TRACKER] =
390  useScanline ? std::pair<double, double>(0.006, 1.7) : std::pair<double, double>(0.005, 1.4);
391  map_thresh[vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER] =
392  useScanline ? std::pair<double, double>(0.004, 1.2) : std::pair<double, double>(0.004, 1.2);
393 #endif
395  useScanline ? std::pair<double, double>(0.002, 0.7) : std::pair<double, double>(0.001, 0.4);
396 #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
397  map_thresh[vpMbGenericTracker::KLT_TRACKER | vpMbGenericTracker::DEPTH_DENSE_TRACKER] =
398  std::pair<double, double>(0.002, 0.3);
399  map_thresh[vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER |
401  useScanline ? std::pair<double, double>(0.001, 0.5) : std::pair<double, double>(0.001, 0.4);
402 #endif
403 #endif
404 
405  vpImage<Type> I, I_depth;
406  vpImage<uint16_t> I_depth_raw;
407  vpHomogeneousMatrix cMo_truth;
408  std::vector<vpColVector> pointcloud;
409  int cpt_frame = 1;
410  if (!read_data(input_directory, cpt_frame, cam_depth, I, I_depth_raw, pointcloud, cMo_truth)) {
411  std::cerr << "Cannot read first frame!" << std::endl;
412  return EXIT_FAILURE;
413  }
414 
415  vpImage<bool> mask(I.getHeight(), I.getWidth());
416  const double roi_step = 7.0;
417  const double roi_step2 = 6.0;
418  if (use_mask) {
419  mask = false;
420  for (unsigned int i = (unsigned int)(I.getRows() / roi_step);
421  i < (unsigned int)(I.getRows() * roi_step2 / roi_step); i++) {
422  for (unsigned int j = (unsigned int)(I.getCols() / roi_step);
423  j < (unsigned int)(I.getCols() * roi_step2 / roi_step); j++) {
424  mask[i][j] = true;
425  }
426  }
427  tracker.setMask(mask);
428  }
429 
430  vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
431 
432  vpImage<vpRGBa> results(I.getHeight(), I.getWidth() + I_depth.getWidth());
433  vpImage<vpRGBa> resultsColor(I.getHeight(), I.getWidth());
434  vpImage<vpRGBa> resultsDepth(I_depth.getHeight(), I_depth.getWidth());
435  if (save) {
436  vpIoTools::makeDirectory("results");
437  }
438  if (opt_display) {
439 #ifdef VISP_HAVE_DISPLAY
440  display1.init(I, 0, 0, "Image");
441  display2.init(I_depth, (int)I.getWidth(), 0, "Depth");
442 #endif
443  }
444 
445  vpHomogeneousMatrix depth_M_color;
446  depth_M_color[0][3] = -0.05;
447  tracker.setCameraTransformationMatrix("Camera2", depth_M_color);
448  tracker.initFromPose(I, cMo_truth);
449 
450  vpFont font(24);
451  bool click = false, quit = false, correct_accuracy = true;
452  std::vector<double> vec_err_t, vec_err_tu;
453  std::vector<double> time_vec;
454  while (read_data(input_directory, cpt_frame, cam_depth, I, I_depth_raw, pointcloud, cMo_truth) && !quit &&
455  (opt_lastFrame > 0 ? (int)cpt_frame <= opt_lastFrame : true)) {
456  vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
457 
458  if (opt_display) {
460  vpDisplay::display(I_depth);
461  }
462  else if (save) {
463  convert(I, resultsColor);
464  convert(I_depth, resultsDepth);
465  }
466 
467  double t = vpTime::measureTimeMs();
468  std::map<std::string, const vpImage<Type> *> mapOfImages;
469  mapOfImages["Camera1"] = &I;
470  std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
471  mapOfPointclouds["Camera2"] = &pointcloud;
472  std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
473  if (!use_depth) {
474  mapOfWidths["Camera2"] = 0;
475  mapOfHeights["Camera2"] = 0;
476  }
477  else {
478  mapOfWidths["Camera2"] = I_depth.getWidth();
479  mapOfHeights["Camera2"] = I_depth.getHeight();
480  }
481 
482  tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
483  vpHomogeneousMatrix cMo = tracker.getPose();
484  t = vpTime::measureTimeMs() - t;
485  time_vec.push_back(t);
486 
487  if (opt_display) {
488  tracker.display(I, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth, vpColor::red, 3);
489  vpDisplay::displayFrame(I, cMo, cam_depth, 0.05, vpColor::none, 3);
490  vpDisplay::displayFrame(I_depth, depth_M_color * cMo, cam_depth, 0.05, vpColor::none, 3);
491 
492  std::stringstream ss;
493  ss << "Frame: " << cpt_frame;
494  vpDisplay::displayText(I_depth, 20, 20, ss.str(), vpColor::red);
495  ss.str("");
496  ss << "Nb features: " << tracker.getError().getRows();
497  vpDisplay::displayText(I_depth, 40, 20, ss.str(), vpColor::red);
498  }
499  else if (save) {
501  std::map<std::string, std::vector<std::vector<double> > > mapOfModels;
502  std::map<std::string, unsigned int> mapOfW;
503  mapOfW["Camera1"] = I.getWidth();
504  mapOfW["Camera2"] = I_depth.getWidth();
505  std::map<std::string, unsigned int> mapOfH;
506  mapOfH["Camera1"] = I.getHeight();
507  mapOfH["Camera2"] = I_depth.getHeight();
508  std::map<std::string, vpHomogeneousMatrix> mapOfcMos;
509  mapOfcMos["Camera1"] = cMo;
510  mapOfcMos["Camera2"] = depth_M_color * cMo;
511  std::map<std::string, vpCameraParameters> mapOfCams;
512  mapOfCams["Camera1"] = cam_color;
513  mapOfCams["Camera2"] = cam_depth;
514  tracker.getModelForDisplay(mapOfModels, mapOfW, mapOfH, mapOfcMos, mapOfCams);
515  for (std::map<std::string, std::vector<std::vector<double> > >::const_iterator it = mapOfModels.begin();
516  it != mapOfModels.end(); ++it) {
517  for (size_t i = 0; i < it->second.size(); i++) {
518  // test if it->second[i][0] = 0
519  if (std::fabs(it->second[i][0]) <= std::numeric_limits<double>::epsilon()) {
520  vpImageDraw::drawLine(it->first == "Camera1" ? resultsColor : resultsDepth,
521  vpImagePoint(it->second[i][1], it->second[i][2]),
522  vpImagePoint(it->second[i][3], it->second[i][4]), vpColor::red, 3);
523  }
524  }
525  }
527 
529  std::map<std::string, std::vector<std::vector<double> > > mapOfFeatures;
530  tracker.getFeaturesForDisplay(mapOfFeatures);
531  for (std::map<std::string, std::vector<std::vector<double> > >::const_iterator it = mapOfFeatures.begin();
532  it != mapOfFeatures.end(); ++it) {
533  for (size_t i = 0; i < it->second.size(); i++) {
534  if (std::fabs(it->second[i][0]) <=
535  std::numeric_limits<double>::epsilon()) { // test it->second[i][0] = 0 for ME
536  vpColor color = vpColor::yellow;
537  if (std::fabs(it->second[i][3]) <= std::numeric_limits<double>::epsilon()) { // test it->second[i][3] = 0
538  color = vpColor::green;
539  }
540  else if (std::fabs(it->second[i][3] - 1) <=
541  std::numeric_limits<double>::epsilon()) { // test it->second[i][3] = 1
542  color = vpColor::blue;
543  }
544  else if (std::fabs(it->second[i][3] - 2) <=
545  std::numeric_limits<double>::epsilon()) { // test it->second[i][3] = 2
546  color = vpColor::purple;
547  }
548  else if (std::fabs(it->second[i][3] - 3) <=
549  std::numeric_limits<double>::epsilon()) { // test it->second[i][3] = 3
550  color = vpColor::red;
551  }
552  else if (std::fabs(it->second[i][3] - 4) <=
553  std::numeric_limits<double>::epsilon()) { // test it->second[i][3] = 4
554  color = vpColor::cyan;
555  }
556  vpImageDraw::drawCross(it->first == "Camera1" ? resultsColor : resultsDepth,
557  vpImagePoint(it->second[i][1], it->second[i][2]), 3, color, 1);
558  }
559  else if (std::fabs(it->second[i][0] - 1) <=
560  std::numeric_limits<double>::epsilon()) { // test it->second[i][0] = 1 for KLT
561  vpImageDraw::drawCross(it->first == "Camera1" ? resultsColor : resultsDepth,
562  vpImagePoint(it->second[i][1], it->second[i][2]), 10, vpColor::red, 1);
563  }
564  }
565  }
567 
568  // Computation time
569  std::ostringstream oss;
570  oss << "Tracking time: " << t << " ms";
571  font.drawText(resultsColor, oss.str(), vpImagePoint(20, 20), vpColor::red);
572  }
573 
574  vpPoseVector pose_est(cMo);
575  vpPoseVector pose_truth(cMo_truth);
576  vpColVector t_est(3), t_truth(3);
577  vpColVector tu_est(3), tu_truth(3);
578  for (unsigned int i = 0; i < 3; i++) {
579  t_est[i] = pose_est[i];
580  t_truth[i] = pose_truth[i];
581  tu_est[i] = pose_est[i + 3];
582  tu_truth[i] = pose_truth[i + 3];
583  }
584 
585  vpColVector t_err = t_truth - t_est, tu_err = tu_truth - tu_est;
586  const double t_thresh =
587  map_thresh[!use_depth ? trackerType_image : trackerType_image | vpMbGenericTracker::DEPTH_DENSE_TRACKER].first;
588  const double tu_thresh =
589  map_thresh[!use_depth ? trackerType_image : trackerType_image | vpMbGenericTracker::DEPTH_DENSE_TRACKER].second;
590  double t_err2 = sqrt(t_err.sumSquare()), tu_err2 = vpMath::deg(sqrt(tu_err.sumSquare()));
591  vec_err_t.push_back(t_err2);
592  vec_err_tu.push_back(tu_err2);
593  if (!use_mask && (t_err2 > t_thresh || tu_err2 > tu_thresh)) { // no accuracy test with mask
594  std::cerr << "Pose estimated exceeds the threshold (t_thresh = " << t_thresh << " ; tu_thresh = " << tu_thresh
595  << ")!" << std::endl;
596  std::cout << "t_err: " << t_err2 << " ; tu_err: " << tu_err2 << std::endl;
597  correct_accuracy = false;
598  }
599 
600  if (opt_display) {
601  if (use_mask) {
602  vpRect roi(vpImagePoint(I.getRows() / roi_step, I.getCols() / roi_step),
603  vpImagePoint(I.getRows() * roi_step2 / roi_step, I.getCols() * roi_step2 / roi_step));
606  }
607 
608  vpDisplay::flush(I);
609  vpDisplay::flush(I_depth);
610  }
611  else if (save) {
613  char buffer[FILENAME_MAX];
614  std::ostringstream oss;
615  oss << "results/image_%04d.png";
616  snprintf(buffer, FILENAME_MAX, oss.str().c_str(), cpt_frame);
617 
618  results.insert(resultsColor, vpImagePoint());
619  results.insert(resultsDepth, vpImagePoint(0, resultsColor.getWidth()));
620 
621  vpImageIo::write(results, buffer);
623  }
624 
625  if (opt_display && opt_click_allowed) {
627  if (vpDisplay::getClick(I, button, click)) {
628  switch (button) {
630  quit = !click;
631  break;
632 
634  click = !click;
635  break;
636 
637  default:
638  break;
639  }
640  }
641  }
642 
643  cpt_frame++;
644  }
645 
646  if (!time_vec.empty())
647  std::cout << "Computation time, Mean: " << vpMath::getMean(time_vec)
648  << " ms ; Median: " << vpMath::getMedian(time_vec) << " ms ; Std: " << vpMath::getStdev(time_vec) << " ms"
649  << std::endl;
650 
651  if (!vec_err_t.empty())
652  std::cout << "Max translation error: " << *std::max_element(vec_err_t.begin(), vec_err_t.end()) << std::endl;
653 
654  if (!vec_err_tu.empty())
655  std::cout << "Max thetau error: " << *std::max_element(vec_err_tu.begin(), vec_err_tu.end()) << std::endl;
656 
657  std::cout << "Test result: " << (correct_accuracy ? "success" : "failure") << std::endl;
658  return correct_accuracy ? EXIT_SUCCESS : EXIT_FAILURE;
659 }
660 } // namespace
661 
662 int main(int argc, const char *argv[])
663 {
664  try {
665  std::string env_ipath;
666  std::string opt_ipath = "";
667  bool opt_click_allowed = true;
668  bool opt_display = true;
669  bool opt_save = false;
670  bool useScanline = false;
671  int trackerType_image = vpMbGenericTracker::EDGE_TRACKER;
672 #if defined(__mips__) || defined(__mips) || defined(mips) || defined(__MIPS__)
673  // To avoid Debian test timeout
674  int opt_lastFrame = 5;
675 #else
676  int opt_lastFrame = -1;
677 #endif
678  bool use_depth = false;
679  bool use_mask = false;
680  bool use_color_image = false;
681 
682  // Get the visp-images-data package path or VISP_INPUT_IMAGE_PATH
683  // environment variable value
684  env_ipath = vpIoTools::getViSPImagesDataPath();
685 
686  // Read the command line options
687  if (!getOptions(argc, argv, opt_ipath, opt_click_allowed, opt_display, opt_save, useScanline, trackerType_image,
688  opt_lastFrame, use_depth, use_mask, use_color_image)) {
689  return EXIT_FAILURE;
690  }
691 
692 #if ! (defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO))
693  if (trackerType_image == 2 || trackerType_image == 3) {
694  std::cout << "Using klt tracker is not possible without OpenCV imgproc and video modules." << std::endl;
695  std::cout << "Use rather command line option -t 1 to use edges." << std::endl;
696  return EXIT_SUCCESS;
697  }
698 #endif
699  std::cout << "trackerType_image: " << trackerType_image << std::endl;
700  std::cout << "useScanline: " << useScanline << std::endl;
701  std::cout << "use_depth: " << use_depth << std::endl;
702  std::cout << "use_mask: " << use_mask << std::endl;
703  std::cout << "use_color_image: " << use_color_image << std::endl;
704 #ifdef VISP_HAVE_COIN3D
705  std::cout << "COIN3D available." << std::endl;
706 #endif
707 
708 #if !defined(VISP_HAVE_MODULE_KLT) || (!defined(VISP_HAVE_OPENCV) || (VISP_HAVE_OPENCV_VERSION < 0x020100))
709  if (trackerType_image & 2) {
710  std::cout << "KLT features cannot be used: ViSP is not built with "
711  "KLT module or OpenCV is not available.\nTest is not run."
712  << std::endl;
713  return EXIT_SUCCESS;
714  }
715 #endif
716 
717  // Test if an input path is set
718  if (opt_ipath.empty() && env_ipath.empty()) {
719  usage(argv[0], nullptr);
720  std::cerr << std::endl << "ERROR:" << std::endl;
721  std::cerr << " Use -i <visp image path> option or set VISP_INPUT_IMAGE_PATH " << std::endl
722  << " environment variable to specify the location of the " << std::endl
723  << " image path where test images are located." << std::endl
724  << std::endl;
725 
726  return EXIT_FAILURE;
727  }
728 
729  std::string input_directory =
730  vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/Castle-simu");
731  if (!vpIoTools::checkDirectory(input_directory)) {
732  std::cerr << "ViSP-images does not contain the folder: " << input_directory << "!" << std::endl;
733  return EXIT_SUCCESS;
734  }
735 
736  if (use_color_image) {
737  return run<vpRGBa>(input_directory, opt_click_allowed, opt_display, useScanline, trackerType_image, opt_lastFrame,
738  use_depth, use_mask, opt_save);
739  }
740  else {
741  return run<unsigned char>(input_directory, opt_click_allowed, opt_display, useScanline, trackerType_image,
742  opt_lastFrame, use_depth, use_mask, opt_save);
743  }
744 
745  std::cout << "Test succeed" << std::endl;
746  return EXIT_SUCCESS;
747  }
748  catch (const vpException &e) {
749  std::cout << "Catch an exception: " << e << std::endl;
750  return EXIT_FAILURE;
751  }
752 }
753 #elif !(defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV))
// Fallback entry point when no linear-algebra backend is available.
int main()
{
  std::cout << "Cannot run this example: install Lapack, Eigen3 or OpenCV" << std::endl;
  return EXIT_SUCCESS;
}
759 #else
// Fallback entry point when the MBT module is not built.
int main()
{
  std::cout << "Enable MBT module (VISP_HAVE_MODULE_MBT) to launch this test." << std::endl;
  return EXIT_SUCCESS;
}
765 #endif
Generic class defining intrinsic camera parameters.
void initPersProjWithoutDistortion(double px, double py, double u0, double v0)
Implementation of column vector and the associated operations.
Definition: vpColVector.h:191
double sumSquare() const
Class to define RGB colors available for display functionalities.
Definition: vpColor.h:157
static const vpColor red
Definition: vpColor.h:217
static const vpColor cyan
Definition: vpColor.h:226
static const vpColor none
Definition: vpColor.h:229
static const vpColor blue
Definition: vpColor.h:223
static const vpColor purple
Definition: vpColor.h:228
static const vpColor yellow
Definition: vpColor.h:225
static const vpColor green
Definition: vpColor.h:220
Display for windows using Direct3D 3rd party. Thus to enable this class Direct3D should be installed....
Definition: vpDisplayD3D.h:106
Display for windows using GDI (available on any windows 32 platform).
Definition: vpDisplayGDI.h:130
The vpDisplayGTK allows to display image using the GTK 3rd party library. Thus to enable this class G...
Definition: vpDisplayGTK.h:133
The vpDisplayOpenCV allows to display image using the OpenCV library. Thus to enable this class OpenC...
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
static void displayRectangle(const vpImage< unsigned char > &I, const vpImagePoint &topLeft, unsigned int width, unsigned int height, const vpColor &color, bool fill=false, unsigned int thickness=1)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
Definition: vpException.h:60
Font drawing functions for image.
Definition: vpFont.h:55
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void createDepthHistogram(const vpImage< uint16_t > &src_depth, vpImage< vpRGBa > &dest_rgba)
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static void drawLine(vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, unsigned char color, unsigned int thickness=1)
static void drawCross(vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, unsigned char color, unsigned int thickness=1)
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Definition: vpImageIo.cpp:147
static void write(const vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Definition: vpImageIo.cpp:291
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Definition: vpImagePoint.h:82
Definition of the vpImage class member functions.
Definition: vpImage.h:131
unsigned int getWidth() const
Definition: vpImage.h:242
void resize(unsigned int h, unsigned int w)
resize the image : Image initialization
Definition: vpImage.h:542
unsigned int getCols() const
Definition: vpImage.h:171
unsigned int getHeight() const
Definition: vpImage.h:181
unsigned int getRows() const
Definition: vpImage.h:212
static std::string getViSPImagesDataPath()
Definition: vpIoTools.cpp:1053
static bool checkFilename(const std::string &filename)
Definition: vpIoTools.cpp:786
static void readBinaryValueLE(std::ifstream &file, int16_t &short_value)
static bool checkDirectory(const std::string &dirname)
Definition: vpIoTools.cpp:396
static std::string createFilePath(const std::string &parent, const std::string &child)
Definition: vpIoTools.cpp:1427
static void makeDirectory(const std::string &dirname)
Definition: vpIoTools.cpp:550
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this ...
Definition: vpKltOpencv.h:74
void setBlockSize(int blockSize)
Definition: vpKltOpencv.h:267
void setQuality(double qualityLevel)
Definition: vpKltOpencv.h:356
void setHarrisFreeParameter(double harris_k)
Definition: vpKltOpencv.h:275
void setMaxFeatures(int maxCount)
Definition: vpKltOpencv.h:315
void setMinDistance(double minDistance)
Definition: vpKltOpencv.h:324
void setWindowSize(int winSize)
Definition: vpKltOpencv.h:377
void setPyramidLevels(int pyrMaxLevel)
Definition: vpKltOpencv.h:343
static double rad(double deg)
Definition: vpMath.h:129
static double getMedian(const std::vector< double > &v)
Definition: vpMath.cpp:322
static double getStdev(const std::vector< double > &v, bool useBesselCorrection=false)
Definition: vpMath.cpp:353
static double getMean(const std::vector< double > &v)
Definition: vpMath.cpp:302
static double deg(double rad)
Definition: vpMath.h:119
Real-time 6D object pose tracking using its CAD model.
@ ROBUST_FEATURE_ESTIMATION
Robust scheme to estimate the normal of the plane.
Definition: vpMe.h:134
void setMu1(const double &mu_1)
Definition: vpMe.h:385
void setRange(const unsigned int &range)
Definition: vpMe.h:415
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
Definition: vpMe.h:505
void setMaskNumber(const unsigned int &mask_number)
Definition: vpMe.cpp:552
void setThreshold(const double &threshold)
Definition: vpMe.h:466
void setSampleStep(const double &sample_step)
Definition: vpMe.h:422
void setMaskSize(const unsigned int &mask_size)
Definition: vpMe.cpp:560
void setMu2(const double &mu_2)
Definition: vpMe.h:392
@ NORMALIZED_THRESHOLD
Definition: vpMe.h:145
static bool parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, int flags)
Definition: vpParseArgv.cpp:70
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
Implementation of a pose vector and operations on poses.
Definition: vpPoseVector.h:203
Defines a rectangle in the plane.
Definition: vpRect.h:79
VISP_EXPORT double measureTimeMs()