Visual Servoing Platform version 3.2.0 under development (2019-01-22)
mbtGenericTrackingDepth.cpp
1 /****************************************************************************
2  *
3  * ViSP, open source Visual Servoing Platform software.
4  * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
5  *
6  * This software is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  * See the file LICENSE.txt at the root directory of this source
11  * distribution for additional information about the GNU GPL.
12  *
13  * For using ViSP with software that can not be combined with the GNU
14  * GPL, please contact Inria about acquiring a ViSP Professional
15  * Edition License.
16  *
17  * See http://visp.inria.fr for more information.
18  *
19  * This software was developed at:
20  * Inria Rennes - Bretagne Atlantique
21  * Campus Universitaire de Beaulieu
22  * 35042 Rennes Cedex
23  * France
24  *
25  * If you have questions regarding the use of this file, please contact
26  * Inria at visp@inria.fr
27  *
28  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
29  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
30  *
31  * Description:
32  * Example of tracking with vpMbGenericTracker on the castel sequence.
33  *
34  *****************************************************************************/
35 
42 #include <cstdlib>
43 #include <iostream>
44 #include <visp3/core/vpConfig.h>
45 
46 #if defined(VISP_HAVE_MODULE_MBT) && defined(VISP_HAVE_DISPLAY)
47 
48 #include <visp3/core/vpDebug.h>
49 #include <visp3/core/vpHomogeneousMatrix.h>
50 #include <visp3/core/vpIoTools.h>
51 #include <visp3/core/vpMath.h>
52 #include <visp3/gui/vpDisplayD3D.h>
53 #include <visp3/gui/vpDisplayGDI.h>
54 #include <visp3/gui/vpDisplayGTK.h>
55 #include <visp3/gui/vpDisplayOpenCV.h>
56 #include <visp3/gui/vpDisplayX.h>
57 #include <visp3/io/vpImageIo.h>
58 #include <visp3/io/vpParseArgv.h>
59 #include <visp3/io/vpVideoReader.h>
60 #include <visp3/mbt/vpMbGenericTracker.h>
61 
62 #define GETOPTARGS "x:X:m:M:i:n:dchfolwvpt:T:e:"
63 
64 #define USE_XML 1
65 #define USE_SMALL_DATASET 1 // small depth dataset in ViSP-images
66 
67 namespace
68 {
69 void usage(const char *name, const char *badparam)
70 {
71  fprintf(stdout, "\n\
72  Example of tracking with vpMbGenericTracker.\n\
73  \n\
74  SYNOPSIS\n\
75  %s [-i <test image path>] [-x <config file>] [-X <config file depth>]\n\
76  [-m <model name>] [-M <model name depth>] [-n <initialisation file base name>]\n\
77  [-f] [-c] [-d] [-h] [-o] [-w] [-l] [-v] [-p]\n\
78  [-t <tracker type>] [-T <tracker type>] [-e <last frame index>]\n", name);
79 
80  fprintf(stdout, "\n\
81  OPTIONS: \n\
82  -i <input image path> \n\
83  Set image input path.\n\
84  These images come from ViSP-images-x.y.z.tar.gz available \n\
85  on the ViSP website.\n\
86  Setting the VISP_INPUT_IMAGE_PATH environment\n\
87  variable produces the same behavior as using\n\
88  this option.\n\
89  \n\
90  -x <config file> \n\
91  Set the config file (the xml file) to use.\n\
92  The config file is used to specify the parameters of the tracker.\n\
93  \n\
94  -X <config file> \n\
95  Set the config file (the xml file) to use for the depth sensor.\n\
96  The config file is used to specify the parameters of the tracker.\n\
97  \n\
98  -m <model name> \n\
99  Specify the name of the model file.\n\
100  The model can either be a vrml model (.wrl) or a .cao file.\n\
101  \n\
102  -M <model name> \n\
103  Specify the name of the model file for the depth sensor.\n\
104  The model can either be a vrml model (.wrl) or a .cao file.\n\
105  \n\
106  -n <initialisation file base name> \n\
107  Base name of the initialisation file. The file will be 'base_name'.init.\n\
108  This base name is also used for the optional picture specifying where to \n\
109  click (a .ppm picture).\n\
110  \n\
111  -f \n\
112  Turn off the display of the moving edges and Klt points.\n\
113  \n\
114  -d \n\
115  Turn off the display.\n\
116  \n\
117  -c\n\
118  Disable the mouse click. Useful to automate the \n\
119  execution of this program without human intervention.\n\
120  \n\
121  -o\n\
122  Use Ogre3D for visibility tests\n\
123  \n\
124  -w\n\
125  When Ogre3D is enabled [-o], show the Ogre3D configuration dialog that allows setting the renderer.\n\
126  \n\
127  -l\n\
128  Use the scanline for visibility tests.\n\
129  \n\
130  -v\n\
131  Compute covariance matrix.\n\
132  \n\
133  -p\n\
134  Compute gradient projection error.\n\
135  \n\
136  -t <tracker type>\n\
137  Set tracker type (<1 (Edge)>, <2 (KLT)>, <3 (both)>) for color sensor.\n\
138  \n\
139  -T <tracker type>\n\
140  Set tracker type (<4 (Depth normal)>, <8 (Depth dense)>, <12 (both)>) for depth sensor.\n\
141  \n\
142  -e <last frame index>\n\
143  Specify the index of the last frame. Once reached, the tracking is stopped.\n\
144  \n\
145  -h \n\
146  Print the help.\n\n");
147 
148  if (badparam)
149  fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
150 }
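// Example invocation (illustrative values; the executable name depends on your build):
//   ./mbtGenericTrackingDepth -t 1 -T 8 -e 30 -c -d
// tracks with moving edges only on the color image (-t 1) and dense depth features on
// the depth map (-T 8), stops after frame 30 (-e 30), and disables mouse clicks (-c)
// and the display (-d) so the example runs without user interaction.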
151 
152 bool getOptions(int argc, const char **argv, std::string &ipath, std::string &configFile, std::string &configFile_depth,
153  std::string &modelFile, std::string &modelFile_depth, std::string &initFile, bool &displayFeatures,
154  bool &click_allowed, bool &display, bool &useOgre, bool &showOgreConfigDialog, bool &useScanline,
155  bool &computeCovariance, bool &projectionError, int &trackerType, int &tracker_type_depth,
156  int &lastFrame)
157 {
158  const char *optarg_;
159  int c;
160  while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {
161 
162  switch (c) {
163  case 'i':
164  ipath = optarg_;
165  break;
166  case 'x':
167  configFile = optarg_;
168  break;
169  case 'X':
170  configFile_depth = optarg_;
171  break;
172  case 'm':
173  modelFile = optarg_;
174  break;
175  case 'M':
176  modelFile_depth = optarg_;
177  break;
178  case 'n':
179  initFile = optarg_;
180  break;
181  case 'f':
182  displayFeatures = false;
183  break;
184  case 'c':
185  click_allowed = false;
186  break;
187  case 'd':
188  display = false;
189  break;
190  case 'o':
191  useOgre = true;
192  break;
193  case 'l':
194  useScanline = true;
195  break;
196  case 'w':
197  showOgreConfigDialog = true;
198  break;
199  case 'v':
200  computeCovariance = true;
201  break;
202  case 'p':
203  projectionError = true;
204  break;
205  case 't':
206  trackerType = atoi(optarg_);
207  break;
208  case 'T':
209  tracker_type_depth = atoi(optarg_);
210  break;
211  case 'e':
212  lastFrame = atoi(optarg_);
213  break;
214  case 'h':
215  usage(argv[0], NULL);
216  return false;
217  break;
218 
219  default:
220  usage(argv[0], optarg_);
221  return false;
222  break;
223  }
224  }
225 
226  if ((c == 1) || (c == -1)) {
227  // standalone param or error
228  usage(argv[0], NULL);
229  std::cerr << "ERROR: " << std::endl;
230  std::cerr << " Bad argument " << optarg_ << std::endl << std::endl;
231  return false;
232  }
233 
234  return true;
235 }
236 
237 struct rs_intrinsics {
238  float ppx;       /**< Horizontal coordinate of the principal point, as a pixel offset from the left edge */
240  float ppy;       /**< Vertical coordinate of the principal point, as a pixel offset from the top edge */
242  float fx;        /**< Focal length of the image plane, as a multiple of pixel width */
244  float fy;        /**< Focal length of the image plane, as a multiple of pixel height */
246  float coeffs[5]; /**< Distortion coefficients */
247 };
248 
249 void rs_deproject_pixel_to_point(float point[3], const rs_intrinsics &intrin, const float pixel[2], float depth)
250 {
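 // Back-project the depth pixel to a 3D point expressed in the depth camera frame:
 // normalize the pixel coordinates with the intrinsics, apply the radial
 // (coeffs[0], coeffs[1], coeffs[4]) and tangential (coeffs[2], coeffs[3])
 // distortion terms, then scale by the measured depth.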
251  float x = (pixel[0] - intrin.ppx) / intrin.fx;
252  float y = (pixel[1] - intrin.ppy) / intrin.fy;
253 
254  float r2 = x * x + y * y;
255  float f = 1 + intrin.coeffs[0] * r2 + intrin.coeffs[1] * r2 * r2 + intrin.coeffs[4] * r2 * r2 * r2;
256  float ux = x * f + 2 * intrin.coeffs[2] * x * y + intrin.coeffs[3] * (r2 + 2 * x * x);
257  float uy = y * f + 2 * intrin.coeffs[3] * x * y + intrin.coeffs[2] * (r2 + 2 * y * y);
258 
259  x = ux;
260  y = uy;
261 
262  point[0] = depth * x;
263  point[1] = depth * y;
264  point[2] = depth;
265 }
266 
267 bool read_data(const unsigned int cpt, const std::string &input_directory, vpImage<unsigned char> &I,
268  vpImage<uint16_t> &I_depth_raw, std::vector<vpColVector> &pointcloud, unsigned int &pointcloud_width,
269  unsigned int &pointcloud_height)
270 {
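 // Each depth frame is stored in a simple binary format: the image height and
 // width as little-endian unsigned int values, followed by height * width
 // little-endian 16-bit depth values in row-major order.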
271  char buffer[256];
272 
273  // Read image
274  std::stringstream ss;
275  ss << input_directory << "/image_%04d.pgm";
276  sprintf(buffer, ss.str().c_str(), cpt);
277  std::string filename_image = buffer;
278 
279  if (!vpIoTools::checkFilename(filename_image)) {
280  std::cerr << "Cannot read: " << filename_image << std::endl;
281  return false;
282  }
283  vpImageIo::read(I, filename_image);
284 
285  // Read raw depth
286  ss.str("");
287  ss << input_directory << "/depth_image_%04d.bin";
288  sprintf(buffer, ss.str().c_str(), cpt);
289  std::string filename_depth = buffer;
290 
291  std::ifstream file_depth(filename_depth.c_str(), std::ios::in | std::ios::binary);
292  if (!file_depth.is_open()) {
293  return false;
294  }
295 
296  unsigned int height = 0, width = 0;
297  vpIoTools::readBinaryValueLE(file_depth, height);
298  vpIoTools::readBinaryValueLE(file_depth, width);
299 
300  I_depth_raw.resize(height, width);
301 
302  uint16_t depth_value = 0;
303  for (unsigned int i = 0; i < height; i++) {
304  for (unsigned int j = 0; j < width; j++) {
305  vpIoTools::readBinaryValueLE(file_depth, depth_value);
306  I_depth_raw[i][j] = depth_value;
307  }
308  }
309 
310  // Transform pointcloud
311  pointcloud_width = width;
312  pointcloud_height = height;
313  pointcloud.resize((size_t)width * height);
314 
315  // Only for Creative SR300
316  const float depth_scale = 0.000124986647f;
317  rs_intrinsics depth_intrinsic;
318  depth_intrinsic.ppx = 311.484558f;
319  depth_intrinsic.ppy = 246.283234f;
320  depth_intrinsic.fx = 476.053619f;
321  depth_intrinsic.fy = 476.053497f;
322  depth_intrinsic.coeffs[0] = 0.165056542f;
323  depth_intrinsic.coeffs[1] = -0.0508309528f;
324  depth_intrinsic.coeffs[2] = 0.00435937941f;
325  depth_intrinsic.coeffs[3] = 0.00541406544f;
326  depth_intrinsic.coeffs[4] = 0.250085592f;
327 
328  for (unsigned int i = 0; i < height; i++) {
329  for (unsigned int j = 0; j < width; j++) {
330  float scaled_depth = I_depth_raw[i][j] * depth_scale;
331  float point[3];
332  float pixel[2] = {(float)j, (float)i};
333  rs_deproject_pixel_to_point(point, depth_intrinsic, pixel, scaled_depth);
334 
335  vpColVector data_3D(3);
336  data_3D[0] = point[0];
337  data_3D[1] = point[1];
338  data_3D[2] = point[2];
339 
340  pointcloud[(size_t)(i * width + j)] = data_3D;
341  }
342  }
343 
344  return true;
345 }
346 
347 void loadConfiguration(vpMbTracker *const tracker,
348  const std::string &
349 #if defined(VISP_HAVE_XML2) && USE_XML
350  configFile
351 #endif
352  ,
353  const std::string &
354 #if defined(VISP_HAVE_XML2) && USE_XML
355  configFile_depth
356 #endif
357 )
358 {
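 // When ViSP is built with libxml2, all tracker parameters are read from the two
 // xml configuration files (one for the color camera, one for the depth camera).
 // Otherwise, equivalent moving-edge, KLT, depth and camera parameters are set
 // programmatically below.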
359 #if defined(VISP_HAVE_XML2) && USE_XML
360  // From the xml file
361  dynamic_cast<vpMbGenericTracker *>(tracker)->loadConfigFile(configFile, configFile_depth);
362 #else
363  // Edge
364  vpMe me;
365  me.setMaskSize(5);
366  me.setMaskNumber(180);
367  me.setRange(8);
368  me.setThreshold(10000);
369  me.setMu1(0.5);
370  me.setMu2(0.5);
371  me.setSampleStep(4);
372  dynamic_cast<vpMbGenericTracker *>(tracker)->setMovingEdge(me);
373 
374 // Klt
375 #if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
376  vpKltOpencv klt;
377  klt.setMaxFeatures(10000);
378  klt.setWindowSize(5);
379  klt.setQuality(0.01);
380  klt.setMinDistance(5);
381  klt.setHarrisFreeParameter(0.02);
382  klt.setBlockSize(3);
383  klt.setPyramidLevels(3);
384 
385  dynamic_cast<vpMbGenericTracker *>(tracker)->setKltOpencv(klt);
386  dynamic_cast<vpMbGenericTracker *>(tracker)->setKltMaskBorder(5);
387 #endif
388 
389  // Depth
390  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthNormalFeatureEstimationMethod(
391      vpMbtFaceDepthNormal::ROBUST_FEATURE_ESTIMATION);
392  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthNormalPclPlaneEstimationMethod(2);
393  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthNormalPclPlaneEstimationRansacMaxIter(200);
394  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthNormalPclPlaneEstimationRansacThreshold(0.001);
395  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthNormalSamplingStep(2, 2);
396 
397  dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthDenseSamplingStep(4, 4);
398 
399  vpCameraParameters cam1, cam2;
400  cam1.initPersProjWithoutDistortion(615.1674804688, 615.1675415039, 312.1889953613, 243.4373779297);
401  cam2.initPersProjWithoutDistortion(476.0536193848, 476.0534973145, 311.4845581055, 246.2832336426);
402 
403  dynamic_cast<vpMbGenericTracker *>(tracker)->setCameraParameters(cam1, cam2);
404 
405  tracker->setAngleAppear(vpMath::rad(70));
406  tracker->setAngleDisappear(vpMath::rad(80));
407 
408  // Specify the clipping to use
409  tracker->setNearClippingDistance(0.01);
410  tracker->setFarClippingDistance(2.0);
411  tracker->setClipping(tracker->getClipping() | vpMbtPolygon::FOV_CLIPPING);
412 // tracker->setClipping(tracker->getClipping() | vpMbtPolygon::LEFT_CLIPPING
413 // | vpMbtPolygon::RIGHT_CLIPPING | vpMbtPolygon::UP_CLIPPING |
414 // vpMbtPolygon::DOWN_CLIPPING); // Equivalent to FOV_CLIPPING
415 #endif
416 }
417 }
418 
419 int main(int argc, const char **argv)
420 {
421  {
422  // Test TukeyEstimator
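 // With these residuals the median is 0.14, so the 0.5 residual falls far outside
 // the Tukey rejection band and should receive a weight close to 0, while the
 // other residuals keep weights close to 1.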
423  {
424  vpMbtTukeyEstimator<double> tukey_estimator;
425  std::vector<double> residues;
426  residues.push_back(0.5);
427  residues.push_back(0.1);
428  residues.push_back(0.15);
429  residues.push_back(0.14);
430  residues.push_back(0.12);
431  std::vector<double> weights(5, 1);
432 
433  tukey_estimator.MEstimator(residues, weights, 1e-3);
434 
435  for (size_t i = 0; i < weights.size(); i++) {
436  std::cout << "residues[" << i << "]=" << residues[i] << " ; weights[" << i << "]=" << weights[i] << std::endl;
437  }
438  std::cout << std::endl;
439  }
440 
441  {
442  vpMbtTukeyEstimator<float> tukey_estimator;
443  std::vector<float> residues;
444  residues.push_back(0.5f);
445  residues.push_back(0.1f);
446  residues.push_back(0.15f);
447  residues.push_back(0.14f);
448  residues.push_back(0.12f);
449  std::vector<float> weights(5, 1);
450 
451  tukey_estimator.MEstimator(residues, weights, (float)1e-3);
452 
453  for (size_t i = 0; i < weights.size(); i++) {
454  std::cout << "residues[" << i << "]=" << residues[i] << " ; weights[" << i << "]=" << weights[i] << std::endl;
455  }
456  std::cout << std::endl;
457  }
458  }
459 
460  try {
461  std::string env_ipath;
462  std::string opt_ipath;
463  std::string ipath;
464  std::string opt_configFile;
465  std::string opt_configFile_depth;
466  std::string opt_modelFile;
467  std::string opt_modelFile_depth;
468  std::string opt_initFile;
469  std::string initFile;
470  bool displayFeatures = true;
471  bool opt_click_allowed = true;
472  bool opt_display = true;
473  bool useOgre = false;
474  bool showOgreConfigDialog = false;
475  bool useScanline = false;
476  bool computeCovariance = false;
477  bool projectionError = false;
478  int trackerType_image = vpMbGenericTracker::EDGE_TRACKER;
479  int trackerType_depth = vpMbGenericTracker::DEPTH_DENSE_TRACKER;
480 #if defined(__mips__) || defined(__mips) || defined(mips) || defined(__MIPS__)
481  // To avoid Debian test timeout
482  int opt_lastFrame = 5;
483 #else
484  int opt_lastFrame = -1;
485 #endif
486 
487  // Get the visp-images-data package path or VISP_INPUT_IMAGE_PATH
488  // environment variable value
489  env_ipath = vpIoTools::getViSPImagesDataPath();
490 
491  // Set the default input path
492  if (!env_ipath.empty())
493  ipath = env_ipath;
494 
495  // Read the command line options
496  if (!getOptions(argc, argv, opt_ipath, opt_configFile, opt_configFile_depth, opt_modelFile, opt_modelFile_depth,
497  opt_initFile, displayFeatures, opt_click_allowed, opt_display, useOgre, showOgreConfigDialog,
498  useScanline, computeCovariance, projectionError, trackerType_image, trackerType_depth,
499  opt_lastFrame)) {
500  return EXIT_FAILURE;
501  }
502 
503 #if !defined(VISP_HAVE_MODULE_KLT) || (!defined(VISP_HAVE_OPENCV) || (VISP_HAVE_OPENCV_VERSION < 0x020100))
504  if (trackerType_image == /*vpMbGenericTracker::KLT_TRACKER*/ 2) {
505  std::cout << "KLT only features cannot be used: ViSP is not built with "
506  "KLT module or OpenCV is not available."
507  << std::endl;
508  return EXIT_SUCCESS;
509  }
510 #endif
511 
512  // Test if an input path is set
513  if (opt_ipath.empty() && env_ipath.empty()) {
514  usage(argv[0], NULL);
515  std::cerr << std::endl << "ERROR:" << std::endl;
516  std::cerr << " Use -i <visp image path> option or set VISP_INPUT_IMAGE_PATH " << std::endl
517  << " environment variable to specify the location of the " << std::endl
518  << " image path where test images are located." << std::endl
519  << std::endl;
520 
521  return EXIT_FAILURE;
522  }
523 
524  // Get the option values
525  ipath = vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/castel");
526 
527  std::string dir_path = vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth");
528  if (!vpIoTools::checkDirectory(dir_path)) {
529  std::cerr << "ViSP-images does not contain the folder: " << dir_path << "!" << std::endl;
530  return EXIT_SUCCESS;
531  }
532 
533  std::string configFile, configFile_depth;
534  if (!opt_configFile.empty())
535  configFile = opt_configFile;
536  else
537  configFile =
538  vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau.xml");
539 
540  if (!opt_configFile_depth.empty())
541  configFile_depth = opt_configFile_depth;
542  else
543  configFile_depth =
544  vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau_depth.xml");
545 
546  std::string modelFile, modelFile_depth;
547  if (!opt_modelFile.empty())
548  modelFile = opt_modelFile;
549  else {
550 #if defined(VISP_HAVE_COIN3D) && (COIN_MAJOR_VERSION == 2 || COIN_MAJOR_VERSION == 3 || COIN_MAJOR_VERSION == 4)
551  modelFile =
552  vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau_gantry.wrl");
553 #else
554  modelFile = vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau.cao");
555 #endif
556  }
557 
558  if (!opt_modelFile_depth.empty())
559  modelFile_depth = opt_modelFile_depth;
560  else
561  modelFile_depth =
562  vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau.cao");
563 
564  std::string vrml_ext = ".wrl";
565  bool use_vrml =
566  (modelFile.compare(modelFile.length() - vrml_ext.length(), vrml_ext.length(), vrml_ext) == 0) ||
567  (modelFile_depth.compare(modelFile_depth.length() - vrml_ext.length(), vrml_ext.length(), vrml_ext) == 0);
568 
569  if (use_vrml) {
570 #if defined(VISP_HAVE_COIN3D) && (COIN_MAJOR_VERSION == 2 || COIN_MAJOR_VERSION == 3 || COIN_MAJOR_VERSION == 4)
571  std::cout << "use_vrml: " << use_vrml << std::endl;
572 #else
573  std::cerr << "Error: vrml model file is only supported if ViSP is "
574  "build with Coin3D 3rd party"
575  << std::endl;
576  return EXIT_FAILURE;
577 #endif
578  }
579 
580  if (!opt_initFile.empty())
581  initFile = opt_initFile;
582  else
583  initFile = vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/chateau.init");
584 
585  vpImage<unsigned char> I, I_depth;
586  vpImage<uint16_t> I_depth_raw;
587  std::vector<vpColVector> pointcloud;
588  unsigned int pointcloud_width, pointcloud_height;
589  if (!read_data(0, ipath, I, I_depth_raw, pointcloud, pointcloud_width, pointcloud_height)) {
590  std::cerr << "Cannot open sequence: " << ipath << std::endl;
591  return EXIT_FAILURE;
592  }
593 
594  vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
595 
596 // initialise a display
597 #if defined VISP_HAVE_X11
598  vpDisplayX display1, display2;
599 #elif defined VISP_HAVE_GDI
600  vpDisplayGDI display1, display2;
601 #elif defined VISP_HAVE_OPENCV
602  vpDisplayOpenCV display1, display2;
603 #elif defined VISP_HAVE_D3D9
604  vpDisplayD3D display1, display2;
605 #elif defined VISP_HAVE_GTK
606  vpDisplayGTK display1, display2;
607 #else
608  opt_display = false;
609 #endif
610  if (opt_display) {
611 #if defined(VISP_HAVE_DISPLAY)
612  display1.setDownScalingFactor(vpDisplay::SCALE_AUTO);
613  display2.setDownScalingFactor(vpDisplay::SCALE_AUTO);
614  display1.init(I, 100, 100, "Test tracking (Left)");
615  display2.init(I_depth, (int)(I.getWidth() / vpDisplay::getDownScalingFactor(I)) + 110, 100,
616  "Test tracking (Right)");
617 #endif
618  vpDisplay::display(I);
619  vpDisplay::display(I_depth);
620  vpDisplay::flush(I);
621  vpDisplay::flush(I_depth);
622  }
623 
624  std::vector<int> trackerTypes(2);
625  trackerTypes[0] = trackerType_image;
626  trackerTypes[1] = trackerType_depth;
627  // Object pointer to check that inheritance is ok
628  vpMbTracker *tracker = new vpMbGenericTracker(trackerTypes);
629  vpHomogeneousMatrix c1Mo, c2Mo;
630  vpCameraParameters cam1, cam2;
631 
632  loadConfiguration(tracker, configFile, configFile_depth);
633 
634  vpHomogeneousMatrix depth_M_color;
635  std::string depth_M_color_filename =
636  vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/castel/depth_M_color.txt");
637  {
638  std::ifstream depth_M_color_file(depth_M_color_filename.c_str());
639  depth_M_color.load(depth_M_color_file);
640  std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformationMatrices;
641  mapOfCameraTransformationMatrices["Camera2"] = depth_M_color;
642  dynamic_cast<vpMbGenericTracker *>(tracker)->setCameraTransformationMatrix(mapOfCameraTransformationMatrices);
643  }
644 
645  // Display the moving edges, and the Klt points
646  tracker->setDisplayFeatures(displayFeatures);
647 
648  // Tells if the tracker has to use Ogre3D for visibility tests
649  tracker->setOgreVisibilityTest(useOgre);
650  if (useOgre)
651  tracker->setOgreShowConfigDialog(showOgreConfigDialog);
652 
653  // Tells if the tracker has to use the scanline visibility tests
654  tracker->setScanLineVisibilityTest(useScanline);
655 
656  // Tells if the tracker has to compute the covariance matrix
657  tracker->setCovarianceComputation(computeCovariance);
658 
659  // Tells if the tracker has to compute the projection error
660  tracker->setProjectionErrorComputation(projectionError);
661 
662  // Retrieve the camera parameters from the tracker
663  dynamic_cast<vpMbGenericTracker *>(tracker)->getCameraParameters(cam1, cam2);
664 
665  // Loop to position the cube
666  if (opt_display && opt_click_allowed) {
667  while (!vpDisplay::getClick(I, false)) {
668  vpDisplay::display(I);
669  vpDisplay::displayText(I, 15, 10, "click after positioning the object", vpColor::red);
670  vpDisplay::flush(I);
671  }
672  }
673 
674  // Load the 3D model (either a vrml file or a .cao file)
675  dynamic_cast<vpMbGenericTracker *>(tracker)->loadModel(modelFile, modelFile_depth);
676 
677  if (opt_display && opt_click_allowed) {
678  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
679  mapOfImages["Camera1"] = &I;
680  mapOfImages["Camera2"] = &I_depth;
681  std::map<std::string, std::string> mapOfInitFiles;
682  mapOfInitFiles["Camera1"] = initFile;
683 
684  // Initialise the tracker by clicking on the image
685  dynamic_cast<vpMbGenericTracker *>(tracker)->initClick(mapOfImages, mapOfInitFiles, true);
686  dynamic_cast<vpMbGenericTracker *>(tracker)->getPose(c1Mo, c2Mo);
687  // display the 3D model at the given pose
688  dynamic_cast<vpMbGenericTracker *>(tracker)->display(I, I_depth, c1Mo, c2Mo, cam1, cam2, vpColor::red);
689  } else {
690  vpHomogeneousMatrix c1Moi(0.06846423368, 0.09062570884, 0.3401096693, -2.671882598, 0.1174275908, -0.6011935263);
691  vpHomogeneousMatrix c2Moi(0.04431452054, 0.09294637757, 0.3357760654, -2.677922443, 0.121297639, -0.6028463357);
692  dynamic_cast<vpMbGenericTracker *>(tracker)->initFromPose(I, I_depth, c1Moi, c2Moi);
693  }
694 
695  // track the model
696  {
697  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
698  mapOfImages["Camera1"] = &I;
699  std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
700  mapOfPointclouds["Camera2"] = &pointcloud;
701  std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
702  mapOfWidths["Camera2"] = pointcloud_width;
703  mapOfHeights["Camera2"] = pointcloud_height;
704 
705  dynamic_cast<vpMbGenericTracker *>(tracker)->track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
706  }
707  dynamic_cast<vpMbGenericTracker *>(tracker)->getPose(c1Mo, c2Mo);
708 
709  if (opt_display) {
710  vpDisplay::flush(I);
711  vpDisplay::flush(I_depth);
712  }
713 
714  bool quit = false, click = false;
715  unsigned int frame_index = 0;
716  std::vector<double> time_vec;
717  while (read_data(frame_index, ipath, I, I_depth_raw, pointcloud, pointcloud_width, pointcloud_height) && !quit &&
718  (opt_lastFrame > 0 ? (int)frame_index <= opt_lastFrame : true)) {
719  vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
720 
721  if (opt_display) {
722  vpDisplay::display(I);
723  vpDisplay::display(I_depth);
724 
725  std::stringstream ss;
726  ss << "Num frame: " << frame_index;
727  vpDisplay::displayText(I, 40, 20, ss.str(), vpColor::red);
728  }
729 
730  // Test resetting the tracker
731  if (frame_index == 10) {
732  std::cout << "----------Test reset tracker----------" << std::endl;
733  if (opt_display) {
734  vpDisplay::display(I);
735  vpDisplay::display(I_depth);
736  }
737 
738  tracker->resetTracker();
739 
740  loadConfiguration(tracker, configFile, configFile_depth);
741  dynamic_cast<vpMbGenericTracker *>(tracker)->loadModel(modelFile, modelFile_depth);
742  dynamic_cast<vpMbGenericTracker *>(tracker)->setCameraParameters(cam1, cam2);
743  tracker->setOgreVisibilityTest(useOgre);
744  tracker->setScanLineVisibilityTest(useScanline);
745  tracker->setCovarianceComputation(computeCovariance);
746  tracker->setProjectionErrorComputation(projectionError);
747  dynamic_cast<vpMbGenericTracker *>(tracker)->initFromPose(I, I_depth, c1Mo, c2Mo);
748  }
749 
750 // Test to set an initial pose
751 #if USE_SMALL_DATASET
752  if (frame_index == 20) {
753  c1Mo.buildFrom(0.07734634051, 0.08993639906, 0.342344402, -2.708409543, 0.0669276477, -0.3798958303);
754  c2Mo.buildFrom(0.05319520317, 0.09223511976, 0.3380095812, -2.71438192, 0.07141055397, -0.3810081638);
755 #else
756  if (frame_index == 50) {
757  c1Mo.buildFrom(0.09280663035, 0.09277655672, 0.330415149, -2.724431817, 0.0293932671, 0.02027966377);
758  c2Mo.buildFrom(0.06865933578, 0.09494713501, 0.3260555142, -2.730027451, 0.03498390135, 0.01989831338);
759 #endif
760  std::cout << "Test set pose" << std::endl;
761  dynamic_cast<vpMbGenericTracker *>(tracker)->setPose(I, I_depth, c1Mo, c2Mo);
762  }
763 
764 #if USE_SMALL_DATASET
765  // track the object: stop tracking from frame 15 to 20
766  if (frame_index < 15 || frame_index >= 20) {
767 #else
768  // track the object: stop tracking from frame 30 to 50
769  if (frame_index < 30 || frame_index >= 50) {
770 #endif
771  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
772  mapOfImages["Camera1"] = &I;
773  std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
774  mapOfPointclouds["Camera2"] = &pointcloud;
775  std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
776  mapOfWidths["Camera2"] = pointcloud_width;
777  mapOfHeights["Camera2"] = pointcloud_height;
778 
779  double t = vpTime::measureTimeMs();
780  dynamic_cast<vpMbGenericTracker *>(tracker)->track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
781  t = vpTime::measureTimeMs() - t;
782  time_vec.push_back(t);
783 
784  dynamic_cast<vpMbGenericTracker *>(tracker)->getPose(c1Mo, c2Mo);
785 
786  if (opt_display) {
787  // display the 3D model
788  dynamic_cast<vpMbGenericTracker *>(tracker)->display(I, I_depth, c1Mo, c2Mo, cam1, cam2, vpColor::darkRed);
789  // display the frame
790  vpDisplay::displayFrame(I, c1Mo, cam1, 0.05);
791  vpDisplay::displayFrame(I_depth, c2Mo, cam2, 0.05);
792  // computation time
793  std::stringstream ss;
794  ss << "Computation time: " << t << " ms";
795  vpDisplay::displayText(I, 60, 20, ss.str(), vpColor::red);
796  // nb features
797  ss.str("");
798  ss << "nb features: " << tracker->getError().getRows();
799  vpDisplay::displayText(I_depth, 80, 20, ss.str(), vpColor::red);
800  }
801  }
802 
803  if (opt_click_allowed && opt_display) {
804  vpDisplay::displayText(I, 10, 10, "Click to quit", vpColor::red);
805  vpMouseButton::vpMouseButtonType button;
806  if (vpDisplay::getClick(I, button, click)) {
807  switch (button) {
808  case vpMouseButton::button1:
809  quit = !click;
810  break;
811 
812  case vpMouseButton::button3:
813  click = !click;
814  break;
815 
816  default:
817  break;
818  }
819  }
820  }
821 
822  if (computeCovariance) {
823  std::cout << "Covariance matrix: \n" << tracker->getCovarianceMatrix() << std::endl << std::endl;
824  }
825 
826  if (projectionError) {
827  std::cout << "Projection error: " << tracker->getProjectionError() << std::endl << std::endl;
828  }
829 
830  if (opt_display) {
831  vpDisplay::flush(I);
832  vpDisplay::flush(I_depth);
833  }
834 
835  frame_index++;
836  }
837 
838  std::cout << "\nFinal poses, c1Mo:\n" << c1Mo << "\nc2Mo:\n" << c2Mo << std::endl;
839  std::cout << "\nComputation time, Mean: " << vpMath::getMean(time_vec)
840  << " ms ; Median: " << vpMath::getMedian(time_vec) << " ms ; Std: " << vpMath::getStdev(time_vec) << " ms"
841  << std::endl;
842 
843  if (opt_click_allowed && !quit) {
844  vpDisplay::getClick(I);
845  }
846 
847  delete tracker;
848  tracker = NULL;
849 
850 #if defined(VISP_HAVE_XML2) && USE_XML
851  // Cleanup memory allocated by xml library used to parse the xml config
852  // file in vpMbGenericTracker::loadConfigFile()
853  vpXmlParser::cleanup();
854 #endif
855 
856 #if defined(VISP_HAVE_COIN3D) && (COIN_MAJOR_VERSION >= 2)
857  // Cleanup memory allocated by Coin library used to load a vrml model in
858  // vpMbGenericTracker::loadModel() We clean only if Coin was used.
859  if (use_vrml)
860  SoDB::finish();
861 #endif
862 
863  return EXIT_SUCCESS;
864  } catch (const vpException &e) {
865  std::cout << "Caught an exception: " << e << std::endl;
866  return EXIT_FAILURE;
867  }
868 }
869 
870 #else
871 int main()
872 {
873  std::cerr << "visp_mbt and visp_gui modules are required to run "
874  "this example."
875  << std::endl;
876  return EXIT_SUCCESS;
877 }
878 #endif
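
For quick reference, the sketch below condenses the color + depth workflow that this example exercises with vpMbGenericTracker. File names, image sizes and the way images and point clouds are filled are placeholders; acquisition, display and the robustness tests of the full example are omitted.

#include <map>
#include <vector>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/mbt/vpMbGenericTracker.h>

int main()
{
  // One edge tracker for the color camera, one dense depth tracker for the depth sensor
  std::vector<int> trackerTypes(2);
  trackerTypes[0] = vpMbGenericTracker::EDGE_TRACKER;
  trackerTypes[1] = vpMbGenericTracker::DEPTH_DENSE_TRACKER;
  vpMbGenericTracker tracker(trackerTypes);

  // Tracker parameters and CAD model for both sensors (placeholder file names)
  tracker.loadConfigFile("chateau.xml", "chateau_depth.xml");
  tracker.loadModel("chateau.cao", "chateau.cao");

  // Rigid transformation from the color camera frame to the depth camera frame
  vpHomogeneousMatrix depth_M_color; // loaded from depth_M_color.txt in the example above
  std::map<std::string, vpHomogeneousMatrix> mapOfTransforms;
  mapOfTransforms["Camera2"] = depth_M_color;
  tracker.setCameraTransformationMatrix(mapOfTransforms);

  // Images and initial poses; in a real application they come from the sensor
  vpImage<unsigned char> I(480, 640), I_depth(480, 640);
  vpHomogeneousMatrix c1Mo, c2Mo;
  tracker.initFromPose(I, I_depth, c1Mo, c2Mo);

  // Per-frame tracking: the image goes to "Camera1", the point cloud to "Camera2";
  // I, I_depth and pointcloud must be updated from the sensor at each iteration
  std::vector<vpColVector> pointcloud(640 * 480, vpColVector(3));
  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
  mapOfImages["Camera1"] = &I;
  std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
  mapOfPointclouds["Camera2"] = &pointcloud;
  std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
  mapOfWidths["Camera2"] = 640;
  mapOfHeights["Camera2"] = 480;

  tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
  tracker.getPose(c1Mo, c2Mo);
  return 0;
}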