Visual Servoing Platform version 3.6.1 under development (2024-11-15)
testGenericTrackerDepth.cpp
/*
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2024 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See https://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * Regression test for depth MBT.
 */

#include <algorithm>
#include <cmath>
#include <cstdlib>
#include <iostream>
#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_MODULE_MBT) && (defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV))

#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
#include <type_traits>
#endif

#include <visp3/core/vpImageConvert.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpPixelMeterConversion.h>
#include <visp3/gui/vpDisplayD3D.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayGTK.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/io/vpParseArgv.h>
#include <visp3/mbt/vpMbGenericTracker.h>

#define GETOPTARGS "i:dcle:mCh"

#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif

namespace
{
void usage(const char *name, const char *badparam)
{
  fprintf(stdout, "\n\
Regression test for vpMbGenericTracker and depth.\n\
\n\
SYNOPSIS\n\
  %s [-i <test image path>] [-c] [-d] [-h] [-l]\n\
     [-e <last frame index>] [-m] [-C]\n",
          name);

  fprintf(stdout, "\n\
OPTIONS:\n\
  -i <input image path>\n\
     Set image input path.\n\
     These images come from ViSP-images-x.y.z.tar.gz available\n\
     on the ViSP website.\n\
     Setting the VISP_INPUT_IMAGE_PATH environment\n\
     variable produces the same behavior as using\n\
     this option.\n\
\n\
  -d\n\
     Turn off the display.\n\
\n\
  -c\n\
     Disable the mouse click. Useful to automate the\n\
     execution of this program without human intervention.\n\
\n\
  -l\n\
     Use the scanline for visibility tests.\n\
\n\
  -e <last frame index>\n\
     Specify the index of the last frame. Once reached, the tracking is stopped.\n\
\n\
  -m\n\
     Set a tracking mask.\n\
\n\
  -C\n\
     Use color images.\n\
\n\
  -h\n\
     Print the help.\n\n");

  if (badparam)
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}

bool getOptions(int argc, const char **argv, std::string &ipath, bool &click_allowed, bool &display, bool &useScanline,
                int &lastFrame, bool &use_mask, bool &use_color_image)
{
  const char *optarg_;
  int c;
  while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {

    switch (c) {
    case 'i':
      ipath = optarg_;
      break;
    case 'c':
      click_allowed = false;
      break;
    case 'd':
      display = false;
      break;
    case 'l':
      useScanline = true;
      break;
    case 'e':
      lastFrame = atoi(optarg_);
      break;
    case 'm':
      use_mask = true;
      break;
    case 'C':
      use_color_image = true;
      break;
    case 'h':
      usage(argv[0], nullptr);
      return false;
      break;

    default:
      usage(argv[0], optarg_);
      return false;
      break;
    }
  }

  if ((c == 1) || (c == -1)) {
    // standalone param or error
    usage(argv[0], nullptr);
    std::cerr << "ERROR: " << std::endl;
    std::cerr << "  Bad argument " << optarg_ << std::endl << std::endl;
    return false;
  }

  return true;
}

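// Read one frame of the simulated sequence: the image, the binary depth map and the ground-truth
// camera pose. The depth file stores the image height and width as little-endian unsigned integers,
// followed by the row-major 16-bit raw depth values; the pose file contains a 4x4 homogeneous
// matrix in row-major text form. The depth map is also back-projected into a 3D point cloud
// expressed in the depth camera frame.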
template <typename Type>
bool read_data(const std::string &input_directory, int cpt, const vpCameraParameters &cam_depth, vpImage<Type> &I,
               vpImage<uint16_t> &I_depth, std::vector<vpColVector> &pointcloud, vpHomogeneousMatrix &cMo)
{
#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
  static_assert(std::is_same<Type, unsigned char>::value || std::is_same<Type, vpRGBa>::value,
                "Template function supports only unsigned char and vpRGBa images!");
#endif
#if VISP_HAVE_DATASET_VERSION >= 0x030600
  std::string ext("png");
#else
  std::string ext("pgm");
#endif
  char buffer[FILENAME_MAX];
  snprintf(buffer, FILENAME_MAX, std::string(input_directory + "/Images/Image_%04d." + ext).c_str(), cpt);
  std::string image_filename = buffer;

  snprintf(buffer, FILENAME_MAX, std::string(input_directory + "/Depth/Depth_%04d.bin").c_str(), cpt);
  std::string depth_filename = buffer;

  snprintf(buffer, FILENAME_MAX, std::string(input_directory + "/CameraPose/Camera_%03d.txt").c_str(), cpt);
  std::string pose_filename = buffer;

  if (!vpIoTools::checkFilename(image_filename) || !vpIoTools::checkFilename(depth_filename) ||
      !vpIoTools::checkFilename(pose_filename))
    return false;

  vpImageIo::read(I, image_filename);

  unsigned int depth_width = 0, depth_height = 0;
  std::ifstream file_depth(depth_filename.c_str(), std::ios::in | std::ios::binary);
  if (!file_depth.is_open())
    return false;

  vpIoTools::readBinaryValueLE(file_depth, depth_height);
  vpIoTools::readBinaryValueLE(file_depth, depth_width);
  I_depth.resize(depth_height, depth_width);
  pointcloud.resize(depth_height * depth_width);

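  // Scale each raw 16-bit depth value to a metric depth Z and back-project pixel (u = j, v = i)
  // into the depth camera frame: convertPoint() returns the normalized coordinates (x, y), so the
  // 3D point is (x*Z, y*Z, Z), stored here as a homogeneous 4-vector.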
  const float depth_scale = 0.000030518f;
  for (unsigned int i = 0; i < I_depth.getHeight(); i++) {
    for (unsigned int j = 0; j < I_depth.getWidth(); j++) {
      vpIoTools::readBinaryValueLE(file_depth, I_depth[i][j]);
      double x = 0.0, y = 0.0, Z = I_depth[i][j] * depth_scale;
      vpPixelMeterConversion::convertPoint(cam_depth, j, i, x, y);
      vpColVector pt3d(4, 1.0);
      pt3d[0] = x * Z;
      pt3d[1] = y * Z;
      pt3d[2] = Z;
      pointcloud[i * I_depth.getWidth() + j] = pt3d;
    }
  }

  std::ifstream file_pose(pose_filename.c_str());
  if (!file_pose.is_open()) {
    return false;
  }

  for (unsigned int i = 0; i < 4; i++) {
    for (unsigned int j = 0; j < 4; j++) {
      file_pose >> cMo[i][j];
    }
  }

  return true;
}

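// Run the tracking test on the whole sequence: configure a generic model-based tracker that uses
// only the dense depth feature, load the CAD models, track every frame and compare the estimated
// pose with the ground-truth pose provided by the dataset.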
template <typename Type>
bool run(vpImage<Type> &I, const std::string &input_directory, bool opt_click_allowed, bool opt_display,
         bool useScanline, int opt_lastFrame, bool use_mask)
{
#if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
  static_assert(std::is_same<Type, unsigned char>::value || std::is_same<Type, vpRGBa>::value,
                "Template function supports only unsigned char and vpRGBa images!");
#endif
  // Initialise a display
#if defined(VISP_HAVE_X11)
  vpDisplayX display1, display2;
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI display1, display2;
#elif defined(HAVE_OPENCV_HIGHGUI)
  vpDisplayOpenCV display1, display2;
#elif defined(VISP_HAVE_D3D9)
  vpDisplayD3D display1, display2;
#elif defined(VISP_HAVE_GTK)
  vpDisplayGTK display1, display2;
#else
  opt_display = false;
#endif

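  // The generic tracker is instantiated with a single camera that provides only a dense depth
  // feature (no edge or KLT features are used in this test).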
  std::vector<int> tracker_type;
  tracker_type.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER);
  vpMbGenericTracker tracker(tracker_type);

#if defined(VISP_HAVE_PUGIXML)
  tracker.loadConfigFile(input_directory + "/Config/chateau_depth.xml");
#else
  // Equivalent parameters set manually, as an example, when the XML configuration file cannot be parsed
  {
    vpCameraParameters cam_depth;
    cam_depth.initPersProjWithoutDistortion(700.0, 700.0, 320.0, 240.0);
    tracker.setCameraParameters(cam_depth);
  }
  // Depth
  tracker.setDepthNormalFeatureEstimationMethod(vpMbtFaceDepthNormal::ROBUST_FEATURE_ESTIMATION);
  tracker.setDepthNormalPclPlaneEstimationMethod(2);
  tracker.setDepthNormalPclPlaneEstimationRansacMaxIter(200);
  tracker.setDepthNormalPclPlaneEstimationRansacThreshold(0.001);
  tracker.setDepthNormalSamplingStep(2, 2);

  tracker.setDepthDenseSamplingStep(4, 4);

#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
  tracker.setKltMaskBorder(5);
#endif

  tracker.setAngleAppear(vpMath::rad(85.0));
  tracker.setAngleDisappear(vpMath::rad(89.0));
  tracker.setNearClippingDistance(0.01);
  tracker.setFarClippingDistance(2.0);
  tracker.setClipping(tracker.getClipping() | vpMbtPolygon::FOV_CLIPPING);
#endif
  tracker.loadModel(input_directory + "/Models/chateau.cao");
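  // A second model (a cube) is loaded alongside the castle; T is the hard-coded transformation
  // that places the cube with respect to the main object frame.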
  vpHomogeneousMatrix T;
  T[0][0] = -1;
  T[0][3] = -0.2;
  T[1][1] = 0;
  T[1][2] = 1;
  T[1][3] = 0.12;
  T[2][1] = 1;
  T[2][2] = 0;
  T[2][3] = -0.15;
  tracker.loadModel(input_directory + "/Models/cube.cao", false, T);
  vpCameraParameters cam_depth;
  tracker.getCameraParameters(cam_depth);
  tracker.setDisplayFeatures(true);
  tracker.setScanLineVisibilityTest(useScanline);

  vpImage<uint16_t> I_depth_raw;
  vpImage<vpRGBa> I_depth;
  vpHomogeneousMatrix cMo_truth;
  std::vector<vpColVector> pointcloud;
  int cpt_frame = 1;
  if (!read_data(input_directory, cpt_frame, cam_depth, I, I_depth_raw, pointcloud, cMo_truth)) {
    std::cerr << "Cannot read first frame!" << std::endl;
    return EXIT_FAILURE;
  }

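  // Optional tracking mask: when -m is passed, only the pixels inside a centered rectangle
  // (roughly from 1/7 to 6/7 of the image in both directions) are taken into account by the tracker.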
  vpImage<bool> mask(I.getHeight(), I.getWidth());
  const double roi_step = 7.0;
  const double roi_step2 = 6.0;
  if (use_mask) {
    mask = false;
    for (unsigned int i = (unsigned int)(I.getRows() / roi_step);
         i < (unsigned int)(I.getRows() * roi_step2 / roi_step); i++) {
      for (unsigned int j = (unsigned int)(I.getCols() / roi_step);
           j < (unsigned int)(I.getCols() * roi_step2 / roi_step); j++) {
        mask[i][j] = true;
      }
    }
    tracker.setMask(mask);
  }

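  // Convert the raw depth map into a displayable color image (depth histogram); this image is
  // used for visualization only.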
  vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
  if (opt_display) {
#ifdef VISP_HAVE_DISPLAY
    display1.init(I, 0, 0, "Image");
    display2.init(I_depth, (int)I.getWidth(), 0, "Depth");
#endif
  }

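  // Known transformation between the color and depth cameras: the ground-truth poses of the
  // dataset are expressed in the color frame, so they are transferred to the depth frame before
  // initializing the tracker and when computing the pose error below.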
  vpHomogeneousMatrix depth_M_color;
  depth_M_color[0][3] = -0.05;
  tracker.initFromPose(I, depth_M_color * cMo_truth);

  bool click = false, quit = false, correct_accuracy = true;
  std::vector<double> vec_err_t, vec_err_tu;
  std::vector<double> time_vec;
  while (read_data(input_directory, cpt_frame, cam_depth, I, I_depth_raw, pointcloud, cMo_truth) && !quit &&
         (opt_lastFrame > 0 ? (int)cpt_frame <= opt_lastFrame : true)) {
    vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

    if (opt_display) {
      vpDisplay::display(I);
      vpDisplay::display(I_depth);
    }

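    // The generic tracker expects its inputs as maps indexed by the camera name. Since only the
    // dense depth tracker is enabled, the point cloud (with its dimensions) is provided for the
    // single "Camera" and the image map is left empty.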
    double t = vpTime::measureTimeMs();
    std::map<std::string, const vpImage<Type> *> mapOfImages;
    std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
    mapOfPointclouds["Camera"] = &pointcloud;
    std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
    mapOfWidths["Camera"] = I_depth.getWidth();
    mapOfHeights["Camera"] = I_depth.getHeight();

    tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
    vpHomogeneousMatrix cMo = tracker.getPose();
    t = vpTime::measureTimeMs() - t;
    time_vec.push_back(t);

    if (opt_display) {
      tracker.display(I_depth, cMo, cam_depth, vpColor::red, 3);
      vpDisplay::displayFrame(I_depth, cMo, cam_depth, 0.05, vpColor::none, 3);

      std::stringstream ss;
      ss << "Frame: " << cpt_frame;
      vpDisplay::displayText(I_depth, 20, 20, ss.str(), vpColor::red);
      ss.str("");
      ss << "Nb features: " << tracker.getError().getRows();
      vpDisplay::displayText(I_depth, 40, 20, ss.str(), vpColor::red);
    }

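    // Regression check: compare the estimated pose with the ground truth expressed in the depth
    // frame. The translation error is the Euclidean norm of the translation difference and the
    // rotation error is the norm of the theta-u difference converted to degrees; both must stay
    // below thresholds that depend on the scanline visibility setting. The check is skipped when
    // a tracking mask is used.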
    vpPoseVector pose_est(cMo);
    vpPoseVector pose_truth(depth_M_color * cMo_truth);
    vpColVector t_est(3), t_truth(3);
    vpColVector tu_est(3), tu_truth(3);
    for (unsigned int i = 0; i < 3; i++) {
      t_est[i] = pose_est[i];
      t_truth[i] = pose_truth[i];
      tu_est[i] = pose_est[i + 3];
      tu_truth[i] = pose_truth[i + 3];
    }

    vpColVector t_err = t_truth - t_est, tu_err = tu_truth - tu_est;
    double t_err2 = sqrt(t_err.sumSquare()), tu_err2 = vpMath::deg(sqrt(tu_err.sumSquare()));
    vec_err_t.push_back(t_err2);
    vec_err_tu.push_back(tu_err2);
    const double t_thresh = useScanline ? 0.003 : 0.002;
    const double tu_thresh = useScanline ? 0.5 : 0.4;
    if (!use_mask && (t_err2 > t_thresh || tu_err2 > tu_thresh)) { // no accuracy test with mask
      std::cerr << "Estimated pose error exceeds the threshold (t_thresh = " << t_thresh
                << ", tu_thresh = " << tu_thresh << ")!" << std::endl;
      std::cout << "t_err: " << t_err2 << " ; tu_err: " << tu_err2 << std::endl;
      correct_accuracy = false;
    }

    if (opt_display) {
      if (use_mask) {
        vpRect roi(vpImagePoint(I.getRows() / roi_step, I.getCols() / roi_step),
                   vpImagePoint(I.getRows() * roi_step2 / roi_step, I.getCols() * roi_step2 / roi_step));
        vpDisplay::displayRectangle(I, roi, vpColor::yellow);
      }

      vpDisplay::flush(I);
      vpDisplay::flush(I_depth);
    }

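    // Handle user interaction when the display is enabled: a left click toggles pause mode
    // (the next calls to getClick() become blocking), a right click stops the test.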
    if (opt_display && opt_click_allowed) {
      vpMouseButton::vpMouseButtonType button;
      if (vpDisplay::getClick(I, button, click)) {
        switch (button) {
        case vpMouseButton::button3:
          quit = !click;
          break;

        case vpMouseButton::button1:
          click = !click;
          break;

        default:
          break;
        }
      }
    }

    cpt_frame++;
  }

  if (!time_vec.empty())
    std::cout << "Computation time, Mean: " << vpMath::getMean(time_vec)
              << " ms ; Median: " << vpMath::getMedian(time_vec) << " ms ; Std: " << vpMath::getStdev(time_vec)
              << " ms" << std::endl;

  if (!vec_err_t.empty())
    std::cout << "Max translation error: " << *std::max_element(vec_err_t.begin(), vec_err_t.end()) << std::endl;

  if (!vec_err_tu.empty())
    std::cout << "Max thetau error: " << *std::max_element(vec_err_tu.begin(), vec_err_tu.end()) << std::endl;

  return correct_accuracy ? EXIT_SUCCESS : EXIT_FAILURE;
}
} // namespace

int main(int argc, const char *argv[])
{
  try {
    std::string env_ipath;
    std::string opt_ipath = "";
    bool opt_click_allowed = true;
    bool opt_display = true;
    bool useScanline = false;
#if defined(__mips__) || defined(__mips) || defined(mips) || defined(__MIPS__)
    // To avoid Debian test timeout
    int opt_lastFrame = 5;
#else
    int opt_lastFrame = -1;
#endif
    bool use_mask = false;
    bool use_color_image = false;

    // Get the visp-images-data package path or VISP_INPUT_IMAGE_PATH
    // environment variable value
    env_ipath = vpIoTools::getViSPImagesDataPath();

    // Read the command line options
    if (!getOptions(argc, argv, opt_ipath, opt_click_allowed, opt_display, useScanline, opt_lastFrame, use_mask,
                    use_color_image)) {
      return EXIT_FAILURE;
    }

    std::cout << "useScanline: " << useScanline << std::endl;
    std::cout << "use_mask: " << use_mask << std::endl;
    std::cout << "use_color_image: " << use_color_image << std::endl;

    // Test if an input path is set
    if (opt_ipath.empty() && env_ipath.empty()) {
      usage(argv[0], nullptr);
      std::cerr << std::endl << "ERROR:" << std::endl;
      std::cerr << "  Use -i <visp image path> option or set VISP_INPUT_IMAGE_PATH " << std::endl
                << "  environment variable to specify the location of the " << std::endl
                << "  image path where test images are located." << std::endl
                << std::endl;

      return EXIT_FAILURE;
    }

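    // The test data is expected in the mbt-depth/Castle-simu subfolder of the ViSP images dataset.
    // If the folder is missing, the test is skipped and reported as successful.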
    std::string input_directory =
      vpIoTools::createFilePath(!opt_ipath.empty() ? opt_ipath : env_ipath, "mbt-depth/Castle-simu");
    if (!vpIoTools::checkDirectory(input_directory)) {
      std::cerr << "ViSP-images does not contain the folder: " << input_directory << "!" << std::endl;
      return EXIT_SUCCESS;
    }

    if (use_color_image) {
      vpImage<vpRGBa> I_color;
      return run(I_color, input_directory, opt_click_allowed, opt_display, useScanline, opt_lastFrame, use_mask);
    }
    else {
      vpImage<unsigned char> I_gray;
      return run(I_gray, input_directory, opt_click_allowed, opt_display, useScanline, opt_lastFrame, use_mask);
    }
  }
  catch (const vpException &e) {
    std::cout << "Caught an exception: " << e << std::endl;
    return EXIT_FAILURE;
  }
}
#elif !(defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV))
int main()
{
  std::cout << "Cannot run this example: install Lapack, Eigen3 or OpenCV" << std::endl;
  return EXIT_SUCCESS;
}
#else
int main()
{
  std::cout << "Enable MBT module (VISP_HAVE_MODULE_MBT) to launch this test." << std::endl;
  return EXIT_SUCCESS;
}
#endif