vpMbDepthNormalTracker.cpp
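
A minimal usage sketch (editorial addition, not part of the original file), showing how this tracker is typically driven. It assumes a grayscale image I, an initial pose guess cMo_init, and a PCL point cloud acquired at each iteration through a hypothetical grabber; the configuration and model file names are placeholders:

  #include <visp3/mbt/vpMbDepthNormalTracker.h>

  vpMbDepthNormalTracker tracker;
  tracker.loadConfigFile("depth_config.xml"); // camera parameters, clipping, depth-normal settings (placeholder name)
  tracker.loadModel("object.cao");            // CAD model of the tracked object (placeholder name)
  tracker.setDepthNormalSamplingStep(2, 2);   // sub-sample the depth map in x and y
  tracker.initFromPose(I, cMo_init);          // initial pose of the object in the camera frame
  while (grabber.acquire(point_cloud)) {      // hypothetical grabber filling a pcl::PointCloud<pcl::PointXYZ>::ConstPtr
    tracker.track(point_cloud);               // segment faces, run the virtual visual servoing, update visibility
    vpHomogeneousMatrix cMo;
    tracker.getPose(cMo);                     // current pose estimate
  }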
1 /****************************************************************************
2  *
3  * ViSP, open source Visual Servoing Platform software.
4  * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
5  *
6  * This software is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  * See the file LICENSE.txt at the root directory of this source
11  * distribution for additional information about the GNU GPL.
12  *
13  * For using ViSP with software that can not be combined with the GNU
14  * GPL, please contact Inria about acquiring a ViSP Professional
15  * Edition License.
16  *
17  * See https://visp.inria.fr for more information.
18  *
19  * This software was developed at:
20  * Inria Rennes - Bretagne Atlantique
21  * Campus Universitaire de Beaulieu
22  * 35042 Rennes Cedex
23  * France
24  *
25  * If you have questions regarding the use of this file, please contact
26  * Inria at visp@inria.fr
27  *
28  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
29  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
30  *
31  * Description:
32  * Model-based tracker using depth normal features.
33  *
34 *****************************************************************************/
35 
36 #include <iostream>
37 
38 #include <visp3/core/vpConfig.h>
39 
40 #if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
41 #include <pcl/point_cloud.h>
42 #endif
43 
44 #include <visp3/core/vpDisplay.h>
45 #include <visp3/core/vpExponentialMap.h>
46 #include <visp3/core/vpTrackingException.h>
47 #include <visp3/mbt/vpMbDepthNormalTracker.h>
48 #include <visp3/mbt/vpMbtXmlGenericParser.h>
49 
50 #if DEBUG_DISPLAY_DEPTH_NORMAL
51 #include <visp3/gui/vpDisplayGDI.h>
52 #include <visp3/gui/vpDisplayX.h>
53 #endif
54 
55 BEGIN_VISP_NAMESPACE
56 vpMbDepthNormalTracker::vpMbDepthNormalTracker()
57  : m_depthNormalFeatureEstimationMethod(vpMbtFaceDepthNormal::ROBUST_FEATURE_ESTIMATION),
58  m_depthNormalHiddenFacesDisplay(), m_depthNormalListOfActiveFaces(), m_depthNormalListOfDesiredFeatures(),
59  m_depthNormalFaces(), m_depthNormalPclPlaneEstimationMethod(2), m_depthNormalPclPlaneEstimationRansacMaxIter(200),
60  m_depthNormalPclPlaneEstimationRansacThreshold(0.001), m_depthNormalSamplingStepX(2), m_depthNormalSamplingStepY(2),
61  m_depthNormalUseRobust(false), m_error_depthNormal(), m_featuresToBeDisplayedDepthNormal(), m_L_depthNormal(),
62  m_robust_depthNormal(), m_w_depthNormal(), m_weightedError_depthNormal()
63 #if DEBUG_DISPLAY_DEPTH_NORMAL
64  ,
65  m_debugDisp_depthNormal(nullptr), m_debugImage_depthNormal()
66 #endif
67 {
68 #ifdef VISP_HAVE_OGRE
69  faces.getOgreContext()->setWindowName("MBT Depth");
70 #endif
71 
72 #if defined(VISP_HAVE_X11) && DEBUG_DISPLAY_DEPTH_NORMAL
73  m_debugDisp_depthNormal = new vpDisplayX;
74 #elif defined(VISP_HAVE_GDI) && DEBUG_DISPLAY_DEPTH_NORMAL
75  m_debugDisp_depthNormal = new vpDisplayGDI;
76 #endif
77 }
78 
79 vpMbDepthNormalTracker::~vpMbDepthNormalTracker()
80 {
81  for (size_t i = 0; i < m_depthNormalFaces.size(); i++) {
82  delete m_depthNormalFaces[i];
83  }
84 }
85 
86 void vpMbDepthNormalTracker::addFace(vpMbtPolygon &polygon, bool alreadyClose)
87 {
88  if (polygon.nbpt < 3) {
89  return;
90  }
91 
92  // Copy hidden faces
93  m_depthNormalHiddenFacesDisplay = faces;
94 
95  vpMbtFaceDepthNormal *normal_face = new vpMbtFaceDepthNormal;
96  normal_face->m_hiddenFace = &faces;
97  normal_face->m_polygon = &polygon;
98  normal_face->m_cam = m_cam;
99  normal_face->m_useScanLine = useScanLine;
100  normal_face->m_clippingFlag = clippingFlag;
101  normal_face->m_distNearClip = distNearClip;
102  normal_face->m_distFarClip = distFarClip;
103  normal_face->setFeatureEstimationMethod(m_depthNormalFeatureEstimationMethod);
104  normal_face->setPclPlaneEstimationMethod(m_depthNormalPclPlaneEstimationMethod);
105  normal_face->setPclPlaneEstimationRansacMaxIter(m_depthNormalPclPlaneEstimationRansacMaxIter);
106  normal_face->setPclPlaneEstimationRansacThreshold(m_depthNormalPclPlaneEstimationRansacThreshold);
107 
108  // Add lines that compose the face
109  unsigned int nbpt = polygon.getNbPoint();
110  if (nbpt > 0) {
111  for (unsigned int i = 0; i < nbpt - 1; i++) {
112  normal_face->addLine(polygon.p[i], polygon.p[i + 1], &m_depthNormalHiddenFacesDisplay, m_rand, polygon.getIndex(),
113  polygon.getName());
114  }
115 
116  if (!alreadyClose) {
117  // Add last line that closes the face
118  normal_face->addLine(polygon.p[nbpt - 1], polygon.p[0], &m_depthNormalHiddenFacesDisplay, m_rand,
119  polygon.getIndex(), polygon.getName());
120  }
121  }
122 
123  // Construct a vpPlane in object frame
124  vpPoint pts[3];
125  pts[0] = polygon.p[0];
126  pts[1] = polygon.p[1];
127  pts[2] = polygon.p[2];
128  normal_face->m_planeObject = vpPlane(pts[0], pts[1], pts[2], vpPlane::object_frame);
129 
130  m_depthNormalFaces.push_back(normal_face);
131 }
132 
133 void vpMbDepthNormalTracker::computeVisibility(unsigned int width, unsigned int height)
134 {
135  bool changed = false;
136  faces.setVisible(width, height, m_cam, m_cMo, angleAppears, angleDisappears, changed);
137 
138  if (useScanLine) {
139  // if (clippingFlag <= 2) {
140  // cam.computeFov(width, height);
141  // }
142 
143  faces.computeClippedPolygons(m_cMo, m_cam);
144  faces.computeScanLineRender(m_cam, width, height);
145  }
146 
147  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
148  it != m_depthNormalFaces.end(); ++it) {
149  vpMbtFaceDepthNormal *face_normal = *it;
150  face_normal->computeVisibility();
151  }
152 }
153 
154 void vpMbDepthNormalTracker::computeVVS()
155 {
156  double normRes = 0;
157  double normRes_1 = -1;
158  unsigned int iter = 0;
159 
160  computeVVSInit();
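 // [editor note] Each active face contributes a 3-component desired feature (the plane
 // parameters estimated from the depth data), hence 3 values per face below.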
161  unsigned int nb_features = (unsigned int)(3 * m_depthNormalListOfDesiredFeatures.size());
162 
163  vpColVector error_prev(nb_features);
164  vpMatrix LTL;
165  vpColVector LTR, v;
166 
167  double mu = m_initialMu;
168  vpHomogeneousMatrix cMo_prev;
169 
170  bool isoJoIdentity = m_isoJoIdentity; // Backup since it can be modified if L is not full rank
171  if (isoJoIdentity)
172  oJo.eye();
173 
174  vpVelocityTwistMatrix cVo;
175  vpMatrix L_true, LVJ_true;
176 
177  while (std::fabs(normRes_1 - normRes) > m_stopCriteriaEpsilon && (iter < m_maxIter)) {
178  computeVVSInteractionMatrixAndResidu();
179 
180  bool reStartFromLastIncrement = false;
181  computeVVSCheckLevenbergMarquardt(iter, m_error_depthNormal, error_prev, cMo_prev, mu, reStartFromLastIncrement);
182 
183  if (!reStartFromLastIncrement) {
184  computeVVSWeights(m_robust_depthNormal, m_error_depthNormal, m_w_depthNormal);
186 
187  if (computeCovariance) {
188  L_true = m_L_depthNormal;
189  if (!isoJoIdentity) {
190  cVo.build(m_cMo);
191  LVJ_true = (m_L_depthNormal * (cVo * oJo));
192  }
193  }
194 
195  // Compute DoF only once
196  if (iter == 0) {
197  // If all 6 DoF should be estimated, we check whether the interaction
198  // matrix is full rank. If not, we automatically remove the DoF that
199  // cannot be estimated. This is particularly useful when considering
200  // circles (rank 5) and cylinders (rank 4).
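 // [editor note] vpMatrix::kernel(K) returns a basis of the null space of (L * cVo) in the
 // rows of K, so K.AtA() = K^t * K projects onto the unobservable directions and
 // oJo = I6 - K^t * K keeps only the DoF that the depth-normal features can constrain.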
201  if (isoJoIdentity) {
202  cVo.build(m_cMo);
203 
204  vpMatrix K; // kernel
205  unsigned int rank = (m_L_depthNormal * cVo).kernel(K);
206  if (rank == 0) {
207  throw vpException(vpException::fatalError, "Rank=0, cannot estimate the pose !");
208  }
209 
210  if (rank != 6) {
211  vpMatrix I; // Identity
212  I.eye(6);
213  oJo = I - K.AtA();
214 
215  isoJoIdentity = false;
216  }
217  }
218  }
219 
220  double num = 0.0, den = 0.0;
221  for (unsigned int i = 0; i < m_L_depthNormal.getRows(); i++) {
222  // Compute weighted errors and stop criteria
223  m_weightedError_depthNormal[i] = m_w_depthNormal[i] * m_error_depthNormal[i];
224  num += m_w_depthNormal[i] * vpMath::sqr(m_error_depthNormal[i]);
225  den += m_w_depthNormal[i];
226 
227  // weight interaction matrix
228  for (unsigned int j = 0; j < 6; j++) {
229  m_L_depthNormal[i][j] *= m_w_depthNormal[i];
230  }
231  }
232 
233  computeVVSPoseEstimation(isoJoIdentity, iter, m_L_depthNormal, LTL, m_weightedError_depthNormal,
234  m_error_depthNormal, error_prev, LTR, mu, v);
235 
236  cMo_prev = m_cMo;
237  m_cMo = vpExponentialMap::direct(v).inverse() * m_cMo;
238 
239  normRes_1 = normRes;
240  normRes = sqrt(num / den);
241  }
242 
243  iter++;
244  }
245 
246  computeCovarianceMatrixVVS(isoJoIdentity, m_w_depthNormal, cMo_prev, L_true, LVJ_true, m_error_depthNormal);
247 }
248 
249 void vpMbDepthNormalTracker::computeVVSInit()
250 {
251  unsigned int nb_features = (unsigned int)(3 * m_depthNormalListOfDesiredFeatures.size());
252 
253  m_L_depthNormal.resize(nb_features, 6, false, false);
254  m_error_depthNormal.resize(nb_features, false);
255  m_weightedError_depthNormal.resize(nb_features, false);
256 
257  m_w_depthNormal.resize(nb_features, false);
258  m_w_depthNormal = 1;
259 
261 }
262 
263 void vpMbDepthNormalTracker::computeVVSInteractionMatrixAndResidu()
264 {
265  unsigned int cpt = 0;
266  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalListOfActiveFaces.begin();
267  it != m_depthNormalListOfActiveFaces.end(); ++it) {
268  vpMatrix L_face;
269  vpColVector features_face;
270  (*it)->computeInteractionMatrix(m_cMo, L_face, features_face);
271 
272  vpColVector face_error = features_face - m_depthNormalListOfDesiredFeatures[(size_t)cpt];
273 
274  m_error_depthNormal.insert(cpt * 3, face_error);
275  m_L_depthNormal.insert(L_face, cpt * 3, 0);
276 
277  cpt++;
278  }
279 }
280 
281 void vpMbDepthNormalTracker::display(const vpImage<unsigned char> &I, const vpHomogeneousMatrix &cMo,
282  const vpCameraParameters &cam, const vpColor &col, unsigned int thickness,
283  bool displayFullModel)
284 {
285  std::vector<std::vector<double> > models =
286  vpMbDepthNormalTracker::getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel);
287 
288  for (size_t i = 0; i < models.size(); i++) {
289  if (vpMath::equal(models[i][0], 0)) {
290  vpImagePoint ip1(models[i][1], models[i][2]);
291  vpImagePoint ip2(models[i][3], models[i][4]);
292  vpDisplay::displayLine(I, ip1, ip2, col, thickness);
293  }
294  }
295 
296  if (displayFeatures) {
297  std::vector<std::vector<double> > features = getFeaturesForDisplayDepthNormal();
298  for (size_t i = 0; i < features.size(); i++) {
299  vpImagePoint im_centroid(features[i][1], features[i][2]);
300  vpImagePoint im_extremity(features[i][3], features[i][4]);
301  bool desired = vpMath::equal(features[i][0], 2);
302  vpDisplay::displayArrow(I, im_centroid, im_extremity, desired ? vpColor::blue : vpColor::red, 4, 2, thickness);
303  }
304  }
305 }
306 
307 void vpMbDepthNormalTracker::display(const vpImage<vpRGBa> &I, const vpHomogeneousMatrix &cMo,
308  const vpCameraParameters &cam, const vpColor &col, unsigned int thickness,
309  bool displayFullModel)
310 {
311  std::vector<std::vector<double> > models =
312  vpMbDepthNormalTracker::getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel);
313 
314  for (size_t i = 0; i < models.size(); i++) {
315  if (vpMath::equal(models[i][0], 0)) {
316  vpImagePoint ip1(models[i][1], models[i][2]);
317  vpImagePoint ip2(models[i][3], models[i][4]);
318  vpDisplay::displayLine(I, ip1, ip2, col, thickness);
319  }
320  }
321 
322  if (displayFeatures) {
323  std::vector<std::vector<double> > features = getFeaturesForDisplayDepthNormal();
324  for (size_t i = 0; i < features.size(); i++) {
325  vpImagePoint im_centroid(features[i][1], features[i][2]);
326  vpImagePoint im_extremity(features[i][3], features[i][4]);
327  bool desired = vpMath::equal(features[i][0], 2);
328  vpDisplay::displayArrow(I, im_centroid, im_extremity, desired ? vpColor::blue : vpColor::red, 4, 2, thickness);
329  }
330  }
331 }
332 
333 std::vector<std::vector<double> > vpMbDepthNormalTracker::getFeaturesForDisplayDepthNormal()
334 {
335  std::vector<std::vector<double> > features;
336 
337  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
338  it != m_depthNormalFaces.end(); ++it) {
339  vpMbtFaceDepthNormal *face_normal = *it;
340  std::vector<std::vector<double> > currentFeatures = face_normal->getFeaturesForDisplay(m_cMo, m_cam);
341  features.insert(features.end(), currentFeatures.begin(), currentFeatures.end());
342  }
343 
344  return features;
345 }
346 
362 std::vector<std::vector<double> > vpMbDepthNormalTracker::getModelForDisplay(unsigned int width, unsigned int height,
363  const vpHomogeneousMatrix &cMo,
364  const vpCameraParameters &cam,
365  bool displayFullModel)
366 {
367  std::vector<std::vector<double> > models;
368 
369  vpCameraParameters c = cam;
370 
371  bool changed = false;
372  faces.setVisible(width, height, c, cMo, angleAppears, angleDisappears, changed);
373 
374  if (useScanLine) {
375  c.computeFov(width, height);
376 
377  faces.computeClippedPolygons(cMo, c);
378  faces.computeScanLineRender(c, width, height);
379  }
380 
381  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
382  it != m_depthNormalFaces.end(); ++it) {
383  vpMbtFaceDepthNormal *face_normal = *it;
384  std::vector<std::vector<double> > modelLines =
385  face_normal->getModelForDisplay(width, height, cMo, cam, displayFullModel);
386  models.insert(models.end(), modelLines.begin(), modelLines.end());
387  }
388 
389  return models;
390 }
391 
392 void vpMbDepthNormalTracker::init(const vpImage<unsigned char> &I)
393 {
394  if (!modelInitialised) {
395  throw vpException(vpException::fatalError, "model not initialized");
396  }
397 
398  bool reInitialisation = false;
399  if (!useOgre) {
400  faces.setVisible(I.getWidth(), I.getHeight(), m_cam, m_cMo, angleAppears, angleDisappears, reInitialisation);
401  }
402  else {
403 #ifdef VISP_HAVE_OGRE
404  if (!faces.isOgreInitialised()) {
405  faces.setBackgroundSizeOgre(I.getHeight(), I.getWidth());
406  faces.setOgreShowConfigDialog(ogreShowConfigDialog);
407  faces.initOgre(m_cam);
408  // Turn off Ogre config dialog display for the next call to this
409  // function since settings are saved in the ogre.cfg file and used
410  // during the next call
411  ogreShowConfigDialog = false;
412  }
413 
414  faces.setVisibleOgre(I.getWidth(), I.getHeight(), m_cam, m_cMo, angleAppears, angleDisappears, reInitialisation);
415 #else
416  faces.setVisible(I.getWidth(), I.getHeight(), m_cam, m_cMo, angleAppears, angleDisappears, reInitialisation);
417 #endif
418  }
419 
420  if (useScanLine || clippingFlag > 3)
421  m_cam.computeFov(I.getWidth(), I.getHeight());
422 
423  computeVisibility(I.getWidth(), I.getHeight());
424 }
425 
426 void vpMbDepthNormalTracker::loadConfigFile(const std::string &configFile, bool verbose)
427 {
428 #if defined(VISP_HAVE_PUGIXML)
429  vpMbtXmlGenericParser xmlp(vpMbtXmlGenericParser::DEPTH_NORMAL_PARSER);
430  xmlp.setVerbose(verbose);
431  xmlp.setCameraParameters(m_cam);
432  xmlp.setAngleAppear(vpMath::deg(angleAppears));
433  xmlp.setAngleDisappear(vpMath::deg(angleDisappears));
434 
435  xmlp.setDepthNormalFeatureEstimationMethod(m_depthNormalFeatureEstimationMethod);
436  xmlp.setDepthNormalPclPlaneEstimationMethod(m_depthNormalPclPlaneEstimationMethod);
437  xmlp.setDepthNormalPclPlaneEstimationRansacMaxIter(m_depthNormalPclPlaneEstimationRansacMaxIter);
438  xmlp.setDepthNormalPclPlaneEstimationRansacThreshold(m_depthNormalPclPlaneEstimationRansacThreshold);
439  xmlp.setDepthNormalSamplingStepX(m_depthNormalSamplingStepX);
440  xmlp.setDepthNormalSamplingStepY(m_depthNormalSamplingStepY);
441 
442  try {
443  if (verbose) {
444  std::cout << " *********** Parsing XML for Mb Depth Tracker ************ " << std::endl;
445  }
446  xmlp.parse(configFile);
447  }
448  catch (const vpException &e) {
449  std::cerr << "Exception: " << e.what() << std::endl;
450  throw vpException(vpException::ioError, "Cannot open XML file \"%s\"", configFile.c_str());
451  }
452 
453  vpCameraParameters camera;
454  xmlp.getCameraParameters(camera);
455  setCameraParameters(camera);
456 
457  angleAppears = vpMath::rad(xmlp.getAngleAppear());
458  angleDisappears = vpMath::rad(xmlp.getAngleDisappear());
459 
460  if (xmlp.hasNearClippingDistance())
461  setNearClippingDistance(xmlp.getNearClippingDistance());
462 
463  if (xmlp.hasFarClippingDistance())
464  setFarClippingDistance(xmlp.getFarClippingDistance());
465 
466  if (xmlp.getFovClipping())
467  setClipping(clippingFlag | vpPolygon3D::FOV_CLIPPING);
468 
469  setDepthNormalFeatureEstimationMethod(xmlp.getDepthNormalFeatureEstimationMethod());
470  setDepthNormalPclPlaneEstimationMethod(xmlp.getDepthNormalPclPlaneEstimationMethod());
471  setDepthNormalPclPlaneEstimationRansacMaxIter(xmlp.getDepthNormalPclPlaneEstimationRansacMaxIter());
472  setDepthNormalPclPlaneEstimationRansacThreshold(xmlp.getDepthNormalPclPlaneEstimationRansacThreshold());
473  setDepthNormalSamplingStep(xmlp.getDepthNormalSamplingStepX(), xmlp.getDepthNormalSamplingStepY());
474 #else
475  (void)configFile;
476  (void)verbose;
477  throw(vpException(vpException::ioError, "vpMbDepthNormalTracker::loadConfigFile() needs pugixml built-in 3rdparty"));
478 #endif
479 }
480 
481 void vpMbDepthNormalTracker::reInitModel(const vpImage<unsigned char> &I, const std::string &cad_name,
482  const vpHomogeneousMatrix &cMo, bool verbose)
483 {
484  m_cMo.eye();
485 
486  for (size_t i = 0; i < m_depthNormalFaces.size(); i++) {
487  delete m_depthNormalFaces[i];
488  m_depthNormalFaces[i] = nullptr;
489  }
490 
491  m_depthNormalFaces.clear();
492 
493  loadModel(cad_name, verbose);
494  initFromPose(I, cMo);
495 }
496 
497 #if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
498 void vpMbDepthNormalTracker::reInitModel(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud,
499  const std::string &cad_name, const vpHomogeneousMatrix &cMo, bool verbose)
500 {
501  vpImage<unsigned char> I_dummy(point_cloud->height, point_cloud->width);
502  reInitModel(I_dummy, cad_name, cMo, verbose);
503 }
504 
505 #endif
506 
507 void vpMbDepthNormalTracker::resetTracker()
508 {
509  m_cMo.eye();
510 
511  for (std::vector<vpMbtFaceDepthNormal *>::iterator it = m_depthNormalFaces.begin(); it != m_depthNormalFaces.end();
512  ++it) {
513  vpMbtFaceDepthNormal *normal_face = *it;
514  delete normal_face;
515  normal_face = nullptr;
516  }
517 
518  m_depthNormalFaces.clear();
519 
520  m_computeInteraction = true;
521  computeCovariance = false;
522 
523  angleAppears = vpMath::rad(89);
524  angleDisappears = vpMath::rad(89);
525 
526  clippingFlag = vpPolygon3D::NO_CLIPPING;
527 
528  m_lambda = 1.0;
529 
530  faces.reset();
531 
532  m_optimizationMethod = vpMbTracker::GAUSS_NEWTON_OPT;
533 
534  useScanLine = false;
535 
536 #ifdef VISP_HAVE_OGRE
537  useOgre = false;
538 #endif
539 
542 }
543 
544 void vpMbDepthNormalTracker::setOgreVisibilityTest(const bool &v)
545 {
546  vpMbTracker::setOgreVisibilityTest(v);
547 #ifdef VISP_HAVE_OGRE
548  faces.getOgreContext()->setWindowName("MBT Depth");
549 #endif
550 }
551 
552 void vpMbDepthNormalTracker::setPose(const vpImage<unsigned char> &I, const vpHomogeneousMatrix &cdMo)
553 {
554  m_cMo = cdMo;
555  init(I);
556 }
557 
558 void vpMbDepthNormalTracker::setPose(const vpImage<vpRGBa> &I_color, const vpHomogeneousMatrix &cdMo)
559 {
560  m_cMo = cdMo;
561  vpImageConvert::convert(I_color, m_I);
562  init(m_I);
563 }
564 
565 #if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
566 void vpMbDepthNormalTracker::setPose(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud,
567  const vpHomogeneousMatrix &cdMo)
568 {
569  vpImage<unsigned char> I_dummy(point_cloud->height, point_cloud->width);
570  m_cMo = cdMo;
571  init(I_dummy);
572 }
573 #endif
574 
575 void vpMbDepthNormalTracker::setScanLineVisibilityTest(const bool &v)
576 {
577  vpMbTracker::setScanLineVisibilityTest(v);
578 
579  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
580  it != m_depthNormalFaces.end(); ++it) {
581  (*it)->setScanLineVisibilityTest(v);
582  }
583 }
584 
585 void vpMbDepthNormalTracker::setUseDepthNormalTracking(const std::string &name, const bool &useDepthNormalTracking)
586 {
587  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
588  it != m_depthNormalFaces.end(); ++it) {
589  vpMbtFaceDepthNormal *face = *it;
590  if (face->m_polygon->getName() == name) {
591  face->setTracked(useDepthNormalTracking);
592  }
593  }
594 }
595 
595 
596 void vpMbDepthNormalTracker::testTracking() { }
597 
598 #if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
599 void vpMbDepthNormalTracker::segmentPointCloud(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud)
600 {
601  m_depthNormalListOfActiveFaces.clear();
602  m_depthNormalListOfDesiredFeatures.clear();
603 
604 #if DEBUG_DISPLAY_DEPTH_NORMAL
605  if (!m_debugDisp_depthNormal->isInitialised()) {
606  m_debugImage_depthNormal.resize(point_cloud->height, point_cloud->width);
607  m_debugDisp_depthNormal->init(m_debugImage_depthNormal, 50, 0, "Debug display normal depth tracker");
608  }
609 
610  m_debugImage_depthNormal = 0;
611  std::vector<std::vector<vpImagePoint> > roiPts_vec;
612 #endif
613 
614  for (std::vector<vpMbtFaceDepthNormal *>::iterator it = m_depthNormalFaces.begin(); it != m_depthNormalFaces.end();
615  ++it) {
616  vpMbtFaceDepthNormal *face = *it;
617 
618  if (face->isVisible() && face->isTracked()) {
619  vpColVector desired_features;
620 
621 #if DEBUG_DISPLAY_DEPTH_NORMAL
622  std::vector<std::vector<vpImagePoint> > roiPts_vec_;
623 #endif
624  if (face->computeDesiredFeatures(m_cMo, point_cloud->width, point_cloud->height, point_cloud, desired_features,
625  m_depthNormalSamplingStepX, m_depthNormalSamplingStepY
626 #if DEBUG_DISPLAY_DEPTH_NORMAL
627  ,
628  m_debugImage_depthNormal, roiPts_vec_
629 #endif
630  ,
631  m_mask)) {
632  m_depthNormalListOfDesiredFeatures.push_back(desired_features);
633  m_depthNormalListOfActiveFaces.push_back(face);
634 
635 #if DEBUG_DISPLAY_DEPTH_NORMAL
636  roiPts_vec.insert(roiPts_vec.end(), roiPts_vec_.begin(), roiPts_vec_.end());
637 #endif
638  }
639  }
640  }
641 
642 #if DEBUG_DISPLAY_DEPTH_NORMAL
643  vpDisplay::display(m_debugImage_depthNormal);
644 
645  for (size_t i = 0; i < roiPts_vec.size(); i++) {
646  if (roiPts_vec[i].empty())
647  continue;
648 
649  for (size_t j = 0; j < roiPts_vec[i].size() - 1; j++) {
650  vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][j], roiPts_vec[i][j + 1], vpColor::red, 2);
651  }
652  vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][0], roiPts_vec[i][roiPts_vec[i].size() - 1],
653  vpColor::red, 2);
654  }
655 
656  vpDisplay::flush(m_debugImage_depthNormal);
657 #endif
658 }
659 #endif
660 
661 void vpMbDepthNormalTracker::segmentPointCloud(const std::vector<vpColVector> &point_cloud, unsigned int width,
662  unsigned int height)
663 {
664  m_depthNormalListOfActiveFaces.clear();
665  m_depthNormalListOfDesiredFeatures.clear();
666 
667 #if DEBUG_DISPLAY_DEPTH_NORMAL
668  if (!m_debugDisp_depthNormal->isInitialised()) {
669  m_debugImage_depthNormal.resize(height, width);
670  m_debugDisp_depthNormal->init(m_debugImage_depthNormal, 50, 0, "Debug display normal depth tracker");
671  }
672 
673  m_debugImage_depthNormal = 0;
674  std::vector<std::vector<vpImagePoint> > roiPts_vec;
675 #endif
676 
677  for (std::vector<vpMbtFaceDepthNormal *>::iterator it = m_depthNormalFaces.begin(); it != m_depthNormalFaces.end();
678  ++it) {
679  vpMbtFaceDepthNormal *face = *it;
680 
681  if (face->isVisible() && face->isTracked()) {
682  vpColVector desired_features;
683 
684 #if DEBUG_DISPLAY_DEPTH_NORMAL
685  std::vector<std::vector<vpImagePoint> > roiPts_vec_;
686 #endif
687 
688  if (face->computeDesiredFeatures(m_cMo, width, height, point_cloud, desired_features, m_depthNormalSamplingStepX,
689  m_depthNormalSamplingStepY
690 #if DEBUG_DISPLAY_DEPTH_NORMAL
691  ,
692  m_debugImage_depthNormal, roiPts_vec_
693 #endif
694  ,
695  m_mask)) {
696  m_depthNormalListOfDesiredFeatures.push_back(desired_features);
697  m_depthNormalListOfActiveFaces.push_back(face);
698 
699 #if DEBUG_DISPLAY_DEPTH_NORMAL
700  roiPts_vec.insert(roiPts_vec.end(), roiPts_vec_.begin(), roiPts_vec_.end());
701 #endif
702  }
703  }
704  }
705 
706 #if DEBUG_DISPLAY_DEPTH_NORMAL
707  vpDisplay::display(m_debugImage_depthNormal);
708 
709  for (size_t i = 0; i < roiPts_vec.size(); i++) {
710  if (roiPts_vec[i].empty())
711  continue;
712 
713  for (size_t j = 0; j < roiPts_vec[i].size() - 1; j++) {
714  vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][j], roiPts_vec[i][j + 1], vpColor::red, 2);
715  }
716  vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][0], roiPts_vec[i][roiPts_vec[i].size() - 1],
717  vpColor::red, 2);
718  }
719 
720  vpDisplay::flush(m_debugImage_depthNormal);
721 #endif
722 }
723 
724 void vpMbDepthNormalTracker::segmentPointCloud(const vpMatrix &point_cloud, unsigned int width,
725  unsigned int height)
726 {
727  m_depthNormalListOfActiveFaces.clear();
728  m_depthNormalListOfDesiredFeatures.clear();
729 
730 #if DEBUG_DISPLAY_DEPTH_NORMAL
731  if (!m_debugDisp_depthNormal->isInitialised()) {
732  m_debugImage_depthNormal.resize(height, width);
733  m_debugDisp_depthNormal->init(m_debugImage_depthNormal, 50, 0, "Debug display normal depth tracker");
734  }
735 
736  m_debugImage_depthNormal = 0;
737  std::vector<std::vector<vpImagePoint> > roiPts_vec;
738 #endif
739 
740  for (std::vector<vpMbtFaceDepthNormal *>::iterator it = m_depthNormalFaces.begin(); it != m_depthNormalFaces.end();
741  ++it) {
742  vpMbtFaceDepthNormal *face = *it;
743 
744  if (face->isVisible() && face->isTracked()) {
745  vpColVector desired_features;
746 
747 #if DEBUG_DISPLAY_DEPTH_NORMAL
748  std::vector<std::vector<vpImagePoint> > roiPts_vec_;
749 #endif
750 
751  if (face->computeDesiredFeatures(m_cMo, width, height, point_cloud, desired_features, m_depthNormalSamplingStepX,
752  m_depthNormalSamplingStepY
753 #if DEBUG_DISPLAY_DEPTH_NORMAL
754  ,
755  m_debugImage_depthNormal, roiPts_vec_
756 #endif
757  ,
758  m_mask)) {
759  m_depthNormalListOfDesiredFeatures.push_back(desired_features);
760  m_depthNormalListOfActiveFaces.push_back(face);
761 
762 #if DEBUG_DISPLAY_DEPTH_NORMAL
763  roiPts_vec.insert(roiPts_vec.end(), roiPts_vec_.begin(), roiPts_vec_.end());
764 #endif
765  }
766  }
767  }
768 
769 #if DEBUG_DISPLAY_DEPTH_NORMAL
770  vpDisplay::display(m_debugImage_depthNormal);
771 
772  for (size_t i = 0; i < roiPts_vec.size(); i++) {
773  if (roiPts_vec[i].empty())
774  continue;
775 
776  for (size_t j = 0; j < roiPts_vec[i].size() - 1; j++) {
777  vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][j], roiPts_vec[i][j + 1], vpColor::red, 2);
778  }
779  vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][0], roiPts_vec[i][roiPts_vec[i].size() - 1],
780  vpColor::red, 2);
781  }
782 
783  vpDisplay::flush(m_debugImage_depthNormal);
784 #endif
785 }
786 
787 void vpMbDepthNormalTracker::setCameraParameters(const vpCameraParameters &cam)
788 {
789  m_cam = cam;
790 
791  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
792  it != m_depthNormalFaces.end(); ++it) {
793  (*it)->setCameraParameters(cam);
794  }
795 }
796 
797 void vpMbDepthNormalTracker::setDepthNormalFaceCentroidMethod(const vpMbtFaceDepthNormal::vpFaceCentroidType &method)
798 {
799  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
800  it != m_depthNormalFaces.end(); ++it) {
801  (*it)->setFaceCentroidMethod(method);
802  }
803 }
804 
805 void vpMbDepthNormalTracker::setDepthNormalFeatureEstimationMethod(
806  const vpMbtFaceDepthNormal::vpFeatureEstimationType &method)
807 {
808  m_depthNormalFeatureEstimationMethod = method;
809 
810  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
811  it != m_depthNormalFaces.end(); ++it) {
812  (*it)->setFeatureEstimationMethod(method);
813  }
814 }
815 
816 void vpMbDepthNormalTracker::setDepthNormalPclPlaneEstimationMethod(int method)
817 {
818  m_depthNormalPclPlaneEstimationMethod = method;
819 
820  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
821  it != m_depthNormalFaces.end(); ++it) {
822  (*it)->setPclPlaneEstimationMethod(method);
823  }
824 }
825 
826 void vpMbDepthNormalTracker::setDepthNormalPclPlaneEstimationRansacMaxIter(int maxIter)
827 {
828  m_depthNormalPclPlaneEstimationRansacMaxIter = maxIter;
829 
830  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
831  it != m_depthNormalFaces.end(); ++it) {
832  (*it)->setPclPlaneEstimationRansacMaxIter(maxIter);
833  }
834 }
835 
836 void vpMbDepthNormalTracker::setDepthNormalPclPlaneEstimationRansacThreshold(double threshold)
837 {
838  m_depthNormalPclPlaneEstimationRansacThreshold = threshold;
839 
840  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
841  it != m_depthNormalFaces.end(); ++it) {
842  (*it)->setPclPlaneEstimationRansacThreshold(threshold);
843  }
844 }
845 
846 void vpMbDepthNormalTracker::setDepthNormalSamplingStep(unsigned int stepX, unsigned int stepY)
847 {
848  if (stepX == 0 || stepY == 0) {
849  std::cerr << "stepX and stepY must be greater than zero!" << std::endl;
850  return;
851  }
852 
853  m_depthNormalSamplingStepX = stepX;
854  m_depthNormalSamplingStepY = stepY;
855 }
856 
857 // void vpMbDepthNormalTracker::setDepthNormalUseRobust(bool use) {
858 // m_depthNormalUseRobust = use;
859 //}
860 
861 void vpMbDepthNormalTracker::track(const vpImage<unsigned char> &)
862 {
863  throw vpException(vpException::fatalError, "Cannot track with a grayscale image!");
864 }
865 
866 void vpMbDepthNormalTracker::track(const vpImage<vpRGBa> &)
867 {
868  throw vpException(vpException::fatalError, "Cannot track with a color image!");
869 }
870 
871 #if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
872 void vpMbDepthNormalTracker::track(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud)
873 {
874  segmentPointCloud(point_cloud);
875 
876  computeVVS();
877 
878  computeVisibility(point_cloud->width, point_cloud->height);
879 }
880 #endif
881 
882 void vpMbDepthNormalTracker::track(const std::vector<vpColVector> &point_cloud, unsigned int width, unsigned int height)
883 {
884  segmentPointCloud(point_cloud, width, height);
885 
886  computeVVS();
887 
888  computeVisibility(width, height);
889 }
890 
891 void vpMbDepthNormalTracker::initCircle(const vpPoint & /*p1*/, const vpPoint & /*p2*/, const vpPoint & /*p3*/,
892  double /*radius*/, int /*idFace*/, const std::string & /*name*/)
893 {
894  throw vpException(vpException::fatalError, "vpMbDepthNormalTracker::initCircle() should not be called!");
895 }
896 
897 void vpMbDepthNormalTracker::initCylinder(const vpPoint & /*p1*/, const vpPoint & /*p2*/, double /*radius*/,
898  int /*idFace*/, const std::string & /*name*/)
899 {
900  throw vpException(vpException::fatalError, "vpMbDepthNormalTracker::initCylinder() should not be called!");
901 }
902 
903 void vpMbDepthNormalTracker::initFaceFromCorners(vpMbtPolygon &polygon) { addFace(polygon, false); }
904 
905 void vpMbDepthNormalTracker::initFaceFromLines(vpMbtPolygon &polygon) { addFace(polygon, true); }
906 END_VISP_NAMESPACE