vpMbDepthNormalTracker.cpp
1 /****************************************************************************
2  *
3  * ViSP, open source Visual Servoing Platform software.
4  * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
5  *
6  * This software is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  * See the file LICENSE.txt at the root directory of this source
11  * distribution for additional information about the GNU GPL.
12  *
13  * For using ViSP with software that can not be combined with the GNU
14  * GPL, please contact Inria about acquiring a ViSP Professional
15  * Edition License.
16  *
17  * See https://visp.inria.fr for more information.
18  *
19  * This software was developed at:
20  * Inria Rennes - Bretagne Atlantique
21  * Campus Universitaire de Beaulieu
22  * 35042 Rennes Cedex
23  * France
24  *
25  * If you have questions regarding the use of this file, please contact
26  * Inria at visp@inria.fr
27  *
28  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
29  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
30  *
31  * Description:
32  * Model-based tracker using depth normal features.
33  *
34 *****************************************************************************/
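/*
 * Usage sketch (illustrative only, not part of this translation unit): a minimal example
 * of driving the depth-normal tracker from a PCL point cloud. The file names
 * ("config.xml", "model.cao"), the image size and the acquisition loop are placeholders;
 * error handling is omitted.
 *
 *   vpMbDepthNormalTracker tracker;
 *   tracker.loadConfigFile("config.xml");  // camera parameters, sampling steps, ...
 *   tracker.loadModel("model.cao");        // CAD model of the tracked object
 *
 *   vpImage<unsigned char> I(480, 640);    // dummy image matching the depth map size
 *   vpHomogeneousMatrix cMo;               // initial pose of the object in the camera frame
 *   tracker.initFromPose(I, cMo);
 *
 *   pcl::PointCloud<pcl::PointXYZ>::ConstPtr point_cloud;
 *   while (acquire(point_cloud)) {         // placeholder grabber function
 *     tracker.track(point_cloud);
 *     cMo = tracker.getPose();
 *   }
 */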
35 
36 #include <iostream>
37 
38 #include <visp3/core/vpConfig.h>
39 
40 #ifdef VISP_HAVE_PCL
41 #include <pcl/point_cloud.h>
42 #endif
43 
44 #include <visp3/core/vpDisplay.h>
45 #include <visp3/core/vpExponentialMap.h>
46 #include <visp3/core/vpTrackingException.h>
47 #include <visp3/mbt/vpMbDepthNormalTracker.h>
48 #include <visp3/mbt/vpMbtXmlGenericParser.h>
49 
50 #if DEBUG_DISPLAY_DEPTH_NORMAL
51 #include <visp3/gui/vpDisplayGDI.h>
52 #include <visp3/gui/vpDisplayX.h>
53 #endif
54 
55 vpMbDepthNormalTracker::vpMbDepthNormalTracker()
56  : m_depthNormalFeatureEstimationMethod(vpMbtFaceDepthNormal::ROBUST_FEATURE_ESTIMATION),
57  m_depthNormalHiddenFacesDisplay(), m_depthNormalListOfActiveFaces(), m_depthNormalListOfDesiredFeatures(),
58  m_depthNormalFaces(), m_depthNormalPclPlaneEstimationMethod(2), m_depthNormalPclPlaneEstimationRansacMaxIter(200),
59  m_depthNormalPclPlaneEstimationRansacThreshold(0.001), m_depthNormalSamplingStepX(2), m_depthNormalSamplingStepY(2),
60  m_depthNormalUseRobust(false), m_error_depthNormal(), m_featuresToBeDisplayedDepthNormal(), m_L_depthNormal(),
61  m_robust_depthNormal(), m_w_depthNormal(), m_weightedError_depthNormal()
62 #if DEBUG_DISPLAY_DEPTH_NORMAL
63  ,
64  m_debugDisp_depthNormal(nullptr), m_debugImage_depthNormal()
65 #endif
66 {
67 #ifdef VISP_HAVE_OGRE
68  faces.getOgreContext()->setWindowName("MBT Depth");
69 #endif
70 
71 #if defined(VISP_HAVE_X11) && DEBUG_DISPLAY_DEPTH_NORMAL
72  m_debugDisp_depthNormal = new vpDisplayX;
73 #elif defined(VISP_HAVE_GDI) && DEBUG_DISPLAY_DEPTH_NORMAL
74  m_debugDisp_depthNormal = new vpDisplayGDI;
75 #endif
76 }
77 
78 vpMbDepthNormalTracker::~vpMbDepthNormalTracker()
79 {
80  for (size_t i = 0; i < m_depthNormalFaces.size(); i++) {
81  delete m_depthNormalFaces[i];
82  }
83 }
84 
85 void vpMbDepthNormalTracker::addFace(vpMbtPolygon &polygon, bool alreadyClose)
86 {
87  if (polygon.nbpt < 3) {
88  return;
89  }
90 
91  // Copy hidden faces
92  m_depthNormalHiddenFacesDisplay = faces;
93 
94  vpMbtFaceDepthNormal *normal_face = new vpMbtFaceDepthNormal;
95  normal_face->m_hiddenFace = &faces;
96  normal_face->m_polygon = &polygon;
97  normal_face->m_cam = m_cam;
98  normal_face->m_useScanLine = useScanLine;
99  normal_face->m_clippingFlag = clippingFlag;
100  normal_face->m_distNearClip = distNearClip;
101  normal_face->m_distFarClip = distFarClip;
102  normal_face->setFeatureEstimationMethod(m_depthNormalFeatureEstimationMethod);
103  normal_face->setPclPlaneEstimationMethod(m_depthNormalPclPlaneEstimationMethod);
104  normal_face->setPclPlaneEstimationRansacMaxIter(m_depthNormalPclPlaneEstimationRansacMaxIter);
105  normal_face->setPclPlaneEstimationRansacThreshold(m_depthNormalPclPlaneEstimationRansacThreshold);
106 
107  // Add lines that compose the face
108  unsigned int nbpt = polygon.getNbPoint();
109  if (nbpt > 0) {
110  for (unsigned int i = 0; i < nbpt - 1; i++) {
111  normal_face->addLine(polygon.p[i], polygon.p[i + 1], &m_depthNormalHiddenFacesDisplay, m_rand, polygon.getIndex(),
112  polygon.getName());
113  }
114 
115  if (!alreadyClose) {
116  // Add last line that closes the face
117  normal_face->addLine(polygon.p[nbpt - 1], polygon.p[0], &m_depthNormalHiddenFacesDisplay, m_rand,
118  polygon.getIndex(), polygon.getName());
119  }
120  }
121 
122  // Construct a vpPlane in object frame
123  vpPoint pts[3];
124  pts[0] = polygon.p[0];
125  pts[1] = polygon.p[1];
126  pts[2] = polygon.p[2];
127  normal_face->m_planeObject = vpPlane(pts[0], pts[1], pts[2], vpPlane::object_frame);
128 
129  m_depthNormalFaces.push_back(normal_face);
130 }
131 
132 void vpMbDepthNormalTracker::computeVisibility(unsigned int width, unsigned int height)
133 {
134  bool changed = false;
135  faces.setVisible(width, height, m_cam, m_cMo, angleAppears, angleDisappears, changed);
136 
137  if (useScanLine) {
138  // if (clippingFlag <= 2) {
139  // cam.computeFov(width, height);
140  // }
141 
142  faces.computeClippedPolygons(m_cMo, m_cam);
143  faces.computeScanLineRender(m_cam, width, height);
144  }
145 
146  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
147  it != m_depthNormalFaces.end(); ++it) {
148  vpMbtFaceDepthNormal *face_normal = *it;
149  face_normal->computeVisibility();
150  }
151 }
152 
153 void vpMbDepthNormalTracker::computeVVS()
154 {
155  double normRes = 0;
156  double normRes_1 = -1;
157  unsigned int iter = 0;
158 
159  computeVVSInit();
160  unsigned int nb_features = (unsigned int)(3 * m_depthNormalListOfDesiredFeatures.size());
161 
162  vpColVector error_prev(nb_features);
163  vpMatrix LTL;
164  vpColVector LTR, v;
165 
166  double mu = m_initialMu;
167  vpHomogeneousMatrix cMo_prev;
168 
169  bool isoJoIdentity = m_isoJoIdentity; // Backup since it can be modified if L is not full rank
170  if (isoJoIdentity)
171  oJo.eye();
172 
173  vpVelocityTwistMatrix cVo;
174  vpMatrix L_true, LVJ_true;
175 
176  while (std::fabs(normRes_1 - normRes) > m_stopCriteriaEpsilon && (iter < m_maxIter)) {
177  computeVVSInteractionMatrixAndResidu();
178 
179  bool reStartFromLastIncrement = false;
180  computeVVSCheckLevenbergMarquardt(iter, m_error_depthNormal, error_prev, cMo_prev, mu, reStartFromLastIncrement);
181 
182  if (!reStartFromLastIncrement) {
183  computeVVSWeights(m_robust_depthNormal, m_error_depthNormal, m_w_depthNormal);
185 
186  if (computeCovariance) {
187  L_true = m_L_depthNormal;
188  if (!isoJoIdentity) {
189  cVo.buildFrom(m_cMo);
190  LVJ_true = (m_L_depthNormal * (cVo * oJo));
191  }
192  }
193 
194  // Compute DoF only once
195  if (iter == 0) {
196  // If all the 6 dof should be estimated, we check if the interaction
197  // matrix is full rank. If not we remove automatically the dof that
198  // cannot be estimated. This is particularly useful when considering
199  // circles (rank 5) and cylinders (rank 4)
200  if (isoJoIdentity) {
201  cVo.buildFrom(m_cMo);
202 
203  vpMatrix K; // kernel
204  unsigned int rank = (m_L_depthNormal * cVo).kernel(K);
205  if (rank == 0) {
206  throw vpException(vpException::fatalError, "Rank=0, cannot estimate the pose !");
207  }
208 
209  if (rank != 6) {
210  vpMatrix I; // Identity
211  I.eye(6);
212  oJo = I - K.AtA();
213 
214  isoJoIdentity = false;
215  }
216  }
217  }
218 
219  double num = 0.0, den = 0.0;
220  for (unsigned int i = 0; i < m_L_depthNormal.getRows(); i++) {
221  // Compute weighted errors and stop criteria
222  m_weightedError_depthNormal[i] = m_w_depthNormal[i] * m_error_depthNormal[i];
223  num += m_w_depthNormal[i] * vpMath::sqr(m_error_depthNormal[i]);
224  den += m_w_depthNormal[i];
225 
226  // weight interaction matrix
227  for (unsigned int j = 0; j < 6; j++) {
228  m_L_depthNormal[i][j] *= m_w_depthNormal[i];
229  }
230  }
231 
232  computeVVSPoseEstimation(isoJoIdentity, iter, m_L_depthNormal, LTL, m_weightedError_depthNormal,
233  m_error_depthNormal, error_prev, LTR, mu, v);
234 
235  cMo_prev = m_cMo;
236  m_cMo = vpExponentialMap::direct(v).inverse() * m_cMo;
237 
238  normRes_1 = normRes;
239  normRes = sqrt(num / den);
240  }
241 
242  iter++;
243  }
244 
245  computeCovarianceMatrixVVS(isoJoIdentity, m_w_depthNormal, cMo_prev, L_true, LVJ_true, m_error_depthNormal);
246 }
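/*
 * Note on the loop above (standard ViSP virtual visual servoing formulation, recalled here
 * for reference): the rows of the interaction matrix L and of the residual e = s - s* are
 * scaled by the robust weights w, computeVVSPoseEstimation() solves the weighted
 * Gauss-Newton / Levenberg-Marquardt normal equations for the velocity screw v, and the
 * pose is then updated through the exponential map:
 *   cMo <- vpExponentialMap::direct(v).inverse() * cMo
 */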
247 
248 void vpMbDepthNormalTracker::computeVVSInit()
249 {
250  unsigned int nb_features = (unsigned int)(3 * m_depthNormalListOfDesiredFeatures.size());
251 
252  m_L_depthNormal.resize(nb_features, 6, false, false);
253  m_error_depthNormal.resize(nb_features, false);
254  m_weightedError_depthNormal.resize(nb_features, false);
255 
256  m_w_depthNormal.resize(nb_features, false);
257  m_w_depthNormal = 1;
258 
260 }
261 
262 void vpMbDepthNormalTracker::computeVVSInteractionMatrixAndResidu()
263 {
264  unsigned int cpt = 0;
265  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalListOfActiveFaces.begin();
266  it != m_depthNormalListOfActiveFaces.end(); ++it) {
267  vpMatrix L_face;
268  vpColVector features_face;
269  (*it)->computeInteractionMatrix(m_cMo, L_face, features_face);
270 
271  vpColVector face_error = features_face - m_depthNormalListOfDesiredFeatures[(size_t)cpt];
272 
273  m_error_depthNormal.insert(cpt * 3, face_error);
274  m_L_depthNormal.insert(L_face, cpt * 3, 0);
275 
276  cpt++;
277  }
278 }
279 
280 void vpMbDepthNormalTracker::display(const vpImage<unsigned char> &I, const vpHomogeneousMatrix &cMo,
281  const vpCameraParameters &cam, const vpColor &col, unsigned int thickness,
282  bool displayFullModel)
283 {
284  std::vector<std::vector<double> > models =
285  vpMbDepthNormalTracker::getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel);
286 
287  for (size_t i = 0; i < models.size(); i++) {
288  if (vpMath::equal(models[i][0], 0)) {
289  vpImagePoint ip1(models[i][1], models[i][2]);
290  vpImagePoint ip2(models[i][3], models[i][4]);
291  vpDisplay::displayLine(I, ip1, ip2, col, thickness);
292  }
293  }
294 
295  if (displayFeatures) {
296  std::vector<std::vector<double> > features = getFeaturesForDisplayDepthNormal();
297  for (size_t i = 0; i < features.size(); i++) {
298  vpImagePoint im_centroid(features[i][1], features[i][2]);
299  vpImagePoint im_extremity(features[i][3], features[i][4]);
300  bool desired = vpMath::equal(features[i][0], 2);
301  vpDisplay::displayArrow(I, im_centroid, im_extremity, desired ? vpColor::blue : vpColor::red, 4, 2, thickness);
302  }
303  }
304 }
305 
306 void vpMbDepthNormalTracker::display(const vpImage<vpRGBa> &I, const vpHomogeneousMatrix &cMo,
307  const vpCameraParameters &cam, const vpColor &col, unsigned int thickness,
308  bool displayFullModel)
309 {
310  std::vector<std::vector<double> > models =
311  vpMbDepthNormalTracker::getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel);
312 
313  for (size_t i = 0; i < models.size(); i++) {
314  if (vpMath::equal(models[i][0], 0)) {
315  vpImagePoint ip1(models[i][1], models[i][2]);
316  vpImagePoint ip2(models[i][3], models[i][4]);
317  vpDisplay::displayLine(I, ip1, ip2, col, thickness);
318  }
319  }
320 
321  if (displayFeatures) {
322  std::vector<std::vector<double> > features = getFeaturesForDisplayDepthNormal();
323  for (size_t i = 0; i < features.size(); i++) {
324  vpImagePoint im_centroid(features[i][1], features[i][2]);
325  vpImagePoint im_extremity(features[i][3], features[i][4]);
326  bool desired = vpMath::equal(features[i][0], 2);
327  vpDisplay::displayArrow(I, im_centroid, im_extremity, desired ? vpColor::blue : vpColor::red, 4, 2, thickness);
328  }
329  }
330 }
331 
332 std::vector<std::vector<double> > vpMbDepthNormalTracker::getFeaturesForDisplayDepthNormal()
333 {
334  std::vector<std::vector<double> > features;
335 
336  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
337  it != m_depthNormalFaces.end(); ++it) {
338  vpMbtFaceDepthNormal *face_normal = *it;
339  std::vector<std::vector<double> > currentFeatures = face_normal->getFeaturesForDisplay(m_cMo, m_cam);
340  features.insert(features.end(), currentFeatures.begin(), currentFeatures.end());
341  }
342 
343  return features;
344 }
345 
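/*
 * Layout note (inferred from how display() consumes these vectors above): each row returned
 * by getModelForDisplay() describes one display primitive, a leading 0 denoting a line
 * segment followed by the endpoint image coordinates (i1, j1, i2, j2). Each row returned by
 * getFeaturesForDisplayDepthNormal() holds the centroid and extremity image points of a face
 * normal, a leading value of 2 marking the desired feature (drawn in blue, the others in red).
 */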
361 std::vector<std::vector<double> > vpMbDepthNormalTracker::getModelForDisplay(unsigned int width, unsigned int height,
362  const vpHomogeneousMatrix &cMo,
363  const vpCameraParameters &cam,
364  bool displayFullModel)
365 {
366  std::vector<std::vector<double> > models;
367 
368  vpCameraParameters c = cam;
369 
370  bool changed = false;
371  m_depthNormalHiddenFacesDisplay.setVisible(width, height, c, cMo, vpMath::rad(89), vpMath::rad(89), changed);
372 
373  if (useScanLine) {
374  c.computeFov(width, height);
375 
376  m_depthNormalHiddenFacesDisplay.computeClippedPolygons(cMo, c);
377  m_depthNormalHiddenFacesDisplay.computeScanLineRender(c, width, height);
378  }
379 
380  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
381  it != m_depthNormalFaces.end(); ++it) {
382  vpMbtFaceDepthNormal *face_normal = *it;
383  std::vector<std::vector<double> > modelLines =
384  face_normal->getModelForDisplay(width, height, cMo, cam, displayFullModel);
385  models.insert(models.end(), modelLines.begin(), modelLines.end());
386  }
387 
388  return models;
389 }
390 
391 void vpMbDepthNormalTracker::init(const vpImage<unsigned char> &I)
392 {
393  if (!modelInitialised) {
394  throw vpException(vpException::fatalError, "model not initialized");
395  }
396 
397  bool reInitialisation = false;
398  if (!useOgre) {
399  faces.setVisible(I.getWidth(), I.getHeight(), m_cam, m_cMo, angleAppears, angleDisappears, reInitialisation);
400  }
401  else {
402 #ifdef VISP_HAVE_OGRE
403  if (!faces.isOgreInitialised()) {
404  faces.setBackgroundSizeOgre(I.getHeight(), I.getWidth());
405  faces.setOgreShowConfigDialog(ogreShowConfigDialog);
406  faces.initOgre(m_cam);
407  // Turn off Ogre config dialog display for the next call to this
408  // function since settings are saved in the ogre.cfg file and used
409  // during the next call
410  ogreShowConfigDialog = false;
411  }
412 
413  faces.setVisibleOgre(I.getWidth(), I.getHeight(), m_cam, m_cMo, angleAppears, angleDisappears, reInitialisation);
414 #else
415  faces.setVisible(I.getWidth(), I.getHeight(), m_cam, m_cMo, angleAppears, angleDisappears, reInitialisation);
416 #endif
417  }
418 
419  if (useScanLine || clippingFlag > 3)
420  m_cam.computeFov(I.getWidth(), I.getHeight());
421 
422  computeVisibility(I.getWidth(), I.getHeight());
423 }
424 
425 void vpMbDepthNormalTracker::loadConfigFile(const std::string &configFile, bool verbose)
426 {
427 #if defined(VISP_HAVE_PUGIXML)
428  vpMbtXmlGenericParser xmlp(vpMbtXmlGenericParser::DEPTH_NORMAL_PARSER);
429  xmlp.setVerbose(verbose);
430  xmlp.setCameraParameters(m_cam);
431  xmlp.setAngleAppear(vpMath::deg(angleAppears));
432  xmlp.setAngleDisappear(vpMath::deg(angleDisappears));
433 
434  xmlp.setDepthNormalFeatureEstimationMethod(m_depthNormalFeatureEstimationMethod);
435  xmlp.setDepthNormalPclPlaneEstimationMethod(m_depthNormalPclPlaneEstimationMethod);
436  xmlp.setDepthNormalPclPlaneEstimationRansacMaxIter(m_depthNormalPclPlaneEstimationRansacMaxIter);
437  xmlp.setDepthNormalPclPlaneEstimationRansacThreshold(m_depthNormalPclPlaneEstimationRansacThreshold);
438  xmlp.setDepthNormalSamplingStepX(m_depthNormalSamplingStepX);
439  xmlp.setDepthNormalSamplingStepY(m_depthNormalSamplingStepY);
440 
441  try {
442  if (verbose) {
443  std::cout << " *********** Parsing XML for Mb Depth Tracker ************ " << std::endl;
444  }
445  xmlp.parse(configFile);
446  }
447  catch (const vpException &e) {
448  std::cerr << "Exception: " << e.what() << std::endl;
449  throw vpException(vpException::ioError, "Cannot open XML file \"%s\"", configFile.c_str());
450  }
451 
452  vpCameraParameters camera;
453  xmlp.getCameraParameters(camera);
454  setCameraParameters(camera);
455 
456  angleAppears = vpMath::rad(xmlp.getAngleAppear());
457  angleDisappears = vpMath::rad(xmlp.getAngleDisappear());
458 
459  if (xmlp.hasNearClippingDistance())
460  setNearClippingDistance(xmlp.getNearClippingDistance());
461 
462  if (xmlp.hasFarClippingDistance())
463  setFarClippingDistance(xmlp.getFarClippingDistance());
464 
465  if (xmlp.getFovClipping())
466  setClipping(clippingFlag | vpPolygon3D::FOV_CLIPPING);
467 
468  setDepthNormalFeatureEstimationMethod(xmlp.getDepthNormalFeatureEstimationMethod());
469  setDepthNormalPclPlaneEstimationMethod(xmlp.getDepthNormalPclPlaneEstimationMethod());
470  setDepthNormalPclPlaneEstimationRansacMaxIter(xmlp.getDepthNormalPclPlaneEstimationRansacMaxIter());
471  setDepthNormalPclPlaneEstimationRansacThreshold(xmlp.getDepthNormalPclPlaneEstimationRansacThreshold());
472  setDepthNormalSamplingStep(xmlp.getDepthNormalSamplingStepX(), xmlp.getDepthNormalSamplingStepY());
473 #else
474  (void)configFile;
475  (void)verbose;
476  throw(vpException(vpException::ioError, "vpMbDepthNormalTracker::loadConfigFile() needs pugixml built-in 3rdparty"));
477 #endif
478 }
479 
480 void vpMbDepthNormalTracker::reInitModel(const vpImage<unsigned char> &I, const std::string &cad_name,
481  const vpHomogeneousMatrix &cMo, bool verbose)
482 {
483  m_cMo.eye();
484 
485  for (size_t i = 0; i < m_depthNormalFaces.size(); i++) {
486  delete m_depthNormalFaces[i];
487  m_depthNormalFaces[i] = nullptr;
488  }
489 
490  m_depthNormalFaces.clear();
491 
492  loadModel(cad_name, verbose);
493  initFromPose(I, cMo);
494 }
495 
496 #if defined(VISP_HAVE_PCL)
497 void vpMbDepthNormalTracker::reInitModel(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud,
498  const std::string &cad_name, const vpHomogeneousMatrix &cMo, bool verbose)
499 {
500  vpImage<unsigned char> I_dummy(point_cloud->height, point_cloud->width);
501  reInitModel(I_dummy, cad_name, cMo, verbose);
502 }
503 
504 #endif
505 
506 void vpMbDepthNormalTracker::resetTracker()
507 {
508  m_cMo.eye();
509 
510  for (std::vector<vpMbtFaceDepthNormal *>::iterator it = m_depthNormalFaces.begin(); it != m_depthNormalFaces.end();
511  ++it) {
512  vpMbtFaceDepthNormal *normal_face = *it;
513  delete normal_face;
514  normal_face = nullptr;
515  }
516 
517  m_depthNormalFaces.clear();
518 
519  m_computeInteraction = true;
520  computeCovariance = false;
521 
522  angleAppears = vpMath::rad(89);
523  angleDisappears = vpMath::rad(89);
524 
525  clippingFlag = vpPolygon3D::NO_CLIPPING;
526 
527  m_lambda = 1.0;
528 
529  faces.reset();
530 
531  m_optimizationMethod = vpMbTracker::GAUSS_NEWTON_OPT;
532 
533  useScanLine = false;
534 
535 #ifdef VISP_HAVE_OGRE
536  useOgre = false;
537 #endif
538 
539  m_depthNormalListOfActiveFaces.clear();
540  m_depthNormalListOfDesiredFeatures.clear();
541 }
542 
543 void vpMbDepthNormalTracker::setOgreVisibilityTest(const bool &v)
544 {
545  vpMbTracker::setOgreVisibilityTest(v);
546 #ifdef VISP_HAVE_OGRE
547  faces.getOgreContext()->setWindowName("MBT Depth");
548 #endif
549 }
550 
551 void vpMbDepthNormalTracker::setPose(const vpImage<unsigned char> &I, const vpHomogeneousMatrix &cdMo)
552 {
553  m_cMo = cdMo;
554  init(I);
555 }
556 
557 void vpMbDepthNormalTracker::setPose(const vpImage<vpRGBa> &I_color, const vpHomogeneousMatrix &cdMo)
558 {
559  m_cMo = cdMo;
560  vpImageConvert::convert(I_color, m_I);
561  init(m_I);
562 }
563 
564 #if defined(VISP_HAVE_PCL)
565 void vpMbDepthNormalTracker::setPose(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud,
566  const vpHomogeneousMatrix &cdMo)
567 {
568  vpImage<unsigned char> I_dummy(point_cloud->height, point_cloud->width);
569  m_cMo = cdMo;
570  init(I_dummy);
571 }
572 #endif
573 
574 void vpMbDepthNormalTracker::setScanLineVisibilityTest(const bool &v)
575 {
576  vpMbTracker::setScanLineVisibilityTest(v);
577 
578  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
579  it != m_depthNormalFaces.end(); ++it) {
580  (*it)->setScanLineVisibilityTest(v);
581  }
582 }
583 
584 void vpMbDepthNormalTracker::setUseDepthNormalTracking(const std::string &name, const bool &useDepthNormalTracking)
585 {
586  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
587  it != m_depthNormalFaces.end(); ++it) {
588  vpMbtFaceDepthNormal *face = *it;
589  if (face->m_polygon->getName() == name) {
590  face->setTracked(useDepthNormalTracking);
591  }
592  }
593 }
594 
596 
597 #ifdef VISP_HAVE_PCL
598 void vpMbDepthNormalTracker::segmentPointCloud(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud)
599 {
600  m_depthNormalListOfActiveFaces.clear();
601  m_depthNormalListOfDesiredFeatures.clear();
602 
603 #if DEBUG_DISPLAY_DEPTH_NORMAL
604  if (!m_debugDisp_depthNormal->isInitialised()) {
605  m_debugImage_depthNormal.resize(point_cloud->height, point_cloud->width);
606  m_debugDisp_depthNormal->init(m_debugImage_depthNormal, 50, 0, "Debug display normal depth tracker");
607  }
608 
609  m_debugImage_depthNormal = 0;
610  std::vector<std::vector<vpImagePoint> > roiPts_vec;
611 #endif
612 
613  for (std::vector<vpMbtFaceDepthNormal *>::iterator it = m_depthNormalFaces.begin(); it != m_depthNormalFaces.end();
614  ++it) {
615  vpMbtFaceDepthNormal *face = *it;
616 
617  if (face->isVisible() && face->isTracked()) {
618  vpColVector desired_features;
619 
620 #if DEBUG_DISPLAY_DEPTH_NORMAL
621  std::vector<std::vector<vpImagePoint> > roiPts_vec_;
622 #endif
623  if (face->computeDesiredFeatures(m_cMo, point_cloud->width, point_cloud->height, point_cloud, desired_features,
624  m_depthNormalSamplingStepX, m_depthNormalSamplingStepY
625 #if DEBUG_DISPLAY_DEPTH_NORMAL
626  ,
627  m_debugImage_depthNormal, roiPts_vec_
628 #endif
629  ,
630  m_mask)) {
631  m_depthNormalListOfDesiredFeatures.push_back(desired_features);
632  m_depthNormalListOfActiveFaces.push_back(face);
633 
634 #if DEBUG_DISPLAY_DEPTH_NORMAL
635  roiPts_vec.insert(roiPts_vec.end(), roiPts_vec_.begin(), roiPts_vec_.end());
636 #endif
637  }
638  }
639  }
640 
641 #if DEBUG_DISPLAY_DEPTH_NORMAL
642  vpDisplay::display(m_debugImage_depthNormal);
643 
644  for (size_t i = 0; i < roiPts_vec.size(); i++) {
645  if (roiPts_vec[i].empty())
646  continue;
647 
648  for (size_t j = 0; j < roiPts_vec[i].size() - 1; j++) {
649  vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][j], roiPts_vec[i][j + 1], vpColor::red, 2);
650  }
651  vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][0], roiPts_vec[i][roiPts_vec[i].size() - 1],
652  vpColor::red, 2);
653  }
654 
655  vpDisplay::flush(m_debugImage_depthNormal);
656 #endif
657 }
658 #endif
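/*
 * The std::vector<vpColVector> and vpMatrix overloads below mirror the PCL overload above:
 * reset the lists of active faces and desired features, then, for every face that is both
 * visible and tracked, extract the desired plane/normal feature from the organized point
 * cloud inside the face region, subsampled by m_depthNormalSamplingStepX and
 * m_depthNormalSamplingStepY.
 */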
659 
660 void vpMbDepthNormalTracker::segmentPointCloud(const std::vector<vpColVector> &point_cloud, unsigned int width,
661  unsigned int height)
662 {
663  m_depthNormalListOfActiveFaces.clear();
664  m_depthNormalListOfDesiredFeatures.clear();
665 
666 #if DEBUG_DISPLAY_DEPTH_NORMAL
667  if (!m_debugDisp_depthNormal->isInitialised()) {
668  m_debugImage_depthNormal.resize(height, width);
669  m_debugDisp_depthNormal->init(m_debugImage_depthNormal, 50, 0, "Debug display normal depth tracker");
670  }
671 
672  m_debugImage_depthNormal = 0;
673  std::vector<std::vector<vpImagePoint> > roiPts_vec;
674 #endif
675 
676  for (std::vector<vpMbtFaceDepthNormal *>::iterator it = m_depthNormalFaces.begin(); it != m_depthNormalFaces.end();
677  ++it) {
678  vpMbtFaceDepthNormal *face = *it;
679 
680  if (face->isVisible() && face->isTracked()) {
681  vpColVector desired_features;
682 
683 #if DEBUG_DISPLAY_DEPTH_NORMAL
684  std::vector<std::vector<vpImagePoint> > roiPts_vec_;
685 #endif
686 
687  if (face->computeDesiredFeatures(m_cMo, width, height, point_cloud, desired_features, m_depthNormalSamplingStepX,
688  m_depthNormalSamplingStepY
689 #if DEBUG_DISPLAY_DEPTH_NORMAL
690  ,
691  m_debugImage_depthNormal, roiPts_vec_
692 #endif
693  ,
694  m_mask)) {
695  m_depthNormalListOfDesiredFeatures.push_back(desired_features);
696  m_depthNormalListOfActiveFaces.push_back(face);
697 
698 #if DEBUG_DISPLAY_DEPTH_NORMAL
699  roiPts_vec.insert(roiPts_vec.end(), roiPts_vec_.begin(), roiPts_vec_.end());
700 #endif
701  }
702  }
703  }
704 
705 #if DEBUG_DISPLAY_DEPTH_NORMAL
706  vpDisplay::display(m_debugImage_depthNormal);
707 
708  for (size_t i = 0; i < roiPts_vec.size(); i++) {
709  if (roiPts_vec[i].empty())
710  continue;
711 
712  for (size_t j = 0; j < roiPts_vec[i].size() - 1; j++) {
713  vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][j], roiPts_vec[i][j + 1], vpColor::red, 2);
714  }
715  vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][0], roiPts_vec[i][roiPts_vec[i].size() - 1],
716  vpColor::red, 2);
717  }
718 
719  vpDisplay::flush(m_debugImage_depthNormal);
720 #endif
721 }
722 
723 void vpMbDepthNormalTracker::segmentPointCloud(const vpMatrix &point_cloud, unsigned int width,
724  unsigned int height)
725 {
726  m_depthNormalListOfActiveFaces.clear();
727  m_depthNormalListOfDesiredFeatures.clear();
728 
729 #if DEBUG_DISPLAY_DEPTH_NORMAL
730  if (!m_debugDisp_depthNormal->isInitialised()) {
731  m_debugImage_depthNormal.resize(height, width);
732  m_debugDisp_depthNormal->init(m_debugImage_depthNormal, 50, 0, "Debug display normal depth tracker");
733  }
734 
735  m_debugImage_depthNormal = 0;
736  std::vector<std::vector<vpImagePoint> > roiPts_vec;
737 #endif
738 
739  for (std::vector<vpMbtFaceDepthNormal *>::iterator it = m_depthNormalFaces.begin(); it != m_depthNormalFaces.end();
740  ++it) {
741  vpMbtFaceDepthNormal *face = *it;
742 
743  if (face->isVisible() && face->isTracked()) {
744  vpColVector desired_features;
745 
746 #if DEBUG_DISPLAY_DEPTH_NORMAL
747  std::vector<std::vector<vpImagePoint> > roiPts_vec_;
748 #endif
749 
750  if (face->computeDesiredFeatures(m_cMo, width, height, point_cloud, desired_features, m_depthNormalSamplingStepX,
751  m_depthNormalSamplingStepY
752 #if DEBUG_DISPLAY_DEPTH_NORMAL
753  ,
754  m_debugImage_depthNormal, roiPts_vec_
755 #endif
756  ,
757  m_mask)) {
758  m_depthNormalListOfDesiredFeatures.push_back(desired_features);
759  m_depthNormalListOfActiveFaces.push_back(face);
760 
761 #if DEBUG_DISPLAY_DEPTH_NORMAL
762  roiPts_vec.insert(roiPts_vec.end(), roiPts_vec_.begin(), roiPts_vec_.end());
763 #endif
764  }
765  }
766  }
767 
768 #if DEBUG_DISPLAY_DEPTH_NORMAL
769  vpDisplay::display(m_debugImage_depthNormal);
770 
771  for (size_t i = 0; i < roiPts_vec.size(); i++) {
772  if (roiPts_vec[i].empty())
773  continue;
774 
775  for (size_t j = 0; j < roiPts_vec[i].size() - 1; j++) {
776  vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][j], roiPts_vec[i][j + 1], vpColor::red, 2);
777  }
778  vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][0], roiPts_vec[i][roiPts_vec[i].size() - 1],
779  vpColor::red, 2);
780  }
781 
782  vpDisplay::flush(m_debugImage_depthNormal);
783 #endif
784 }
785 
786 void vpMbDepthNormalTracker::setCameraParameters(const vpCameraParameters &cam)
787 {
788  m_cam = cam;
789 
790  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
791  it != m_depthNormalFaces.end(); ++it) {
792  (*it)->setCameraParameters(cam);
793  }
794 }
795 
796 void vpMbDepthNormalTracker::setDepthNormalFaceCentroidMethod(const vpMbtFaceDepthNormal::vpFaceCentroidType &method)
797 {
798  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
799  it != m_depthNormalFaces.end(); ++it) {
800  (*it)->setFaceCentroidMethod(method);
801  }
802 }
803 
804 void vpMbDepthNormalTracker::setDepthNormalFeatureEstimationMethod(
805  const vpMbtFaceDepthNormal::vpFeatureEstimationType &method)
806 {
807  m_depthNormalFeatureEstimationMethod = method;
808 
809  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
810  it != m_depthNormalFaces.end(); ++it) {
811  (*it)->setFeatureEstimationMethod(method);
812  }
813 }
814 
815 void vpMbDepthNormalTracker::setDepthNormalPclPlaneEstimationMethod(int method)
816 {
817  m_depthNormalPclPlaneEstimationMethod = method;
818 
819  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
820  it != m_depthNormalFaces.end(); ++it) {
821  (*it)->setPclPlaneEstimationMethod(method);
822  }
823 }
824 
825 void vpMbDepthNormalTracker::setDepthNormalPclPlaneEstimationRansacMaxIter(int maxIter)
826 {
827  m_depthNormalPclPlaneEstimationRansacMaxIter = maxIter;
828 
829  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
830  it != m_depthNormalFaces.end(); ++it) {
831  (*it)->setPclPlaneEstimationRansacMaxIter(maxIter);
832  }
833 }
834 
835 void vpMbDepthNormalTracker::setDepthNormalPclPlaneEstimationRansacThreshold(double threshold)
836 {
837  m_depthNormalPclPlaneEstimationRansacThreshold = threshold;
838 
839  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
840  it != m_depthNormalFaces.end(); ++it) {
841  (*it)->setPclPlaneEstimationRansacThreshold(threshold);
842  }
843 }
844 
845 void vpMbDepthNormalTracker::setDepthNormalSamplingStep(unsigned int stepX, unsigned int stepY)
846 {
847  if (stepX == 0 || stepY == 0) {
848  std::cerr << "stepX and stepY must be greater than zero!" << std::endl;
849  return;
850  }
851 
852  m_depthNormalSamplingStepX = stepX;
853  m_depthNormalSamplingStepY = stepY;
854 }
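// The sampling steps control how densely each face region is sampled when extracting the
// depth feature: with the constructor defaults stepX = stepY = 2, roughly one depth point
// out of four is considered.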
855 
856 // void vpMbDepthNormalTracker::setDepthNormalUseRobust(bool use) {
857 // m_depthNormalUseRobust = use;
858 //}
859 
861 {
862  throw vpException(vpException::fatalError, "Cannot track with a grayscale image!");
863 }
864 
865 void vpMbDepthNormalTracker::track(const vpImage<vpRGBa> &)
866 {
867  throw vpException(vpException::fatalError, "Cannot track with a color image!");
868 }
869 
870 #ifdef VISP_HAVE_PCL
871 void vpMbDepthNormalTracker::track(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud)
872 {
873  segmentPointCloud(point_cloud);
874 
875  computeVVS();
876 
877  computeVisibility(point_cloud->width, point_cloud->height);
878 }
879 #endif
880 
881 void vpMbDepthNormalTracker::track(const std::vector<vpColVector> &point_cloud, unsigned int width, unsigned int height)
882 {
883  segmentPointCloud(point_cloud, width, height);
884 
885  computeVVS();
886 
887  computeVisibility(width, height);
888 }
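/*
 * Usage sketch for the PCL-free code path (illustrative only): the caller supplies an
 * organized point cloud assumed here to contain width*height entries of 3D coordinates
 * (X, Y, Z in meters, camera frame) in row-major pixel order; the sizes and the way the
 * vector is filled are placeholders.
 *
 *   vpMbDepthNormalTracker tracker;        // configured and initialized as in the sketch at the top of this file
 *   std::vector<vpColVector> point_cloud;  // one 3-vector per depth pixel
 *   unsigned int width = 640, height = 480;
 *   // ... fill point_cloud from the depth map ...
 *   tracker.track(point_cloud, width, height);
 *   vpHomogeneousMatrix cMo = tracker.getPose();
 */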
889 
890 void vpMbDepthNormalTracker::initCircle(const vpPoint & /*p1*/, const vpPoint & /*p2*/, const vpPoint & /*p3*/,
891  double /*radius*/, int /*idFace*/, const std::string & /*name*/)
892 {
893  throw vpException(vpException::fatalError, "vpMbDepthNormalTracker::initCircle() should not be called!");
894 }
895 
896 void vpMbDepthNormalTracker::initCylinder(const vpPoint & /*p1*/, const vpPoint & /*p2*/, double /*radius*/,
897  int /*idFace*/, const std::string & /*name*/)
898 {
899  throw vpException(vpException::fatalError, "vpMbDepthNormalTracker::initCylinder() should not be called!");
900 }
901 
903 