Visual Servoing Platform version 3.5.1 under development (2023-09-22)
vpMbDepthNormalTracker.cpp
1 /****************************************************************************
2  *
3  * ViSP, open source Visual Servoing Platform software.
4  * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
5  *
6  * This software is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  * See the file LICENSE.txt at the root directory of this source
11  * distribution for additional information about the GNU GPL.
12  *
13  * For using ViSP with software that can not be combined with the GNU
14  * GPL, please contact Inria about acquiring a ViSP Professional
15  * Edition License.
16  *
17  * See https://visp.inria.fr for more information.
18  *
19  * This software was developed at:
20  * Inria Rennes - Bretagne Atlantique
21  * Campus Universitaire de Beaulieu
22  * 35042 Rennes Cedex
23  * France
24  *
25  * If you have questions regarding the use of this file, please contact
26  * Inria at visp@inria.fr
27  *
28  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
29  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
30  *
31  * Description:
32  * Model-based tracker using depth normal features.
33  *
34 *****************************************************************************/
35 
36 #include <iostream>
37 
38 #include <visp3/core/vpConfig.h>
39 
40 #ifdef VISP_HAVE_PCL
41 #include <pcl/point_cloud.h>
42 #endif
43 
44 #include <visp3/core/vpDisplay.h>
45 #include <visp3/core/vpExponentialMap.h>
46 #include <visp3/core/vpTrackingException.h>
47 #include <visp3/mbt/vpMbDepthNormalTracker.h>
48 #include <visp3/mbt/vpMbtXmlGenericParser.h>
49 
50 #if DEBUG_DISPLAY_DEPTH_NORMAL
51 #include <visp3/gui/vpDisplayGDI.h>
52 #include <visp3/gui/vpDisplayX.h>
53 #endif
54 
55 vpMbDepthNormalTracker::vpMbDepthNormalTracker()
56  : m_depthNormalFeatureEstimationMethod(vpMbtFaceDepthNormal::ROBUST_FEATURE_ESTIMATION),
57  m_depthNormalHiddenFacesDisplay(), m_depthNormalListOfActiveFaces(), m_depthNormalListOfDesiredFeatures(),
58  m_depthNormalFaces(), m_depthNormalPclPlaneEstimationMethod(2), m_depthNormalPclPlaneEstimationRansacMaxIter(200),
59  m_depthNormalPclPlaneEstimationRansacThreshold(0.001), m_depthNormalSamplingStepX(2), m_depthNormalSamplingStepY(2),
60  m_depthNormalUseRobust(false), m_error_depthNormal(), m_featuresToBeDisplayedDepthNormal(), m_L_depthNormal(),
61  m_robust_depthNormal(), m_w_depthNormal(), m_weightedError_depthNormal()
62 #if DEBUG_DISPLAY_DEPTH_NORMAL
63  ,
64  m_debugDisp_depthNormal(NULL), m_debugImage_depthNormal()
65 #endif
66 {
67 #ifdef VISP_HAVE_OGRE
68  faces.getOgreContext()->setWindowName("MBT Depth");
69 #endif
70 
71 #if defined(VISP_HAVE_X11) && DEBUG_DISPLAY_DEPTH_NORMAL
72  m_debugDisp_depthNormal = new vpDisplayX;
73 #elif defined(VISP_HAVE_GDI) && DEBUG_DISPLAY_DEPTH_NORMAL
74  m_debugDisp_depthNormal = new vpDisplayGDI;
75 #endif
76 }
77 
78 vpMbDepthNormalTracker::~vpMbDepthNormalTracker()
79 {
80  for (size_t i = 0; i < m_depthNormalFaces.size(); i++) {
81  delete m_depthNormalFaces[i];
82  }
83 }
84 
85 void vpMbDepthNormalTracker::addFace(vpMbtPolygon &polygon, bool alreadyClose)
86 {
87  if (polygon.nbpt < 3) {
88  return;
89  }
90 
91  // Copy hidden faces
92  m_depthNormalHiddenFacesDisplay = faces;
93 
94  vpMbtFaceDepthNormal *normal_face = new vpMbtFaceDepthNormal;
95  normal_face->m_hiddenFace = &faces;
96  normal_face->m_polygon = &polygon;
97  normal_face->m_cam = m_cam;
98  normal_face->m_useScanLine = useScanLine;
99  normal_face->m_clippingFlag = clippingFlag;
100  normal_face->m_distNearClip = distNearClip;
101  normal_face->m_distFarClip = distFarClip;
102  normal_face->setFeatureEstimationMethod(m_depthNormalFeatureEstimationMethod);
103  normal_face->setPclPlaneEstimationMethod(m_depthNormalPclPlaneEstimationMethod);
104  normal_face->setPclPlaneEstimationRansacMaxIter(m_depthNormalPclPlaneEstimationRansacMaxIter);
105  normal_face->setPclPlaneEstimationRansacThreshold(m_depthNormalPclPlaneEstimationRansacThreshold);
106 
107  // Add lines that compose the face
108  unsigned int nbpt = polygon.getNbPoint();
109  if (nbpt > 0) {
110  for (unsigned int i = 0; i < nbpt - 1; i++) {
111  normal_face->addLine(polygon.p[i], polygon.p[i + 1], &m_depthNormalHiddenFacesDisplay, m_rand, polygon.getIndex(),
112  polygon.getName());
113  }
114 
115  if (!alreadyClose) {
116  // Add last line that closes the face
117  normal_face->addLine(polygon.p[nbpt - 1], polygon.p[0], &m_depthNormalHiddenFacesDisplay, m_rand,
118  polygon.getIndex(), polygon.getName());
119  }
120  }
121 
122  // Construct a vpPlane in object frame
123  vpPoint pts[3];
124  pts[0] = polygon.p[0];
125  pts[1] = polygon.p[1];
126  pts[2] = polygon.p[2];
127  normal_face->m_planeObject = vpPlane(pts[0], pts[1], pts[2], vpPlane::object_frame);
128 
129  m_depthNormalFaces.push_back(normal_face);
130 }
131 
132 void vpMbDepthNormalTracker::computeVisibility(unsigned int width, unsigned int height)
133 {
134  bool changed = false;
135  faces.setVisible(width, height, m_cam, m_cMo, angleAppears, angleDisappears, changed);
136 
137  if (useScanLine) {
138  // if (clippingFlag <= 2) {
139  // cam.computeFov(width, height);
140  // }
141 
142  faces.computeClippedPolygons(m_cMo, m_cam);
143  faces.computeScanLineRender(m_cam, width, height);
144  }
145 
146  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
147  it != m_depthNormalFaces.end(); ++it) {
148  vpMbtFaceDepthNormal *face_normal = *it;
149  face_normal->computeVisibility();
150  }
151 }
152 
153 void vpMbDepthNormalTracker::computeVVS()
154 {
155  double normRes = 0;
156  double normRes_1 = -1;
157  unsigned int iter = 0;
158 
159  computeVVSInit();
160  unsigned int nb_features = (unsigned int)(3 * m_depthNormalListOfDesiredFeatures.size());
161 
162  vpColVector error_prev(nb_features);
163  vpMatrix LTL;
164  vpColVector LTR, v;
165 
166  double mu = m_initialMu;
167  vpHomogeneousMatrix cMo_prev;
168 
169  bool isoJoIdentity = m_isoJoIdentity; // Backup since it can be modified if L is not full rank
170  if (isoJoIdentity)
171  oJo.eye();
172 
174  vpMatrix L_true, LVJ_true;
175 
176  while (std::fabs(normRes_1 - normRes) > m_stopCriteriaEpsilon && (iter < m_maxIter)) {
177  computeVVSInteractionMatrixAndResidu();
178 
179  bool reStartFromLastIncrement = false;
180  computeVVSCheckLevenbergMarquardt(iter, m_error_depthNormal, error_prev, cMo_prev, mu, reStartFromLastIncrement);
181 
182  if (!reStartFromLastIncrement) {
183  if (m_depthNormalUseRobust)
184  computeVVSWeights(m_robust_depthNormal, m_error_depthNormal, m_w_depthNormal);
185 
186  if (computeCovariance) {
187  L_true = m_L_depthNormal;
188  if (!isoJoIdentity) {
189  cVo.buildFrom(m_cMo);
190  LVJ_true = (m_L_depthNormal * (cVo * oJo));
191  }
192  }
193 
194  // Compute DoF only once
195  if (iter == 0) {
196  // If all the 6 dof should be estimated, we check if the interaction
197  // matrix is full rank. If not we remove automatically the dof that
198  // cannot be estimated. This is particularly useful when considering
199  // circles (rank 5) and cylinders (rank 4)
200  if (isoJoIdentity) {
201  cVo.buildFrom(m_cMo);
202 
203  vpMatrix K; // kernel
204  unsigned int rank = (m_L_depthNormal * cVo).kernel(K);
205  if (rank == 0) {
206  throw vpException(vpException::fatalError, "Rank=0, cannot estimate the pose !");
207  }
208 
209  if (rank != 6) {
210  vpMatrix I; // Identity
211  I.eye(6);
212  oJo = I - K.AtA();
213 
214  isoJoIdentity = false;
215  }
216  }
217  }
218 
219  double num = 0.0, den = 0.0;
220  for (unsigned int i = 0; i < m_L_depthNormal.getRows(); i++) {
221  // Compute weighted errors and stop criteria
222  m_weightedError_depthNormal[i] = m_w_depthNormal[i] * m_error_depthNormal[i];
223  num += m_w_depthNormal[i] * vpMath::sqr(m_error_depthNormal[i]);
224  den += m_w_depthNormal[i];
225 
226  // weight interaction matrix
227  for (unsigned int j = 0; j < 6; j++) {
228  m_L_depthNormal[i][j] *= m_w_depthNormal[i];
229  }
230  }
231 
232  computeVVSPoseEstimation(isoJoIdentity, iter, m_L_depthNormal, LTL, m_weightedError_depthNormal,
233  m_error_depthNormal, error_prev, LTR, mu, v);
234 
235  cMo_prev = m_cMo;
236  m_cMo = vpExponentialMap::direct(v).inverse() * m_cMo;
237 
238  normRes_1 = normRes;
239  normRes = sqrt(num / den);
240  }
241 
242  iter++;
243  }
244 
245  computeCovarianceMatrixVVS(isoJoIdentity, m_w_depthNormal, cMo_prev, L_true, LVJ_true, m_error_depthNormal);
246 }
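For reference, each iteration of computeVVS() above is one robust, damped Gauss-Newton step on the pose: the residual and interaction matrix are weighted, a velocity twist v is solved for, and the pose is updated through the exponential map. A minimal standalone sketch of such a step is shown below (illustrative only, not part of this file: the helper name vvsStep, the default gains, and the simple mu*I damping are assumptions; in ViSP the actual solve, including rank handling, is done by vpMbTracker::computeVVSPoseEstimation()).

#include <visp3/core/vpColVector.h>
#include <visp3/core/vpExponentialMap.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpMatrix.h>

// One damped, weighted Gauss-Newton step: given the stacked interaction matrix L,
// the residual e = s - s*, and the robust weights w, compute the velocity twist v
// and update the pose, mirroring what one iteration of computeVVS() does.
vpHomogeneousMatrix vvsStep(vpMatrix L, vpColVector e, const vpColVector &w,
                            const vpHomogeneousMatrix &cMo, double lambda = 1.0, double mu = 0.01)
{
  // Weight the error vector and the rows of the interaction matrix
  for (unsigned int i = 0; i < L.getRows(); i++) {
    e[i] *= w[i];
    for (unsigned int j = 0; j < 6; j++) {
      L[i][j] *= w[i];
    }
  }

  // Damped normal equations: (L^T L + mu I) v = -lambda L^T e
  vpMatrix LTL = L.AtA();
  vpColVector LTR = L.t() * e;
  vpMatrix I6(6, 6);
  I6.eye();
  vpColVector v = (LTL + I6 * mu).pseudoInverse() * LTR;
  v *= -lambda;

  // Apply the computed velocity through the exponential map, as done at the end of each iteration
  return vpExponentialMap::direct(v).inverse() * cMo;
}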
247 
248 void vpMbDepthNormalTracker::computeVVSInit()
249 {
250  unsigned int nb_features = (unsigned int)(3 * m_depthNormalListOfDesiredFeatures.size());
251 
252  m_L_depthNormal.resize(nb_features, 6, false, false);
253  m_error_depthNormal.resize(nb_features, false);
254  m_weightedError_depthNormal.resize(nb_features, false);
255 
256  m_w_depthNormal.resize(nb_features, false);
257  m_w_depthNormal = 1;
258 
259  m_robust_depthNormal.setMinMedianAbsoluteDeviation(1e-3);
260 }
261 
262 void vpMbDepthNormalTracker::computeVVSInteractionMatrixAndResidu()
263 {
264  unsigned int cpt = 0;
265  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalListOfActiveFaces.begin();
266  it != m_depthNormalListOfActiveFaces.end(); ++it) {
267  vpMatrix L_face;
268  vpColVector features_face;
269  (*it)->computeInteractionMatrix(m_cMo, L_face, features_face);
270 
271  vpColVector face_error = features_face - m_depthNormalListOfDesiredFeatures[(size_t)cpt];
272 
273  m_error_depthNormal.insert(cpt * 3, face_error);
274  m_L_depthNormal.insert(L_face, cpt * 3, 0);
275 
276  cpt++;
277  }
278 }
279 
280 void vpMbDepthNormalTracker::display(const vpImage<unsigned char> &I, const vpHomogeneousMatrix &cMo,
281  const vpCameraParameters &cam, const vpColor &col, unsigned int thickness,
282  bool displayFullModel)
283 {
284  std::vector<std::vector<double> > models =
285  vpMbDepthNormalTracker::getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel);
286 
287  for (size_t i = 0; i < models.size(); i++) {
288  if (vpMath::equal(models[i][0], 0)) {
289  vpImagePoint ip1(models[i][1], models[i][2]);
290  vpImagePoint ip2(models[i][3], models[i][4]);
291  vpDisplay::displayLine(I, ip1, ip2, col, thickness);
292  }
293  }
294 
295  if (displayFeatures) {
296  std::vector<std::vector<double> > features = getFeaturesForDisplayDepthNormal();
297  for (size_t i = 0; i < features.size(); i++) {
298  vpImagePoint im_centroid(features[i][1], features[i][2]);
299  vpImagePoint im_extremity(features[i][3], features[i][4]);
300  bool desired = vpMath::equal(features[i][0], 2);
301  vpDisplay::displayArrow(I, im_centroid, im_extremity, desired ? vpColor::blue : vpColor::red, 4, 2, thickness);
302  }
303  }
304 }
305 
306 void vpMbDepthNormalTracker::display(const vpImage<vpRGBa> &I, const vpHomogeneousMatrix &cMo,
307  const vpCameraParameters &cam, const vpColor &col, unsigned int thickness,
308  bool displayFullModel)
309 {
310  std::vector<std::vector<double> > models =
311  vpMbDepthNormalTracker::getModelForDisplay(I.getWidth(), I.getHeight(), cMo, cam, displayFullModel);
312 
313  for (size_t i = 0; i < models.size(); i++) {
314  if (vpMath::equal(models[i][0], 0)) {
315  vpImagePoint ip1(models[i][1], models[i][2]);
316  vpImagePoint ip2(models[i][3], models[i][4]);
317  vpDisplay::displayLine(I, ip1, ip2, col, thickness);
318  }
319  }
320 
321  if (displayFeatures) {
322  std::vector<std::vector<double> > features = getFeaturesForDisplayDepthNormal();
323  for (size_t i = 0; i < features.size(); i++) {
324  vpImagePoint im_centroid(features[i][1], features[i][2]);
325  vpImagePoint im_extremity(features[i][3], features[i][4]);
326  bool desired = vpMath::equal(features[i][0], 2);
327  vpDisplay::displayArrow(I, im_centroid, im_extremity, desired ? vpColor::blue : vpColor::red, 4, 2, thickness);
328  }
329  }
330 }
331 
332 std::vector<std::vector<double> > vpMbDepthNormalTracker::getFeaturesForDisplayDepthNormal()
333 {
334  std::vector<std::vector<double> > features;
335 
336  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
337  it != m_depthNormalFaces.end(); ++it) {
338  vpMbtFaceDepthNormal *face_normal = *it;
339  std::vector<std::vector<double> > currentFeatures = face_normal->getFeaturesForDisplay(m_cMo, m_cam);
340  features.insert(features.end(), currentFeatures.begin(), currentFeatures.end());
341  }
342 
343  return features;
344 }
345 
361 std::vector<std::vector<double> > vpMbDepthNormalTracker::getModelForDisplay(unsigned int width, unsigned int height,
362  const vpHomogeneousMatrix &cMo,
363  const vpCameraParameters &cam,
364  bool displayFullModel)
365 {
366  std::vector<std::vector<double> > models;
367 
368  vpCameraParameters c = cam;
369 
370  bool changed = false;
371  m_depthNormalHiddenFacesDisplay.setVisible(width, height, c, cMo, angleAppears, angleDisappears, changed);
372 
373  if (useScanLine) {
374  c.computeFov(width, height);
375 
375 
376  m_depthNormalHiddenFacesDisplay.computeClippedPolygons(cMo, c);
377  m_depthNormalHiddenFacesDisplay.computeScanLineRender(c, width, height);
378  }
379 
380  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
381  it != m_depthNormalFaces.end(); ++it) {
382  vpMbtFaceDepthNormal *face_normal = *it;
383  std::vector<std::vector<double> > modelLines =
384  face_normal->getModelForDisplay(width, height, cMo, cam, displayFullModel);
385  models.insert(models.end(), modelLines.begin(), modelLines.end());
386  }
387 
388  return models;
389 }
390 
391 void vpMbDepthNormalTracker::init(const vpImage<unsigned char> &I)
392 {
393  if (!modelInitialised) {
394  throw vpException(vpException::fatalError, "model not initialized");
395  }
396 
397  bool reInitialisation = false;
398  if (!useOgre) {
399  faces.setVisible(I.getWidth(), I.getHeight(), m_cam, m_cMo, angleAppears, angleDisappears, reInitialisation);
400  } else {
401 #ifdef VISP_HAVE_OGRE
402  if (!faces.isOgreInitialised()) {
403  faces.setBackgroundSizeOgre(I.getHeight(), I.getWidth());
404  faces.setOgreShowConfigDialog(ogreShowConfigDialog);
405  faces.initOgre(m_cam);
406  // Turn off Ogre config dialog display for the next call to this
407  // function since settings are saved in the ogre.cfg file and used
408  // during the next call
409  ogreShowConfigDialog = false;
410  }
411 
412  faces.setVisibleOgre(I.getWidth(), I.getHeight(), m_cam, m_cMo, angleAppears, angleDisappears, reInitialisation);
413 #else
414  faces.setVisible(I.getWidth(), I.getHeight(), m_cam, m_cMo, angleAppears, angleDisappears, reInitialisation);
415 #endif
416  }
417 
418  if (useScanLine || clippingFlag > 3)
419  m_cam.computeFov(I.getWidth(), I.getHeight());
420 
421  computeVisibility(I.getWidth(), I.getHeight());
422 }
423 
424 void vpMbDepthNormalTracker::loadConfigFile(const std::string &configFile, bool verbose)
425 {
426  vpMbtXmlGenericParser xmlp(vpMbtXmlGenericParser::DEPTH_NORMAL_PARSER);
427  xmlp.setVerbose(verbose);
428  xmlp.setCameraParameters(m_cam);
429  xmlp.setAngleAppear(vpMath::deg(angleAppears));
430  xmlp.setAngleDisappear(vpMath::deg(angleDisappears));
431 
432  xmlp.setDepthNormalFeatureEstimationMethod(m_depthNormalFeatureEstimationMethod);
433  xmlp.setDepthNormalPclPlaneEstimationMethod(m_depthNormalPclPlaneEstimationMethod);
434  xmlp.setDepthNormalPclPlaneEstimationRansacMaxIter(m_depthNormalPclPlaneEstimationRansacMaxIter);
435  xmlp.setDepthNormalPclPlaneEstimationRansacThreshold(m_depthNormalPclPlaneEstimationRansacThreshold);
436  xmlp.setDepthNormalSamplingStepX(m_depthNormalSamplingStepX);
437  xmlp.setDepthNormalSamplingStepY(m_depthNormalSamplingStepY);
438 
439  try {
440  if (verbose) {
441  std::cout << " *********** Parsing XML for Mb Depth Tracker ************ " << std::endl;
442  }
443  xmlp.parse(configFile);
444  } catch (const vpException &e) {
445  std::cerr << "Exception: " << e.what() << std::endl;
446  throw vpException(vpException::ioError, "Cannot open XML file \"%s\"", configFile.c_str());
447  }
448 
449  vpCameraParameters camera;
450  xmlp.getCameraParameters(camera);
451  setCameraParameters(camera);
452 
453  angleAppears = vpMath::rad(xmlp.getAngleAppear());
454  angleDisappears = vpMath::rad(xmlp.getAngleDisappear());
455 
456  if (xmlp.hasNearClippingDistance())
457  setNearClippingDistance(xmlp.getNearClippingDistance());
458 
459  if (xmlp.hasFarClippingDistance())
460  setFarClippingDistance(xmlp.getFarClippingDistance());
461 
462  if (xmlp.getFovClipping())
463  setClipping(clippingFlag | vpPolygon3D::FOV_CLIPPING);
464 
465  setDepthNormalFeatureEstimationMethod(xmlp.getDepthNormalFeatureEstimationMethod());
466  setDepthNormalPclPlaneEstimationMethod(xmlp.getDepthNormalPclPlaneEstimationMethod());
467  setDepthNormalPclPlaneEstimationRansacMaxIter(xmlp.getDepthNormalPclPlaneEstimationRansacMaxIter());
468  setDepthNormalPclPlaneEstimationRansacThreshold(xmlp.getDepthNormalPclPlaneEstimationRansacThreshold());
469  setDepthNormalSamplingStep(xmlp.getDepthNormalSamplingStepX(), xmlp.getDepthNormalSamplingStepY());
470 }
471 
472 void vpMbDepthNormalTracker::reInitModel(const vpImage<unsigned char> &I, const std::string &cad_name,
473  const vpHomogeneousMatrix &cMo, bool verbose)
474 {
475  m_cMo.eye();
476 
477  for (size_t i = 0; i < m_depthNormalFaces.size(); i++) {
478  delete m_depthNormalFaces[i];
479  m_depthNormalFaces[i] = NULL;
480  }
481 
482  m_depthNormalFaces.clear();
483 
484  loadModel(cad_name, verbose);
485  initFromPose(I, cMo);
486 }
487 
488 #if defined(VISP_HAVE_PCL)
489 void vpMbDepthNormalTracker::reInitModel(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud,
490  const std::string &cad_name, const vpHomogeneousMatrix &cMo, bool verbose)
491 {
492  vpImage<unsigned char> I_dummy(point_cloud->height, point_cloud->width);
493  reInitModel(I_dummy, cad_name, cMo, verbose);
494 }
495 
496 #endif
497 
498 void vpMbDepthNormalTracker::resetTracker()
499 {
500  m_cMo.eye();
501 
502  for (std::vector<vpMbtFaceDepthNormal *>::iterator it = m_depthNormalFaces.begin(); it != m_depthNormalFaces.end();
503  ++it) {
504  vpMbtFaceDepthNormal *normal_face = *it;
505  delete normal_face;
506  normal_face = NULL;
507  }
508 
509  m_depthNormalFaces.clear();
510 
511  m_computeInteraction = true;
512  computeCovariance = false;
513 
516 
518 
519  m_lambda = 1.0;
520 
521  faces.reset();
522 
524 
525  useScanLine = false;
526 
527 #ifdef VISP_HAVE_OGRE
528  useOgre = false;
529 #endif
530 
533 }
534 
535 void vpMbDepthNormalTracker::setOgreVisibilityTest(const bool &v)
536 {
537  vpMbTracker::setOgreVisibilityTest(v);
538 #ifdef VISP_HAVE_OGRE
539  faces.getOgreContext()->setWindowName("MBT Depth");
540 #endif
541 }
542 
543 void vpMbDepthNormalTracker::setPose(const vpImage<unsigned char> &I, const vpHomogeneousMatrix &cdMo)
544 {
545  m_cMo = cdMo;
546  init(I);
547 }
548 
549 void vpMbDepthNormalTracker::setPose(const vpImage<vpRGBa> &I_color, const vpHomogeneousMatrix &cdMo)
550 {
551  m_cMo = cdMo;
552  vpImageConvert::convert(I_color, m_I);
553  init(m_I);
554 }
555 
556 #if defined(VISP_HAVE_PCL)
557 void vpMbDepthNormalTracker::setPose(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud,
558  const vpHomogeneousMatrix &cdMo)
559 {
560  vpImage<unsigned char> I_dummy(point_cloud->height, point_cloud->width);
561  m_cMo = cdMo;
562  init(I_dummy);
563 }
564 #endif
565 
566 void vpMbDepthNormalTracker::setScanLineVisibilityTest(const bool &v)
567 {
568  vpMbTracker::setScanLineVisibilityTest(v);
569 
570  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
571  it != m_depthNormalFaces.end(); ++it) {
572  (*it)->setScanLineVisibilityTest(v);
573  }
574 }
575 
576 void vpMbDepthNormalTracker::setUseDepthNormalTracking(const std::string &name, const bool &useDepthNormalTracking)
577 {
578  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
579  it != m_depthNormalFaces.end(); ++it) {
580  vpMbtFaceDepthNormal *face = *it;
581  if (face->m_polygon->getName() == name) {
582  face->setTracked(useDepthNormalTracking);
583  }
584  }
585 }
586 
588 
589 #ifdef VISP_HAVE_PCL
590 void vpMbDepthNormalTracker::segmentPointCloud(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud)
591 {
592  m_depthNormalListOfActiveFaces.clear();
593  m_depthNormalListOfDesiredFeatures.clear();
594 
595 #if DEBUG_DISPLAY_DEPTH_NORMAL
596  if (!m_debugDisp_depthNormal->isInitialised()) {
597  m_debugImage_depthNormal.resize(point_cloud->height, point_cloud->width);
598  m_debugDisp_depthNormal->init(m_debugImage_depthNormal, 50, 0, "Debug display normal depth tracker");
599  }
600 
601  m_debugImage_depthNormal = 0;
602  std::vector<std::vector<vpImagePoint> > roiPts_vec;
603 #endif
604 
605  for (std::vector<vpMbtFaceDepthNormal *>::iterator it = m_depthNormalFaces.begin(); it != m_depthNormalFaces.end();
606  ++it) {
607  vpMbtFaceDepthNormal *face = *it;
608 
609  if (face->isVisible() && face->isTracked()) {
610  vpColVector desired_features;
611 
612 #if DEBUG_DISPLAY_DEPTH_NORMAL
613  std::vector<std::vector<vpImagePoint> > roiPts_vec_;
614 #endif
615  if (face->computeDesiredFeatures(m_cMo, point_cloud->width, point_cloud->height, point_cloud, desired_features,
616  m_depthNormalSamplingStepX, m_depthNormalSamplingStepY
617 #if DEBUG_DISPLAY_DEPTH_NORMAL
618  ,
619  m_debugImage_depthNormal, roiPts_vec_
620 #endif
621  ,
622  m_mask)) {
623  m_depthNormalListOfDesiredFeatures.push_back(desired_features);
624  m_depthNormalListOfActiveFaces.push_back(face);
625 
626 #if DEBUG_DISPLAY_DEPTH_NORMAL
627  roiPts_vec.insert(roiPts_vec.end(), roiPts_vec_.begin(), roiPts_vec_.end());
628 #endif
629  }
630  }
631  }
632 
633 #if DEBUG_DISPLAY_DEPTH_NORMAL
634  vpDisplay::display(m_debugImage_depthNormal);
635 
636  for (size_t i = 0; i < roiPts_vec.size(); i++) {
637  if (roiPts_vec[i].empty())
638  continue;
639 
640  for (size_t j = 0; j < roiPts_vec[i].size() - 1; j++) {
641  vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][j], roiPts_vec[i][j + 1], vpColor::red, 2);
642  }
643  vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][0], roiPts_vec[i][roiPts_vec[i].size() - 1],
644  vpColor::red, 2);
645  }
646 
647  vpDisplay::flush(m_debugImage_depthNormal);
648 #endif
649 }
650 #endif
651 
652 void vpMbDepthNormalTracker::segmentPointCloud(const std::vector<vpColVector> &point_cloud, unsigned int width,
653  unsigned int height)
654 {
655  m_depthNormalListOfActiveFaces.clear();
656  m_depthNormalListOfDesiredFeatures.clear();
657 
658 #if DEBUG_DISPLAY_DEPTH_NORMAL
659  if (!m_debugDisp_depthNormal->isInitialised()) {
660  m_debugImage_depthNormal.resize(height, width);
661  m_debugDisp_depthNormal->init(m_debugImage_depthNormal, 50, 0, "Debug display normal depth tracker");
662  }
663 
664  m_debugImage_depthNormal = 0;
665  std::vector<std::vector<vpImagePoint> > roiPts_vec;
666 #endif
667 
668  for (std::vector<vpMbtFaceDepthNormal *>::iterator it = m_depthNormalFaces.begin(); it != m_depthNormalFaces.end();
669  ++it) {
670  vpMbtFaceDepthNormal *face = *it;
671 
672  if (face->isVisible() && face->isTracked()) {
673  vpColVector desired_features;
674 
675 #if DEBUG_DISPLAY_DEPTH_NORMAL
676  std::vector<std::vector<vpImagePoint> > roiPts_vec_;
677 #endif
678 
679  if (face->computeDesiredFeatures(m_cMo, width, height, point_cloud, desired_features, m_depthNormalSamplingStepX,
680  m_depthNormalSamplingStepY
681 #if DEBUG_DISPLAY_DEPTH_NORMAL
682  ,
683  m_debugImage_depthNormal, roiPts_vec_
684 #endif
685  ,
686  m_mask)) {
687  m_depthNormalListOfDesiredFeatures.push_back(desired_features);
688  m_depthNormalListOfActiveFaces.push_back(face);
689 
690 #if DEBUG_DISPLAY_DEPTH_NORMAL
691  roiPts_vec.insert(roiPts_vec.end(), roiPts_vec_.begin(), roiPts_vec_.end());
692 #endif
693  }
694  }
695  }
696 
697 #if DEBUG_DISPLAY_DEPTH_NORMAL
698  vpDisplay::display(m_debugImage_depthNormal);
699 
700  for (size_t i = 0; i < roiPts_vec.size(); i++) {
701  if (roiPts_vec[i].empty())
702  continue;
703 
704  for (size_t j = 0; j < roiPts_vec[i].size() - 1; j++) {
705  vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][j], roiPts_vec[i][j + 1], vpColor::red, 2);
706  }
707  vpDisplay::displayLine(m_debugImage_depthNormal, roiPts_vec[i][0], roiPts_vec[i][roiPts_vec[i].size() - 1],
708  vpColor::red, 2);
709  }
710 
711  vpDisplay::flush(m_debugImage_depthNormal);
712 #endif
713 }
714 
715 void vpMbDepthNormalTracker::setCameraParameters(const vpCameraParameters &cam)
716 {
717  m_cam = cam;
718 
719  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
720  it != m_depthNormalFaces.end(); ++it) {
721  (*it)->setCameraParameters(cam);
722  }
723 }
724 
725 void vpMbDepthNormalTracker::setDepthNormalFaceCentroidMethod(const vpMbtFaceDepthNormal::vpFaceCentroidType &method)
726 {
727  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
728  it != m_depthNormalFaces.end(); ++it) {
729  (*it)->setFaceCentroidMethod(method);
730  }
731 }
732 
733 void vpMbDepthNormalTracker::setDepthNormalFeatureEstimationMethod(
734  const vpMbtFaceDepthNormal::vpFeatureEstimationType &method)
735 {
736  m_depthNormalFeatureEstimationMethod = method;
737 
738  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
739  it != m_depthNormalFaces.end(); ++it) {
740  (*it)->setFeatureEstimationMethod(method);
741  }
742 }
743 
744 void vpMbDepthNormalTracker::setDepthNormalPclPlaneEstimationMethod(int method)
745 {
746  m_depthNormalPclPlaneEstimationMethod = method;
747 
748  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
749  it != m_depthNormalFaces.end(); ++it) {
750  (*it)->setPclPlaneEstimationMethod(method);
751  }
752 }
753 
754 void vpMbDepthNormalTracker::setDepthNormalPclPlaneEstimationRansacMaxIter(int maxIter)
755 {
756  m_depthNormalPclPlaneEstimationRansacMaxIter = maxIter;
757 
758  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
759  it != m_depthNormalFaces.end(); ++it) {
760  (*it)->setPclPlaneEstimationRansacMaxIter(maxIter);
761  }
762 }
763 
764 void vpMbDepthNormalTracker::setDepthNormalPclPlaneEstimationRansacThreshold(double threshold)
765 {
766  m_depthNormalPclPlaneEstimationRansacThreshold = threshold;
767 
768  for (std::vector<vpMbtFaceDepthNormal *>::const_iterator it = m_depthNormalFaces.begin();
769  it != m_depthNormalFaces.end(); ++it) {
770  (*it)->setPclPlaneEstimationRansacThreshold(threshold);
771  }
772 }
773 
774 void vpMbDepthNormalTracker::setDepthNormalSamplingStep(unsigned int stepX, unsigned int stepY)
775 {
776  if (stepX == 0 || stepY == 0) {
777  std::cerr << "stepX and stepY must be greater than zero!" << std::endl;
778  return;
779  }
780 
781  m_depthNormalSamplingStepX = stepX;
782  m_depthNormalSamplingStepY = stepY;
783 }
784 
785 // void vpMbDepthNormalTracker::setDepthNormalUseRobust(bool use) {
786 // m_depthNormalUseRobust = use;
787 //}
788 
789 void vpMbDepthNormalTracker::track(const vpImage<unsigned char> &)
790 {
791  throw vpException(vpException::fatalError, "Cannot track with a grayscale image!");
792 }
793 
794 void vpMbDepthNormalTracker::track(const vpImage<vpRGBa> &)
795 {
796  throw vpException(vpException::fatalError, "Cannot track with a color image!");
797 }
798 
799 #ifdef VISP_HAVE_PCL
800 void vpMbDepthNormalTracker::track(const pcl::PointCloud<pcl::PointXYZ>::ConstPtr &point_cloud)
801 {
802  segmentPointCloud(point_cloud);
803 
804  computeVVS();
805 
806  computeVisibility(point_cloud->width, point_cloud->height);
807 }
808 #endif
809 
810 void vpMbDepthNormalTracker::track(const std::vector<vpColVector> &point_cloud, unsigned int width, unsigned int height)
811 {
812  segmentPointCloud(point_cloud, width, height);
813 
814  computeVVS();
815 
816  computeVisibility(width, height);
817 }
818 
819 void vpMbDepthNormalTracker::initCircle(const vpPoint & /*p1*/, const vpPoint & /*p2*/, const vpPoint & /*p3*/,
820  double /*radius*/, int /*idFace*/, const std::string & /*name*/)
821 {
822  throw vpException(vpException::fatalError, "vpMbDepthNormalTracker::initCircle() should not be called!");
823 }
824 
825 void vpMbDepthNormalTracker::initCylinder(const vpPoint & /*p1*/, const vpPoint & /*p2*/, double /*radius*/,
826  int /*idFace*/, const std::string & /*name*/)
827 {
828  throw vpException(vpException::fatalError, "vpMbDepthNormalTracker::initCylinder() should not be called!");
829 }
830 
831 void vpMbDepthNormalTracker::initFaceFromCorners(vpMbtPolygon &polygon) { addFace(polygon, false); }
832 void vpMbDepthNormalTracker::initFaceFromLines(vpMbtPolygon &polygon) { addFace(polygon, true); }
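For orientation, a minimal usage sketch of this tracker with a generic point cloud is given below. It only calls the vpMbDepthNormalTracker API defined in this file; the camera intrinsics, model file name, initial pose, and the point-cloud filling are placeholder assumptions, not part of the source above.

#include <vector>
#include <visp3/core/vpCameraParameters.h>
#include <visp3/core/vpColVector.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpImage.h>
#include <visp3/mbt/vpMbDepthNormalTracker.h>

int main()
{
  // Hypothetical depth camera resolution and intrinsics (placeholders)
  unsigned int width = 640, height = 480;
  vpCameraParameters cam(600.0, 600.0, 320.0, 240.0);

  vpMbDepthNormalTracker tracker;
  tracker.setCameraParameters(cam);
  tracker.setDepthNormalSamplingStep(2, 2);

  // Load the CAD model and set the initial pose (file name and pose are placeholders)
  tracker.loadModel("model.cao");
  vpImage<unsigned char> I(height, width); // dummy image, only used for visibility tests
  vpHomogeneousMatrix cMo(0, 0, 0.5, 0, 0, 0);
  tracker.initFromPose(I, cMo);

  // One point per pixel: each vpColVector holds the 3D coordinates (X, Y, Z) in the camera frame
  std::vector<vpColVector> point_cloud(width * height, vpColVector(3));
  // ... fill point_cloud from the depth sensor here ...

  tracker.track(point_cloud, width, height);
  tracker.getPose(cMo); // updated pose of the object in the camera frame

  return 0;
}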