vpRBTracker.cpp
/*
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2024 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See https://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <visp3/rbt/vpRBTracker.h>

#if defined(VISP_HAVE_NLOHMANN_JSON)
#include VISP_NLOHMANN_JSON(json.hpp)
#endif

#include <visp3/core/vpExponentialMap.h>
#include <visp3/core/vpIoTools.h>

#include <visp3/ar/vpPanda3DRendererSet.h>
#include <visp3/ar/vpPanda3DGeometryRenderer.h>
#include <visp3/ar/vpPanda3DRGBRenderer.h>

#include <visp3/rbt/vpRBFeatureTrackerFactory.h>
#include <visp3/rbt/vpRBDriftDetectorFactory.h>
#include <visp3/rbt/vpObjectMaskFactory.h>
#include <visp3/rbt/vpRBVisualOdometry.h>
#include <visp3/rbt/vpRBInitializationHelper.h>

BEGIN_VISP_NAMESPACE
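
// Default state: 640x480 images, at most 10 VVS iterations with gain
// lambda = 1.0, and Levenberg-Marquardt damping disabled (mu = 0, scaled by
// 0.5 at each iteration). The drift detector, object mask and visual odometry
// are left unset until provided through configuration.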
51 
53  m_firstIteration(true), m_trackers(0), m_lambda(1.0), m_vvsIterations(10), m_muInit(0.0), m_muIterFactor(0.5),
54  m_renderer(m_rendererSettings), m_imageHeight(480), m_imageWidth(640), m_verbose(false)
55 {
58 
59  m_driftDetector = nullptr;
60  m_mask = nullptr;
61  m_odometry = nullptr;
62 }
63 
void vpRBTracker::getPose(vpHomogeneousMatrix &cMo) const
{
  cMo = m_cMo;
}

void vpRBTracker::setPose(const vpHomogeneousMatrix &cMo)
{
  m_cMo = cMo;
  m_cMoPrev = cMo;
  m_renderer.setCameraPose(cMo.inverse());
}

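// Pose covariance: fuse the per-tracker 6x6 covariances as a weighted sum of
// their pseudo-inverses, pseudo-invert the sum, and rescale by the total
// weight. Trackers with no active features are skipped.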
vpMatrix vpRBTracker::getCovariance() const
{
  vpMatrix sumInvs(6, 6, 0.0);
  double sumWeights = 0.0;
  for (const std::shared_ptr<vpRBFeatureTracker> &tracker: m_trackers) {
    if (tracker->getNumFeatures() == 0) {
      continue;
    }
    tracker->updateCovariance(m_lambda);
    vpMatrix trackerCov = tracker->getCovariance();
    double trackerWeight = tracker->getVVSTrackerWeight();
    if (trackerCov.getRows() != 6 || trackerCov.getCols() != 6) {
      throw vpException(vpException::dimensionError,
        "Expected tracker pose covariance to have dimensions 6x6, but got %dx%d",
        trackerCov.getRows(), trackerCov.getCols());
    }

    sumInvs += (trackerWeight * trackerCov.pseudoInverse());
    sumWeights += trackerWeight;
  }
  return sumWeights * sumInvs.pseudoInverse();
}

vpCameraParameters vpRBTracker::getCameraParameters() const { return m_cam; }

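// Only a perspective projection model without distortion is supported, since
// the renders assume an ideal pinhole camera: images acquired with a
// distorting lens should be undistorted before being passed to track().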
void vpRBTracker::setCameraParameters(const vpCameraParameters &cam, unsigned h, unsigned w)
{
  if (cam.get_projModel() != vpCameraParameters::vpCameraParametersProjType::perspectiveProjWithoutDistortion) {
    throw vpException(vpException::badValue,
      "Camera model cannot have distortion. Undistort images before tracking and use the undistorted camera model");
  }
  if (h == 0 || w == 0) {
    throw vpException(
      vpException::badValue,
      "Image dimensions must be greater than 0"
    );
  }
  m_cam = cam;
  m_imageHeight = h;
  m_imageWidth = w;
  m_rendererSettings.setCameraIntrinsics(m_cam);
  m_rendererSettings.setImageResolution(m_imageHeight, m_imageWidth);
  m_renderer.setRenderParameters(m_rendererSettings);
}

void vpRBTracker::setSilhouetteExtractionParameters(const vpSilhouettePointsExtractionSettings &settings)
{
  m_depthSilhouetteSettings = settings;
}

void vpRBTracker::reset()
{
  m_firstIteration = true;
}

void vpRBTracker::setModelPath(const std::string &path)
{
  m_modelPath = path;
}

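// Builds the Panda3D render graph for the given model file: a geometry
// renderer producing object-space normals and depth is always added, while a
// depth-based Canny filter is added only when at least one feature tracker
// requires silhouette candidates.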
void vpRBTracker::setupRenderer(const std::string &file)
{
  if (!vpIoTools::checkFilename(file)) {
    throw vpException(vpException::badValue, "3D model file %s could not be found", file.c_str());
  }

  const std::shared_ptr<vpPanda3DGeometryRenderer> geometryRenderer = std::make_shared<vpPanda3DGeometryRenderer>(
    vpPanda3DGeometryRenderer::vpRenderType::OBJECT_NORMALS);
  m_renderer.addSubRenderer(geometryRenderer);

  bool requiresSilhouetteShader = false;
  for (std::shared_ptr<vpRBFeatureTracker> &tracker: m_trackers) {
    if (tracker->requiresSilhouetteCandidates()) {
      requiresSilhouetteShader = true;
      break;
    }
  }
  if (requiresSilhouetteShader) {
    m_renderer.addSubRenderer(std::make_shared<vpPanda3DDepthCannyFilter>(
      "depthCanny", geometryRenderer, true, 0.0));
  }
  m_renderer.initFramework();
  m_renderer.addLight(vpPanda3DAmbientLight("ambient", vpRGBf(0.4f)));
  m_renderer.addNodeToScene(m_renderer.loadObject("object", file));
  m_renderer.setFocusedObject("object");
}

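// Grayscale-only tracking entry point: usable only when none of the registered
// feature trackers needs RGB or depth data. The overloads below accept the
// additional modalities.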
void vpRBTracker::track(const vpImage<unsigned char> &I)
{
  for (std::shared_ptr<vpRBFeatureTracker> tracker : m_trackers) {
    if (tracker->requiresDepth() || tracker->requiresRGB()) {
      throw vpException(vpException::badValue, "Some tracked features require RGB or depth data");
    }
  }
  checkDimensionsOrThrow(I, "grayscale");
  vpRBFeatureTrackerInput frameInput;
  frameInput.I = I;
  frameInput.cam = m_cam;
  track(frameInput);
}

void vpRBTracker::track(const vpImage<unsigned char> &I, const vpImage<vpRGBa> &IRGB)
{
  for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
    if (tracker->requiresDepth()) {
      throw vpException(vpException::badValue, "Some tracked features require depth data");
    }
  }
  checkDimensionsOrThrow(I, "grayscale");
  checkDimensionsOrThrow(IRGB, "color");
  vpRBFeatureTrackerInput frameInput;
  frameInput.I = I;
  frameInput.IRGB = IRGB;
  frameInput.cam = m_cam;
  track(frameInput);
}

void vpRBTracker::startTracking()
{
  setupRenderer(m_modelPath);
}

void vpRBTracker::track(const vpImage<unsigned char> &I, const vpImage<vpRGBa> &IRGB, const vpImage<float> &depth)
{
  checkDimensionsOrThrow(I, "grayscale");
  checkDimensionsOrThrow(IRGB, "color");
  checkDimensionsOrThrow(depth, "depth");
  vpRBFeatureTrackerInput frameInput;
  frameInput.I = I;
  frameInput.IRGB = IRGB;
  frameInput.depth = depth;
  frameInput.cam = m_cam;
  track(frameInput);
}

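// Core tracking loop, shared by all public track() overloads. For each frame:
// render the model at the current pose estimate, optionally update the object
// mask and the visual odometry, extract silhouette candidates when a tracker
// needs them, let every feature tracker extract and track its features, then
// refine cMo with an iterative VVS (virtual visual servoing) optimization.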
void vpRBTracker::track(vpRBFeatureTrackerInput &input)
{
  m_logger.reset();

  updateRender(input);

  if (m_firstIteration) {
    m_firstIteration = false;
    m_previousFrame.I = input.I;
    m_previousFrame.IRGB = input.IRGB;
  }

  if (m_mask) {
    m_mask->updateMask(input, m_previousFrame, input.mask);
  }

  bool requiresSilhouetteCandidates = false;
  for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
    if (tracker->requiresSilhouetteCandidates()) {
      requiresSilhouetteCandidates = true;
      break;
    }
  }

  if (requiresSilhouetteCandidates) {
    const vpHomogeneousMatrix cTcp = m_cMo * m_cMoPrev.inverse();
    input.silhouettePoints = extractSilhouettePoints(input.renders.normals, input.renders.depth,
      input.renders.silhouetteCanny, input.renders.isSilhouette, input.cam, cTcp);
    if (input.silhouettePoints.size() == 0) {
      throw vpException(vpException::badValue, "Could not extract silhouette from depth canny: Object may not be in image");
    }
  }

  if (m_odometry) {
    m_odometry->compute(input, m_previousFrame);
    vpHomogeneousMatrix cnTc = m_odometry->getCameraMotion();
    m_cMo = cnTc * m_cMo;
    updateRender(input);
  }

  if (requiresSilhouetteCandidates) {
    const vpHomogeneousMatrix cTcp = m_cMo * m_cMoPrev.inverse();
    input.silhouettePoints = extractSilhouettePoints(input.renders.normals, input.renders.depth,
      input.renders.silhouetteCanny, input.renders.isSilhouette, input.cam, cTcp);
    if (input.silhouettePoints.size() == 0) {
      throw vpException(vpException::badValue, "Could not extract silhouette from depth canny: Object may not be in image");
    }
  }

  int id = 0;
  for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
    tracker->onTrackingIterStart();
    id += 1;
  }
  id = 0;
  for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
    try {
      tracker->extractFeatures(input, m_previousFrame, m_cMo);
    }
    catch (vpException &) {
      std::cerr << "Tracker " << id << " raised an exception in extractFeatures" << std::endl;
      throw;
    }
    id += 1;
  }
  id = 0;
  for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
    try {
      tracker->trackFeatures(input, m_previousFrame, m_cMo);
    }
    catch (vpException &) {
      std::cerr << "Tracker " << id << " raised an exception in trackFeatures" << std::endl;
      throw;
    }
    id += 1;
  }

  id = 0;
  for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
    tracker->initVVS(input, m_previousFrame, m_cMo);
    //std::cout << "Tracker " << id << " has " << tracker->getNumFeatures() << " features" << std::endl;
    id += 1;
  }

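  // Levenberg-Marquardt-style refinement: each iteration accumulates the
  // weighted normal equations of all trackers and solves
  //   v = -lambda * (L^T L + mu * I)^+ * L^T r,
  // applies v to cMo through the exponential map, and scales mu by
  // m_muIterFactor.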
  m_cMoPrev = m_cMo;
  double bestError = std::numeric_limits<double>::max();
  vpHomogeneousMatrix best_cMo = m_cMo;
  double mu = m_muInit;
  for (unsigned int iter = 0; iter < m_vvsIterations; ++iter) {
    id = 0;
    for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
      try {
        tracker->computeVVSIter(input, m_cMo, iter);
      }
      catch (vpException &) {
        std::cerr << "Tracker " << id << " raised an exception in computeVVSIter" << std::endl;
        throw;
      }
      id += 1;
    }

    bool converged = true;
    for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
      if (!tracker->vvsHasConverged()) {
        converged = false;
        break;
      }
    }
    if (converged) {
      break;
    }

    vpMatrix LTL(6, 6, 0.0);
    vpColVector LTR(6, 0.0);
    double error = 0.0;
    unsigned int numFeatures = 0;

    for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
      if (tracker->getNumFeatures() > 0) {
        numFeatures += tracker->getNumFeatures();
        const double weight = tracker->getVVSTrackerWeight();
        LTL += weight * tracker->getLTL();
        LTR += weight * tracker->getLTR();
        error += (weight * tracker->getWeightedError()).sumSquare();
        //std::cout << "Error = " << (weight * tracker->getWeightedError()).sumSquare() << std::endl;
      }
    }

    if (numFeatures >= 6) {

      if (error < bestError) {
        bestError = error;
        best_cMo = m_cMo;
      }

      vpMatrix H(6, 6);
      H.eye(6);
      try {
        vpColVector v = -m_lambda * ((LTL + mu * H).pseudoInverse(LTL.getRows() * std::numeric_limits<double>::epsilon()) * LTR);
        m_cMo = vpExponentialMap::direct(v).inverse() * m_cMo;
      }
      catch (vpException &) {
        std::cerr << "Could not compute pseudo inverse" << std::endl;
      }
      mu *= m_muIterFactor;
    }
    else {
      return;
    }
  }

  //m_cMo = best_cMo;

  for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
    tracker->onTrackingIterEnd();
  }
  //m_cMo = m_kalman.filter(m_cMo, 1.0 / 20.0);
  if (m_currentFrame.I.getSize() == 0) {
    m_currentFrame = input;
    m_previousFrame = input;
  }
  else {
    m_previousFrame = std::move(m_currentFrame);
    m_currentFrame = std::move(input);
  }

  if (m_driftDetector) {
    m_driftDetector->update(m_previousFrame, m_currentFrame, m_cMo, m_cMoPrev);
  }

  if (m_verbose) {
    std::cout << m_logger << std::endl;
  }
}

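// Renders the object at the current pose and copies the Panda3D outputs
// (normals, depth and, when a silhouette is needed, the depth-Canny edges and
// silhouette validity mask) into the frame data consumed by the feature
// trackers. The texture readbacks may run as parallel OpenMP sections.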
void vpRBTracker::updateRender(vpRBFeatureTrackerInput &frame)
{
  m_renderer.setCameraPose(m_cMo.inverse());

  frame.renders.cMo = m_cMo;

  // Update clipping distances
  float clipNear, clipFar;
  m_renderer.computeClipping(clipNear, clipFar);
  frame.renders.zNear = std::max(0.001f, clipNear);
  frame.renders.zFar = clipFar;
  m_rendererSettings.setClippingDistance(frame.renders.zNear, frame.renders.zFar);
  m_renderer.setRenderParameters(m_rendererSettings);

  bool shouldRenderSilhouette = m_renderer.getRenderer<vpPanda3DDepthCannyFilter>() != nullptr;
  if (shouldRenderSilhouette) {
    // For silhouette extraction, update depth difference threshold
    double thresholdValue = m_depthSilhouetteSettings.getThreshold();
    if (m_depthSilhouetteSettings.thresholdIsRelative()) {
      m_renderer.getRenderer<vpPanda3DDepthCannyFilter>()->setEdgeThreshold((frame.renders.zFar - frame.renders.zNear) * thresholdValue);
    }
    else {
      m_renderer.getRenderer<vpPanda3DDepthCannyFilter>()->setEdgeThreshold(thresholdValue);
    }
  }

  // Call Panda renderer
  m_renderer.renderFrame();

  frame.renders.boundingBox = m_renderer.getBoundingBox();

  // Extract data from Panda textures
#ifdef VISP_HAVE_OPENMP
#pragma omp parallel sections
#endif
  {
#ifdef VISP_HAVE_OPENMP
#pragma omp section
#endif
    {
      m_renderer.getRenderer<vpPanda3DGeometryRenderer>()->getRender(
        frame.renders.normals,
        frame.renders.depth,
        frame.renders.boundingBox,
        m_imageHeight, m_imageWidth);
    }
#ifdef VISP_HAVE_OPENMP
#pragma omp section
#endif
    {
      if (shouldRenderSilhouette) {
        m_renderer.getRenderer<vpPanda3DDepthCannyFilter>()->getRender(
          frame.renders.silhouetteCanny,
          frame.renders.isSilhouette,
          frame.renders.boundingBox,
          m_imageHeight, m_imageWidth);
      }
    }
    // #pragma omp section
    // {
    //   vpImage<vpRGBa> colorRender;
    //   m_renderer.getRenderer<vpPanda3DRGBRenderer>()->getRender(colorRender);
    //   m_renderer.placeRenderInto(colorRender, frame.renders.color, vpRGBa(0));
    // }
  }
}

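// Turns silhouette-candidate pixels into vpRBSilhouettePoint samples:
// candidates with an undefined edge orientation or a degenerate rendered
// normal are discarded; the rest keep their pixel coordinates, object-space
// normal, edge orientation theta and rendered depth Z.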
std::vector<vpRBSilhouettePoint> vpRBTracker::extractSilhouettePoints(
  const vpImage<vpRGBf> &Inorm, const vpImage<float> &Idepth,
  const vpImage<vpRGBf> &silhouetteCanny, const vpImage<unsigned char> &Ivalid,
  const vpCameraParameters &cam, const vpHomogeneousMatrix &cTcp)
{
  std::vector<std::pair<unsigned int, unsigned int>> candidates =
    m_depthSilhouetteSettings.getSilhouetteCandidates(Ivalid, Idepth, cam, cTcp, m_previousFrame.silhouettePoints);

  std::vector<vpRBSilhouettePoint> points;
  vpColVector norm(3);

  for (unsigned int i = 0; i < candidates.size(); ++i) {
    unsigned int n = candidates[i].first, m = candidates[i].second;
    double theta = silhouetteCanny[n][m].B;
    if (std::isnan(theta)) {
      continue;
    }

    norm[0] = Inorm[n][m].R;
    norm[1] = Inorm[n][m].G;
    norm[2] = Inorm[n][m].B;
    const double l = std::sqrt(norm[0] * norm[0] + norm[1] * norm[1] + norm[2] * norm[2]);

    if (l > 1e-1) {
      const double Z = Idepth[n][m];
      //bool noNeighbor = true;
      // double nx = cos(theta);
      // double ny = sin(theta);
      // const double Zn = Idepth[static_cast<unsigned int>(round(n + ny * 1))][static_cast<unsigned int>(round(m + nx * 2))];
#if defined(VISP_DEBUG_RB_TRACKER)
      if (fabs(theta) > M_PI + 1e-6) {
        throw vpException(vpException::badValue, "Theta expected to be in -Pi, Pi range but was not");
      }
#endif
      points.push_back(vpRBSilhouettePoint(n, m, norm, theta, Z));
      // if (Zn > 0) {
      //   theta = -theta;
      // }
      // Code to filter when two edges are too close and should not be used
      // for (unsigned int normalOffset = 1; normalOffset <= 3; ++normalOffset) {
      //   unsigned char offY = static_cast<unsigned char>(round(n + normalOffset * ny));
      //   unsigned char offX = static_cast<unsigned char>(round(m + normalOffset * nx));
      //   unsigned char negOffY = static_cast<unsigned char>(round(n - normalOffset * ny));
      //   unsigned char negOffX = static_cast<unsigned char>(round(m - normalOffset * nx));
      //   if (offY == n || offX == m || negOffY == n || negOffX == m) {
      //     continue;
      //   }
      //   if (Ivalid(offY, offX) || Ivalid(negOffY, negOffX)) {
      //     noNeighbor = false;
      //     break;
      //   }
      // }
      // if (noNeighbor) {
      //   points.push_back(vpRBSilhouettePoint(n, m, norm, theta, Z));
      // }
    }
  }

  return points;
}

void vpRBTracker::addTracker(std::shared_ptr<vpRBFeatureTracker> tracker)
{
  if (tracker == nullptr) {
    throw vpException(vpException::badValue, "Adding tracker: tracker cannot be null");
  }
  m_trackers.push_back(tracker);
}

void vpRBTracker::displayMask(vpImage<unsigned char> &Imask) const
{
  if (m_mask) {
    m_mask->display(m_currentFrame.mask, Imask);
  }
}

void vpRBTracker::display(const vpImage<unsigned char> &I, const vpImage<vpRGBa> &IRGB, const vpImage<unsigned char> &depth)
{
  if (m_currentFrame.renders.normals.getSize() == 0) {
    return;
  }

  for (std::shared_ptr<vpRBFeatureTracker> &tracker : m_trackers) {
    if (tracker->featuresShouldBeDisplayed()) {
      tracker->display(m_currentFrame.cam, I, IRGB, depth);
    }
  }

  if (m_driftDetector) {
    m_driftDetector->display(IRGB);
  }
}

vpObjectCentricRenderer &vpRBTracker::getRenderer()
{
  return m_renderer;
}

#if defined(VISP_HAVE_NLOHMANN_JSON)
void vpRBTracker::loadConfigurationFile(const std::string &filename)
{
  std::ifstream jsonFile(filename);
  if (!jsonFile.good()) {
    throw vpException(vpException::ioError, "Could not read from settings file " + filename + " to initialize the RBTracker");
  }
  nlohmann::json settings;
  try {
    settings = nlohmann::json::parse(jsonFile);
  }
  catch (nlohmann::json::parse_error &e) {
    std::stringstream msg;
    msg << "Could not parse JSON file:\n";
    msg << e.what() << std::endl;
    msg << "Byte position of error: " << e.byte;
    throw vpException(vpException::ioError, msg.str());
  }
  loadConfiguration(settings);
  jsonFile.close();
}

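// Expected configuration layout, inferred from the keys parsed below (values
// are illustrative, not defaults):
// {
//   "verbose": { "enabled": false },
//   "camera": { "intrinsics": { ... }, "height": 480, "width": 640 },
//   "model": "path/to/model.obj",
//   "vvs": { "maxIterations": 10, "gain": 1.0, "mu": 0.0, "muIterFactor": 0.5 },
//   "silhouetteExtractionSettings": { ... },
//   "features": [ { ... } ],
//   "mask": { ... },
//   "drift": { ... }
// }
// "model", "mask" and "drift" are optional; each "features" entry is
// dispatched to vpRBFeatureTrackerFactory, and similarly for the mask and
// drift detector factories.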
void vpRBTracker::loadConfiguration(const nlohmann::json &j)
{
  m_firstIteration = true;
  const nlohmann::json verboseSettings = j.at("verbose");
  m_verbose = verboseSettings.value("enabled", m_verbose);

  const nlohmann::json cameraSettings = j.at("camera");
  m_cam = cameraSettings.at("intrinsics");
  m_imageHeight = cameraSettings.value("height", m_imageHeight);
  m_imageWidth = cameraSettings.value("width", m_imageWidth);
  setCameraParameters(m_cam, m_imageHeight, m_imageWidth);

  if (j.contains("model")) {
    setModelPath(j.at("model"));
  }

  const nlohmann::json vvsSettings = j.at("vvs");
  setMaxOptimizationIters(vvsSettings.value("maxIterations", m_vvsIterations));
  setOptimizationGain(vvsSettings.value("gain", m_lambda));
  setOptimizationInitialMu(vvsSettings.value("mu", m_muInit));
  setOptimizationMuIterFactor(vvsSettings.value("muIterFactor", m_muIterFactor));

  m_depthSilhouetteSettings = j.at("silhouetteExtractionSettings");

  m_trackers.clear();
  const nlohmann::json features = j.at("features");
  vpRBFeatureTrackerFactory &featureFactory = vpRBFeatureTrackerFactory::getFactory();
  for (const nlohmann::json &trackerSettings: features) {
    std::shared_ptr<vpRBFeatureTracker> tracker = featureFactory.buildFromJson(trackerSettings);
    if (tracker == nullptr) {
      throw vpException(
        vpException::badValue,
        "Cannot instantiate subtracker with the current settings, make sure that the type is registered. Settings: %s",
        trackerSettings.dump(2).c_str()
      );
    }
    m_trackers.push_back(tracker);
  }

  if (j.contains("mask")) {
    vpObjectMaskFactory &maskFactory = vpObjectMaskFactory::getFactory();
    const nlohmann::json maskSettings = j.at("mask");
    m_mask = maskFactory.buildFromJson(maskSettings);
    if (m_mask == nullptr) {
      throw vpException(
        vpException::badValue,
        "Cannot instantiate object mask with the current settings, make sure that the type is registered. Settings: %s",
        maskSettings.dump(2).c_str());
    }
  }
  if (j.contains("drift")) {
    vpRBDriftDetectorFactory &factory = vpRBDriftDetectorFactory::getFactory();
    const nlohmann::json driftSettings = j.at("drift");
    m_driftDetector = factory.buildFromJson(driftSettings);
    if (m_driftDetector == nullptr) {
      throw vpException(
        vpException::badValue,
        "Cannot instantiate drift detection with the current settings, make sure that the type is registered in the factory"
      );
    }
  }
}
#endif

#ifdef VISP_HAVE_MODULE_GUI
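// Interactive pose initialization: delegates to vpRBInitializationHelper,
// which follows the same .init click-based workflow as the other ViSP
// model-based trackers, and stores the resulting pose as the initial cMo.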
void vpRBTracker::initClick(const vpImage<unsigned char> &I, const std::string &initFile, bool displayHelp)
{
  vpRBInitializationHelper initializer;
  initializer.setCameraParameters(m_cam);
  initializer.initClick(I, initFile, displayHelp);
  m_cMo = initializer.getPose();
}
#endif

END_VISP_NAMESPACE