31 #include <visp3/rbt/vpRBTracker.h>
33 #if defined(VISP_HAVE_NLOHMANN_JSON)
34 #include VISP_NLOHMANN_JSON(json.hpp)
37 #include <visp3/core/vpExponentialMap.h>
38 #include <visp3/core/vpIoTools.h>
40 #include <visp3/ar/vpPanda3DRendererSet.h>
41 #include <visp3/ar/vpPanda3DGeometryRenderer.h>
42 #include <visp3/ar/vpPanda3DRGBRenderer.h>
44 #include <visp3/rbt/vpRBFeatureTrackerFactory.h>
45 #include <visp3/rbt/vpRBDriftDetectorFactory.h>
46 #include <visp3/rbt/vpObjectMaskFactory.h>
47 #include <visp3/rbt/vpRBVisualOdometry.h>
48 #include <visp3/rbt/vpRBInitializationHelper.h>
53 m_firstIteration(true), m_trackers(0), m_lambda(1.0), m_vvsIterations(10), m_muInit(0.0), m_muIterFactor(0.5),
54 m_renderer(m_rendererSettings), m_imageHeight(480), m_imageWidth(640), m_verbose(false)
79 double sumWeights = 0.0;
80 for (
const std::shared_ptr<vpRBFeatureTracker> &tracker:
m_trackers) {
81 if (tracker->getNumFeatures() == 0) {
85 vpMatrix trackerCov = tracker->getCovariance();
86 double trackerWeight = tracker->getVVSTrackerWeight();
89 "Expected tracker pose covariance to have dimensions 6x6, but got %dx%d",
94 sumWeights += trackerWeight;
103 if (cam.
get_projModel() != vpCameraParameters::vpCameraParametersProjType::perspectiveProjWithoutDistortion) {
105 "Camera model cannot have distortion. Undistort images before tracking and use the undistorted camera model");
107 if (h == 0 || w == 0) {
110 "Image dimensions must be greater than 0"
142 const std::shared_ptr<vpPanda3DGeometryRenderer> geometryRenderer = std::make_shared<vpPanda3DGeometryRenderer>(
143 vpPanda3DGeometryRenderer::vpRenderType::OBJECT_NORMALS);
146 bool requiresSilhouetteShader =
false;
147 for (std::shared_ptr<vpRBFeatureTracker> &tracker:
m_trackers) {
148 if (tracker->requiresSilhouetteCandidates()) {
149 requiresSilhouetteShader =
true;
153 if (requiresSilhouetteShader) {
155 "depthCanny", geometryRenderer,
true, 0.0));
165 for (std::shared_ptr<vpRBFeatureTracker> tracker :
m_trackers) {
166 if (tracker->requiresDepth() || tracker->requiresRGB()) {
179 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
180 if (tracker->requiresDepth()) {
188 frameInput.
IRGB = IRGB;
205 frameInput.
IRGB = IRGB;
206 frameInput.
depth = depth;
231 bool requiresSilhouetteCandidates =
false;
232 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
233 if (tracker->requiresSilhouetteCandidates()) {
234 requiresSilhouetteCandidates =
true;
240 if (requiresSilhouetteCandidates) {
258 if (requiresSilhouetteCandidates) {
269 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
271 tracker->onTrackingIterStart();
276 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
282 std::cerr <<
"Tracker " <<
id <<
" raised an exception in extractFeatures" << std::endl;
289 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
295 std::cerr <<
"Tracker " <<
id <<
" raised an exception in trackFeatures" << std::endl;
305 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
314 double bestError = std::numeric_limits<double>::max();
319 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
322 tracker->computeVVSIter(input,
m_cMo, iter);
325 std::cerr <<
"Tracker " <<
id <<
" raised an exception in computeVVSIter" << std::endl;
333 bool converged =
true;
334 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
335 if (!tracker->vvsHasConverged()) {
347 unsigned int numFeatures = 0;
349 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
350 if (tracker->getNumFeatures() > 0) {
351 numFeatures += tracker->getNumFeatures();
352 const double weight = tracker->getVVSTrackerWeight();
353 LTL += weight * tracker->getLTL();
354 LTR += weight * tracker->getLTR();
355 error += (weight * tracker->getWeightedError()).sumSquare();
360 if (numFeatures >= 6) {
362 if (error < bestError) {
374 std::cerr <<
"Could not compute pseudo inverse" << std::endl;
385 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
386 tracker->onTrackingIterEnd();
418 float clipNear, clipFar;
426 if (shouldRenderSilhouette) {
443 #ifdef VISP_HAVE_OPENMP
444 #pragma omp parallel sections
448 #ifdef VISP_HAVE_OPENMP
458 #ifdef VISP_HAVE_OPENMP
462 if (shouldRenderSilhouette) {
485 std::vector<std::pair<unsigned int, unsigned int>> candidates =
488 std::vector<vpRBSilhouettePoint> points;
491 for (
unsigned int i = 0; i < candidates.size(); ++i) {
492 unsigned int n = candidates[i].first, m = candidates[i].second;
493 double theta = silhouetteCanny[n][m].B;
494 if (std::isnan(theta)) {
498 norm[0] = Inorm[n][m].R;
499 norm[1] = Inorm[n][m].G;
500 norm[2] = Inorm[n][m].B;
501 const double l = std::sqrt(norm[0] * norm[0] + norm[1] * norm[1] + norm[2] * norm[2]);
504 const double Z = Idepth[n][m];
509 #if defined(VISP_DEBUG_RB_TRACKER)
510 if (fabs(theta) > M_PI + 1e-6) {
545 if (tracker ==
nullptr) {
565 for (std::shared_ptr<vpRBFeatureTracker> &tracker :
m_trackers) {
566 if (tracker->featuresShouldBeDisplayed()) {
581 #if defined(VISP_HAVE_NLOHMANN_JSON)
584 std::ifstream jsonFile(filename);
585 if (!jsonFile.good()) {
588 nlohmann::json settings;
590 settings = nlohmann::json::parse(jsonFile);
592 catch (nlohmann::json::parse_error &e) {
593 std::stringstream msg;
594 msg <<
"Could not parse JSON file : \n";
595 msg << e.what() << std::endl;
596 msg <<
"Byte position of error: " << e.byte;
606 const nlohmann::json verboseSettings = j.at(
"verbose");
609 const nlohmann::json cameraSettings = j.at(
"camera");
610 m_cam = cameraSettings.at(
"intrinsics");
615 if (j.contains(
"model")) {
619 const nlohmann::json vvsSettings = j.at(
"vvs");
628 const nlohmann::json features = j.at(
"features");
630 for (
const nlohmann::json &trackerSettings: features) {
631 std::shared_ptr<vpRBFeatureTracker> tracker = featureFactory.
buildFromJson(trackerSettings);
632 if (tracker ==
nullptr) {
635 "Cannot instantiate subtracker with the current settings, make sure that the type is registered. Settings: %s",
636 trackerSettings.dump(2).c_str()
642 if (j.contains(
"mask")) {
644 const nlohmann::json maskSettings = j.at(
"mask");
649 "Cannot instantiate object mask with the current settings, make sure that the type is registered. Settings: %s",
650 maskSettings.dump(2).c_str());
653 if (j.contains(
"drift")) {
655 const nlohmann::json driftSettings = j.at(
"drift");
660 "Cannot instantiate drift detection with the current settings, make sure that the type is registered in the factory"
667 #ifdef VISP_HAVE_MODULE_GUI
668 void vpRBTracker::initClick(
const vpImage<unsigned char> &I,
const std::string &initFile,
bool displayHelp)
672 initializer.initClick(I, initFile, displayHelp);
unsigned int getCols() const
unsigned int getRows() const
Generic class defining intrinsic camera parameters.
vpCameraParametersProjType get_projModel() const
Implementation of column vector and the associated operations.
std::shared_ptr< T > buildFromJson(const nlohmann::json &j)
error that can be emitted by ViSP classes.
@ badValue
Used to indicate that a value is not in the allowed range.
@ dimensionError
Bad dimension.
static vpHomogeneousMatrix direct(const vpColVector &v)
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpHomogeneousMatrix inverse() const
void resize(unsigned int h, unsigned int w)
resize the image : Image initialization
unsigned int getSize() const
Implementation of a matrix and operations on matrices.
vpMatrix pseudoInverse(double svThreshold=1e-6) const
Single object focused renderer.
vpRect getBoundingBox() const
void computeClipping(float &nearV, float &farV)
virtual void setRenderParameters(const vpPanda3DRenderParameters ¶ms) VP_OVERRIDE
Set new rendering parameters. If the scene has already been initialized, the renderer camera is updat...
void setFocusedObject(const std::string &focused)
A factory that can be used to create Object segmentation algorithms from JSON data.
static vpObjectMaskFactory & getFactory()
Class representing an ambient light.
virtual void renderFrame()
NodePath loadObject(const std::string &nodeName, const std::string &modelPath)
Load a 3D object. To load an .obj file, Panda3D must be compiled with assimp support.
Implementation of canny filtering, using Sobel kernels.
Renderer that outputs object geometric information.
void setClippingDistance(double nearV, double farV)
Set the clipping distance. When a panda camera uses these render parameters, objects that are closer ...
void setCameraIntrinsics(const vpCameraParameters &cam)
set camera intrinsics. Only camera intrinsics for a lens without distortion are supported.
void setImageResolution(unsigned int height, unsigned int width)
Set the image resolution. When this object is given to a vpPanda3DBaseRenderer, this will be the reso...
void addNodeToScene(const NodePath &object) VP_OVERRIDE
void initFramework() VP_OVERRIDE
Initialize the framework and propagate the created panda3D framework to the subrenderers.
void addSubRenderer(std::shared_ptr< vpPanda3DBaseRenderer > renderer)
Add a new subrenderer: This subrenderer should have a unique name, not present in the set.
void addLight(const vpPanda3DLight &light) VP_OVERRIDE
Light this lightable object with a new light.
void setCameraPose(const vpHomogeneousMatrix &wTc) VP_OVERRIDE
Set the pose of the camera, using the ViSP convention. This change is propagated to all subrenderers.
std::shared_ptr< RendererType > getRenderer()
Retrieve the first subrenderer with the specified template type.
A factory that can be used to instantiate drift detection algorithms from JSON data.
static vpRBDriftDetectorFactory & getFactory()
A factory to instantiate feature trackers from JSON data.
static vpRBFeatureTrackerFactory & getFactory()
A set of utilities to perform initialization.
vpHomogeneousMatrix getPose() const
void setCameraParameters(const vpCameraParameters &cam)
Silhouette point simple candidate representation.
void setTrackerFeatureTrackingTime(int id, double elapsed)
void setRenderTime(double elapsed)
void setOdometryTime(double elapsed)
void setInitVVSTime(int id, double elapsed)
void setMaskTime(double elapsed)
void setTrackerIterStartTime(int id, double elapsed)
void addTrackerVVSTime(int id, double elapsed)
void setDriftDetectionTime(double elapsed)
void setTrackerFeatureExtractionTime(int id, double elapsed)
std::shared_ptr< vpRBDriftDetector > m_driftDetector
std::shared_ptr< vpRBVisualOdometry > m_odometry
vpSilhouettePointsExtractionSettings m_depthSilhouetteSettings
Factor with which to multiply mu at every iteration during VVS.
void track(const vpImage< unsigned char > &I)
void checkDimensionsOrThrow(const vpImage< T > &I, const std::string &imgType) const
vpObjectCentricRenderer m_renderer
vpCameraParameters getCameraParameters() const
unsigned m_vvsIterations
VVS gain.
vpRBTrackerLogger m_logger
Color and render image dimensions.
double m_muIterFactor
Initial mu value for Levenberg-Marquardt.
void updateRender(vpRBFeatureTrackerInput &frame)
std::vector< std::shared_ptr< vpRBFeatureTracker > > m_trackers
Whether this is the first iteration.
void setModelPath(const std::string &path)
vpPanda3DRenderParameters m_rendererSettings
void displayMask(vpImage< unsigned char > &Imask) const
void getPose(vpHomogeneousMatrix &cMo) const
std::shared_ptr< vpObjectMask > m_mask
vpHomogeneousMatrix m_cMoPrev
void loadConfiguration(const nlohmann::json &j)
void addTracker(std::shared_ptr< vpRBFeatureTracker > tracker)
vpRBFeatureTrackerInput m_currentFrame
List of trackers.
void loadConfigurationFile(const std::string &filename)
void setupRenderer(const std::string &file)
void display(const vpImage< unsigned char > &I, const vpImage< vpRGBa > &IRGB, const vpImage< unsigned char > &depth)
vpRBFeatureTrackerInput m_previousFrame
void setSilhouetteExtractionParameters(const vpSilhouettePointsExtractionSettings &settings)
vpMatrix getCovariance() const
vpObjectCentricRenderer & getRenderer()
void setOptimizationMuIterFactor(double factor)
std::vector< vpRBSilhouettePoint > extractSilhouettePoints(const vpImage< vpRGBf > &Inorm, const vpImage< float > &Idepth, const vpImage< vpRGBf > &Ior, const vpImage< unsigned char > &Ivalid, const vpCameraParameters &cam, const vpHomogeneousMatrix &cTcp)
void setOptimizationInitialMu(double mu)
void setOptimizationGain(double lambda)
double m_muInit
Max number of VVS iterations.
void setPose(const vpHomogeneousMatrix &cMo)
vpHomogeneousMatrix m_cMo
void setMaxOptimizationIters(unsigned int iters)
void setCameraParameters(const vpCameraParameters &cam, unsigned h, unsigned w)
double zNear
Binary image indicating whether a given pixel is part of the silhouette.
vpImage< float > depth
Image containing the per-pixel normal vector (RGB, in object space)
vpImage< vpRGBf > silhouetteCanny
vpImage< vpRGBf > normals
vpImage< unsigned char > isSilhouette
Image containing the orientation of the gradients.