Visual Servoing Platform version 3.6.1 under development (2024-11-21)
tutorial-mb-generic-tracker-apriltag-rs2.cpp
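This example combines AprilTag detection with ViSP's generic model-based tracker on an Intel RealSense camera: a tag glued on one face of a cube provides the initial pose, after which the cube is tracked from its CAD model using moving edges and, optionally, KLT keypoints (--texture) and a dense depth map (--depth). Whenever tracking fails, either because the tracker throws or because the projection error exceeds a threshold, the program falls back to tag detection.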
#include <fstream>
#include <ios>
#include <iostream>
#include <visp3/core/vpConfig.h>
#include <visp3/detection/vpDetectorAprilTag.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/sensor/vpRealSense2.h>
#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif
typedef enum { state_detection, state_tracking, state_quit } state_t;
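// The demo is a small state machine: it starts in state_detection, switches
// to state_tracking once a tag provides an initial pose, and falls back to
// state_detection whenever tracking is lost.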
// Creates a cube.cao file in your current directory
// cubeEdgeSize : size of cube edges in meters
void createCaoFile(double cubeEdgeSize)
{
std::ofstream fileStream;
fileStream.open("cube.cao", std::ofstream::out | std::ofstream::trunc);
fileStream << "V1\n";
fileStream << "# 3D Points\n";
fileStream << "8 # Number of points\n";
fileStream << cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << 0 << " # Point 0: (X, Y, Z)\n";
fileStream << cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << 0 << " # Point 1\n";
fileStream << -cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << 0 << " # Point 2\n";
fileStream << -cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << 0 << " # Point 3\n";
fileStream << -cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 4\n";
fileStream << -cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 5\n";
fileStream << cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 6\n";
fileStream << cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 7\n";
fileStream << "# 3D Lines\n";
fileStream << "0 # Number of lines\n";
fileStream << "# Faces from 3D lines\n";
fileStream << "0 # Number of faces\n";
fileStream << "# Faces from 3D points\n";
fileStream << "6 # Number of faces\n";
fileStream << "4 0 3 2 1 # Face 0: [number of points] [index of the 3D points]...\n";
fileStream << "4 1 2 5 6\n";
fileStream << "4 4 7 6 5\n";
fileStream << "4 0 7 4 3\n";
fileStream << "4 5 2 3 4\n";
fileStream << "4 0 1 6 7 # Face 5\n";
fileStream << "# 3D cylinders\n";
fileStream << "0 # Number of cylinders\n";
fileStream << "# 3D circles\n";
fileStream << "0 # Number of circles\n";
fileStream.close();
}
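// For the default 12.5 cm cube this writes a cube.cao starting with "V1",
// the 8 corners (point 0 is "0.0625 0.0625 0"), zero standalone lines,
// cylinders and circles, and the 6 faces given as point-index lists. Points
// 0-3 form the face at Z = 0, which is where the tag is expected to sit.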
#if defined(VISP_HAVE_APRILTAG)
state_t detectAprilTag(const vpImage<unsigned char> &I, vpDetectorAprilTag &detector, double tagSize,
const vpCameraParameters &cam, vpHomogeneousMatrix &cMo)
{
std::vector<vpHomogeneousMatrix> cMo_vec;
// Detection
bool ret = detector.detect(I, tagSize, cam, cMo_vec);
// Display camera pose
for (size_t i = 0; i < cMo_vec.size(); i++) {
vpDisplay::displayFrame(I, cMo_vec[i], cam, tagSize / 2, vpColor::none, 3);
}
vpDisplay::displayText(I, 40, 20, "State: waiting for tag detection", vpColor::red);
if (ret && detector.getNbObjects() > 0) { // if tag detected, we pick the first one
cMo = cMo_vec[0];
return state_tracking;
}
return state_detection;
}
#endif // #if defined(VISP_HAVE_APRILTAG)
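// The tag frame serves directly as the object frame: the cube model above
// places one face in the plane Z = 0, i.e. the tag plane, so the pose of the
// first detected tag can seed the model-based tracker without any offset.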
state_t track(const vpImage<unsigned char> &I, vpMbGenericTracker &tracker, double projection_error_threshold,
vpHomogeneousMatrix &cMo)
{
vpCameraParameters cam;
tracker.getCameraParameters(cam);
// Track the object
try {
tracker.track(I);
}
catch (...) {
return state_detection;
}
tracker.getPose(cMo);
// Detect tracking error
double projection_error = tracker.computeCurrentProjectionError(I, cMo, cam);
if (projection_error > projection_error_threshold) {
return state_detection;
}
// Display
tracker.display(I, cMo, cam, vpColor::red, 2);
vpDisplay::displayFrame(I, cMo, cam, 0.025, vpColor::none, 3);
vpDisplay::displayText(I, 40, 20, "State: tracking in progress", vpColor::red);
{
std::stringstream ss;
ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt();
vpDisplay::displayText(I, 60, 20, ss.str(), vpColor::red);
}
return state_tracking;
}
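// Note: computeCurrentProjectionError() returns a mean angular error in
// degrees between the projected model and the image gradient; above the
// threshold (40 by default here) the object is declared lost.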
state_t track(std::map<std::string, const vpImage<unsigned char> *> mapOfImages,
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> mapOfPointclouds,
#else
std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds,
std::map<std::string, unsigned int> mapOfWidths, std::map<std::string, unsigned int> mapOfHeights,
#endif
const vpImage<unsigned char> &I_gray, const vpImage<unsigned char> &I_depth,
const vpHomogeneousMatrix &depth_M_color, vpMbGenericTracker &tracker, double projection_error_threshold,
vpHomogeneousMatrix &cMo)
{
vpCameraParameters cam_color, cam_depth;
tracker.getCameraParameters(cam_color, cam_depth);
// Track the object
try {
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
tracker.track(mapOfImages, mapOfPointclouds);
#else
tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
#endif
}
catch (...) {
return state_detection;
}
tracker.getPose(cMo);
// Detect tracking error
double projection_error = tracker.computeCurrentProjectionError(I_gray, cMo, cam_color);
if (projection_error > projection_error_threshold) {
return state_detection;
}
// Display
tracker.display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth, vpColor::red, 3);
vpDisplay::displayFrame(I_gray, cMo, cam_color, 0.05, vpColor::none, 3);
vpDisplay::displayFrame(I_depth, depth_M_color * cMo, cam_depth, 0.05, vpColor::none, 3);
return state_tracking;
}
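// In this stereo variant "Camera1" (color) feeds the edge/KLT features while
// "Camera2" (depth) feeds the dense point cloud; depth_M_color transfers the
// pose estimated in the color frame into the depth frame for display.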
int main(int argc, const char **argv)
{
#if defined(VISP_HAVE_APRILTAG) && defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_MODULE_MBT)
vpDetectorAprilTag::vpAprilTagFamily opt_tag_family = vpDetectorAprilTag::TAG_36h11;
double opt_tag_size = 0.08;
float opt_quad_decimate = 1.0;
int opt_nthreads = 1;
double opt_cube_size = 0.125; // 12.5cm by default
#ifdef VISP_HAVE_OPENCV
bool opt_use_texture = false;
#endif
bool opt_use_depth = false;
double opt_projection_error_threshold = 40.;
#if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
bool display_off = true;
#else
bool display_off = false;
#endif
for (int i = 1; i < argc; i++) {
if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) {
opt_tag_size = atof(argv[i + 1]);
}
else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) {
opt_quad_decimate = (float)atof(argv[i + 1]);
}
else if (std::string(argv[i]) == "--nthreads" && i + 1 < argc) {
opt_nthreads = atoi(argv[i + 1]);
}
else if (std::string(argv[i]) == "--display_off") {
display_off = true;
}
else if (std::string(argv[i]) == "--tag_family" && i + 1 < argc) {
opt_tag_family = (vpDetectorAprilTag::vpAprilTagFamily)atoi(argv[i + 1]);
}
else if (std::string(argv[i]) == "--cube_size" && i + 1 < argc) {
opt_cube_size = atof(argv[i + 1]);
#ifdef VISP_HAVE_OPENCV
}
else if (std::string(argv[i]) == "--texture") {
opt_use_texture = true;
#endif
}
else if (std::string(argv[i]) == "--depth") {
opt_use_depth = true;
}
else if (std::string(argv[i]) == "--projection_error" && i + 1 < argc) {
opt_projection_error_threshold = atof(argv[i + 1]);
}
else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
std::cout << "Usage: " << argv[0]
<< " [--cube_size <size in m>] [--tag_size <size in m>]"
" [--quad_decimate <decimation>] [--nthreads <nb>]"
" [--tag_family <0: TAG_36h11, 1: TAG_36h10, 2: TAG_36ARTOOLKIT, "
" 3: TAG_25h9, 4: TAG_25h7, 5: TAG_16h5>]";
#if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
std::cout << " [--display_off]";
#endif
std::cout << " [--texture] [--depth] [--projection_error <30 - 100>] [--help]" << std::endl;
return EXIT_SUCCESS;
}
}
createCaoFile(opt_cube_size);
try {
vpRealSense2 realsense;
rs2::config config;
int width = 640, height = 480, stream_fps = 30;
config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, stream_fps);
config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, stream_fps);
config.disable_stream(RS2_STREAM_INFRARED);
realsense.open(config);
vpCameraParameters cam_color, cam_depth;
vpHomogeneousMatrix depth_M_color;
cam_color = realsense.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
if (opt_use_depth) {
cam_depth = realsense.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithoutDistortion);
depth_M_color = realsense.getTransformation(RS2_STREAM_COLOR, RS2_STREAM_DEPTH);
}
vpImage<vpRGBa> I_color(height, width);
vpImage<unsigned char> I_gray(height, width);
vpImage<unsigned char> I_depth(height, width);
vpImage<uint16_t> I_depth_raw(height, width);
std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> mapOfPointclouds;
pcl::PointCloud<pcl::PointXYZ>::Ptr pointcloud(new pcl::PointCloud<pcl::PointXYZ>());
#else
std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
std::vector<vpColVector> pointcloud;
#endif
std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;
std::cout << "Cube size: " << opt_cube_size << std::endl;
std::cout << "AprilTag size: " << opt_tag_size << std::endl;
std::cout << "AprilTag family: " << opt_tag_family << std::endl;
std::cout << "Camera parameters:" << std::endl;
std::cout << " Color:\n" << cam_color << std::endl;
if (opt_use_depth)
std::cout << " Depth:\n" << cam_depth << std::endl;
std::cout << "Detection: " << std::endl;
std::cout << " Quad decimate: " << opt_quad_decimate << std::endl;
std::cout << " Threads number: " << opt_nthreads << std::endl;
std::cout << "Tracker: " << std::endl;
std::cout << " Use edges : 1" << std::endl;
std::cout << " Use texture: "
#ifdef VISP_HAVE_OPENCV
<< opt_use_texture << std::endl;
#else
<< " na" << std::endl;
#endif
std::cout << " Use depth : " << opt_use_depth << std::endl;
std::cout << " Projection error: " << opt_projection_error_threshold << std::endl;
// Construct display
vpDisplay *d_gray = nullptr;
vpDisplay *d_depth = nullptr;
if (!display_off) {
#ifdef VISP_HAVE_X11
d_gray = new vpDisplayX(I_gray, 50, 50, "Color stream");
if (opt_use_depth)
d_depth = new vpDisplayX(I_depth, 80 + I_gray.getWidth(), 50, "Depth stream");
#elif defined(VISP_HAVE_GDI)
d_gray = new vpDisplayGDI(I_gray);
if (opt_use_depth)
d_depth = new vpDisplayGDI(I_depth);
#elif defined(HAVE_OPENCV_HIGHGUI)
d_gray = new vpDisplayOpenCV(I_gray);
if (opt_use_depth)
d_depth = new vpDisplayOpenCV(I_depth);
#endif
}
// Initialize AprilTag detector
vpDetectorAprilTag detector(opt_tag_family);
detector.setAprilTagQuadDecimate(opt_quad_decimate);
detector.setAprilTagNbThreads(opt_nthreads);
// Prepare MBT
std::vector<int> trackerTypes;
#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
if (opt_use_texture)
trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
else
#endif
trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER);
if (opt_use_depth)
trackerTypes.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER);
vpMbGenericTracker tracker(trackerTypes);
// edges
vpMe me;
me.setMaskSize(5);
me.setMaskNumber(180);
me.setRange(12);
me.setLikelihoodThresholdType(vpMe::NORMALIZED_THRESHOLD);
me.setThreshold(20);
me.setMu1(0.5);
me.setMu2(0.5);
me.setSampleStep(4);
tracker.setMovingEdge(me);
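// Roughly: the 5x5 mask with 180 orientations is the convolution used to
// localize an edge, range is the search distance in pixels on either side of
// each sample point, and mu1/mu2 bound the accepted contrast variation.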
#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
if (opt_use_texture) {
vpKltOpencv klt_settings;
klt_settings.setMaxFeatures(300);
klt_settings.setWindowSize(5);
klt_settings.setQuality(0.015);
klt_settings.setMinDistance(8);
klt_settings.setHarrisFreeParameter(0.01);
klt_settings.setBlockSize(3);
klt_settings.setPyramidLevels(3);
tracker.setKltOpencv(klt_settings);
tracker.setKltMaskBorder(5);
}
#endif
if (opt_use_depth) {
// camera calibration params
tracker.setCameraParameters(cam_color, cam_depth);
// model definition
tracker.loadModel("cube.cao", "cube.cao");
mapOfCameraTransformations["Camera2"] = depth_M_color;
tracker.setCameraTransformationMatrix(mapOfCameraTransformations);
tracker.setAngleAppear(vpMath::rad(70), vpMath::rad(70));
tracker.setAngleDisappear(vpMath::rad(80), vpMath::rad(80));
}
else {
// camera calibration params
tracker.setCameraParameters(cam_color);
// model definition
tracker.loadModel("cube.cao");
tracker.setAngleAppear(vpMath::rad(70));
tracker.setAngleDisappear(vpMath::rad(80));
}
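// With depth enabled the same cube.cao model is loaded once per camera and
// the color-to-depth extrinsics are registered, so both sensors contribute
// to a single pose estimate.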
tracker.setDisplayFeatures(true);
vpHomogeneousMatrix cMo;
state_t state = state_detection;
// wait for a tag detection
while (state != state_quit) {
if (opt_use_depth) {
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, nullptr, pointcloud, nullptr);
#else
realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud, nullptr,
nullptr);
#endif
vpImageConvert::convert(I_color, I_gray);
vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);
mapOfImages["Camera1"] = &I_gray;
mapOfImages["Camera2"] = &I_depth;
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
mapOfPointclouds["Camera2"] = pointcloud;
#else
mapOfPointclouds["Camera2"] = &pointcloud;
mapOfWidths["Camera2"] = width;
mapOfHeights["Camera2"] = height;
#endif
}
else {
realsense.acquire(I_gray);
}
vpDisplay::display(I_gray);
if (opt_use_depth) {
vpDisplay::display(I_depth);
}
if (state == state_detection) {
state = detectAprilTag(I_gray, detector, opt_tag_size, cam_color, cMo);
// Initialize the tracker with the result of the detection
if (state == state_tracking) {
if (opt_use_depth) {
mapOfCameraPoses["Camera1"] = cMo;
mapOfCameraPoses["Camera2"] = depth_M_color * cMo;
tracker.initFromPose(mapOfImages, mapOfCameraPoses);
}
else {
tracker.initFromPose(I_gray, cMo);
}
}
}
if (state == state_tracking) {
if (opt_use_depth) {
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
state = track(mapOfImages, mapOfPointclouds, I_gray, I_depth, depth_M_color, tracker,
opt_projection_error_threshold, cMo);
#else
state = track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights, I_gray, I_depth, depth_M_color,
tracker, opt_projection_error_threshold, cMo);
#endif
}
else {
state = track(I_gray, tracker, opt_projection_error_threshold, cMo);
}
{
std::stringstream ss;
ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt()
<< ", depth " << tracker.getNbFeaturesDepthDense();
vpDisplay::displayText(I_gray, I_gray.getHeight() - 30, 20, ss.str(), vpColor::red);
}
}
vpDisplay::displayText(I_gray, 20, 20, "Click to quit...", vpColor::red);
if (vpDisplay::getClick(I_gray, false)) { // exit
state = state_quit;
}
vpDisplay::flush(I_gray);
if (opt_use_depth) {
vpDisplay::displayText(I_depth, 20, 20, "Click to quit...", vpColor::red);
if (vpDisplay::getClick(I_depth, false)) { // exit
state = state_quit;
}
vpDisplay::flush(I_depth);
}
}
if (!display_off) {
delete d_gray;
if (opt_use_depth)
delete d_depth;
}
}
catch (const vpException &e) {
std::cerr << "Caught an exception: " << e.getMessage() << std::endl;
}
return EXIT_SUCCESS;
#else
(void)argc;
(void)argv;
#ifndef VISP_HAVE_APRILTAG
std::cout << "ViSP is not build with Apriltag support" << std::endl;
#endif
#ifndef VISP_HAVE_REALSENSE2
std::cout << "ViSP is not build with librealsense2 support" << std::endl;
#endif
std::cout << "Install missing 3rd parties, configure and build ViSP to run this tutorial" << std::endl;
#endif
return EXIT_SUCCESS;
}
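As a usage sketch (the binary name below assumes the default CMake target generated for this tutorial), tracking an 8 cm 36h11 tag glued on a 12.5 cm cube with texture and depth enabled would look like:

./tutorial-mb-generic-tracker-apriltag-rs2 --tag_size 0.08 --cube_size 0.125 --texture --depth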