/*
 * Visual Servoing Platform  version 3.6.1 under development (2025-01-11)
 *
 * tutorial-rbt-realsense.cpp
 * Tutorial: render-based tracking with an Intel RealSense RGB-D camera.
 */
#include <iostream>
#include <visp3/core/vpConfig.h>
#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif
#ifndef VISP_HAVE_REALSENSE2
int main()
{
std::cerr << "To run this tutorial, recompile ViSP with the Realsense third party library" << std::endl;
return EXIT_SUCCESS;
}
#else
#include <visp3/core/vpImageConvert.h>
#include <visp3/io/vpParseArgv.h>
#include <visp3/rbt/vpRBTracker.h>
#include <visp3/sensor/vpRealSense2.h>

#include "render-based-tutorial-utils.h"
#ifndef DOXYGEN_SHOULD_SKIP_THIS
{
CmdArguments() : height(480), width(848), fps(60)
{ }
void registerArguments(vpJsonArgumentParser &parser)
{
parser.addArgument("--height", height, false, "Realsense requested image height")
.addArgument("--width", width, false, "Realsense requested image width")
.addArgument("--fps", fps, false, "Realsense requested framerate");
}
unsigned int height, width, fps;
};
#endif
/*!
 * Convert a raw 16-bit depth frame into metric depth and build an 8-bit image
 * for visualization.
 *
 * \param depthRaw raw depth map, in sensor units.
 * \param depthScale scale factor from sensor units to meters (from the grabber).
 * \param maxZDisplay depth (in meters) mapped to full white in the display
 *        image; pixels farther than this are shown as black.
 * \param depth output metric depth map, resized to match \p depthRaw.
 * \param IdepthDisplay output 8-bit visualization image, resized to match
 *        \p depthRaw.
 */
void updateDepth(const vpImage<uint16_t> &depthRaw, float depthScale, float maxZDisplay, vpImage<float> &depth, vpImage<unsigned char> &IdepthDisplay)
{
  depth.resize(depthRaw.getHeight(), depthRaw.getWidth());
  // Keep the display image in sync with the raw depth resolution: the flat
  // bitmap write below would otherwise run out of bounds if sizes differ.
  IdepthDisplay.resize(depthRaw.getHeight(), depthRaw.getWidth());
#ifdef VISP_HAVE_OPENMP
#pragma omp parallel for
#endif
  for (unsigned int i = 0; i < depthRaw.getSize(); ++i) {
    depth.bitmap[i] = depthScale * static_cast<float>(depthRaw.bitmap[i]);
    // Beyond maxZDisplay: black; otherwise linearly scaled to [0, 255].
    IdepthDisplay.bitmap[i] = depth.bitmap[i] > maxZDisplay ? 0 : static_cast<unsigned char>((depth.bitmap[i] / maxZDisplay) * 255.f);
  }
}
int main(int argc, const char **argv)
{
// Read the command line options
vpRBTrackerTutorial::BaseArguments baseArgs;
CmdArguments realsenseArgs;
vpRBTrackerTutorial::vpRBExperimentLogger logger;
vpRBTrackerTutorial::vpRBExperimentPlotter plotter;
"Tutorial showing the usage of the Render-Based tracker with a RealSense camera",
"--config", "/"
);
baseArgs.registerArguments(parser);
realsenseArgs.registerArguments(parser);
logger.registerArguments(parser);
plotter.registerArguments(parser);
parser.parse(argc, argv);
baseArgs.postProcessArguments();
plotter.postProcessArguments(baseArgs.display);
if (baseArgs.enableRenderProfiling) {
vpRBTrackerTutorial::enableRendererProfiling();
}
std::cout << "Loading tracker: " << baseArgs.trackerConfiguration << std::endl;
vpRBTracker tracker;
tracker.loadConfigurationFile(baseArgs.trackerConfiguration);
tracker.startTracking();
const unsigned int width = realsenseArgs.width, height = realsenseArgs.height;
const unsigned fps = realsenseArgs.fps;
vpImage<unsigned char> Id(height, width);
vpImage<vpRGBa> Icol(height, width);
vpImage<uint16_t> depthRaw(height, width);
vpImage<float> depth(height, width);
vpImage<unsigned char> IdepthDisplay(height, width);
vpImage<unsigned char> IProbaDisplay(height, width);
vpImage<unsigned char> cannyDisplay(height, width);
vpImage<vpRGBa> InormDisplay(height, width);
vpRealSense2 realsense;
std::cout << "Opening realsense with " << width << "x" << height << " @ " << fps << "fps" << std::endl;
rs2::config config;
config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps);
config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps);
rs2::align align_to(RS2_STREAM_COLOR);
try {
realsense.open(config);
}
catch (const vpException &e) {
std::cout << "Caught an exception: " << e.what() << std::endl;
std::cout << "Check if the Realsense camera is connected..." << std::endl;
return EXIT_SUCCESS;
}
float depthScale = realsense.getDepthScale();
//camera warmup
for (int i = 0; i < 10; ++i) {
realsense.acquire(Icol);
}
tracker.setCameraParameters(cam, height, width);
std::cout << "Creating displays" << std::endl;;
std::vector<std::shared_ptr<vpDisplay>> displays, displaysDebug;
if (baseArgs.display) {
displays = vpRBTrackerTutorial::createDisplays(Id, Icol, IdepthDisplay, IProbaDisplay);
if (baseArgs.debugDisplay) {
displaysDebug = vpDisplayFactory::makeDisplayGrid(1, 2,
0, 0,
20, 20,
"Normals in object frame", InormDisplay,
"Depth canny", cannyDisplay
);
}
plotter.init(displays);
}
if (baseArgs.display && !baseArgs.hasInlineInit()) {
bool ready = false;
while (!ready) {
realsense.acquire((unsigned char *)Icol.bitmap, (unsigned char *)depthRaw.bitmap, nullptr, nullptr, &align_to);
updateDepth(depthRaw, depthScale, baseArgs.maxDepthDisplay, depth, IdepthDisplay);
if (vpDisplay::getClick(Id, false)) {
ready = true;
}
else {
vpTime::wait(1000.0 / fps);
}
}
}
updateDepth(depthRaw, depthScale, baseArgs.maxDepthDisplay, depth, IdepthDisplay);
// Manual initialization of the tracker
std::cout << "Starting init" << std::endl;
if (baseArgs.hasInlineInit()) {
tracker.setPose(baseArgs.cMoInit);
}
else if (baseArgs.display) {
tracker.initClick(Id, baseArgs.initFile, true);
tracker.getPose(cMo);
}
else {
throw vpException(vpException::notImplementedError, "Cannot initalize tracking: no auto init function provided");
}
std::cout << "Starting pose: " << vpPoseVector(cMo).t() << std::endl;
if (baseArgs.display) {
}
//vpRBTrackerFilter &ukfm = tracker.getFilter();
logger.startLog();
unsigned int iter = 1;
// Main tracking loop
double expStart = vpTime::measureTimeMs();
while (true) {
double frameStart = vpTime::measureTimeMs();
// Acquire images
realsense.acquire((unsigned char *)Icol.bitmap, (unsigned char *)depthRaw.bitmap, nullptr, nullptr, &align_to);
updateDepth(depthRaw, depthScale, baseArgs.maxDepthDisplay, depth, IdepthDisplay);
// Pose tracking
double trackingStart = vpTime::measureTimeMs();
tracker.track(Id, Icol, depth);
double trackingEnd = vpTime::measureTimeMs();
tracker.getPose(cMo);
double displayStart = vpTime::measureTimeMs();
if (baseArgs.display) {
if (baseArgs.debugDisplay) {
const vpRBFeatureTrackerInput &lastFrame = tracker.getMostRecentFrame();
vpRBTrackerTutorial::displayCanny(lastFrame.renders.silhouetteCanny, cannyDisplay, lastFrame.renders.isSilhouette);
}
vpDisplay::display(IdepthDisplay);
// vpDisplay::display(Icol);
tracker.display(Id, Icol, IdepthDisplay);
vpDisplay::displayFrame(Icol, cMo, cam, 0.05, vpColor::none, 2);
vpDisplay::displayText(Id, 20, 5, "Right click to exit", vpColor::red);
if (vpDisplay::getClick(Id, button, false)) {
if (button == vpMouseButton::button3) {
break;
}
}
tracker.displayMask(IProbaDisplay);
vpDisplay::display(IProbaDisplay);
vpDisplay::flush(IdepthDisplay); vpDisplay::flush(IProbaDisplay);
}
logger.logFrame(tracker, iter, Id, Icol, IdepthDisplay, IProbaDisplay);
const double displayEnd = vpTime::measureTimeMs();
// ukfm.filter(cMo, 0.05);
// const vpHomogeneousMatrix cMoFiltered = ukfm.getFilteredPose();
// vpDisplay::displayFrame(Icol, cMoFiltered, cam, 0.05, vpColor::yellow, 2);
const double frameEnd = vpTime::measureTimeMs();
std::cout << "Iter " << iter << ": " << round(frameEnd - frameStart) << "ms" << std::endl;
std::cout << "- Tracking: " << round(trackingEnd - trackingStart) << "ms" << std::endl;
std::cout << "- Display: " << round(displayEnd - displayStart) << "ms" << std::endl;
plotter.plot(tracker, (frameEnd - expStart) / 1000.0);
iter++;
}
logger.close();
return EXIT_SUCCESS;
}
#endif
/* ---------------------------------------------------------------------------
 * NOTE(review): the text below is cross-reference residue from the Doxygen
 * rendering of this tutorial (tooltip signatures of the ViSP entities used
 * above). It is not part of the program; kept commented out for reference.
 *
 * Generic class defining intrinsic camera parameters.
 * @ perspectiveProjWithoutDistortion
 * Perspective projection without distortion model.
 * static const vpColor red
 * Definition: vpColor.h:217
 * static const vpColor none
 * Definition: vpColor.h:229
 * static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
 * static void display(const vpImage< unsigned char > &I)
 * static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
 * static void flush(const vpImage< unsigned char > &I)
 * static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
 * error that can be emitted by ViSP classes.
 * Definition: vpException.h:60
 * @ notImplementedError
 * Not implemented.
 * Definition: vpException.h:69
 * const char * what() const
 * Definition: vpException.cpp:71
 * Implementation of an homogeneous matrix and operations on such kind of matrices.
 * static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
 * Definition of the vpImage class member functions.
 * Definition: vpImage.h:131
 * unsigned int getWidth() const
 * Definition: vpImage.h:242
 * void resize(unsigned int h, unsigned int w)
 * resize the image : Image initialization
 * Definition: vpImage.h:544
 * unsigned int getSize() const
 * Definition: vpImage.h:221
 * Type * bitmap
 * points toward the bitmap
 * Definition: vpImage.h:135
 * unsigned int getHeight() const
 * Definition: vpImage.h:181
 * Command line argument parsing with support for JSON files. If a JSON file is supplied,...
 * vpJsonArgumentParser & addArgument(const std::string &name, T &parameter, const bool required=true, const std::string &help="No description")
 * Add an argument that can be provided by the user, either via command line or through the json file.
 * void parse(int argc, const char *argv[])
 * Parse the arguments.
 * Implementation of a pose vector and operations on poses.
 * Definition: vpPoseVector.h:203
 * vpRowVector t() const
 * All the data related to a single tracking frame. This contains both the input data (from a real camer...
 * vpRBRenderData renders
 * camera parameters
 * void track(const vpImage< unsigned char > &I)
 * void displayMask(vpImage< unsigned char > &Imask) const
 * void getPose(vpHomogeneousMatrix &cMo) const
 * Definition: vpRBTracker.cpp:64
 * const vpRBFeatureTrackerInput & getMostRecentFrame() const
 * Definition: vpRBTracker.h:82
 * void loadConfigurationFile(const std::string &filename)
 * void display(const vpImage< unsigned char > &I, const vpImage< vpRGBa > &IRGB, const vpImage< unsigned char > &depth)
 * void setPose(const vpHomogeneousMatrix &cMo)
 * Definition: vpRBTracker.cpp:69
 * void startTracking()
 * void setCameraParameters(const vpCameraParameters &cam, unsigned h, unsigned w)
 * vpCameraParameters getCameraParameters(const rs2_stream &stream, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithDistortion, int index=-1) const
 * void acquire(vpImage< unsigned char > &grey, double *ts=nullptr)
 * bool open(const rs2::config &cfg=rs2::config())
 * float getDepthScale()
 * std::vector< std::shared_ptr< vpDisplay > > makeDisplayGrid(unsigned int rows, unsigned int cols, unsigned int startX, unsigned int startY, unsigned int paddingX, unsigned int paddingY, Args &... args)
 * Create a grid of displays, given a set of images. All the displays will be initialized in the correct...
 * VISP_EXPORT int wait(double t0, double t)
 * VISP_EXPORT double measureTimeMs()
 * void registerArguments(vpJsonArgumentParser &parser)
 * vpImage< vpRGBf > silhouetteCanny
 * vpImage< unsigned char > isSilhouette
 * Image containing the orientation of the gradients.
 * ------------------------------------------------------------------------- */