#include <iostream>
#include <visp3/core/vpConfig.h>

#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif

#ifndef VISP_HAVE_REALSENSE2
int main()
{
  std::cerr << "To run this tutorial, recompile ViSP with the Realsense third party library" << std::endl;
  return EXIT_SUCCESS;
}
#else
#include <visp3/sensor/vpRealSense2.h>
#include <visp3/io/vpParseArgv.h>

#include <visp3/ar/vpPanda3DFrameworkManager.h>

#include <visp3/rbt/vpRBTracker.h>

#include "render-based-tutorial-utils.h"

#ifndef DOXYGEN_SHOULD_SKIP_THIS
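// The vpRBTrackerTutorial helpers used below (BaseArguments, vpRBExperimentLogger,
// vpRBExperimentPlotter, createDisplays, enableRendererProfiling) presumably come from the
// render-based-tutorial-utils.h header shipped alongside this tutorial.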
// Options for the requested RealSense stream (the struct name used here is illustrative).
struct RealsenseStreamArguments
{
  void registerArguments(vpJsonArgumentParser &parser)
  {
    parser.addArgument("--height", height, false, "Realsense requested image height")
      .addArgument("--width", width, false, "Realsense requested image width")
      .addArgument("--fps", fps, false, "Realsense requested framerate");
  }

  unsigned int height, width, fps;
};
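// Usage sketch (binary name and option values are illustrative):
//   ./tutorial-rbt-realsense --width 848 --height 480 --fps 60
// Since parsing goes through vpJsonArgumentParser, the same options can also be supplied in a JSON file.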
// Signature inferred from how updateDepth is called in main().
void updateDepth(const vpImage<uint16_t> &depthRaw, float depthScale, float maxZDisplay,
                 vpImage<float> &depth, vpImage<unsigned char> &IdepthDisplay)
{
#ifdef VISP_HAVE_OPENMP
#pragma omp parallel for
#endif
  for (unsigned int i = 0; i < depthRaw.getSize(); ++i) {
    depth.bitmap[i] = depthScale * static_cast<float>(depthRaw.bitmap[i]);
    IdepthDisplay.bitmap[i] = depth.bitmap[i] > maxZDisplay ? 0 : static_cast<unsigned int>((depth.bitmap[i] / maxZDisplay) * 255.f);
  }
}
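// Note: the Z16 depth stream delivers raw 16-bit values; multiplying by the device depth scale
// gives metric depth for the tracker, while the display image maps [0, maxZDisplay] to [0, 255]
// and paints anything farther than maxZDisplay black.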
#endif // DOXYGEN_SHOULD_SKIP_THIS

int main(int argc, const char **argv)
{
  vpRBTrackerTutorial::BaseArguments baseArgs;
  RealsenseStreamArguments realsenseArgs;
  vpRBTrackerTutorial::vpRBExperimentLogger logger;
  vpRBTrackerTutorial::vpRBExperimentPlotter plotter;

  vpJsonArgumentParser parser(
    "Tutorial showing the usage of the Render-Based tracker with a RealSense camera",
    "--config", "/"); // JSON config option name and nest separator (values assumed)
  baseArgs.registerArguments(parser);
  realsenseArgs.registerArguments(parser);
  logger.registerArguments(parser);
  plotter.registerArguments(parser);

  parser.parse(argc, argv);

  baseArgs.postProcessArguments();
  plotter.postProcessArguments(baseArgs.display);

  if (baseArgs.enableRenderProfiling) {
    vpRBTrackerTutorial::enableRendererProfiling();
  }

  std::cout << "Loading tracker: " << baseArgs.trackerConfiguration << std::endl;
  vpRBTracker tracker;
  tracker.loadConfigurationFile(baseArgs.trackerConfiguration);

  const unsigned int width = realsenseArgs.width, height = realsenseArgs.height;
  const unsigned int fps = realsenseArgs.fps;
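  // The remainder of main() relies on preallocated image buffers; a minimal sketch, with types
  // inferred from how they are used below:
  //   vpImage<vpRGBa> Icol(height, width);          // color frame (RGBA8)
  //   vpImage<unsigned char> Id(height, width);     // grayscale frame used for display and init
  //   vpImage<uint16_t> depthRaw(height, width);    // raw Z16 depth
  //   vpImage<float> depth(height, width);          // metric depth fed to the tracker
  //   vpImage<unsigned char> IdepthDisplay(height, width), IProbaDisplay(height, width);
  //   ... plus InormDisplay and cannyDisplay for the optional debug views.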
  std::cout << "Opening realsense with " << width << "x" << height << " @ " << fps << "fps" << std::endl;
  vpRealSense2 realsense;
  rs2::config config;
  config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps);
  config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps);
  rs2::align align_to(RS2_STREAM_COLOR);
  try {
    realsense.open(config);
  }
  catch (const vpException &e) {
    std::cout << "Caught an exception: " << e.what() << std::endl;
    std::cout << "Check if the Realsense camera is connected..." << std::endl;
    return EXIT_FAILURE;
  }

  const float depthScale = realsense.getDepthScale();
  // Provide the color stream intrinsics to the tracker (undistorted projection model).
  const vpCameraParameters cam =
    realsense.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
  tracker.setCameraParameters(cam, height, width);
  // Warmup: grab a few frames so that the sensor settles before initialization.
  for (int i = 0; i < 10; ++i) {
    realsense.acquire((unsigned char *)Icol.bitmap, (unsigned char *)depthRaw.bitmap, nullptr, nullptr, &align_to);
  }

  std::cout << "Creating displays" << std::endl;
  std::vector<std::shared_ptr<vpDisplay>> displays, displaysDebug;
  if (baseArgs.display) {
    displays = vpRBTrackerTutorial::createDisplays(Id, Icol, IdepthDisplay, IProbaDisplay);
    if (baseArgs.debugDisplay) {
      // Factory call and grid layout values are illustrative; these windows show intermediate tracker renders.
      displaysDebug = vpDisplayFactory::makeDisplayGrid(1, 2, 0, 0, 20, 20,
        "Normals in object frame", InormDisplay,
        "Depth canny", cannyDisplay);
    }
    plotter.init(displays);
  }
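  // createDisplays presumably opens one window per image passed to it (grayscale, color, depth and
  // probability-mask views); the plotter is then initialized relative to those windows.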
  if (baseArgs.display && !baseArgs.hasInlineInit()) {
    // Preview loop (reconstructed sketch): show the stream until the user clicks to start the initialization.
    bool ready = false;
    while (!ready) {
      realsense.acquire((unsigned char *)Icol.bitmap, (unsigned char *)depthRaw.bitmap, nullptr, nullptr, &align_to);
      updateDepth(depthRaw, depthScale, baseArgs.maxDepthDisplay, depth, IdepthDisplay);
      vpImageConvert::convert(Icol, Id);
      vpDisplay::display(Id);
      vpDisplay::displayText(Id, vpImagePoint(20, 20), "Click to start initialization", vpColor::red);
      vpDisplay::flush(Id);
      ready = vpDisplay::getClick(Id, false);
    }
  }

  realsense.acquire((unsigned char *)Icol.bitmap, (unsigned char *)depthRaw.bitmap, nullptr, nullptr, &align_to);
  vpImageConvert::convert(Icol, Id);
  updateDepth(depthRaw, depthScale, baseArgs.maxDepthDisplay, depth, IdepthDisplay);

  std::cout << "Starting init" << std::endl;
  if (baseArgs.hasInlineInit()) {
    tracker.setPose(baseArgs.cMoInit);
  }
  else if (baseArgs.display) {
    tracker.initClick(Id, baseArgs.initFile, true);
  }
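  // initClick displays Id and asks the user to click the reference points listed in baseArgs.initFile
  // to estimate the initial object pose; the last argument enables the help display.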
  vpHomogeneousMatrix cMo;
  tracker.getPose(cMo);
  std::cout << "Starting pose: " << vpPoseVector(cMo).t() << std::endl;

  if (baseArgs.display) {
    // Refresh the displays once with the initial pose before entering the tracking loop.
    vpDisplay::display(Id);
    vpDisplay::flush(Id);
  }
  unsigned int iter = 1;
  const double expStart = vpTime::measureTimeMs();
  // Main tracking loop: the timing variables and the exit condition are reconstructed around the
  // original acquisition, tracking, display, logging and plotting calls.
  while (true) {
    const double frameStart = vpTime::measureTimeMs();
    realsense.acquire((unsigned char *)Icol.bitmap, (unsigned char *)depthRaw.bitmap, nullptr, nullptr, &align_to);
    updateDepth(depthRaw, depthScale, baseArgs.maxDepthDisplay, depth, IdepthDisplay);
    vpImageConvert::convert(Icol, Id);

    const double trackingStart = vpTime::measureTimeMs();
    tracker.track(Id, Icol, depth);
    const double trackingEnd = vpTime::measureTimeMs();

    const double displayStart = vpTime::measureTimeMs();
    if (baseArgs.display) {
      if (baseArgs.debugDisplay) {
        // Debug views (normals in object frame, depth canny, mask) are refreshed here from the
        // tracker's most recent frame data.
      }
      tracker.display(Id, Icol, IdepthDisplay);
      vpDisplay::flush(Id);
      if (vpDisplay::getClick(Id, false)) {
        break; // exit condition shown here is illustrative
      }
    }
    const double displayEnd = vpTime::measureTimeMs();

    logger.logFrame(tracker, iter, Id, Icol, IdepthDisplay, IProbaDisplay);

    const double frameEnd = vpTime::measureTimeMs();
    std::cout << "Iter " << iter << ": " << round(frameEnd - frameStart) << "ms" << std::endl;
    std::cout << "- Tracking: " << round(trackingEnd - trackingStart) << "ms" << std::endl;
    std::cout << "- Display: " << round(displayEnd - displayStart) << "ms" << std::endl;
    plotter.plot(tracker, (frameEnd - expStart) / 1000.0);
    iter++;
  }

  return EXIT_SUCCESS;
}
#endif