#include <fstream>
#include <iostream>
#include <sstream>

#include <visp3/core/vpConfig.h>
#include <visp3/detection/vpDetectorAprilTag.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/sensor/vpRealSense2.h>
#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif
typedef enum { state_detection, state_tracking, state_quit } state_t;
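// Writes the CAD model of the cube to "cube.cao": 8 vertices and 6 faces built from those
// vertices, the face carrying the AprilTag lying in the Z = 0 plane.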
void createCaoFile(double cubeEdgeSize)
{
  std::ofstream fileStream;
  fileStream.open("cube.cao", std::ofstream::out | std::ofstream::trunc);
  fileStream << "V1\n";
  fileStream << "# 3D Points\n";
  fileStream << "8 # Number of points\n";
  fileStream << cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << 0 << " # Point 0: (X, Y, Z)\n";
  fileStream << cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << 0 << " # Point 1\n";
  fileStream << -cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << 0 << " # Point 2\n";
  fileStream << -cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << 0 << " # Point 3\n";
  fileStream << -cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 4\n";
  fileStream << -cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 5\n";
  fileStream << cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 6\n";
  fileStream << cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 7\n";
  fileStream << "# 3D Lines\n";
  fileStream << "0 # Number of lines\n";
  fileStream << "# Faces from 3D lines\n";
  fileStream << "0 # Number of faces\n";
  fileStream << "# Faces from 3D points\n";
  fileStream << "6 # Number of faces\n";
  fileStream << "4 0 3 2 1 # Face 0: [number of points] [index of the 3D points]...\n";
  fileStream << "4 1 2 5 6\n";
  fileStream << "4 4 7 6 5\n";
  fileStream << "4 0 7 4 3\n";
  fileStream << "4 5 2 3 4\n";
  fileStream << "4 0 1 6 7 # Face 5\n";
  fileStream << "# 3D cylinders\n";
  fileStream << "0 # Number of cylinders\n";
  fileStream << "# 3D circles\n";
  fileStream << "0 # Number of circles\n";
  fileStream.close();
}
state_t detectAprilTag(const vpImage<unsigned char> &I, vpDetectorAprilTag &detector, double tagSize,
                       const vpCameraParameters &cam, vpHomogeneousMatrix &cMo)
{
#if defined(VISP_HAVE_APRILTAG)
  std::vector<vpHomogeneousMatrix> cMo_vec;

  // Detect the tags and compute their pose from the known tag size
  bool ret = detector.detect(I, tagSize, cam, cMo_vec);

  for (size_t i = 0; i < cMo_vec.size(); i++) {
    vpDisplay::displayFrame(I, cMo_vec[i], cam, tagSize / 2, vpColor::none, 3);
  }
  if (ret && detector.getNbObjects() > 0) { // keep the first detected tag to initialize the tracker
    cMo = cMo_vec[0];
    return state_tracking;
  }
#endif
  return state_detection;
}
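// Monocular tracking helper, used when depth is disabled: the cube is tracked in the grayscale
// image only, and the state machine falls back to detection whenever tracking fails or the
// reprojection error exceeds the user threshold.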
state_t track(const vpImage<unsigned char> &I, vpMbGenericTracker &tracker, double projection_error_threshold,
              vpHomogeneousMatrix &cMo)
{
  try {
    tracker.track(I);
    tracker.getPose(cMo);
  }
  catch (...) {
    return state_detection;
  }

  // Fall back to detection when the reprojection error exceeds the user threshold
  vpCameraParameters cam;
  tracker.getCameraParameters(cam);
  double projection_error = tracker.computeCurrentProjectionError(I, cMo, cam);
  if (projection_error > projection_error_threshold) {
    return state_detection;
  }

  std::stringstream ss; // used to overlay tracking information on the display
  return state_tracking;
}
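// Color + depth tracking helper: "Camera1" (color) contributes edge and, optionally, KLT
// features, while "Camera2" (depth) adds a dense depth constraint from the point cloud.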
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
state_t track(std::map<std::string, const vpImage<unsigned char> *> mapOfImages,
              std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> mapOfPointclouds,
#else
state_t track(std::map<std::string, const vpImage<unsigned char> *> mapOfImages,
              std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds,
              std::map<std::string, unsigned int> mapOfWidths, std::map<std::string, unsigned int> mapOfHeights,
#endif
              const vpImage<unsigned char> &I_gray, const vpImage<unsigned char> &I_depth,
              const vpHomogeneousMatrix &depth_M_color, vpMbGenericTracker &tracker,
              double projection_error_threshold, vpHomogeneousMatrix &cMo)
{
  vpCameraParameters cam_color, cam_depth;
  tracker.getCameraParameters(cam_color, cam_depth);

  try {
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
    tracker.track(mapOfImages, mapOfPointclouds);
#else
    tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
#endif
  }
  catch (...) {
    return state_detection;
  }
  tracker.getPose(cMo);

  // Fall back to detection when the reprojection error exceeds the user threshold
  double projection_error = tracker.computeCurrentProjectionError(I_gray, cMo, cam_color);
  if (projection_error > projection_error_threshold) {
    return state_detection;
  }

  // Display the model in both the color and the depth views
  tracker.display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth, vpColor::red, 3);

  return state_tracking;
}
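// Typical invocation once built (the binary name is an assumption; the options are parsed at
// the beginning of main() below):
//   ./tutorial-mb-generic-tracker-apriltag-rs2 --cube_size 0.125 --tag_size 0.08 --texture --depth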
int main(int argc, const char **argv)
{
#if defined(VISP_HAVE_APRILTAG) && defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_MODULE_MBT)
  double opt_tag_size = 0.08;
  float opt_quad_decimate = 1.0;
  int opt_nthreads = 1;
  double opt_cube_size = 0.125;
#ifdef VISP_HAVE_OPENCV
  bool opt_use_texture = false;
#endif
  bool opt_use_depth = false;
  double opt_projection_error_threshold = 40.;
#if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
  bool display_off = true;
#else
  bool display_off = false;
#endif
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) {
      opt_tag_size = atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) {
      opt_quad_decimate = (float)atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--nthreads" && i + 1 < argc) {
      opt_nthreads = atoi(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--display_off") {
      display_off = true;
    }
    else if (std::string(argv[i]) == "--tag_family" && i + 1 < argc) {
      opt_tag_family = (vpDetectorAprilTag::vpAprilTagFamily)atoi(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--cube_size" && i + 1 < argc) {
      opt_cube_size = atof(argv[i + 1]);
    }
#ifdef VISP_HAVE_OPENCV
    else if (std::string(argv[i]) == "--texture") {
      opt_use_texture = true;
    }
#endif
    else if (std::string(argv[i]) == "--depth") {
      opt_use_depth = true;
    }
    else if (std::string(argv[i]) == "--projection_error" && i + 1 < argc) {
      opt_projection_error_threshold = atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: " << argv[0]
                << " [--cube_size <size in m>] [--tag_size <size in m>]"
                   " [--quad_decimate <decimation>] [--nthreads <nb>]"
                   " [--tag_family <0: TAG_36h11, 1: TAG_36h10, 2: TAG_36ARTOOLKIT, "
                   " 3: TAG_25h9, 4: TAG_25h7, 5: TAG_16h5>]";
#if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
      std::cout << " [--display_off]";
#endif
      std::cout << " [--texture] [--depth] [--projection_error <30 - 100>] [--help]" << std::endl;
      return EXIT_SUCCESS;
    }
  }
  createCaoFile(opt_cube_size);

  vpRealSense2 realsense;
  rs2::config config;
  int width = 640, height = 480, stream_fps = 30;
  config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, stream_fps);
  config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, stream_fps);
  config.disable_stream(RS2_STREAM_INFRARED);
  realsense.open(config);
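  // cam_color, cam_depth and depth_M_color used below are typically queried from the sensor once
  // the streams are open; a minimal sketch, assuming the distortion-free projection model:
  //   vpCameraParameters cam_color =
  //     realsense.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
  //   vpCameraParameters cam_depth =
  //     realsense.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithoutDistortion);
  //   vpHomogeneousMatrix depth_M_color = realsense.getTransformation(RS2_STREAM_COLOR, RS2_STREAM_DEPTH);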
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
  std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> mapOfPointclouds;
  pcl::PointCloud<pcl::PointXYZ>::Ptr pointcloud(new pcl::PointCloud<pcl::PointXYZ>());
#else
  std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
  std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
  std::vector<vpColVector> pointcloud;
#endif

  std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;
  std::cout << "Cube size: " << opt_cube_size << std::endl;
  std::cout << "AprilTag size: " << opt_tag_size << std::endl;
  std::cout << "AprilTag family: " << opt_tag_family << std::endl;
  std::cout << "Camera parameters:" << std::endl;
  std::cout << " Color:\n" << cam_color << std::endl;
  std::cout << " Depth:\n" << cam_depth << std::endl;
  std::cout << "Detection: " << std::endl;
  std::cout << " Quad decimate: " << opt_quad_decimate << std::endl;
  std::cout << " Threads number: " << opt_nthreads << std::endl;
  std::cout << "Tracker: " << std::endl;
  std::cout << " Use edges : 1" << std::endl;
  std::cout << " Use texture: "
#ifdef VISP_HAVE_OPENCV
    << opt_use_texture << std::endl;
#else
    << " na" << std::endl;
#endif
  std::cout << " Use depth : " << opt_use_depth << std::endl;
  std::cout << " Projection error: " << opt_projection_error_threshold << std::endl;
#if defined(VISP_HAVE_X11)
    d_gray = new vpDisplayX(I_gray, 50, 50, "Color stream");
    d_depth = new vpDisplayX(I_depth, 80 + I_gray.getWidth(), 50, "Depth stream");
#elif defined(VISP_HAVE_GDI)
    // same pattern with vpDisplayGDI
#elif defined(HAVE_OPENCV_HIGHGUI)
    // same pattern with vpDisplayOpenCV
#endif
  std::vector<int> trackerTypes;
#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
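  // trackerTypes holds one feature type per camera; a plausible setup (assumed, not shown here):
  // edge + KLT features for the color camera and dense depth features for the depth camera, e.g.
  //   trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
  //   trackerTypes.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER);
  // with the tracker then constructed as vpMbGenericTracker tracker(trackerTypes);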
#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
  if (opt_use_texture) {
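    // klt_settings is a vpKltOpencv holding the keypoint detection/tracking parameters;
    // a plausible configuration (values assumed):
    //   vpKltOpencv klt_settings;
    //   klt_settings.setMaxFeatures(300);
    //   klt_settings.setWindowSize(5);
    //   klt_settings.setQuality(0.015);
    //   klt_settings.setMinDistance(8);
    //   klt_settings.setHarrisFreeParameter(0.01);
    //   klt_settings.setBlockSize(3);
    //   klt_settings.setPyramidLevels(3);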
    tracker.setKltOpencv(klt_settings);
    tracker.setKltMaskBorder(5);
  tracker.loadModel("cube.cao", "cube.cao");
  mapOfCameraTransformations["Camera2"] = depth_M_color;
  state_t state = state_detection;

  while (state != state_quit) {
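    // Each iteration grabs color + depth (and the point cloud), converts the frames, then either
    // detects the AprilTag to (re)initialize the pose or tracks the cube and displays the result.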
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
    realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap,
                      nullptr, pointcloud, nullptr);
#else
    realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap,
                      &pointcloud, nullptr, nullptr);
#endif
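    // The RGBA color frame and the raw 16-bit depth frame are then converted for processing and
    // display, presumably with vpImageConvert::convert(I_color, I_gray) and
    // vpImageConvert::createDepthHistogram(I_depth_raw, I_depth).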
    mapOfImages["Camera1"] = &I_gray;
    mapOfImages["Camera2"] = &I_depth;
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
    mapOfPointclouds["Camera2"] = pointcloud;
#else
    mapOfPointclouds["Camera2"] = &pointcloud;
    mapOfWidths["Camera2"] = width;
    mapOfHeights["Camera2"] = height;
#endif
    if (state == state_detection) {
      state = detectAprilTag(I_gray, detector, opt_tag_size, cam_color, cMo);

      // Initialize the tracker with the pose computed from the tag
      if (state == state_tracking) {
        mapOfCameraPoses["Camera1"] = cMo;
        mapOfCameraPoses["Camera2"] = depth_M_color * cMo;
    if (state == state_tracking) {
      if (opt_use_depth) {
#if defined(VISP_HAVE_PCL) && defined(VISP_HAVE_PCL_COMMON)
        state = track(mapOfImages, mapOfPointclouds, I_gray, I_depth, depth_M_color, tracker,
                      opt_projection_error_threshold, cMo);
#else
        state = track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights, I_gray, I_depth, depth_M_color,
                      tracker, opt_projection_error_threshold, cMo);
#endif
      }
      else {
        state = track(I_gray, tracker, opt_projection_error_threshold, cMo);
      }
      std::stringstream ss;
  catch (const vpException &e) {
    std::cerr << "Catch an exception: " << e.getMessage() << std::endl;
  }
#ifndef VISP_HAVE_APRILTAG
  std::cout << "ViSP is not built with Apriltag support" << std::endl;
#endif
#ifndef VISP_HAVE_REALSENSE2
  std::cout << "ViSP is not built with librealsense2 support" << std::endl;
#endif
  std::cout << "Install missing 3rd parties, configure and build ViSP to run this tutorial" << std::endl;
#endif
  return EXIT_SUCCESS;
}