6 #include <visp3/detection/vpDetectorAprilTag.h>
7 #include <visp3/gui/vpDisplayGDI.h>
8 #include <visp3/gui/vpDisplayOpenCV.h>
9 #include <visp3/gui/vpDisplayX.h>
10 #include <visp3/mbt/vpMbGenericTracker.h>
11 #include <visp3/sensor/vpRealSense2.h>
// Finite-state machine driving the demo main loop:
// detect the AprilTag first, then track the cube CAD model, or quit.
typedef enum { state_detection, state_tracking, state_quit } state_t;
/*!
 * Write a .cao CAD model file ("cube.cao", in the current directory)
 * describing a cube whose top face lies in the Z=0 plane, centered on the
 * origin, extruded towards negative Z.
 *
 * \param cubeEdgeSize : Cube edge length in meters. Points 0-3 are the top
 * face corners at Z=0, points 4-7 the bottom face at Z=-cubeEdgeSize.
 *
 * The file is truncated if it already exists. The 6 faces are listed
 * counter-clockwise so their normals point outwards, as the model-based
 * tracker expects.
 */
void createCaoFile(double cubeEdgeSize)
{
  std::ofstream fileStream;
  fileStream.open("cube.cao", std::ofstream::out | std::ofstream::trunc);
  fileStream << "# 3D Points\n";
  fileStream << "8 # Number of points\n";
  fileStream << cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << 0 << " # Point 0: (X, Y, Z)\n";
  fileStream << cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << 0 << " # Point 1\n";
  fileStream << -cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << 0 << " # Point 2\n";
  fileStream << -cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << 0 << " # Point 3\n";
  fileStream << -cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 4\n";
  fileStream << -cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 5\n";
  fileStream << cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 6\n";
  fileStream << cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 7\n";
  fileStream << "# 3D Lines\n";
  fileStream << "0 # Number of lines\n";
  fileStream << "# Faces from 3D lines\n";
  fileStream << "0 # Number of faces\n";
  fileStream << "# Faces from 3D points\n";
  fileStream << "6 # Number of faces\n";
  fileStream << "4 0 3 2 1 # Face 0: [number of points] [index of the 3D points]...\n";
  fileStream << "4 1 2 5 6\n";
  fileStream << "4 4 7 6 5\n";
  fileStream << "4 0 7 4 3\n";
  fileStream << "4 5 2 3 4\n";
  fileStream << "4 0 1 6 7 # Face 5\n";
  fileStream << "# 3D cylinders\n";
  fileStream << "0 # Number of cylinders\n";
  fileStream << "# 3D circles\n";
  fileStream << "0 # Number of circles\n";
  fileStream.close();
}
// --- Fragment: detectAprilTag() ---
// NOTE(review): this source is a mangled extract — the original file's line
// numbers are fused into the code text (the leading "51", "55", ...) and the
// function signature plus several statements were dropped. Preserved verbatim.
// What the surviving lines show: AprilTag detection is compiled only when
// ViSP has AprilTag support; detector.detect() fills cMo_vec with one pose
// per detected tag; on success the state machine switches to state_tracking,
// otherwise it stays in state_detection.
51 #if defined(VISP_HAVE_APRILTAG)
55 std::vector<vpHomogeneousMatrix> cMo_vec;
// Run the detector; candidate tag poses are returned in cMo_vec.
58 bool ret = detector.
detect(I, tagSize, cam, cMo_vec);
// Iterate over every detected tag pose (loop body lost in extraction —
// presumably per-tag display; TODO confirm against the original tutorial).
61 for (
size_t i = 0; i < cMo_vec.size(); i++) {
69 return state_tracking;
72 return state_detection;
// --- Fragment: track() — color-only overload ---
// NOTE(review): mangled extract; most of the body is missing. The surviving
// lines show the projection-error safeguard: when the tracker's reprojection
// error exceeds the caller-supplied threshold, fall back to detection;
// otherwise keep tracking.
87 return state_detection;
94 if (projection_error > projection_error_threshold) {
95 return state_detection;
// Local stream, presumably used to format on-screen status text —
// TODO confirm against the original tutorial.
103 std::stringstream ss;
108 return state_tracking;
// --- Fragment: track() — color + depth overload ---
// NOTE(review): mangled extract. Two alternative parameter sets survive:
// a PCL point-cloud map (build with PCL support), and a raw vpColVector
// point-cloud map plus per-camera width/height maps (build without PCL).
113 std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> mapOfPointclouds,
115 std::map<std::string,
const std::vector<vpColVector> *> mapOfPointclouds,
116 std::map<std::string, unsigned int> mapOfWidths, std::map<std::string, unsigned int> mapOfHeights,
// Track using both image and point-cloud inputs; the overload called
// depends on whether PCL is available.
128 tracker.
track(mapOfImages, mapOfPointclouds);
130 tracker.
track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
134 return state_detection;
// Same projection-error fallback as the color-only overload.
141 if (projection_error > projection_error_threshold) {
142 return state_detection;
// Overlay the model on both the color and depth images, transforming the
// color-camera pose into the depth frame with depth_M_color.
146 tracker.
display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth,
vpColor::red, 3);
150 return state_tracking;
// --- Fragment: main() ---
// NOTE(review): mangled extract — fused original line numbers and large gaps
// (object declarations, display allocation for GDI/OpenCV, tracker
// configuration, parts of the main loop, cleanup). Preserved verbatim with
// section comments; claims below are limited to what the surviving lines show.
153 int main(
int argc,
const char **argv)
// Entire demo requires AprilTag + librealsense2 + the model-based tracker.
156 #if defined(VISP_HAVE_APRILTAG) && defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_MODULE_MBT)
// Default options; texture/depth tracking are opt-in and off by default.
160 double opt_tag_size = 0.08;
161 float opt_quad_decimate = 1.0;
162 int opt_nthreads = 1;
163 double opt_cube_size = 0.125;
164 #ifdef VISP_HAVE_OPENCV
165 bool opt_use_texture =
false;
167 bool opt_use_depth =
false;
168 double opt_projection_error_threshold = 40.;
// Display is forced off when no GUI backend (X11/GDI/OpenCV) is available.
170 #if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
171 bool display_off =
true;
173 bool display_off =
false;
// Command-line parsing: each value option consumes argv[i + 1].
176 for (
int i = 1; i < argc; i++) {
177 if (std::string(argv[i]) ==
"--tag_size" && i + 1 < argc) {
178 opt_tag_size = atof(argv[i + 1]);
180 else if (std::string(argv[i]) ==
"--quad_decimate" && i + 1 < argc) {
181 opt_quad_decimate = (float)atof(argv[i + 1]);
183 else if (std::string(argv[i]) ==
"--nthreads" && i + 1 < argc) {
184 opt_nthreads = atoi(argv[i + 1]);
186 else if (std::string(argv[i]) ==
"--display_off") {
189 else if (std::string(argv[i]) ==
"--tag_family" && i + 1 < argc) {
192 else if (std::string(argv[i]) ==
"--cube_size" && i + 1 < argc) {
193 opt_cube_size = atof(argv[i + 1]);
194 #ifdef VISP_HAVE_OPENCV
196 else if (std::string(argv[i]) ==
"--texture") {
197 opt_use_texture =
true;
200 else if (std::string(argv[i]) ==
"--depth") {
201 opt_use_depth =
true;
203 else if (std::string(argv[i]) ==
"--projection_error" && i + 1 < argc) {
204 opt_projection_error_threshold = atof(argv[i + 1]);
// Usage message.
206 else if (std::string(argv[i]) ==
"--help" || std::string(argv[i]) ==
"-h") {
207 std::cout <<
"Usage: " << argv[0]
208 <<
" [--cube_size <size in m>] [--tag_size <size in m>]"
209 " [--quad_decimate <decimation>] [--nthreads <nb>]"
210 " [--tag_family <0: TAG_36h11, 1: TAG_36h10, 2: TAG_36ARTOOLKIT, "
211 " 3: TAG_25h9, 4: TAG_25h7, 5: TAG_16h5>]";
212 #if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
213 std::cout <<
" [--display_off]";
215 std::cout <<
" [--texture] [--depth] [--projection_error <30 - 100>] [--help]" << std::endl;
// Generate the cube CAD model, then configure the RealSense color+depth
// streams at 640x480 @ 30 fps (infrared disabled) and open the device.
220 createCaoFile(opt_cube_size);
226 int width = 640, height = 480, stream_fps = 30;
227 config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, stream_fps);
228 config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, stream_fps);
229 config.disable_stream(RS2_STREAM_INFRARED);
230 realsense.
open(config);
// Per-camera maps used by the generic tracker ("Camera1" = color,
// "Camera2" = depth, per the assignments further down). Point-cloud storage
// is PCL-based or raw vpColVector depending on the build.
244 std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
245 std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
247 std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> mapOfPointclouds;
248 pcl::PointCloud<pcl::PointXYZ>::Ptr pointcloud(
new pcl::PointCloud<pcl::PointXYZ>());
250 std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
251 std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
252 std::vector<vpColVector> pointcloud;
255 std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;
// Print the effective configuration.
257 std::cout <<
"Cube size: " << opt_cube_size << std::endl;
258 std::cout <<
"AprilTag size: " << opt_tag_size << std::endl;
259 std::cout <<
"AprilTag family: " << opt_tag_family << std::endl;
260 std::cout <<
"Camera parameters:" << std::endl;
261 std::cout <<
"  Color:\n" << cam_color << std::endl;
263 std::cout <<
"  Depth:\n" << cam_depth << std::endl;
264 std::cout <<
"Detection: " << std::endl;
265 std::cout <<
"  Quad decimate: " << opt_quad_decimate << std::endl;
266 std::cout <<
"  Threads number: " << opt_nthreads << std::endl;
267 std::cout <<
"Tracker: " << std::endl;
268 std::cout <<
"  Use edges  : 1" << std::endl;
269 std::cout <<
"  Use texture: "
270 #ifdef VISP_HAVE_OPENCV
271 << opt_use_texture << std::endl;
273 <<
" na" << std::endl;
275 std::cout <<
"  Use depth  : " << opt_use_depth << std::endl;
276 std::cout <<
"  Projection error: " << opt_projection_error_threshold << std::endl;
// Allocate displays for the color and depth streams (X11 branch shown;
// GDI / OpenCV-highgui branches lost in extraction).
284 d_gray =
new vpDisplayX(I_gray, 50, 50,
"Color stream");
286 d_depth =
new vpDisplayX(I_depth, 80 + I_gray.getWidth(), 50,
"Depth stream");
287 #elif defined(VISP_HAVE_GDI)
291 #elif defined(HAVE_OPENCV_HIGHGUI)
// Tracker setup: feature types, optional KLT texture features, CAD model
// for both cameras, and the fixed color-to-depth transformation.
304 std::vector<int> trackerTypes;
305 #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
330 #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
331 if (opt_use_texture) {
349 tracker.
loadModel(
"cube.cao",
"cube.cao");
350 mapOfCameraTransformations[
"Camera2"] = depth_M_color;
// Main detect-then-track loop: acquire frames (and a point cloud when depth
// is used), detect the tag to (re)initialize the pose, then track.
366 state_t state = state_detection;
369 while (state != state_quit) {
372 realsense.
acquire((
unsigned char *)I_color.bitmap, (
unsigned char *)I_depth_raw.bitmap,
nullptr, pointcloud,
nullptr);
374 realsense.
acquire((
unsigned char *)I_color.bitmap, (
unsigned char *)I_depth_raw.bitmap, &pointcloud,
nullptr,
382 mapOfImages[
"Camera1"] = &I_gray;
383 mapOfImages[
"Camera2"] = &I_depth;
385 mapOfPointclouds[
"Camera2"] = pointcloud;
387 mapOfPointclouds[
"Camera2"] = &pointcloud;
388 mapOfWidths[
"Camera2"] = width;
389 mapOfHeights[
"Camera2"] = height;
// Detection phase: on success, seed both camera poses from the tag pose.
397 if (state == state_detection) {
398 state = detectAprilTag(I_gray, detector, opt_tag_size, cam_color, cMo);
401 if (state == state_tracking) {
403 mapOfCameraPoses[
"Camera1"] = cMo;
404 mapOfCameraPoses[
"Camera2"] = depth_M_color * cMo;
// Tracking phase: dispatch to the depth-enabled overloads (PCL or raw)
// or to the color-only overload.
413 if (state == state_tracking) {
416 state = track(mapOfImages, mapOfPointclouds, I_gray, I_depth, depth_M_color, tracker,
417 opt_projection_error_threshold, cMo);
419 state = track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights, I_gray, I_depth, depth_M_color,
420 tracker, opt_projection_error_threshold, cMo);
424 state = track(I_gray, tracker, opt_projection_error_threshold, cMo);
427 std::stringstream ss;
// Error reporting and build-configuration fallback messages.
456 std::cerr <<
"Catch an exception: " << e.
getMessage() << std::endl;
463 #ifndef VISP_HAVE_APRILTAG
464 std::cout <<
"ViSP is not build with Apriltag support" << std::endl;
466 #ifndef VISP_HAVE_REALSENSE2
467 std::cout <<
"ViSP is not build with librealsense2 support" << std::endl;
469 std::cout <<
"Install missing 3rd parties, configure and build ViSP to run this tutorial" << std::endl;
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
static const vpColor none
void setAprilTagQuadDecimate(float quadDecimate)
bool detect(const vpImage< unsigned char > &I) vp_override
@ TAG_36h11
AprilTag 36h11 pattern (recommended)
void setAprilTagNbThreads(int nThreads)
size_t getNbObjects() const
Display for windows using GDI (available on any windows 32 platform).
The vpDisplayOpenCV allows to display image using the OpenCV library. Thus to enable this class OpenC...
Use the X11 console to display images on unix-like OS. Thus to enable this class X11 should be instal...
Class that defines generic functionalities for display.
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
const char * getMessage() const
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void createDepthHistogram(const vpImage< uint16_t > &src_depth, vpImage< vpRGBa > &dest_rgba)
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this ...
void setBlockSize(int blockSize)
void setQuality(double qualityLevel)
void setHarrisFreeParameter(double harris_k)
void setMaxFeatures(int maxCount)
void setMinDistance(double minDistance)
void setWindowSize(int winSize)
void setPyramidLevels(int pyrMaxLevel)
static double rad(double deg)
Real-time 6D object pose tracking using its CAD model.
virtual void initFromPose(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo) vp_override
virtual void setKltMaskBorder(const unsigned int &e)
virtual unsigned int getNbFeaturesEdge() const
virtual void setAngleDisappear(const double &a) vp_override
virtual void getPose(vpHomogeneousMatrix &cMo) const vp_override
virtual void display(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &col, unsigned int thickness=1, bool displayFullModel=false) vp_override
virtual void getCameraParameters(vpCameraParameters &camera) const vp_override
virtual unsigned int getNbFeaturesKlt() const
virtual void setMovingEdge(const vpMe &me)
virtual void setAngleAppear(const double &a) vp_override
virtual unsigned int getNbFeaturesDepthDense() const
virtual void loadModel(const std::string &modelFile, bool verbose=false, const vpHomogeneousMatrix &T=vpHomogeneousMatrix()) vp_override
virtual void setKltOpencv(const vpKltOpencv &t)
virtual void setCameraParameters(const vpCameraParameters &camera) vp_override
virtual double computeCurrentProjectionError(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &_cMo, const vpCameraParameters &_cam) vp_override
virtual void setDisplayFeatures(bool displayF) vp_override
virtual void setCameraTransformationMatrix(const std::string &cameraName, const vpHomogeneousMatrix &cameraTransformationMatrix)
virtual void track(const vpImage< unsigned char > &I) vp_override
void setMu1(const double &mu_1)
void setRange(const unsigned int &range)
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
void setMaskNumber(const unsigned int &mask_number)
void setThreshold(const double &threshold)
void setSampleStep(const double &sample_step)
void setMaskSize(const unsigned int &mask_size)
void setMu2(const double &mu_2)
vpCameraParameters getCameraParameters(const rs2_stream &stream, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithDistortion, int index=-1) const
void acquire(vpImage< unsigned char > &grey, double *ts=nullptr)
bool open(const rs2::config &cfg=rs2::config())
vpHomogeneousMatrix getTransformation(const rs2_stream &from, const rs2_stream &to, int from_index=-1) const