#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/sensor/vpRealSense2.h>
#include <visp3/detection/vpDetectorAprilTag.h>
#include <visp3/mbt/vpMbGenericTracker.h>

// Write a minimal CAD model of a cube (edge length cubeEdgeSize) to cube.cao
void createCaoFile(double cubeEdgeSize)
{
  std::ofstream fileStream;
  fileStream.open("cube.cao", std::ofstream::out | std::ofstream::trunc);
  fileStream << "V1\n";
  fileStream << "# 3D Points\n";
  fileStream << "8 # Number of points\n";
  fileStream << cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << 0 << " # Point 0: (X, Y, Z)\n";
  fileStream << cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << 0 << " # Point 1\n";
  fileStream << -cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << 0 << " # Point 2\n";
  fileStream << -cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << 0 << " # Point 3\n";
  fileStream << -cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 4\n";
  fileStream << -cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 5\n";
  fileStream << cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 6\n";
  fileStream << cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 7\n";
  fileStream << "# 3D Lines\n";
  fileStream << "0 # Number of lines\n";
  fileStream << "# Faces from 3D lines\n";
  fileStream << "0 # Number of faces\n";
  fileStream << "# Faces from 3D points\n";
  fileStream << "6 # Number of faces\n";
  fileStream << "4 0 3 2 1 # Face 0: [number of points] [index of the 3D points]...\n";
  fileStream << "4 1 2 5 6\n";
  fileStream << "4 4 7 6 5\n";
  fileStream << "4 0 7 4 3\n";
  fileStream << "4 5 2 3 4\n";
  fileStream << "4 0 1 6 7 # Face 5\n";
  fileStream << "# 3D cylinders\n";
  fileStream << "0 # Number of cylinders\n";
  fileStream << "# 3D circles\n";
  fileStream << "0 # Number of circles\n";
  fileStream.close();
}
#if defined(VISP_HAVE_APRILTAG)
// detectAprilTag() fragment; the signature is inferred from the call site in main()
state_t detectAprilTag(const vpImage<unsigned char> &I, vpDetectorAprilTag &detector, double tagSize,
                       const vpCameraParameters &cam, vpHomogeneousMatrix &cMo)
{
  std::vector<vpHomogeneousMatrix> cMo_vec;
  // Detection: one pose is estimated per detected tag
  bool ret = detector.detect(I, tagSize, cam, cMo_vec);
  for (size_t i = 0; i < cMo_vec.size(); i++) {
    // ... display the pose of tag i ...
  }
  // ... if a tag was found, keep its pose and switch to tracking ...
  return state_tracking;
  // ... otherwise stay in detection ...
  return state_detection;
}
#endif // #if defined(VISP_HAVE_APRILTAG)

// track() fragment: mono-camera tracking on the gray image
  // ... track() threw an exception: fall back to detection ...
  return state_detection;
  // ... re-detect the tag when the projection error gets too large ...
  if (projection_error > projection_error_threshold) {
    return state_detection;
  }
  // ...
  std::stringstream ss;
  // ...
  return state_tracking;
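
// NOTE: the construction and configuration of the detector used above happen in main()
// and are not part of this excerpt. An illustrative sketch with the options parsed
// below (the TAG_36h11 default is an assumption):
vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11);
detector.setAprilTagQuadDecimate(opt_quad_decimate); // speed / robustness trade-off
detector.setAprilTagNbThreads(opt_nthreads);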
// track() overload used with --depth: color image plus depth data (fragment)
state_t track(std::map<std::string, const vpImage<unsigned char> *> mapOfImages,
#ifdef VISP_HAVE_PCL
              std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> mapOfPointclouds,
#else
              std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds,
              std::map<std::string, unsigned int> mapOfWidths,
              std::map<std::string, unsigned int> mapOfHeights,
#endif
              /* ... I_gray, I_depth, depth_M_color, tracker, projection_error_threshold, cMo ... */)
{
  // ... track the object; on failure fall back to detection ...
#ifdef VISP_HAVE_PCL
  tracker.track(mapOfImages, mapOfPointclouds);
#else
  tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
#endif
  // ...
  return state_detection;
  // ... re-detect the tag when the projection error gets too large ...
  if (projection_error > projection_error_threshold) {
    return state_detection;
  }
  // Display the projected cube model in both the color and the depth views
  tracker.display(I_gray, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth, vpColor::red, 3);
  // ...
  return state_tracking;
}
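
// NOTE: both track() overloads above share the same recovery criterion. After each
// call to vpMbGenericTracker::track(), the projection error of the estimated pose is
// compared against a user threshold (--projection_error) and the state machine falls
// back to AprilTag detection when it is exceeded. A minimal sketch of that check,
// with illustrative variable names:
vpCameraParameters cam;
tracker.getCameraParameters(cam);      // intrinsics currently used by the tracker
vpHomogeneousMatrix cMo;
tracker.getPose(cMo);                  // pose estimated by the last call to track()
double projection_error = tracker.computeCurrentProjectionError(I_gray, cMo, cam);
if (projection_error > projection_error_threshold)
  state = state_detection;             // tracking considered lost: re-detect the tag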
int main(int argc, const char **argv)
{
#if defined(VISP_HAVE_APRILTAG) && defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_MODULE_MBT)
  // ... (opt_tag_family default not shown) ...
  double opt_tag_size = 0.08;
  float opt_quad_decimate = 1.0;
  int opt_nthreads = 1;
  double opt_cube_size = 0.125; // 12.5 cm cube by default
#ifdef VISP_HAVE_OPENCV
  bool opt_use_texture = false;
#endif
  bool opt_use_depth = false;
  double opt_projection_error_threshold = 40.;

#if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
  bool display_off = true;
#else
  bool display_off = false;
#endif
  // Command-line options
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) {
      opt_tag_size = atof(argv[i + 1]);
    } else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) {
      opt_quad_decimate = (float)atof(argv[i + 1]);
    } else if (std::string(argv[i]) == "--nthreads" && i + 1 < argc) {
      opt_nthreads = atoi(argv[i + 1]);
    } else if (std::string(argv[i]) == "--display_off") {
      // ...
    } else if (std::string(argv[i]) == "--tag_family" && i + 1 < argc) {
      // ...
    } else if (std::string(argv[i]) == "--cube_size" && i + 1 < argc) {
      opt_cube_size = atof(argv[i + 1]);
#ifdef VISP_HAVE_OPENCV
    } else if (std::string(argv[i]) == "--texture") {
      opt_use_texture = true;
#endif
    } else if (std::string(argv[i]) == "--depth") {
      opt_use_depth = true;
    } else if (std::string(argv[i]) == "--projection_error" && i + 1 < argc) {
      opt_projection_error_threshold = atof(argv[i + 1]);
    } else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: " << argv[0]
                << " [--cube_size <size in m>] [--tag_size <size in m>]"
                   " [--quad_decimate <decimation>] [--nthreads <nb>]"
                   " [--tag_family <0: TAG_36h11, 1: TAG_36h10, 2: TAG_36ARTOOLKIT, "
                   " 3: TAG_25h9, 4: TAG_25h7, 5: TAG_16h5>]";
#if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
      std::cout << " [--display_off]";
#endif
      std::cout << " [--texture] [--depth] [--projection_error <30 - 100>] [--help]" << std::endl;
      // ...
    }
  }
  createCaoFile(opt_cube_size);

  // ...
  // Configure the RealSense grabber: 640x480 color and depth streams at 30 Hz, infrared disabled
  vpRealSense2 realsense;
  rs2::config config;
  int width = 640, height = 480, stream_fps = 30;
  config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, stream_fps);
  config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, stream_fps);
  config.disable_stream(RS2_STREAM_INFRARED);
  realsense.open(config);
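
// NOTE: the image buffers used below (I_color, I_gray, I_depth, I_depth_raw) are
// declared in lines omitted from this excerpt. Assumed declarations matching the
// 640x480 streams configured above:
vpImage<vpRGBa> I_color(height, width);        // RGBA color frame from the camera
vpImage<unsigned char> I_gray(height, width);  // grayscale copy used for detection and tracking
vpImage<unsigned char> I_depth(height, width); // depth image rendered for display/tracking
vpImage<uint16_t> I_depth_raw(height, width);  // raw 16-bit depth (Z16) frame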
  // Containers shared between the color camera ("Camera1") and the depth camera ("Camera2")
  std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformations;
  std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
#ifdef VISP_HAVE_PCL
  std::map<std::string, pcl::PointCloud<pcl::PointXYZ>::ConstPtr> mapOfPointclouds;
  pcl::PointCloud<pcl::PointXYZ>::Ptr pointcloud(new pcl::PointCloud<pcl::PointXYZ>());
#else
  std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
  std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
  std::vector<vpColVector> pointcloud;
#endif

  std::map<std::string, vpHomogeneousMatrix> mapOfCameraPoses;
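
// NOTE: cam_color, cam_depth and depth_M_color are used below but their initialization
// is not part of this excerpt. They can be queried from the grabber; a sketch assuming
// the undistorted projection model:
vpCameraParameters cam_color =
    realsense.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
vpCameraParameters cam_depth =
    realsense.getCameraParameters(RS2_STREAM_DEPTH, vpCameraParameters::perspectiveProjWithoutDistortion);
// Transformation from the color frame to the depth frame
vpHomogeneousMatrix depth_M_color = realsense.getTransformation(RS2_STREAM_COLOR, RS2_STREAM_DEPTH);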
  std::cout << "Cube size: " << opt_cube_size << std::endl;
  std::cout << "AprilTag size: " << opt_tag_size << std::endl;
  std::cout << "AprilTag family: " << opt_tag_family << std::endl;
  std::cout << "Camera parameters:" << std::endl;
  std::cout << " Color:\n" << cam_color << std::endl;
  std::cout << " Depth:\n" << cam_depth << std::endl;
  std::cout << "Detection: " << std::endl;
  std::cout << " Quad decimate: " << opt_quad_decimate << std::endl;
  std::cout << " Threads number: " << opt_nthreads << std::endl;
  std::cout << "Tracker: " << std::endl;
  std::cout << " Use edges : 1" << std::endl;
  std::cout << " Use texture: "
#ifdef VISP_HAVE_OPENCV
            << opt_use_texture << std::endl;
#else
            << " na" << std::endl;
#endif
  std::cout << " Use depth : " << opt_use_depth << std::endl;
  std::cout << " Projection error: " << opt_projection_error_threshold << std::endl;
  // Display windows over the color and depth streams (X11 / GDI / OpenCV backend)
  // ...
#if defined(VISP_HAVE_X11)
    d_gray = new vpDisplayX(I_gray, 50, 50, "Color stream");
    // ...
#elif defined(VISP_HAVE_GDI)
    // ...
#elif defined(VISP_HAVE_OPENCV)
    // ...
#endif

  // ... AprilTag detector construction and tracker configuration ...
  std::vector<int> trackerTypes;
#ifdef VISP_HAVE_OPENCV
  // ...
#endif
  // ... moving-edges settings, tracker construction ...
#ifdef VISP_HAVE_OPENCV
  // KLT settings used when texture features are enabled
  if (opt_use_texture) {
    // ...
  }
#endif
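
// NOTE: the lines that fill trackerTypes and construct the tracker are omitted above.
// An illustrative sketch (assuming an OpenCV build for the KLT/texture option): edges,
// optionally combined with KLT features, track the color camera, while a dense depth
// tracker handles the depth camera.
if (opt_use_texture)
  trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
else
  trackerTypes.push_back(vpMbGenericTracker::EDGE_TRACKER);        // "Camera1": color stream
if (opt_use_depth)
  trackerTypes.push_back(vpMbGenericTracker::DEPTH_DENSE_TRACKER); // "Camera2": depth stream
vpMbGenericTracker tracker(trackerTypes); // one tracker type per camera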
  // Load the same cube model for both the color and the depth camera
  tracker.loadModel("cube.cao", "cube.cao");
  mapOfCameraTransformations["Camera2"] = depth_M_color;
  state_t state = state_detection;

  // Main acquisition / detection / tracking loop
  while (state != state_quit) {
#ifdef VISP_HAVE_PCL
    realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, NULL, pointcloud, NULL);
#else
    realsense.acquire((unsigned char *)I_color.bitmap, (unsigned char *)I_depth_raw.bitmap, &pointcloud, NULL, NULL);
#endif

    // ... convert the raw color and depth frames (see the sketch further below) ...
    mapOfImages["Camera1"] = &I_gray;
    mapOfImages["Camera2"] = &I_depth;
#ifdef VISP_HAVE_PCL
    mapOfPointclouds["Camera2"] = pointcloud;
#else
    mapOfPointclouds["Camera2"] = &pointcloud;
    mapOfWidths["Camera2"] = width;
    mapOfHeights["Camera2"] = height;
#endif
    // Detect the AprilTag while the object is not yet tracked
    if (state == state_detection) {
      state = detectAprilTag(I_gray, detector, opt_tag_size, cam_color, cMo);

      // Initialize the tracker from the pose deduced from the tag (see the sketch below)
      if (state == state_tracking) {
        // ...
        mapOfCameraPoses["Camera1"] = cMo;
        mapOfCameraPoses["Camera2"] = depth_M_color * cMo;
        // ...
      }
    }

    // Track the cube model
    if (state == state_tracking) {
      if (opt_use_depth) {
#ifdef VISP_HAVE_PCL
        state = track(mapOfImages, mapOfPointclouds,
                      I_gray, I_depth, depth_M_color, tracker, opt_projection_error_threshold, cMo);
#else
        state = track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights,
                      I_gray, I_depth, depth_M_color, tracker, opt_projection_error_threshold, cMo);
#endif
      } else {
        state = track(I_gray, tracker, opt_projection_error_threshold, cMo);
      }
    }
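
// NOTE: between the pose-map filling and the call to track(), the omitted lines have
// to (re)initialize the tracker from the pose estimated on the tag. The documented
// single-camera form is shown below; the RGB-D path presumably uses the analogous
// overload taking mapOfImages and mapOfCameraPoses:
tracker.initFromPose(I_gray, cMo);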
    // ...
    std::stringstream ss;
    // ...
  // ...
    std::cerr << "Catch an exception: " << e.getMessage() << std::endl;
  // ...
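
// NOTE: only the catch side of the error handling survives in this excerpt. The
// grabber configuration and the main loop are wrapped in a try/catch on vpException,
// along the lines of:
try {
  // ... grabber setup, tracker setup, while (state != state_quit) loop ...
} catch (const vpException &e) {
  std::cerr << "Catch an exception: " << e.getMessage() << std::endl;
}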
#ifndef VISP_HAVE_APRILTAG
  std::cout << "ViSP is not build with Apriltag support" << std::endl;
#endif
#ifndef VISP_HAVE_REALSENSE2
  std::cout << "ViSP is not build with librealsense2 support" << std::endl;
#endif
  std::cout << "Install missing 3rd parties, configure and build ViSP to run this tutorial" << std::endl;