6 #include <visp3/gui/vpDisplayGDI.h> 7 #include <visp3/gui/vpDisplayOpenCV.h> 8 #include <visp3/gui/vpDisplayX.h> 9 #include <visp3/core/vpXmlParserCamera.h> 10 #include <visp3/sensor/vpV4l2Grabber.h> 11 #include <visp3/detection/vpDetectorAprilTag.h> 12 #include <visp3/mbt/vpMbGenericTracker.h> 22 void createCaoFile(
double cubeEdgeSize)
24 std::ofstream fileStream;
25 fileStream.open(
"cube.cao", std::ofstream::out | std::ofstream::trunc);
27 fileStream <<
"# 3D Points\n";
28 fileStream <<
"8 # Number of points\n";
29 fileStream << cubeEdgeSize / 2 <<
" " << cubeEdgeSize / 2 <<
" " << 0 <<
" # Point 0: (X, Y, Z)\n";
30 fileStream << cubeEdgeSize / 2 <<
" " << -cubeEdgeSize / 2 <<
" " << 0 <<
" # Point 1\n";
31 fileStream << -cubeEdgeSize / 2 <<
" " << -cubeEdgeSize / 2 <<
" " << 0 <<
" # Point 2\n";
32 fileStream << -cubeEdgeSize / 2 <<
" " << cubeEdgeSize / 2 <<
" " << 0 <<
" # Point 3\n";
33 fileStream << -cubeEdgeSize / 2 <<
" " << cubeEdgeSize / 2 <<
" " << -cubeEdgeSize <<
" # Point 4\n";
34 fileStream << -cubeEdgeSize / 2 <<
" " << -cubeEdgeSize / 2 <<
" " << -cubeEdgeSize <<
" # Point 5\n";
35 fileStream << cubeEdgeSize / 2 <<
" " << -cubeEdgeSize / 2 <<
" " << -cubeEdgeSize <<
" # Point 6\n";
36 fileStream << cubeEdgeSize / 2 <<
" " << cubeEdgeSize / 2 <<
" " << -cubeEdgeSize <<
" # Point 7\n";
37 fileStream <<
"# 3D Lines\n";
38 fileStream <<
"0 # Number of lines\n";
39 fileStream <<
"# Faces from 3D lines\n";
40 fileStream <<
"0 # Number of faces\n";
41 fileStream <<
"# Faces from 3D points\n";
42 fileStream <<
"6 # Number of faces\n";
43 fileStream <<
"4 0 3 2 1 # Face 0: [number of points] [index of the 3D points]...\n";
44 fileStream <<
"4 1 2 5 6\n";
45 fileStream <<
"4 4 7 6 5\n";
46 fileStream <<
"4 0 7 4 3\n";
47 fileStream <<
"4 5 2 3 4\n";
48 fileStream <<
"4 0 1 6 7 # Face 5\n";
49 fileStream <<
"# 3D cylinders\n";
50 fileStream <<
"0 # Number of cylinders\n";
51 fileStream <<
"# 3D circles\n";
52 fileStream <<
"0 # Number of circles\n";
// NOTE(review): the region below is a garbled extraction of two helper functions
// (apparently detectAprilTag() and track() from the ViSP AprilTag/MBT tutorial).
// Their signatures, braces and several body lines are missing from this view, so
// only hedged commentary is added — the text is kept byte-identical otherwise.

// Fragment of detectAprilTag(): one pose per detected tag is collected in cMo_vec.
56 #if defined(VISP_HAVE_APRILTAG) 60 std::vector<vpHomogeneousMatrix> cMo_vec;
// Run the AprilTag detector on image I; on success cMo_vec holds the estimated
// camera-to-tag poses (presumably cMo is set from cMo_vec[0] in a missing line).
63 bool ret = detector.
detect(I, tagSize, cam, cMo_vec);
66 for (
size_t i = 0; i < cMo_vec.size(); i++) {
// A tag was found: hand the state machine over to tracking.
74 return state_tracking;
// No tag found: remain in detection mode.
77 return state_detection;
79 #endif // #if defined(VISP_HAVE_APRILTAG) 92 return state_detection;
// Fragment of track(): if the model-based tracker's projection error exceeds the
// threshold the pose is considered lost and detection is restarted; otherwise
// tracking continues with the current pose.
99 if (projection_error > projection_error_threshold) {
100 return state_detection;
108 return state_tracking;
// NOTE(review): garbled extraction of main() — many original lines (grabber and
// tracker setup, declarations of opt_device / opt_tag_family, display creation)
// are missing from this view. Text kept byte-identical; comments only.
//
// main(): live AprilTag detection + model-based cube tracking demo. Requires
// AprilTag support, a V4L2 or OpenCV grabber, and the ViSP MBT module.
111 int main(
int argc,
const char **argv)
// Default options: 8 cm tag, no quad decimation, single detector thread,
// 12.5 cm cube, optional texture tracking (OpenCV only), 40% projection-error
// threshold before falling back to detection.
114 #if defined(VISP_HAVE_APRILTAG) && (defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_OPENCV)) && \ 115 defined(VISP_HAVE_MODULE_MBT) 120 double opt_tag_size = 0.08;
121 float opt_quad_decimate = 1.0;
122 int opt_nthreads = 1;
123 std::string opt_intrinsic_file =
"";
124 std::string opt_camera_name =
"";
125 double opt_cube_size = 0.125;
126 #ifdef VISP_HAVE_OPENCV 127 bool opt_use_texture =
false;
129 double opt_projection_error_threshold = 40.;
// Display is forced off when no display backend (X11/GDI/OpenCV) is available.
131 #if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV)) 132 bool display_off =
true;
134 bool display_off =
false;
// Command-line parsing: each flag that takes a value reads argv[i + 1].
// NOTE(review): atof/atoi silently yield 0 on malformed input — no validation here.
137 for (
int i = 1; i < argc; i++) {
138 if (std::string(argv[i]) ==
"--tag_size" && i + 1 < argc) {
139 opt_tag_size = atof(argv[i + 1]);
140 }
else if (std::string(argv[i]) ==
"--input" && i + 1 < argc) {
141 opt_device = atoi(argv[i + 1]);
142 }
else if (std::string(argv[i]) ==
"--quad_decimate" && i + 1 < argc) {
143 opt_quad_decimate = (float)atof(argv[i + 1]);
144 }
else if (std::string(argv[i]) ==
"--nthreads" && i + 1 < argc) {
145 opt_nthreads = atoi(argv[i + 1]);
146 }
else if (std::string(argv[i]) ==
"--intrinsic" && i + 1 < argc) {
147 opt_intrinsic_file = std::string(argv[i + 1]);
148 }
else if (std::string(argv[i]) ==
"--camera_name" && i + 1 < argc) {
149 opt_camera_name = std::string(argv[i + 1]);
150 }
else if (std::string(argv[i]) ==
"--display_off") {
152 }
else if (std::string(argv[i]) ==
"--tag_family" && i + 1 < argc) {
154 }
else if (std::string(argv[i]) ==
"--cube_size" && i + 1 < argc) {
155 opt_cube_size = atof(argv[i + 1]);
156 #ifdef VISP_HAVE_OPENCV 157 }
else if (std::string(argv[i]) ==
"--texture") {
158 opt_use_texture =
true;
160 }
else if (std::string(argv[i]) ==
"--projection_error" && i + 1 < argc) {
161 opt_projection_error_threshold = atof(argv[i + 1]);
162 }
else if (std::string(argv[i]) ==
"--help" || std::string(argv[i]) ==
"-h") {
// Usage message; presumably followed by a return in a missing line.
163 std::cout <<
"Usage: " << argv[0] <<
" [--input <camera id>] [--cube_size <size in m>] [--tag_size <size in m>]" 164 " [--quad_decimate <decimation>] [--nthreads <nb>]" 165 " [--intrinsic <xml intrinsic file>] [--camera_name <camera name in xml file>]" 166 " [--tag_family <0: TAG_36h11, 1: TAG_36h10, 2: TAG_36ARTOOLKIT, " 167 " 3: TAG_25h9, 4: TAG_25h7, 5: TAG_16h5>]";
168 #if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV)) 169 std::cout <<
" [--display_off]";
171 std::cout <<
" [--texture] [--projection_error <30 - 100>] [--help]" << std::endl;
// Write the cube CAD model the tracker will load.
176 createCaoFile(opt_cube_size);
// Camera intrinsics: loaded from the XML file when both --intrinsic and
// --camera_name are given (XML2 support required); otherwise defaults apply.
179 bool camIsInit =
false;
180 #ifdef VISP_HAVE_XML2 182 if (!opt_intrinsic_file.empty() && !opt_camera_name.empty()) {
// Frame grabber selection: V4L2 (/dev/videoN) preferred, OpenCV VideoCapture
// as fallback. Setup lines for the V4L2 grabber are missing from this view.
192 #if defined(VISP_HAVE_V4L2) 194 std::ostringstream device;
195 device <<
"/dev/video" << opt_device;
196 std::cout <<
"Use device " << device.str() <<
" (v4l2 grabber)" << std::endl;
200 #elif defined(VISP_HAVE_OPENCV) 201 std::cout <<
"Use device " << opt_device <<
" (OpenCV grabber)" << std::endl;
202 cv::VideoCapture cap(opt_device);
203 if (!cap.isOpened()) {
204 std::cout <<
"Failed to open the camera" << std::endl;
// Dump the effective configuration before entering the processing loop.
215 std::cout <<
"Cube size: " << opt_cube_size << std::endl;
216 std::cout <<
"AprilTag size: " << opt_tag_size << std::endl;
217 std::cout <<
"AprilTag family: " << opt_tag_family << std::endl;
218 std::cout <<
"Camera parameters:\n" << cam << std::endl;
219 std::cout <<
"Detection: " << std::endl;
220 std::cout <<
" Quad decimate: " << opt_quad_decimate << std::endl;
221 std::cout <<
" Threads number: " << opt_nthreads << std::endl;
222 std::cout <<
"Tracker: " << std::endl;
223 std::cout <<
" Use edges : 1"<< std::endl;
224 std::cout <<
" Use texture: " 225 #ifdef VISP_HAVE_OPENCV 226 << opt_use_texture << std::endl;
228 <<
" na" << std::endl;
230 std::cout <<
" Projection error: " << opt_projection_error_threshold << std::endl;
// Display backend choice (X11/GDI/OpenCV) and tracker configuration: most of
// these lines are missing from this view; texture tracking (KLT) is enabled
// only when OpenCV is available and --texture was passed.
237 #elif defined(VISP_HAVE_GDI) 239 #elif defined(VISP_HAVE_OPENCV) 251 #ifdef VISP_HAVE_OPENCV 268 #ifdef VISP_HAVE_OPENCV 269 if (opt_use_texture) {
// Main loop: a small state machine alternating between AprilTag detection and
// model-based tracking until the user quits.
292 state_t state = state_detection;
295 while (state != state_quit) {
297 #if defined(VISP_HAVE_V4L2) 299 #elif defined(VISP_HAVE_OPENCV) 306 if (state == state_detection) {
307 state = detectAprilTag(I, detector, opt_tag_size, cam, cMo);
// On a fresh detection the tracker is presumably (re)initialized from cMo
// in lines missing here, then tracking takes over.
310 if (state == state_tracking) {
315 if (state == state_tracking) {
316 state = track(I, tracker, opt_projection_error_threshold, cMo);
// ViSP exception handler — e is presumably a caught vpException.
330 std::cerr <<
"Catch an exception: " << e.
getMessage() << std::endl;
// Fallback builds: explain which third-party support is missing.
337 #ifndef VISP_HAVE_APRILTAG 338 std::cout <<
"ViSP is not build with Apriltag support" << std::endl;
340 #if !(defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_OPENCV)) 341 std::cout <<
"ViSP is not build with v4l2 or OpenCV support" << std::endl;
343 std::cout <<
"Install missing 3rd parties, configure and build ViSP to run this tutorial" << std::endl;
virtual void setDisplayFeatures(const bool displayF)
void setAprilTagQuadDecimate(const float quadDecimate)
void acquire(vpImage< unsigned char > &I)
Class that defines generic functionalities for display.
virtual void track(const vpImage< unsigned char > &I)
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
void setHarrisFreeParameter(double harris_k)
unsigned int getWidth() const
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
Implementation of an homogeneous matrix and operations on such kind of matrices.
void setMaskNumber(const unsigned int &a)
Display for windows using GDI (available on any windows 32 platform).
void setMaxFeatures(const int maxCount)
void setSampleStep(const double &s)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
Use the X11 console to display images on unix-like OS. Thus to enable this class X11 should be installed.
void setDevice(const std::string &devname)
static const vpColor none
void setMinDistance(double minDistance)
virtual void setAngleDisappear(const double &a)
Error that can be emitted by ViSP classes.
virtual void getCameraParameters(vpCameraParameters &cam1, vpCameraParameters &cam2) const
virtual void setMovingEdge(const vpMe &me)
size_t getNbObjects() const
XML parser to load and save intrinsic camera parameters.
Real-time 6D object pose tracking using its CAD model.
static void flush(const vpImage< unsigned char > &I)
void setMu1(const double &mu_1)
void setQuality(double qualityLevel)
void initPersProjWithoutDistortion(const double px, const double py, const double u0, const double v0)
virtual void setKltMaskBorder(const unsigned int &e)
virtual void initFromPose(const vpImage< unsigned char > &I1, const vpImage< unsigned char > &I2, const std::string &initFile1, const std::string &initFile2)
void setMaskSize(const unsigned int &a)
virtual void display(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &col, const unsigned int thickness=1, const bool displayFullModel=false)
static void display(const vpImage< unsigned char > &I)
The vpDisplayOpenCV allows to display image using the OpenCV library. Thus to enable this class OpenCV should be installed.
Generic class defining intrinsic camera parameters.
virtual void setAngleAppear(const double &a)
const char * getMessage(void) const
void setScale(unsigned scale=vpV4l2Grabber::DEFAULT_SCALE)
void setPyramidLevels(const int pyrMaxLevel)
static double rad(double deg)
virtual void loadModel(const std::string &modelFile, const bool verbose=false, const vpHomogeneousMatrix &T=vpHomogeneousMatrix())
virtual void setCameraParameters(const vpCameraParameters &camera)
void setMu2(const double &mu_2)
Class that is a wrapper over the Video4Linux2 (V4L2) driver.
void setWindowSize(const int winSize)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0))
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this class OpenCV should be installed.
void setBlockSize(const int blockSize)
virtual void getPose(vpHomogeneousMatrix &c1Mo, vpHomogeneousMatrix &c2Mo) const
void setThreshold(const double &t)
unsigned int getHeight() const
virtual void setTrackerType(const int type)
void setRange(const unsigned int &r)
int parse(vpCameraParameters &cam, const std::string &filename, const std::string &camera_name, const vpCameraParameters::vpCameraParametersProjType &projModel, const unsigned int image_width=0, const unsigned int image_height=0)
virtual void setKltOpencv(const vpKltOpencv &t)
void setAprilTagNbThreads(const int nThreads)
bool detect(const vpImage< unsigned char > &I)
virtual double computeCurrentProjectionError(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &_cMo, const vpCameraParameters &_cam)