6 #include <visp3/gui/vpDisplayGDI.h> 7 #include <visp3/gui/vpDisplayOpenCV.h> 8 #include <visp3/gui/vpDisplayX.h> 9 #include <visp3/core/vpXmlParserCamera.h> 10 #include <visp3/sensor/vpV4l2Grabber.h> 11 #include <visp3/detection/vpDetectorAprilTag.h> 12 #include <visp3/mbt/vpMbGenericTracker.h> 22 void createCaoFile(
double cubeEdgeSize)
24 std::ofstream fileStream;
25 fileStream.open(
"cube.cao", std::ofstream::out | std::ofstream::trunc);
27 fileStream <<
"# 3D Points\n";
28 fileStream <<
"8 # Number of points\n";
29 fileStream << cubeEdgeSize / 2 <<
" " << cubeEdgeSize / 2 <<
" " << 0 <<
" # Point 0: (X, Y, Z)\n";
30 fileStream << cubeEdgeSize / 2 <<
" " << -cubeEdgeSize / 2 <<
" " << 0 <<
" # Point 1\n";
31 fileStream << -cubeEdgeSize / 2 <<
" " << -cubeEdgeSize / 2 <<
" " << 0 <<
" # Point 2\n";
32 fileStream << -cubeEdgeSize / 2 <<
" " << cubeEdgeSize / 2 <<
" " << 0 <<
" # Point 3\n";
33 fileStream << -cubeEdgeSize / 2 <<
" " << cubeEdgeSize / 2 <<
" " << -cubeEdgeSize <<
" # Point 4\n";
34 fileStream << -cubeEdgeSize / 2 <<
" " << -cubeEdgeSize / 2 <<
" " << -cubeEdgeSize <<
" # Point 5\n";
35 fileStream << cubeEdgeSize / 2 <<
" " << -cubeEdgeSize / 2 <<
" " << -cubeEdgeSize <<
" # Point 6\n";
36 fileStream << cubeEdgeSize / 2 <<
" " << cubeEdgeSize / 2 <<
" " << -cubeEdgeSize <<
" # Point 7\n";
37 fileStream <<
"# 3D Lines\n";
38 fileStream <<
"0 # Number of lines\n";
39 fileStream <<
"# Faces from 3D lines\n";
40 fileStream <<
"0 # Number of faces\n";
41 fileStream <<
"# Faces from 3D points\n";
42 fileStream <<
"6 # Number of faces\n";
43 fileStream <<
"4 0 3 2 1 # Face 0: [number of points] [index of the 3D points]...\n";
44 fileStream <<
"4 1 2 5 6\n";
45 fileStream <<
"4 4 7 6 5\n";
46 fileStream <<
"4 0 7 4 3\n";
47 fileStream <<
"4 5 2 3 4\n";
48 fileStream <<
"4 0 1 6 7 # Face 5\n";
49 fileStream <<
"# 3D cylinders\n";
50 fileStream <<
"0 # Number of cylinders\n";
51 fileStream <<
"# 3D circles\n";
52 fileStream <<
"0 # Number of circles\n";
// NOTE(review): garbled extraction fragment of the tutorial's detectAprilTag()
// helper — the function signature and most of the body are missing, and the
// original source line numbers ("56", "60", ...) are fused into the text.
// Code left byte-identical; only comments added.
56 #if defined(VISP_HAVE_APRILTAG) 60 std::vector<vpHomogeneousMatrix> cMo_vec;
// Run the AprilTag detector on image I; with tagSize and cam it also recovers
// one camera-from-tag pose per detection into cMo_vec.
63 bool ret = detector.
detect(I, tagSize, cam, cMo_vec);
// Iterate over the detected tag poses (the loop body is missing here —
// presumably it displays frames and picks a pose; confirm against the
// original tutorial).
66 for (
size_t i = 0; i < cMo_vec.size(); i++) {
// A tag was found: switch the state machine to tracking.
74 return state_tracking;
77 return state_detection;
// Without AprilTag support the helper always reports "keep detecting".
79 #endif // #if defined(VISP_HAVE_APRILTAG) 92 return state_detection;
// NOTE(review): garbled fragment of the tutorial's track() helper — signature
// and most of the body are missing. Code left byte-identical; comments only.
// If the tracker's reprojection error exceeds the caller-supplied threshold,
// the pose is considered lost and we fall back to tag detection.
99 if (projection_error > projection_error_threshold) {
100 return state_detection;
108 std::stringstream ss;
// Tracking succeeded for this frame: stay in the tracking state.
113 return state_tracking;
// NOTE(review): garbled extraction fragment of the tutorial's main() — the
// original source line numbers ("116", "119", ...) are fused into the text and
// many statements (grabber setup, tracker configuration, display creation,
// braces) are missing. Code left byte-identical; only comments added.
116 int main(
int argc,
const char **argv)
// The tutorial needs AprilTag detection, a camera grabber (V4L2 or OpenCV)
// and the model-based tracker (MBT) module.
119 #if defined(VISP_HAVE_APRILTAG) && (defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_OPENCV)) && \ 120 defined(VISP_HAVE_MODULE_MBT) 125 double opt_tag_size = 0.08;
// Command-line option defaults: tag size [m], detector quad decimation,
// thread count, intrinsics XML file / camera name, cube edge [m], textured
// tracking flag (OpenCV only) and the projection-error threshold used by
// track() to declare tracking lost.
126 float opt_quad_decimate = 1.0;
127 int opt_nthreads = 1;
128 std::string opt_intrinsic_file =
"";
129 std::string opt_camera_name =
"";
130 double opt_cube_size = 0.125;
131 #ifdef VISP_HAVE_OPENCV 132 bool opt_use_texture =
false;
134 double opt_projection_error_threshold = 40.;
// Display is forced off when no display backend (X11/GDI/OpenCV) is built in.
136 #if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV)) 137 bool display_off =
true;
139 bool display_off =
false;
// Parse command-line options; each value-taking option consumes argv[i + 1].
142 for (
int i = 1; i < argc; i++) {
143 if (std::string(argv[i]) ==
"--tag_size" && i + 1 < argc) {
144 opt_tag_size = atof(argv[i + 1]);
145 }
else if (std::string(argv[i]) ==
"--input" && i + 1 < argc) {
146 opt_device = atoi(argv[i + 1]);
147 }
else if (std::string(argv[i]) ==
"--quad_decimate" && i + 1 < argc) {
148 opt_quad_decimate = (float)atof(argv[i + 1]);
149 }
else if (std::string(argv[i]) ==
"--nthreads" && i + 1 < argc) {
150 opt_nthreads = atoi(argv[i + 1]);
151 }
else if (std::string(argv[i]) ==
"--intrinsic" && i + 1 < argc) {
152 opt_intrinsic_file = std::string(argv[i + 1]);
153 }
else if (std::string(argv[i]) ==
"--camera_name" && i + 1 < argc) {
154 opt_camera_name = std::string(argv[i + 1]);
155 }
// Bodies of the --display_off and --tag_family branches are missing from
// this fragment (original lines 156 and 158).
else if (std::string(argv[i]) ==
"--display_off") {
157 }
else if (std::string(argv[i]) ==
"--tag_family" && i + 1 < argc) {
159 }
else if (std::string(argv[i]) ==
"--cube_size" && i + 1 < argc) {
160 opt_cube_size = atof(argv[i + 1]);
161 #ifdef VISP_HAVE_OPENCV 162 }
else if (std::string(argv[i]) ==
"--texture") {
163 opt_use_texture =
true;
165 }
else if (std::string(argv[i]) ==
"--projection_error" && i + 1 < argc) {
166 opt_projection_error_threshold = atof(argv[i + 1]);
167 }
// Print usage and (presumably) exit on --help / -h.
else if (std::string(argv[i]) ==
"--help" || std::string(argv[i]) ==
"-h") {
168 std::cout <<
"Usage: " << argv[0] <<
" [--input <camera id>] [--cube_size <size in m>] [--tag_size <size in m>]" 169 " [--quad_decimate <decimation>] [--nthreads <nb>]" 170 " [--intrinsic <xml intrinsic file>] [--camera_name <camera name in xml file>]" 171 " [--tag_family <0: TAG_36h11, 1: TAG_36h10, 2: TAG_36ARTOOLKIT, " 172 " 3: TAG_25h9, 4: TAG_25h7, 5: TAG_16h5>]";
173 #if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV)) 174 std::cout <<
" [--display_off]";
176 std::cout <<
" [--texture] [--projection_error <30 - 100>] [--help]" << std::endl;
// Generate the cube CAD model used by the model-based tracker.
181 createCaoFile(opt_cube_size);
// Camera intrinsics: loaded when both --intrinsic and --camera_name are
// given (the XML parsing code is missing from this fragment).
184 bool camIsInit =
false;
186 if (!opt_intrinsic_file.empty() && !opt_camera_name.empty()) {
// Frame grabber setup: a /dev/videoN node via V4L2 when available,
// otherwise an OpenCV cv::VideoCapture.
195 #if defined(VISP_HAVE_V4L2) 197 std::ostringstream device;
198 device <<
"/dev/video" << opt_device;
199 std::cout <<
"Use device " << device.str() <<
" (v4l2 grabber)" << std::endl;
203 #elif defined(VISP_HAVE_OPENCV) 204 std::cout <<
"Use device " << opt_device <<
" (OpenCV grabber)" << std::endl;
205 cv::VideoCapture cap(opt_device);
206 if (!cap.isOpened()) {
207 std::cout <<
"Failed to open the camera" << std::endl;
// Print the effective configuration before starting.
218 std::cout <<
"Cube size: " << opt_cube_size << std::endl;
219 std::cout <<
"AprilTag size: " << opt_tag_size << std::endl;
220 std::cout <<
"AprilTag family: " << opt_tag_family << std::endl;
221 std::cout <<
"Camera parameters:\n" << cam << std::endl;
222 std::cout <<
"Detection: " << std::endl;
223 std::cout <<
" Quad decimate: " << opt_quad_decimate << std::endl;
224 std::cout <<
" Threads number: " << opt_nthreads << std::endl;
225 std::cout <<
"Tracker: " << std::endl;
226 std::cout <<
" Use edges : 1"<< std::endl;
227 std::cout <<
" Use texture: " 228 #ifdef VISP_HAVE_OPENCV 229 << opt_use_texture << std::endl;
231 <<
" na" << std::endl;
233 std::cout <<
" Projection error: " << opt_projection_error_threshold << std::endl;
// Display and tracker construction (almost entirely missing here).
240 #elif defined(VISP_HAVE_GDI) 242 #elif defined(VISP_HAVE_OPENCV) 254 #ifdef VISP_HAVE_OPENCV 271 #ifdef VISP_HAVE_OPENCV 272 if (opt_use_texture) {
// Detection/tracking state machine: detect the tag, then track the cube,
// falling back to detection when tracking is lost, until the user quits.
295 state_t state = state_detection;
298 while (state != state_quit) {
300 #if defined(VISP_HAVE_V4L2) 302 #elif defined(VISP_HAVE_OPENCV) 309 if (state == state_detection) {
310 state = detectAprilTag(I, detector, opt_tag_size, cam, cMo);
// On a fresh detection, the tracker is presumably (re-)initialized from the
// tag pose here (code missing — confirm against the original tutorial).
313 if (state == state_tracking) {
320 if (state == state_tracking) {
321 state = track(I, tracker, opt_projection_error_threshold, cMo);
// Report any ViSP exception raised above.
335 std::cerr <<
"Catch an exception: " << e.
getMessage() << std::endl;
// Fallback builds: explain which third parties are missing.
// NOTE(review): "is not build with" should read "is not built with" in the
// two messages below — fix when the file is restored.
342 #ifndef VISP_HAVE_APRILTAG 343 std::cout <<
"ViSP is not build with Apriltag support" << std::endl;
345 #if !(defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_OPENCV)) 346 std::cout <<
"ViSP is not build with v4l2 or OpenCV support" << std::endl;
348 std::cout <<
"Install missing 3rd parties, configure and build ViSP to run this tutorial" << std::endl;
void acquire(vpImage< unsigned char > &I)
void setMaxFeatures(int maxCount)
Class that defines generic functionalities for display.
virtual void track(const vpImage< unsigned char > &I)
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
virtual unsigned int getNbFeaturesKlt() const
virtual void display(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &col, unsigned int thickness=1, bool displayFullModel=false)
void setHarrisFreeParameter(double harris_k)
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
Implementation of an homogeneous matrix and operations on such kind of matrices.
AprilTag 36h11 pattern (recommended)
virtual void setTrackerType(int type)
void setMaskNumber(const unsigned int &a)
Display for windows using GDI (available on any windows 32 platform).
void setSampleStep(const double &s)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
virtual void setDisplayFeatures(bool displayF)
Use the X11 console to display images on unix-like OS. Thus to enable this class X11 should be installed.
void setDevice(const std::string &devname)
static const vpColor none
void setMinDistance(double minDistance)
virtual void setAngleDisappear(const double &a)
Error that can be emitted by ViSP classes.
size_t getNbObjects() const
const char * getMessage() const
virtual void setMovingEdge(const vpMe &me)
XML parser to load and save intrinsic camera parameters.
Real-time 6D object pose tracking using its CAD model.
static void flush(const vpImage< unsigned char > &I)
void setMu1(const double &mu_1)
virtual unsigned int getNbFeaturesEdge() const
void setQuality(double qualityLevel)
void setAprilTagQuadDecimate(float quadDecimate)
virtual void setKltMaskBorder(const unsigned int &e)
virtual void loadModel(const std::string &modelFile, bool verbose=false, const vpHomogeneousMatrix &T=vpHomogeneousMatrix())
void setMaskSize(const unsigned int &a)
void setPyramidLevels(int pyrMaxLevel)
static void display(const vpImage< unsigned char > &I)
The vpDisplayOpenCV allows to display image using the OpenCV library. Thus to enable this class OpenCV should be installed.
Generic class defining intrinsic camera parameters.
void setAprilTagNbThreads(int nThreads)
virtual void initFromPose(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo)
virtual void setAngleAppear(const double &a)
void setWindowSize(int winSize)
virtual void getCameraParameters(vpCameraParameters &camera) const
void setScale(unsigned scale=vpV4l2Grabber::DEFAULT_SCALE)
static double rad(double deg)
int parse(vpCameraParameters &cam, const std::string &filename, const std::string &camera_name, const vpCameraParameters::vpCameraParametersProjType &projModel, unsigned int image_width=0, unsigned int image_height=0)
void initPersProjWithoutDistortion(double px, double py, double u0, double v0)
virtual void setCameraParameters(const vpCameraParameters &camera)
void setMu2(const double &mu_2)
Class that is a wrapper over the Video4Linux2 (V4L2) driver.
unsigned int getHeight() const
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0))
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this class OpenCV should be installed.
void setThreshold(const double &t)
void setRange(const unsigned int &r)
virtual void setKltOpencv(const vpKltOpencv &t)
unsigned int getWidth() const
void setBlockSize(int blockSize)
virtual void getPose(vpHomogeneousMatrix &cMo) const
bool detect(const vpImage< unsigned char > &I)
virtual double computeCurrentProjectionError(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &_cMo, const vpCameraParameters &_cam)