#include <fstream>
#include <iostream>
#include <sstream>

#include <visp3/core/vpConfig.h>
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/detection/vpDetectorAprilTag.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/sensor/vpV4l2Grabber.h>
#if defined(HAVE_OPENCV_VIDEOIO)
#include <opencv2/videoio.hpp>
#endif

#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif
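// The demo alternates between two states: AprilTag detection to (re)initialize the pose,
// and model-based tracking of the cube. A click in the display switches to state_quit.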
typedef enum { state_detection, state_tracking, state_quit } state_t;
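// Write a cube.cao CAD model file (8 points, 6 faces) in the current directory.
// cubeEdgeSize is the cube edge length in meters.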
void createCaoFile(double cubeEdgeSize)
{
  std::ofstream fileStream;
  fileStream.open("cube.cao", std::ofstream::out | std::ofstream::trunc);
  fileStream << "V1\n";
  fileStream << "# 3D Points\n";
  fileStream << "8 # Number of points\n";
  fileStream << cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << 0 << " # Point 0: (X, Y, Z)\n";
  fileStream << cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << 0 << " # Point 1\n";
  fileStream << -cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << 0 << " # Point 2\n";
  fileStream << -cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << 0 << " # Point 3\n";
  fileStream << -cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 4\n";
  fileStream << -cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 5\n";
  fileStream << cubeEdgeSize / 2 << " " << -cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 6\n";
  fileStream << cubeEdgeSize / 2 << " " << cubeEdgeSize / 2 << " " << -cubeEdgeSize << " # Point 7\n";
  fileStream << "# 3D Lines\n";
  fileStream << "0 # Number of lines\n";
  fileStream << "# Faces from 3D lines\n";
  fileStream << "0 # Number of faces\n";
  fileStream << "# Faces from 3D points\n";
  fileStream << "6 # Number of faces\n";
  fileStream << "4 0 3 2 1 # Face 0: [number of points] [index of the 3D points]...\n";
  fileStream << "4 1 2 5 6\n";
  fileStream << "4 4 7 6 5\n";
  fileStream << "4 0 7 4 3\n";
  fileStream << "4 5 2 3 4\n";
  fileStream << "4 0 1 6 7 # Face 5\n";
  fileStream << "# 3D cylinders\n";
  fileStream << "0 # Number of cylinders\n";
  fileStream << "# 3D circles\n";
  fileStream << "0 # Number of circles\n";
  fileStream.close();
}
#if defined(VISP_HAVE_APRILTAG)
// Detect an AprilTag in I; when a tag is found, return state_tracking with its pose in cMo.
state_t detectAprilTag(const vpImage<unsigned char> &I, vpDetectorAprilTag &detector, double tagSize,
                       const vpCameraParameters &cam, vpHomogeneousMatrix &cMo)
{
  std::vector<vpHomogeneousMatrix> cMo_vec;

  // Detect the tags and compute their pose from the tag size and the camera parameters
  bool ret = detector.detect(I, tagSize, cam, cMo_vec);

  // Display the pose of every detected tag
  for (size_t i = 0; i < cMo_vec.size(); i++) {
    vpDisplay::displayFrame(I, cMo_vec[i], cam, tagSize / 2, vpColor::none, 3);
  }

  if (ret && detector.getNbObjects() > 0) { // a tag was found: keep the pose of the first one
    cMo = cMo_vec[0];
    return state_tracking;
  }
  return state_detection;
}
#endif
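// Track the cube with the generic model-based tracker; when tracking fails or the
// projection error becomes too large, fall back to the detection state.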
state_t track(const vpImage<unsigned char> &I, vpMbGenericTracker &tracker, double projection_error_threshold,
              vpHomogeneousMatrix &cMo)
{
  vpCameraParameters cam;
  tracker.getCameraParameters(cam);

  try {
    tracker.track(I);
  }
  catch (...) { // tracking failed: go back to the detection state
    return state_detection;
  }
  tracker.getPose(cMo);

  // A large projection error means the estimated pose is unreliable: re-detect the tag
  double projection_error = tracker.computeCurrentProjectionError(I, cMo, cam);
  if (projection_error > projection_error_threshold) {
    return state_detection;
  }

  // Display the model at the estimated pose and the number of tracked features
  tracker.display(I, cMo, cam, vpColor::red, 2);
  std::stringstream ss;
  ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt();
  vpDisplay::displayText(I, 60, 20, ss.str(), vpColor::red);

  return state_tracking;
}
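// main(): parse the command line, open the camera, then alternate between AprilTag
// detection and model-based tracking of the cube.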
int main(int argc, const char **argv)
{
#if defined(VISP_HAVE_APRILTAG) && (defined(VISP_HAVE_V4L2) || defined(HAVE_OPENCV_VIDEOIO)) && defined(VISP_HAVE_MODULE_MBT)
  int opt_device = 0; // camera index
  vpDetectorAprilTag::vpAprilTagFamily opt_tag_family = vpDetectorAprilTag::TAG_36h11;
  double opt_tag_size = 0.08;
  float opt_quad_decimate = 1.0;
  int opt_nthreads = 1;
  std::string opt_intrinsic_file = "";
  std::string opt_camera_name = "";
  double opt_cube_size = 0.125; // in meters
#ifdef VISP_HAVE_OPENCV
  bool opt_use_texture = false;
#endif
  double opt_projection_error_threshold = 40.; // tracking is considered lost above this projection error

#if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
  bool display_off = true;
#else
  bool display_off = false;
#endif
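  // Parse the command line options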
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) {
      opt_tag_size = atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--input" && i + 1 < argc) {
      opt_device = atoi(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) {
      opt_quad_decimate = (float)atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--nthreads" && i + 1 < argc) {
      opt_nthreads = atoi(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--intrinsic" && i + 1 < argc) {
      opt_intrinsic_file = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--camera_name" && i + 1 < argc) {
      opt_camera_name = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--display_off") {
      display_off = true;
    }
    else if (std::string(argv[i]) == "--tag_family" && i + 1 < argc) {
      opt_tag_family = (vpDetectorAprilTag::vpAprilTagFamily)atoi(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--cube_size" && i + 1 < argc) {
      opt_cube_size = atof(argv[i + 1]);
    }
#ifdef VISP_HAVE_OPENCV
    else if (std::string(argv[i]) == "--texture") {
      opt_use_texture = true;
    }
#endif
    else if (std::string(argv[i]) == "--projection_error" && i + 1 < argc) {
      opt_projection_error_threshold = atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: " << argv[0]
        << " [--input <camera id>] [--cube_size <size in m>] [--tag_size <size in m>]"
           " [--quad_decimate <decimation>] [--nthreads <nb>]"
           " [--intrinsic <xml intrinsic file>] [--camera_name <camera name in xml file>]"
           " [--tag_family <0: TAG_36h11, 1: TAG_36h10, 2: TAG_36ARTOOLKIT, "
           " 3: TAG_25h9, 4: TAG_25h7, 5: TAG_16h5>]";
#if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
      std::cout << " [--display_off]";
#endif
      std::cout << " [--texture] [--projection_error <30 - 100>] [--help]" << std::endl;
      return EXIT_SUCCESS;
    }
  }
  // Generate the cube CAD model and read the camera intrinsics from the XML file when one is given
  createCaoFile(opt_cube_size);

  vpCameraParameters cam;
  bool camIsInit = false;
#if defined(VISP_HAVE_PUGIXML)
  vpXmlParserCamera parser;
  if (!opt_intrinsic_file.empty() && !opt_camera_name.empty()) {
    parser.parse(cam, opt_intrinsic_file, opt_camera_name, vpCameraParameters::perspectiveProjWithoutDistortion);
    camIsInit = true;
  }
#endif
  try {
    vpImage<unsigned char> I;

#if defined(VISP_HAVE_V4L2)
    // Grab images from a V4L2 device (e.g. a USB webcam on Linux)
    vpV4l2Grabber g;
    std::ostringstream device;
    device << "/dev/video" << opt_device;
    std::cout << "Use device " << device.str() << " (v4l2 grabber)" << std::endl;
    g.setDevice(device.str());
    g.setScale(1);
    g.acquire(I);
#elif defined(HAVE_OPENCV_VIDEOIO)
    // Fall back on the OpenCV grabber
    std::cout << "Use device " << opt_device << " (OpenCV grabber)" << std::endl;
    cv::VideoCapture cap(opt_device);
    if (!cap.isOpened()) {
      std::cout << "Failed to open the camera" << std::endl;
      return EXIT_FAILURE;
    }
    cv::Mat frame;
    cap >> frame; // grab a first frame to know the image size
    vpImageConvert::convert(frame, I);
#endif

    // Without intrinsics from an XML file, fall back on a default pinhole model centered in the image
    if (!camIsInit) {
      cam.initPersProjWithoutDistortion(600, 600, I.getWidth() / 2., I.getHeight() / 2.);
    }
    // Print the demo configuration
    std::cout << "Cube size: " << opt_cube_size << std::endl;
    std::cout << "AprilTag size: " << opt_tag_size << std::endl;
    std::cout << "AprilTag family: " << opt_tag_family << std::endl;
    std::cout << "Camera parameters:\n" << cam << std::endl;
    std::cout << "Detection: " << std::endl;
    std::cout << "  Quad decimate: " << opt_quad_decimate << std::endl;
    std::cout << "  Threads number: " << opt_nthreads << std::endl;
    std::cout << "Tracker: " << std::endl;
    std::cout << "  Use edges  : 1" << std::endl;
    std::cout << "  Use texture: "
#ifdef VISP_HAVE_OPENCV
      << opt_use_texture << std::endl;
#else
      << " na" << std::endl;
#endif
    std::cout << "  Projection error: " << opt_projection_error_threshold << std::endl;
    // Construct the display associated to the image, when a display is available
    vpDisplay *d = nullptr;
    if (!display_off) {
#ifdef VISP_HAVE_X11
      d = new vpDisplayX(I);
#elif defined(VISP_HAVE_GDI)
      d = new vpDisplayGDI(I);
#elif defined(HAVE_OPENCV_HIGHGUI)
      d = new vpDisplayOpenCV(I);
#endif
    }
    // AprilTag detector settings
    vpDetectorAprilTag detector(opt_tag_family);
    detector.setAprilTagQuadDecimate(opt_quad_decimate);
    detector.setAprilTagNbThreads(opt_nthreads);

    // Model-based tracker: edges only, or edges + KLT texture points when --texture is given
    vpMbGenericTracker tracker;
#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
    if (opt_use_texture)
      tracker.setTrackerType(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
    else
#endif
      tracker.setTrackerType(vpMbGenericTracker::EDGE_TRACKER);
    // Moving-edge (vpMe) settings are tuned here and applied with tracker.setMovingEdge(me)

#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
    if (opt_use_texture) {
      vpKltOpencv klt_settings; // KLT settings (max features, window size, quality, ...) are tuned here
      tracker.setKltOpencv(klt_settings);
      tracker.setKltMaskBorder(5);
    }
#endif
    tracker.setCameraParameters(cam);
    tracker.loadModel("cube.cao");
    tracker.setDisplayFeatures(true);
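    // Detection / tracking state machine: detect the tag, initialize the tracker from the
    // tag pose, then track the cube until a click or a tracking failure.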
    vpHomogeneousMatrix cMo;
    state_t state = state_detection;

    while (state != state_quit) {
      // Acquire a new image
#if defined(VISP_HAVE_V4L2)
      g.acquire(I);
#elif defined(HAVE_OPENCV_VIDEOIO)
      cap >> frame;
      vpImageConvert::convert(frame, I);
#endif
      vpDisplay::display(I);

      if (state == state_detection) {
        state = detectAprilTag(I, detector, opt_tag_size, cam, cMo);
        // Use the pose of the detected tag to initialize the tracker
        if (state == state_tracking) {
          tracker.initFromPose(I, cMo);
        }
      }

      if (state == state_tracking) {
        state = track(I, tracker, opt_projection_error_threshold, cMo);
      }

      vpDisplay::displayText(I, 20, 20, "Click to quit...", vpColor::red);
      if (vpDisplay::getClick(I, false)) { // a click stops the demo
        state = state_quit;
      }
      vpDisplay::flush(I);
    }

    if (!display_off)
      delete d;
  }
  catch (const vpException &e) {
    std::cerr << "Catch an exception: " << e.getMessage() << std::endl;
  }

  return EXIT_SUCCESS;
#else
  (void)argc;
  (void)argv;
#ifndef VISP_HAVE_APRILTAG
  std::cout << "ViSP is not built with AprilTag support" << std::endl;
#endif
#if !(defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_OPENCV))
  std::cout << "ViSP is not built with v4l2 or OpenCV support" << std::endl;
#endif
  std::cout << "Install missing 3rd parties, configure and build ViSP to run this tutorial" << std::endl;

  return EXIT_SUCCESS;
#endif
}