1 #include <visp3/core/vpConfig.h>
3 #ifdef VISP_HAVE_MODULE_SENSOR
4 #include <visp3/sensor/vp1394CMUGrabber.h>
5 #include <visp3/sensor/vp1394TwoGrabber.h>
6 #include <visp3/sensor/vpFlyCaptureGrabber.h>
7 #include <visp3/sensor/vpRealSense2.h>
8 #include <visp3/sensor/vpV4l2Grabber.h>
10 #include <visp3/core/vpIoTools.h>
11 #include <visp3/core/vpXmlParserCamera.h>
12 #include <visp3/gui/vpDisplayGDI.h>
13 #include <visp3/gui/vpDisplayOpenCV.h>
14 #include <visp3/gui/vpDisplayX.h>
15 #include <visp3/io/vpImageIo.h>
16 #include <visp3/vision/vpKeyPoint.h>
18 #include <visp3/mbt/vpMbGenericTracker.h>
21 #if defined(HAVE_OPENCV_VIDEOIO)
22 #include <opencv2/videoio.hpp>
34 int main(
int argc,
char **argv)
36 #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_VIDEOIO) && \
37 (defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
38 defined(HAVE_OPENCV_HIGHGUI) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2))
41 std::string opt_modelname =
"model/teabox/teabox.cao";
44 double opt_proj_error_threshold = 30.;
45 bool opt_use_ogre =
false;
46 bool opt_use_scanline =
false;
47 bool opt_display_projection_error =
false;
48 bool opt_learn =
false;
49 bool opt_auto_init =
false;
50 std::string opt_learning_data =
"learning/data-learned.bin";
51 std::string opt_intrinsic_file =
"";
52 std::string opt_camera_name =
"";
54 for (
int i = 0; i < argc; i++) {
55 if (std::string(argv[i]) ==
"--model") {
56 opt_modelname = std::string(argv[i + 1]);
58 else if (std::string(argv[i]) ==
"--tracker") {
59 opt_tracker = atoi(argv[i + 1]);
61 else if (std::string(argv[i]) ==
"--camera_device" && i + 1 < argc) {
62 opt_device = atoi(argv[i + 1]);
64 else if (std::string(argv[i]) ==
"--max_proj_error") {
65 opt_proj_error_threshold = atof(argv[i + 1]);
67 else if (std::string(argv[i]) ==
"--use_ogre") {
70 else if (std::string(argv[i]) ==
"--use_scanline") {
71 opt_use_scanline =
true;
73 else if (std::string(argv[i]) ==
"--learn") {
76 else if (std::string(argv[i]) ==
"--learning_data" && i + 1 < argc) {
77 opt_learning_data = argv[i + 1];
79 else if (std::string(argv[i]) ==
"--auto_init") {
82 else if (std::string(argv[i]) ==
"--display_proj_error") {
83 opt_display_projection_error =
true;
85 else if (std::string(argv[i]) ==
"--intrinsic" && i + 1 < argc) {
86 opt_intrinsic_file = std::string(argv[i + 1]);
88 else if (std::string(argv[i]) ==
"--camera_name" && i + 1 < argc) {
89 opt_camera_name = std::string(argv[i + 1]);
91 else if (std::string(argv[i]) ==
"--help" || std::string(argv[i]) ==
"-h") {
93 <<
"\nUsage: " << argv[0] <<
" [--camera_device <camera device> (default: 0)]"
94 <<
" [--intrinsic <intrinsic file> (default: empty)]"
95 <<
" [--camera_name <camera name> (default: empty)]"
96 <<
" [--model <model name> (default: teabox)]"
97 <<
" [--tracker <0=egde|1=keypoint|2=hybrid> (default: 2)]"
98 <<
" [--use_ogre] [--use_scanline]"
99 <<
" [--max_proj_error <allowed projection error> (default: 30)]"
100 <<
" [--learn] [--auto_init] [--learning_data <data-learned.bin> (default: learning/data-learned.bin)]"
101 <<
" [--display_proj_error]"
102 <<
" [--help] [-h]\n"
110 if (!parentname.empty())
111 objectname = parentname +
"/" + objectname;
113 std::cout <<
"Tracker requested config files: " << objectname <<
".[init, cao]" << std::endl;
114 std::cout <<
"Tracker optional config files: " << objectname <<
".[ppm]" << std::endl;
116 std::cout <<
"Tracked features: " << std::endl;
117 std::cout <<
" Use edges : " << (opt_tracker == 0 || opt_tracker == 2) << std::endl;
118 std::cout <<
" Use klt : " << (opt_tracker == 1 || opt_tracker == 2) << std::endl;
119 std::cout <<
"Tracker options: " << std::endl;
120 std::cout <<
" Use ogre : " << opt_use_ogre << std::endl;
121 std::cout <<
" Use scanline: " << opt_use_scanline << std::endl;
122 std::cout <<
" Proj. error : " << opt_proj_error_threshold << std::endl;
123 std::cout <<
" Display proj. error: " << opt_display_projection_error << std::endl;
124 std::cout <<
"Config files: " << std::endl;
125 std::cout <<
" Config file : "
126 <<
"\"" << objectname +
".xml"
127 <<
"\"" << std::endl;
128 std::cout <<
" Model file : "
129 <<
"\"" << objectname +
".cao"
130 <<
"\"" << std::endl;
131 std::cout <<
" Init file : "
132 <<
"\"" << objectname +
".init"
133 <<
"\"" << std::endl;
134 std::cout <<
"Learning options : " << std::endl;
135 std::cout <<
" Learn : " << opt_learn << std::endl;
136 std::cout <<
" Auto init : " << opt_auto_init << std::endl;
137 std::cout <<
" Learning data: " << opt_learning_data << std::endl;
140 #if VISP_VERSION_INT > VP_VERSION_INT(3, 2, 0)
152 #if defined(VISP_HAVE_PUGIXML)
154 if (!opt_intrinsic_file.empty() && !opt_camera_name.empty()) {
164 #if defined(VISP_HAVE_V4L2)
166 std::ostringstream device;
167 device <<
"/dev/video" << opt_device;
168 std::cout <<
"Use Video 4 Linux grabber on device " << device.str() << std::endl;
172 #elif defined(VISP_HAVE_DC1394)
174 std::cout <<
"Use DC1394 grabber" << std::endl;
177 #elif defined(VISP_HAVE_CMU1394)
179 std::cout <<
"Use CMU1394 grabber" << std::endl;
182 #elif defined(VISP_HAVE_FLYCAPTURE)
184 std::cout <<
"Use FlyCapture grabber" << std::endl;
187 #elif defined(VISP_HAVE_REALSENSE2)
189 std::cout <<
"Use Realsense 2 grabber" << std::endl;
192 config.disable_stream(RS2_STREAM_DEPTH);
193 config.disable_stream(RS2_STREAM_INFRARED);
194 config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
198 std::cout <<
"Read camera parameters from Realsense device" << std::endl;
200 #elif defined(HAVE_OPENCV_VIDEOIO)
201 std::cout <<
"Use OpenCV grabber on device " << opt_device << std::endl;
202 cv::VideoCapture g(opt_device);
204 std::cout <<
"Failed to open the camera" << std::endl;
214 #if defined(VISP_HAVE_X11)
216 #elif defined(VISP_HAVE_GDI)
218 #elif defined(HAVE_OPENCV_HIGHGUI)
221 display->init(I, 100, 100,
"Model-based tracker");
224 #if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
225 defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
227 #elif defined(HAVE_OPENCV_VIDEOIO)
243 if (opt_tracker == 0)
245 #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
246 else if (opt_tracker == 1)
252 #if !defined(VISP_HAVE_MODULE_KLT)
253 std::cout <<
"klt and hybrid model-based tracker are not available since visp_klt module is not available. "
254 "In CMakeGUI turn visp_klt module ON, configure and build ViSP again."
257 std::cout <<
"Hybrid tracking is impossible since OpenCV is not enabled. "
258 <<
"Install OpenCV, configure and build ViSP again to run this tutorial." << std::endl;
267 #if defined(VISP_HAVE_PUGIXML)
277 if (opt_tracker == 0 || opt_tracker == 2) {
292 #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
293 if (opt_tracker == 1 || opt_tracker == 2) {
328 #if (defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)) || \
329 (VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
330 std::string detectorName =
"SIFT";
331 std::string extractorName =
"SIFT";
332 std::string matcherName =
"BruteForce";
334 std::string detectorName =
"FAST";
335 std::string extractorName =
"ORB";
336 std::string matcherName =
"BruteForce-Hamming";
339 if (opt_learn || opt_auto_init) {
343 #if !(defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
344 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
345 keypoint.setDetectorParameter(
"ORB",
"nLevels", 1);
347 cv::Ptr<cv::ORB> orb_detector = keypoint.
getDetector(
"ORB").dynamicCast<cv::ORB>();
349 orb_detector->setNLevels(1);
357 std::cout <<
"Cannot enable auto detection. Learning file \"" << opt_learning_data <<
"\" doesn't exist"
364 tracker.
initClick(I, objectname +
".init",
true);
367 bool learn_position =
false;
368 bool run_auto_init =
false;
370 run_auto_init =
true;
375 unsigned int learn_cpt = 0;
377 bool tracking_failed =
false;
381 #if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
382 defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
384 #elif defined(HAVE_OPENCV_VIDEOIO)
392 tracking_failed =
false;
394 std::cout <<
"Auto init succeed" << std::endl;
402 else if (tracking_failed) {
404 tracking_failed =
false;
405 tracker.
initClick(I, objectname +
".init",
true);
414 run_auto_init =
false;
420 tracking_failed =
true;
422 std::cout <<
"Tracker needs to restart (tracking exception)" << std::endl;
423 run_auto_init =
true;
427 if (!tracking_failed) {
428 double proj_error = 0;
438 if (proj_error > opt_proj_error_threshold) {
439 std::cout <<
"Tracker needs to restart (projection error detected: " << proj_error <<
")" << std::endl;
441 run_auto_init =
true;
443 tracking_failed =
true;
447 if (!tracking_failed) {
460 std::stringstream ss;
461 ss <<
"Translation: " << std::setprecision(5) << pose[0] <<
" " << pose[1] <<
" " << pose[2] <<
" [m]";
464 ss <<
"Rotation tu: " << std::setprecision(4) <<
vpMath::deg(pose[3]) <<
" " <<
vpMath::deg(pose[4]) <<
" "
469 std::stringstream ss;
475 if (learn_position) {
478 std::vector<cv::KeyPoint> trainKeyPoints;
479 keypoint.
detect(I, trainKeyPoints);
482 std::vector<vpPolygon> polygons;
483 std::vector<std::vector<vpPoint> > roisPt;
484 std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.
getPolygonFaces();
485 polygons = pair.first;
486 roisPt = pair.second;
489 std::vector<cv::Point3f> points3f;
493 keypoint.
buildReference(I, trainKeyPoints, points3f,
true, learn_id++);
496 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
499 learn_position =
false;
500 std::cout <<
"Data learned" << std::endl;
503 std::stringstream ss;
508 else if (opt_auto_init)
519 learn_position =
true;
522 run_auto_init =
true;
528 if (opt_learn && learn_cpt) {
529 std::cout <<
"Save learning from " << learn_cpt <<
" images in file: " << opt_learning_data << std::endl;
538 std::cout <<
"Catch a ViSP exception: " << e << std::endl;
540 #elif defined(VISP_HAVE_OPENCV)
543 std::cout <<
"Install a 3rd party dedicated to frame grabbing (dc1394, cmu1394, v4l2, OpenCV, FlyCapture, "
544 "Realsense2), configure and build ViSP again to use this example"
549 std::cout <<
"Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
Firewire cameras video capture based on CMU 1394 Digital Camera SDK.
void open(vpImage< unsigned char > &I)
Class for firewire ieee1394 video devices using libdc1394-2.x api.
void open(vpImage< unsigned char > &I)
Generic class defining intrinsic camera parameters.
void initPersProjWithoutDistortion(double px, double py, double u0, double v0)
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
static const vpColor none
static const vpColor yellow
static const vpColor green
Display for windows using GDI (available on any windows 32 platform).
The vpDisplayOpenCV allows to display image using the OpenCV library. Thus to enable this class OpenCV should be installed.
Use the X11 console to display images on unix-like OS. Thus to enable this class X11 should be installed.
Class that defines generic functionalities for display.
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
const std::string & getStringMessage() const
void open(vpImage< unsigned char > &I)
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
Class that allows keypoints detection (and descriptors extraction) and matching thanks to the OpenCV library.
unsigned int matchPoint(const vpImage< unsigned char > &I)
void setExtractor(const vpFeatureDescriptorType &extractorType)
void loadLearningData(const std::string &filename, bool binaryMode=false, bool append=false)
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=nullptr)
void setMatcher(const std::string &matcherName)
void saveLearningData(const std::string &filename, bool binaryMode=false, bool saveTrainingImages=true)
void setDetector(const vpFeatureDetectorType &detectorType)
unsigned int buildReference(const vpImage< unsigned char > &I)
cv::Ptr< cv::FeatureDetector > getDetector(const vpFeatureDetectorType &type) const
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this class OpenCV should be installed.
void setBlockSize(int blockSize)
void setQuality(double qualityLevel)
void setHarrisFreeParameter(double harris_k)
void setMaxFeatures(int maxCount)
void setMinDistance(double minDistance)
void setWindowSize(int winSize)
void setPyramidLevels(int pyrMaxLevel)
static double deg(double rad)
Real-time 6D object pose tracking using its CAD model.
virtual void initFromPose(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo) vp_override
virtual int getTrackerType() const
virtual void setKltMaskBorder(const unsigned int &e)
virtual void loadConfigFile(const std::string &configFile, bool verbose=true) vp_override
virtual unsigned int getNbFeaturesEdge() const
virtual void getPose(vpHomogeneousMatrix &cMo) const vp_override
virtual void display(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &col, unsigned int thickness=1, bool displayFullModel=false) vp_override
virtual void getCameraParameters(vpCameraParameters &camera) const vp_override
virtual void setProjectionErrorDisplay(bool display) vp_override
virtual unsigned int getNbFeaturesKlt() const
virtual void setOgreVisibilityTest(const bool &v) vp_override
virtual void setMovingEdge(const vpMe &me)
virtual std::pair< std::vector< vpPolygon >, std::vector< std::vector< vpPoint > > > getPolygonFaces(bool orderPolygons=true, bool useVisibility=true, bool clipPolygon=false) vp_override
virtual void loadModel(const std::string &modelFile, bool verbose=false, const vpHomogeneousMatrix &T=vpHomogeneousMatrix()) vp_override
virtual void setKltOpencv(const vpKltOpencv &t)
virtual void setScanLineVisibilityTest(const bool &v) vp_override
virtual void setCameraParameters(const vpCameraParameters &camera) vp_override
virtual void setTrackerType(int type)
virtual void initClick(const vpImage< unsigned char > &I1, const vpImage< unsigned char > &I2, const std::string &initFile1, const std::string &initFile2, bool displayHelp=false, const vpHomogeneousMatrix &T1=vpHomogeneousMatrix(), const vpHomogeneousMatrix &T2=vpHomogeneousMatrix())
virtual double computeCurrentProjectionError(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &_cMo, const vpCameraParameters &_cam) vp_override
virtual void setDisplayFeatures(bool displayF) vp_override
virtual void setProjectionErrorComputation(const bool &flag) vp_override
virtual void track(const vpImage< unsigned char > &I) vp_override
virtual double getProjectionError() const
void setMu1(const double &mu_1)
void setRange(const unsigned int &range)
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
void setMaskNumber(const unsigned int &mask_number)
void setThreshold(const double &threshold)
void setSampleStep(const double &sample_step)
void setMaskSize(const unsigned int &mask_size)
void setMu2(const double &mu_2)
Implementation of a pose vector and operations on poses.
vpCameraParameters getCameraParameters(const rs2_stream &stream, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithDistortion, int index=-1) const
void acquire(vpImage< unsigned char > &grey, double *ts=nullptr)
bool open(const rs2::config &cfg=rs2::config())
Class that is a wrapper over the Video4Linux2 (V4L2) driver.
void open(vpImage< unsigned char > &I)
void setScale(unsigned scale=vpV4l2Grabber::DEFAULT_SCALE)
void setDevice(const std::string &devname)
XML parser to load and save intrinsic camera parameters.
int parse(vpCameraParameters &cam, const std::string &filename, const std::string &camera_name, const vpCameraParameters::vpCameraParametersProjType &projModel, unsigned int image_width=0, unsigned int image_height=0, bool verbose=true)
void display(vpImage< unsigned char > &I, const std::string &title)
Display a gray-scale image.
VISP_EXPORT double measureTimeMs()