4 #include <visp3/core/vpConfig.h>
16 #if (defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
17 defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2) || \
18 ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI)) || \
19 ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO))) && defined(VISP_HAVE_DISPLAY) && \
20 ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_CALIB3D) && defined(HAVE_OPENCV_FEATURES2D)) || \
21 ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_3D) && defined(HAVE_OPENCV_FEATURES))
23 #ifdef VISP_HAVE_MODULE_SENSOR
24 #include <visp3/sensor/vp1394CMUGrabber.h>
25 #include <visp3/sensor/vp1394TwoGrabber.h>
26 #include <visp3/sensor/vpFlyCaptureGrabber.h>
27 #include <visp3/sensor/vpRealSense2.h>
28 #include <visp3/sensor/vpV4l2Grabber.h>
30 #include <visp3/core/vpIoTools.h>
31 #include <visp3/core/vpXmlParserCamera.h>
32 #include <visp3/gui/vpDisplayFactory.h>
33 #include <visp3/io/vpImageIo.h>
34 #include <visp3/vision/vpKeyPoint.h>
36 #include <visp3/mbt/vpMbGenericTracker.h>
39 #if (VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI)
40 #include <opencv2/highgui/highgui.hpp>
41 #elif (VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO)
42 #include <opencv2/videoio/videoio.hpp>
45 int main(
int argc,
char **argv)
47 #ifdef ENABLE_VISP_NAMESPACE
51 #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
52 std::shared_ptr<vpDisplay> display;
58 std::string opt_modelname =
"model/teabox/teabox.cao";
61 double opt_proj_error_threshold = 30.;
62 bool opt_use_ogre =
false;
63 bool opt_use_scanline =
false;
64 bool opt_display_projection_error =
false;
65 bool opt_learn =
false;
66 bool opt_auto_init =
false;
67 std::string opt_learning_data =
"learning/data-learned.bin";
68 std::string opt_intrinsic_file =
"";
69 std::string opt_camera_name =
"";
71 for (
int i = 1; i < argc; i++) {
72 if (std::string(argv[i]) ==
"--model" && i + 1 < argc) {
73 opt_modelname = std::string(argv[++i]);
75 else if (std::string(argv[i]) ==
"--tracker" && i + 1 < argc) {
76 opt_tracker = atoi(argv[++i]);
78 else if (std::string(argv[i]) ==
"--camera-device" && i + 1 < argc) {
79 opt_device = atoi(argv[++i]);
81 else if (std::string(argv[i]) ==
"--max_proj_error" && i + 1 < argc) {
82 opt_proj_error_threshold = atof(argv[++i]);
84 else if (std::string(argv[i]) ==
"--use_ogre") {
87 else if (std::string(argv[i]) ==
"--use_scanline") {
88 opt_use_scanline =
true;
90 else if (std::string(argv[i]) ==
"--learn") {
93 else if (std::string(argv[i]) ==
"--learning_data" && i + 1 < argc) {
94 opt_learning_data = argv[++i];
96 else if (std::string(argv[i]) ==
"--auto_init") {
99 else if (std::string(argv[i]) ==
"--display_proj_error") {
100 opt_display_projection_error =
true;
102 else if (std::string(argv[i]) ==
"--intrinsic" && i + 1 < argc) {
103 opt_intrinsic_file = std::string(argv[++i]);
105 else if (std::string(argv[i]) ==
"--camera-name" && i + 1 < argc) {
106 opt_camera_name = std::string(argv[++i]);
108 else if (std::string(argv[i]) ==
"--help" || std::string(argv[i]) ==
"-h") {
110 <<
"\nUsage: " << argv[0] <<
" [--camera-device <camera device> (default: 0)]"
111 <<
" [--intrinsic <intrinsic file> (default: empty)]"
112 <<
" [--camera-name <camera name> (default: empty)]"
113 <<
" [--model <model name> (default: teabox)]"
114 <<
" [--tracker <0=egde|1=keypoint|2=hybrid> (default: 2)]"
115 <<
" [--use_ogre] [--use_scanline]"
116 <<
" [--max_proj_error <allowed projection error> (default: 30)]"
119 <<
" [--learning_data <data-learned.bin> (default: learning/data-learned.bin)]"
120 <<
" [--display_proj_error]"
121 <<
" [--help] [-h]\n"
129 if (!parentname.empty())
130 objectname = parentname +
"/" + objectname;
132 std::cout <<
"Tracker requested config files: " << objectname <<
".[init, cao]" << std::endl;
133 std::cout <<
"Tracker optional config files: " << objectname <<
".[ppm]" << std::endl;
135 std::cout <<
"Tracked features: " << std::endl;
136 std::cout <<
" Use edges : " << (opt_tracker == 0 || opt_tracker == 2) << std::endl;
137 std::cout <<
" Use klt : " << (opt_tracker == 1 || opt_tracker == 2) << std::endl;
138 std::cout <<
"Tracker options: " << std::endl;
139 std::cout <<
" Use ogre : " << opt_use_ogre << std::endl;
140 std::cout <<
" Use scanline: " << opt_use_scanline << std::endl;
141 std::cout <<
" Proj. error : " << opt_proj_error_threshold << std::endl;
142 std::cout <<
" Display proj. error: " << opt_display_projection_error << std::endl;
143 std::cout <<
"Config files: " << std::endl;
144 std::cout <<
" Config file : "
145 <<
"\"" << objectname +
".xml"
146 <<
"\"" << std::endl;
147 std::cout <<
" Model file : "
148 <<
"\"" << objectname +
".cao"
149 <<
"\"" << std::endl;
150 std::cout <<
" Init file : "
151 <<
"\"" << objectname +
".init"
152 <<
"\"" << std::endl;
153 std::cout <<
"Learning options : " << std::endl;
154 std::cout <<
" Learn : " << opt_learn << std::endl;
155 std::cout <<
" Auto init : " << opt_auto_init << std::endl;
156 std::cout <<
" Learning data: " << opt_learning_data << std::endl;
159 #if VISP_VERSION_INT > VP_VERSION_INT(3, 2, 0)
171 #if defined(VISP_HAVE_PUGIXML)
173 if (!opt_intrinsic_file.empty() && !opt_camera_name.empty()) {
183 #if defined(VISP_HAVE_V4L2)
185 std::ostringstream device;
186 device <<
"/dev/video" << opt_device;
187 std::cout <<
"Use Video 4 Linux grabber on device " << device.str() << std::endl;
191 #elif defined(VISP_HAVE_DC1394)
193 std::cout <<
"Use DC1394 grabber" << std::endl;
196 #elif defined(VISP_HAVE_CMU1394)
198 std::cout <<
"Use CMU1394 grabber" << std::endl;
201 #elif defined(VISP_HAVE_FLYCAPTURE)
203 std::cout <<
"Use FlyCapture grabber" << std::endl;
206 #elif defined(VISP_HAVE_REALSENSE2)
208 std::cout <<
"Use Realsense 2 grabber" << std::endl;
211 config.disable_stream(RS2_STREAM_DEPTH);
212 config.disable_stream(RS2_STREAM_INFRARED);
213 config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
217 std::cout <<
"Read camera parameters from Realsense device" << std::endl;
219 #elif ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI))|| ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO))
220 std::cout <<
"Use OpenCV grabber on device " << opt_device << std::endl;
221 cv::VideoCapture g(opt_device);
223 std::cout <<
"Failed to open the camera" << std::endl;
232 #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
237 display->init(I, 100, 100,
"Model-based tracker");
240 #if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
242 #elif ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI))|| ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO))
258 if (opt_tracker == 0)
260 #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
261 else if (opt_tracker == 1)
267 #if !defined(VISP_HAVE_MODULE_KLT)
268 std::cout <<
"klt and hybrid model-based tracker are not available since visp_klt module is not available. "
269 "In CMakeGUI turn visp_klt module ON, configure and build ViSP again."
272 std::cout <<
"Hybrid tracking is impossible since OpenCV is not enabled. "
273 <<
"Install OpenCV, configure and build ViSP again to run this tutorial." << std::endl;
282 #if defined(VISP_HAVE_PUGIXML)
292 if (opt_tracker == 0 || opt_tracker == 2) {
307 #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
308 if (opt_tracker == 1 || opt_tracker == 2) {
318 tracker.setKltOpencv(klt_settings);
319 tracker.setKltMaskBorder(5);
343 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_XFEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
344 std::string detectorName =
"SIFT";
345 std::string extractorName =
"SIFT";
346 std::string matcherName =
"BruteForce";
347 #elif ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
348 std::string detectorName =
"FAST";
349 std::string extractorName =
"ORB";
350 std::string matcherName =
"BruteForce-Hamming";
353 if (opt_learn || opt_auto_init) {
357 #if ((VISP_HAVE_OPENCV_VERSION < 0x050000) && defined(HAVE_OPENCV_FEATURES2D)) || ((VISP_HAVE_OPENCV_VERSION >= 0x050000) && defined(HAVE_OPENCV_FEATURES))
358 #if (VISP_HAVE_OPENCV_VERSION < 0x030000)
359 keypoint.setDetectorParameter(
"ORB",
"nLevels", 1);
361 cv::Ptr<cv::ORB> orb_detector = keypoint.
getDetector(
"ORB").dynamicCast<cv::ORB>();
363 orb_detector->setNLevels(1);
371 std::cout <<
"Cannot enable auto detection. Learning file \"" << opt_learning_data <<
"\" doesn't exist"
378 tracker.initClick(I, objectname +
".init",
true);
381 bool learn_position =
false;
382 bool run_auto_init =
false;
384 run_auto_init =
true;
389 unsigned int learn_cpt = 0;
391 bool tracking_failed =
false;
395 #if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
397 #elif ((VISP_HAVE_OPENCV_VERSION < 0x030000) && defined(HAVE_OPENCV_HIGHGUI))|| ((VISP_HAVE_OPENCV_VERSION >= 0x030000) && defined(HAVE_OPENCV_VIDEOIO))
405 tracking_failed =
false;
407 std::cout <<
"Auto init succeed" << std::endl;
415 else if (tracking_failed) {
417 tracking_failed =
false;
418 tracker.initClick(I, objectname +
".init",
true);
427 run_auto_init =
false;
433 tracking_failed =
true;
435 std::cout <<
"Tracker needs to restart (tracking exception)" << std::endl;
436 run_auto_init =
true;
440 if (!tracking_failed) {
441 double proj_error = 0;
451 if (proj_error > opt_proj_error_threshold) {
452 std::cout <<
"Tracker needs to restart (projection error detected: " << proj_error <<
")" << std::endl;
454 run_auto_init =
true;
456 tracking_failed =
true;
460 if (!tracking_failed) {
473 std::stringstream ss;
474 ss <<
"Translation: " << std::setprecision(5) << pose[0] <<
" " << pose[1] <<
" " << pose[2] <<
" [m]";
477 ss <<
"Rotation tu: " << std::setprecision(4) <<
vpMath::deg(pose[3]) <<
" " <<
vpMath::deg(pose[4]) <<
" "
482 std::stringstream ss;
488 if (learn_position) {
491 std::vector<cv::KeyPoint> trainKeyPoints;
492 keypoint.
detect(I, trainKeyPoints);
495 std::vector<vpPolygon> polygons;
496 std::vector<std::vector<vpPoint> > roisPt;
497 std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.
getPolygonFaces();
498 polygons = pair.first;
499 roisPt = pair.second;
502 std::vector<cv::Point3f> points3f;
506 keypoint.
buildReference(I, trainKeyPoints, points3f,
true, learn_id++);
509 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
512 learn_position =
false;
513 std::cout <<
"Data learned" << std::endl;
516 std::stringstream ss;
521 else if (opt_auto_init)
532 learn_position =
true;
535 run_auto_init =
true;
541 if (opt_learn && learn_cpt) {
542 std::cout <<
"Save learning from " << learn_cpt <<
" images in file: " << opt_learning_data << std::endl;
547 std::cout <<
"Catch a ViSP exception: " << e << std::endl;
551 #if (VISP_CXX_STANDARD < VISP_CXX_STANDARD_11)
552 if (display !=
nullptr) {
563 #if defined(VISP_HAVE_OPENCV)
564 std::cout <<
"Install a 3rd party dedicated to frame grabbing (dc1394, cmu1394, v4l2, OpenCV, FlyCapture, "
565 <<
"Realsense2), configure and build ViSP again to use this tutorial."
568 std::cout <<
"Install OpenCV 3rd party, configure and build ViSP again to use this tutorial." << std::endl;
Firewire camera video capture based on the CMU 1394 Digital Camera SDK.
void open(vpImage< unsigned char > &I)
Class for firewire ieee1394 video devices using libdc1394-2.x api.
void open(vpImage< unsigned char > &I)
Generic class defining intrinsic camera parameters.
void initPersProjWithoutDistortion(double px, double py, double u0, double v0)
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
static const vpColor none
static const vpColor yellow
static const vpColor green
Class that defines generic functionalities for display.
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
Error that can be emitted by ViSP classes.
const std::string & getStringMessage() const
void open(vpImage< unsigned char > &I)
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
Class that allows 2D keypoint feature detection (and descriptor extraction) and matching thanks to...
unsigned int matchPoint(const vpImage< unsigned char > &I)
void setExtractor(const vpFeatureDescriptorType &extractorType)
void loadLearningData(const std::string &filename, bool binaryMode=false, bool append=false)
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=nullptr)
void setMatcher(const std::string &matcherName)
void saveLearningData(const std::string &filename, bool binaryMode=false, bool saveTrainingImages=true)
void setDetector(const vpFeatureDetectorType &detectorType)
unsigned int buildReference(const vpImage< unsigned char > &I)
cv::Ptr< cv::FeatureDetector > getDetector(const vpFeatureDetectorType &type) const
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this ...
void setBlockSize(int blockSize)
void setQuality(double qualityLevel)
void setHarrisFreeParameter(double harris_k)
void setMaxFeatures(int maxCount)
void setMinDistance(double minDistance)
void setWindowSize(int winSize)
void setPyramidLevels(int pyrMaxLevel)
static double deg(double rad)
Real-time 6D object pose tracking using its CAD model.
virtual void setCameraParameters(const vpCameraParameters &camera) VP_OVERRIDE
virtual int getTrackerType() const
virtual void setOgreVisibilityTest(const bool &v) VP_OVERRIDE
virtual std::pair< std::vector< vpPolygon >, std::vector< std::vector< vpPoint > > > getPolygonFaces(bool orderPolygons=true, bool useVisibility=true, bool clipPolygon=false) VP_OVERRIDE
virtual void setProjectionErrorComputation(const bool &flag) VP_OVERRIDE
virtual void setDisplayFeatures(bool displayF) VP_OVERRIDE
virtual unsigned int getNbFeaturesEdge() const
virtual double computeCurrentProjectionError(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &_cMo, const vpCameraParameters &_cam) VP_OVERRIDE
virtual void getCameraParameters(vpCameraParameters &camera) const VP_OVERRIDE
virtual void initFromPose(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo) VP_OVERRIDE
virtual void getPose(vpHomogeneousMatrix &cMo) const VP_OVERRIDE
virtual unsigned int getNbFeaturesKlt() const
virtual void setMovingEdge(const vpMe &me)
virtual void track(const vpImage< unsigned char > &I) VP_OVERRIDE
virtual void loadModel(const std::string &modelFile, bool verbose=false, const vpHomogeneousMatrix &T=vpHomogeneousMatrix()) VP_OVERRIDE
virtual void setTrackerType(int type)
virtual void setScanLineVisibilityTest(const bool &v) VP_OVERRIDE
virtual void display(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &col, unsigned int thickness=1, bool displayFullModel=false) VP_OVERRIDE
virtual void loadConfigFile(const std::string &configFile, bool verbose=true) VP_OVERRIDE
virtual void setProjectionErrorDisplay(bool display) VP_OVERRIDE
virtual double getProjectionError() const
void setMu1(const double &mu_1)
void setRange(const unsigned int &range)
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
void setMaskNumber(const unsigned int &mask_number)
void setThreshold(const double &threshold)
void setSampleStep(const double &sample_step)
void setMaskSize(const unsigned int &mask_size)
void setMu2(const double &mu_2)
Implementation of a pose vector and operations on poses.
vpCameraParameters getCameraParameters(const rs2_stream &stream, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithDistortion, int index=-1) const
void acquire(vpImage< unsigned char > &grey, double *ts=nullptr)
bool open(const rs2::config &cfg=rs2::config())
Class that is a wrapper over the Video4Linux2 (V4L2) driver.
void open(vpImage< unsigned char > &I)
void setScale(unsigned scale=vpV4l2Grabber::DEFAULT_SCALE)
void setDevice(const std::string &devname)
XML parser to load and save intrinsic camera parameters.
int parse(vpCameraParameters &cam, const std::string &filename, const std::string &camera_name, const vpCameraParameters::vpCameraParametersProjType &projModel, unsigned int image_width=0, unsigned int image_height=0, bool verbose=true)
std::shared_ptr< vpDisplay > createDisplay()
Return a smart pointer vpDisplay specialization if a GUI library is available or nullptr otherwise.
vpDisplay * allocateDisplay()
Return a newly allocated vpDisplay specialization if a GUI library is available or nullptr otherwise.
VISP_EXPORT double measureTimeMs()