#include <visp3/core/vpConfig.h>
#ifdef VISP_HAVE_MODULE_SENSOR
#include <visp3/sensor/vp1394CMUGrabber.h>
#include <visp3/sensor/vp1394TwoGrabber.h>
#include <visp3/sensor/vpFlyCaptureGrabber.h>
#include <visp3/sensor/vpRealSense2.h>
#include <visp3/sensor/vpV4l2Grabber.h>
#endif
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/vision/vpKeyPoint.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#if defined(HAVE_OPENCV_VIDEOIO)
#include <opencv2/videoio.hpp>
#endif
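// This example shows live model-based tracking with vpMbGenericTracker:
// images are grabbed from the first third-party framework found at build
// time (V4L2, DC1394, CMU1394, FlyCapture, RealSense2, or OpenCV as a
// fallback), the CAD model given by --model is tracked, and keypoints can
// optionally be learned on the object (--learn) so that the tracker can
// re-initialize itself automatically later on (--auto_init).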
int main(int argc, char **argv)
{
#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_VIDEOIO) && \
(defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
defined(HAVE_OPENCV_HIGHGUI) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2))
#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif
try {
std::string opt_modelname = "model/teabox/teabox.cao";
int opt_tracker = 2;
int opt_device = 0;
double opt_proj_error_threshold = 30.;
bool opt_use_ogre = false;
bool opt_use_scanline = false;
bool opt_display_projection_error = false;
bool opt_learn = false;
bool opt_auto_init = false;
std::string opt_learning_data = "learning/data-learned.bin";
std::string opt_intrinsic_file = "";
std::string opt_camera_name = "";
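// Parse the command line; each option mirrors the usage text printed by --help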
for (int i = 0; i < argc; i++) {
if (std::string(argv[i]) == "--model") {
opt_modelname = std::string(argv[i + 1]);
}
else if (std::string(argv[i]) == "--tracker") {
opt_tracker = atoi(argv[i + 1]);
}
else if (std::string(argv[i]) == "--camera_device" && i + 1 < argc) {
opt_device = atoi(argv[i + 1]);
}
else if (std::string(argv[i]) == "--max_proj_error") {
opt_proj_error_threshold = atof(argv[i + 1]);
}
else if (std::string(argv[i]) == "--use_ogre") {
opt_use_ogre = true;
}
else if (std::string(argv[i]) == "--use_scanline") {
opt_use_scanline = true;
}
else if (std::string(argv[i]) == "--learn") {
opt_learn = true;
}
else if (std::string(argv[i]) == "--learning_data" && i + 1 < argc) {
opt_learning_data = argv[i + 1];
}
else if (std::string(argv[i]) == "--auto_init") {
opt_auto_init = true;
}
else if (std::string(argv[i]) == "--display_proj_error") {
opt_display_projection_error = true;
}
else if (std::string(argv[i]) == "--intrinsic" && i + 1 < argc) {
opt_intrinsic_file = std::string(argv[i + 1]);
}
else if (std::string(argv[i]) == "--camera_name" && i + 1 < argc) {
opt_camera_name = std::string(argv[i + 1]);
}
else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
std::cout
<< "\nUsage: " << argv[0] << " [--camera_device <camera device> (default: 0)]"
<< " [--intrinsic <intrinsic file> (default: empty)]"
<< " [--camera_name <camera name> (default: empty)]"
<< " [--model <model name> (default: teabox)]"
<< " [--tracker <0=egde|1=keypoint|2=hybrid> (default: 2)]"
<< " [--use_ogre] [--use_scanline]"
<< " [--max_proj_error <allowed projection error> (default: 30)]"
<< " [--learn] [--auto_init] [--learning_data <data-learned.bin> (default: learning/data-learned.bin)]"
<< " [--display_proj_error]"
<< " [--help] [-h]\n"
<< std::endl;
return EXIT_SUCCESS;
}
}
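// Derive the object base name from the model path: the tracker looks for
// <objectname>.cao and <objectname>.init next to each other, plus an optional
// <objectname>.xml settings file and <objectname>.ppm hint image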
std::string parentname = vpIoTools::getParent(opt_modelname);
std::string objectname = vpIoTools::getNameWE(opt_modelname);
if (!parentname.empty())
objectname = parentname + "/" + objectname;
std::cout << "Tracker requested config files: " << objectname << ".[init, cao]" << std::endl;
std::cout << "Tracker optional config files: " << objectname << ".[ppm]" << std::endl;
std::cout << "Tracked features: " << std::endl;
std::cout << " Use edges : " << (opt_tracker == 0 || opt_tracker == 2) << std::endl;
std::cout << " Use klt : " << (opt_tracker == 1 || opt_tracker == 2) << std::endl;
std::cout << "Tracker options: " << std::endl;
std::cout << " Use ogre : " << opt_use_ogre << std::endl;
std::cout << " Use scanline: " << opt_use_scanline << std::endl;
std::cout << " Proj. error : " << opt_proj_error_threshold << std::endl;
std::cout << " Display proj. error: " << opt_display_projection_error << std::endl;
std::cout << "Config files: " << std::endl;
std::cout << " Config file : "
<< "\"" << objectname + ".xml"
<< "\"" << std::endl;
std::cout << " Model file : "
<< "\"" << objectname + ".cao"
<< "\"" << std::endl;
std::cout << " Init file : "
<< "\"" << objectname + ".init"
<< "\"" << std::endl;
std::cout << "Learning options : " << std::endl;
std::cout << " Learn : " << opt_learn << std::endl;
std::cout << " Auto init : " << opt_auto_init << std::endl;
std::cout << " Learning data: " << opt_learning_data << std::endl;
#if VISP_VERSION_INT > VP_VERSION_INT(3, 2, 0)
vpImage<vpRGBa> I; // Since ViSP 3.2.0 model-based tracking is supported on color images
#else
vpImage<unsigned char> I; // Track on gray level images
#endif
vpCameraParameters cam;
cam.initPersProjWithoutDistortion(839.21470, 839.44722, 325.66776, 243.69727);
#if defined(VISP_HAVE_PUGIXML)
vpXmlParserCamera parser;
if (!opt_intrinsic_file.empty() && !opt_camera_name.empty()) {
parser.parse(cam, opt_intrinsic_file, opt_camera_name, vpCameraParameters::perspectiveProjWithoutDistortion);
}
#endif
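// Open the first available frame grabber, following the #elif priority order
// below; with RealSense2 the intrinsics are read from the device, otherwise
// the defaults above or the --intrinsic xml file are used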
#if defined(VISP_HAVE_V4L2)
vpV4l2Grabber g;
std::ostringstream device;
device << "/dev/video" << opt_device;
std::cout << "Use Video 4 Linux grabber on device " << device.str() << std::endl;
g.setDevice(device.str());
g.setScale(1);
g.open(I);
#elif defined(VISP_HAVE_DC1394)
(void)opt_device; // To avoid unused variable warning
std::cout << "Use DC1394 grabber" << std::endl;
vp1394TwoGrabber g;
g.open(I);
#elif defined(VISP_HAVE_CMU1394)
(void)opt_device; // To avoid unused variable warning
std::cout << "Use CMU1394 grabber" << std::endl;
vp1394CMUGrabber g;
g.open(I);
#elif defined(VISP_HAVE_FLYCAPTURE)
(void)opt_device; // To avoid unused variable warning
std::cout << "Use FlyCapture grabber" << std::endl;
vpFlyCaptureGrabber g;
g.open(I);
#elif defined(VISP_HAVE_REALSENSE2)
(void)opt_device; // To avoid unused variable warning
std::cout << "Use Realsense 2 grabber" << std::endl;
vpRealSense2 g;
rs2::config config;
config.disable_stream(RS2_STREAM_DEPTH);
config.disable_stream(RS2_STREAM_INFRARED);
config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
g.open(config);
g.acquire(I);
std::cout << "Read camera parameters from Realsense device" << std::endl;
cam = g.getCameraParameters(RS2_STREAM_COLOR, vpCameraParameters::perspectiveProjWithoutDistortion);
#elif defined(HAVE_OPENCV_VIDEOIO)
std::cout << "Use OpenCV grabber on device " << opt_device << std::endl;
cv::VideoCapture g(opt_device);
if (!g.isOpened()) {
std::cout << "Failed to open the camera" << std::endl;
return EXIT_FAILURE;
}
cv::Mat frame;
g >> frame;
#endif
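// Attach a display window to the image, picking the first GUI backend
// available among X11, GDI and OpenCV highgui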
vpDisplay *display = nullptr;
#if defined(VISP_HAVE_X11)
display = new vpDisplayX;
#elif defined(VISP_HAVE_GDI)
display = new vpDisplayGDI;
#elif defined(HAVE_OPENCV_HIGHGUI)
display = new vpDisplayOpenCV;
#endif
display->init(I, 100, 100, "Model-based tracker");
// Wait for a user click before starting the tracker initialization
while (true) {
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
g.acquire(I);
#elif defined(HAVE_OPENCV_VIDEOIO)
g >> frame;
vpImageConvert::convert(frame, I);
#endif
vpDisplay::display(I);
vpDisplay::displayText(I, 20, 20, "Click when ready.", vpColor::red);
vpDisplay::flush(I);
if (vpDisplay::getClick(I, false)) {
break;
}
}
vpMbGenericTracker tracker;
if (opt_tracker == 0)
tracker.setTrackerType(vpMbGenericTracker::EDGE_TRACKER);
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
else if (opt_tracker == 1)
tracker.setTrackerType(vpMbGenericTracker::KLT_TRACKER);
else
tracker.setTrackerType(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
#else
else {
#if !defined(VISP_HAVE_MODULE_KLT)
std::cout << "klt and hybrid model-based tracker are not available since visp_klt module is not available. "
"In CMakeGUI turn visp_klt module ON, configure and build ViSP again."
<< std::endl;
#else
std::cout << "Hybrid tracking is impossible since OpenCV is not enabled. "
<< "Install OpenCV, configure and build ViSP again to run this tutorial." << std::endl;
#endif
return EXIT_SUCCESS;
}
#endif
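// Configure the tracker: from <objectname>.xml when pugixml is available and
// the file exists, otherwise from the hard-coded settings below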
bool usexml = false;
#if defined(VISP_HAVE_PUGIXML)
if (vpIoTools::checkFilename(objectname + ".xml")) {
tracker.loadConfigFile(objectname + ".xml");
usexml = true;
}
#endif
if (!usexml) {
if (opt_tracker == 0 || opt_tracker == 2) {
vpMe me;
me.setMaskSize(5);
me.setMaskNumber(180);
me.setRange(8);
me.setLikelihoodThresholdType(vpMe::NORMALIZED_THRESHOLD);
me.setThreshold(20);
me.setMu1(0.5);
me.setMu2(0.5);
me.setSampleStep(4);
tracker.setMovingEdge(me);
}
#if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
if (opt_tracker == 1 || opt_tracker == 2) {
vpKltOpencv klt_settings;
klt_settings.setMaxFeatures(300);
klt_settings.setWindowSize(5);
klt_settings.setQuality(0.015);
klt_settings.setMinDistance(8);
klt_settings.setHarrisFreeParameter(0.01);
klt_settings.setBlockSize(3);
klt_settings.setPyramidLevels(3);
tracker.setKltOpencv(klt_settings);
tracker.setKltMaskBorder(5);
}
#endif
tracker.setCameraParameters(cam);
}
tracker.loadModel(objectname + ".cao");
tracker.setDisplayFeatures(true);
tracker.setOgreVisibilityTest(opt_use_ogre);
tracker.setScanLineVisibilityTest(opt_use_scanline);
tracker.setProjectionErrorComputation(true);
tracker.setProjectionErrorDisplay(opt_display_projection_error);
#if (defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D)) || \
(VISP_HAVE_OPENCV_VERSION >= 0x030411 && CV_MAJOR_VERSION < 4) || (VISP_HAVE_OPENCV_VERSION >= 0x040400)
std::string detectorName = "SIFT";
std::string extractorName = "SIFT";
std::string matcherName = "BruteForce";
#else
std::string detectorName = "FAST";
std::string extractorName = "ORB";
std::string matcherName = "BruteForce-Hamming";
#endif
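// Keypoint machinery used for learning and auto-initialization: SIFT
// detection/description when available (non-free or xfeatures2d modules, or
// OpenCV >= 3.4.17 in the 3.x series / >= 4.4.0 where SIFT is back in the
// main repository), otherwise FAST + ORB with Hamming brute-force matching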
vpKeyPoint keypoint;
if (opt_learn || opt_auto_init) {
keypoint.setDetector(detectorName);
keypoint.setExtractor(extractorName);
keypoint.setMatcher(matcherName);
#if !(defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
keypoint.setDetectorParameter("ORB", "nLevels", 1);
#else
// With OpenCV >= 3.0, tune the ORB detector directly to use a single pyramid level
cv::Ptr<cv::ORB> orb_detector = keypoint.getDetector("ORB").dynamicCast<cv::ORB>();
if (orb_detector) {
orb_detector->setNLevels(1);
}
#endif
#endif
}
if (opt_auto_init) {
if (!vpIoTools::checkFilename(opt_learning_data)) {
std::cout << "Cannot enable auto detection. Learning file \"" << opt_learning_data << "\" doesn't exist"
<< std::endl;
return EXIT_FAILURE;
}
keypoint.loadLearningData(opt_learning_data, true);
}
else {
tracker.initClick(I, objectname + ".init", true);
}
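// learn_position and run_auto_init are toggled from mouse clicks inside the
// main loop below: left click learns the current view, right click quits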
bool learn_position = false;
bool run_auto_init = false;
if (opt_auto_init) {
run_auto_init = true;
}
int learn_id = 1;
unsigned int learn_cpt = 0;
bool quit = false;
bool tracking_failed = false;
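// cMo is the pose of the object frame in the camera frame; it is updated by
// the tracker at each iteration and reused for re-detection and learning.
// Main loop: grab, (re-)initialize when requested, track, check the
// projection error, display the result and handle user clicks.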
vpHomogeneousMatrix cMo;
while (!quit) {
double t_begin = vpTime::measureTimeMs();
#if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || \
defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
g.acquire(I);
#elif defined(HAVE_OPENCV_VIDEOIO)
g >> frame;
vpImageConvert::convert(frame, I);
#endif
vpDisplay::display(I);
// Run auto initialization from previously learned data
if (run_auto_init) {
tracking_failed = false;
if (keypoint.matchPoint(I, cam, cMo)) {
std::cout << "Auto init succeed" << std::endl;
tracker.initFromPose(I, cMo);
}
else {
vpDisplay::flush(I);
continue;
}
}
else if (tracking_failed) {
tracking_failed = false;
tracker.initClick(I, objectname + ".init", true);
}
// Run the tracker
try {
if (run_auto_init) {
// Turn display features off just after auto init to not display wrong moving edges if the tracker fails
tracker.setDisplayFeatures(false);
run_auto_init = false;
}
tracker.track(I);
}
catch (const vpException &e) {
std::cout << "Tracker exception: " << e.getStringMessage() << std::endl;
tracking_failed = true;
if (opt_auto_init) {
std::cout << "Tracker needs to restart (tracking exception)" << std::endl;
run_auto_init = true;
}
}
// Check the tracking errors when tracking succeeded
if (!tracking_failed) {
double proj_error = 0;
if (tracker.getTrackerType() & vpMbGenericTracker::EDGE_TRACKER) {
// Projection error computed on the moving edges only
proj_error = tracker.getProjectionError();
}
else {
// Re-projection error computed from the model displayed at the estimated pose
tracker.getPose(cMo);
tracker.getCameraParameters(cam);
proj_error = tracker.computeCurrentProjectionError(I, cMo, cam);
}
if (proj_error > opt_proj_error_threshold) {
std::cout << "Tracker needs to restart (projection error detected: " << proj_error << ")" << std::endl;
if (opt_auto_init) {
run_auto_init = true;
}
tracking_failed = true;
}
}
if (!tracking_failed) {
// Display the model at the estimated pose
tracker.setDisplayFeatures(true);
tracker.getPose(cMo);
tracker.getCameraParameters(cam);
tracker.display(I, cMo, cam, vpColor::red, 2);
vpDisplay::displayFrame(I, cMo, cam, 0.025, vpColor::none, 3);
{ // Display the estimated pose in [m] and [deg]
vpPoseVector pose(cMo);
std::stringstream ss;
ss << "Translation: " << std::setprecision(5) << pose[0] << " " << pose[1] << " " << pose[2] << " [m]";
vpDisplay::displayText(I, 80, 20, ss.str(), vpColor::green);
ss.str(""); // Erase the stream content
ss << "Rotation tu: " << std::setprecision(4) << vpMath::deg(pose[3]) << " " << vpMath::deg(pose[4]) << " "
<< vpMath::deg(pose[5]) << " [deg]";
vpDisplay::displayText(I, 100, 20, ss.str(), vpColor::green);
}
{
std::stringstream ss;
ss << "Features: edges " << tracker.getNbFeaturesEdge() << ", klt " << tracker.getNbFeaturesKlt();
vpDisplay::displayText(I, 120, 20, ss.str(), vpColor::red);
}
}
if (learn_position) {
learn_cpt++;
// Detect keypoints on the current image
std::vector<cv::KeyPoint> trainKeyPoints;
keypoint.detect(I, trainKeyPoints);
// Keep only keypoints that lie on the visible object faces
std::vector<vpPolygon> polygons;
std::vector<std::vector<vpPoint> > roisPt;
std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
polygons = pair.first;
roisPt = pair.second;
// Compute the 3D coordinates of those keypoints
std::vector<cv::Point3f> points3f;
vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);
// Append the keypoints to the learning database
keypoint.buildReference(I, trainKeyPoints, points3f, true, learn_id++);
// Display the learned keypoints
for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
vpDisplay::displayCross(I, (int)it->pt.y, (int)it->pt.x, 10, vpColor::yellow, 3);
}
learn_position = false;
std::cout << "Data learned" << std::endl;
}
std::stringstream ss;
ss << "Loop time: " << vpTime::measureTimeMs() - t_begin << " ms";
vpDisplay::displayText(I, 20, 20, ss.str(), vpColor::red);
if (opt_learn)
vpDisplay::displayText(I, 35, 20, "Left click: learn  Right click: quit", vpColor::red);
else if (opt_auto_init)
vpDisplay::displayText(I, 35, 20, "Left click: auto_init  Right click: quit", vpColor::red);
else
vpDisplay::displayText(I, 35, 20, "Right click: quit", vpColor::red);
vpMouseButton::vpMouseButtonType button;
if (vpDisplay::getClick(I, button, false)) {
if (button == vpMouseButton::button3) {
quit = true;
}
else if (button == vpMouseButton::button1 && opt_learn) {
learn_position = true;
}
else if (button == vpMouseButton::button1 && opt_auto_init && !opt_learn) {
run_auto_init = true;
}
}
vpDisplay::flush(I);
}
if (opt_learn && learn_cpt) {
std::cout << "Save learning from " << learn_cpt << " images in file: " << opt_learning_data << std::endl;
}
delete display;
}
std::cout << "Catch a ViSP exception: " << e << std::endl;
}
#elif defined(VISP_HAVE_OPENCV)
(void)argc;
(void)argv;
std::cout << "Install a 3rd party dedicated to frame grabbing (dc1394, cmu1394, v4l2, OpenCV, FlyCapture, "
"Realsense2), configure and build ViSP again to use this example"
<< std::endl;
#else
(void)argc;
(void)argv;
std::cout << "Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
#endif
return EXIT_SUCCESS;
}
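// Typical runs, assuming the binary name used by the ViSP tutorials:
//   ./tutorial-mb-generic-tracker-live --model model/teabox/teabox.cao --learn
//   ./tutorial-mb-generic-tracker-live --model model/teabox/teabox.cao --auto_init
// The first run learns keypoints on user demand (left click) and saves them in
// learning/data-learned.bin; the second reuses that file to initialize the
// tracker without manual clicks.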