#include <visp3/core/vpConfig.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/gui/vpPlot.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/io/vpVideoWriter.h>
#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_VIDEOIO) && defined(HAVE_OPENCV_HIGHGUI)
#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif
namespace
{
#if defined(VISP_HAVE_MINIZ) && defined(VISP_HAVE_WORKING_REGEX)
// Convert a pose into a 6-dim vector [tx ty tz tux tuy tuz] for npz storage
std::vector<double> poseToVec(const vpHomogeneousMatrix &cMo)
{
  vpTranslationVector t = cMo.getTranslationVector();
  vpThetaUVector tu = cMo.getThetaUVector();
  std::vector<double> vec { t[0], t[1], t[2], tu[0], tu[1], tu[2] };
  return vec;
}
#endif
}
#endif
int main(int argc, char **argv)
{
#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_VIDEOIO) && defined(HAVE_OPENCV_HIGHGUI)
std::string opt_videoname = "model/teabox/teabox.mp4";
std::string opt_modelname = "model/teabox/teabox.cao";
int opt_tracker = 0;
int opt_video_first_frame = -1;
int opt_downscale_img = 1;
bool opt_verbose = false;
bool opt_plot = true;
bool opt_display_scale_auto = false;
vpColVector opt_dof_to_estimate(6, 1.); // By default estimate all 6 dof [tx ty tz rx ry rz]
std::string opt_save;
#if defined(VISP_HAVE_MINIZ) && defined(VISP_HAVE_WORKING_REGEX)
std::string opt_save_results;
#endif
unsigned int thickness = 2;
std::shared_ptr<vpDisplay> display;
std::shared_ptr<vpPlot> plot;
std::shared_ptr<vpVideoWriter> writer;
try {
for (int i = 1; i < argc; i++) { // Start at 1 to skip the program name
if (std::string(argv[i]) == "--video") {
opt_videoname = std::string(argv[++i]);
}
else if (std::string(argv[i]) == "--video-first-frame") {
opt_video_first_frame = std::atoi(argv[++i]);
}
else if (std::string(argv[i]) == "--model") {
opt_modelname = std::string(argv[++i]);
}
else if (std::string(argv[i]) == "--tracker") {
opt_tracker = std::atoi(argv[++i]);
}
else if (std::string(argv[i]) == "--downscale-img") {
opt_downscale_img = std::atoi(argv[++i]);
}
else if (std::string(argv[i]) == "--save") {
opt_save = std::string(argv[++i]);
}
#if defined(VISP_HAVE_MINIZ) && defined(VISP_HAVE_WORKING_REGEX)
else if (std::string(argv[i]) == "--save-results") {
opt_save_results = std::string(argv[++i]);
}
#endif
else if (std::string(argv[i]) == "--plot") {
opt_plot = true;
}
else if (std::string(argv[i]) == "--dof") {
for (int j = 0; j < 6; j++) {
int val = std::atoi(argv[++i]);
if (val == 0 || val == 1) {
opt_dof_to_estimate[j] = val;
}
else {
std::cout << "Error: wrong value after --dof option. Authorized values are 0 or 1 for each 6 dof to estimate." << std::endl;
return EXIT_FAILURE;
}
}
}
else if (std::string(argv[i]) == "--display-scale-auto") {
opt_display_scale_auto = true;
}
else if (std::string(argv[i]) == "--verbose" || std::string(argv[i]) == "-v") {
opt_verbose = true;
}
else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
std::cout << "\nSYNOPSIS " << std::endl
<< argv[0]
<< " [--video <video name>]"
<< " [--video-first-frame <image index>"
<< " [--model <model name>"
<< " [--tracker <0=egde|1=keypoint|2=hybrid>]"
<< " [--downscale-img <scale factor>]"
<< " [--dof <0/1 0/1 0/1 0/1 0/1 0/1>]"
<< " [--save <e.g. results-%04d.png>]"
#if defined(VISP_HAVE_MINIZ) && defined(VISP_HAVE_WORKING_REGEX)
<< " [--save-results <e.g. tracking_poses.npz>]"
#endif
<< " [--display-scale-auto]"
<< " [--plot]"
<< " [--verbose,-v]"
<< " [--help,-h]"
<< std::endl;
std::cout << "\nOPTIONS " << std::endl
<< " --video <video name>" << std::endl
<< " Input video name." << std::endl
<< " Default: model/teabox/teabox.mp4" << std::endl
<< std::endl
<< " --video-first-frame <image index>" << std::endl
<< " Index of the first image to process." << std::endl
<< " Set to -1 to process the first image of the video." << std::endl
<< " Default: -1" << std::endl
<< std::endl
<< " --model <model name>" << std::endl
<< " CAD model filename. Supported formats are .cao and .wrl." << std::endl
<< " To use wrl format, ViSP need to be built with Coin3D third-party." << std::endl
<< " Default: model/teabox/teabox.cao" << std::endl
<< std::endl
<< " --tracker <0=egde|1=keypoint|2=hybrid>" << std::endl
<< " Tracker type:" << std::endl
<< " - when 0: use only moving-edges" << std::endl
<< " - when 1: use only KLT keypoints" << std::endl
<< " - when 2: use hybrid scheme, moving-edges and KLT keypoints." << std::endl
<< " Default: 0" << std::endl
<< std::endl
<< " --downscale-img <scale factor>" << std::endl
<< " Downscale input image width and height by this factor." << std::endl
<< " When set to 1, image not down scaled. When set to 2, image width" << std::endl
<< " and height is divided by 2." << std::endl
<< " Default: 1" << std::endl
<< std::endl
<< " --dof <0/1 0/1 0/1 0/1 0/1 0/1>" << std::endl
<< " 6-dim vector of 0 and 1 to indicate which dof [tx ty tz rx ry rz]" << std::endl
<< " has to be estimated." << std::endl
<< " When set to 1 the dof is estimated. When rather set to 0 the dof" << std::endl
<< " is not estimated. It's value is the one from the initialisation." << std::endl
<< " Default: 1 1 1 1 1 1 (to estimate all 6 dof)" << std::endl
<< std::endl
<< " --save <e.g. results-%04d.png>" << std::endl
<< " Name of the saved image sequence that contains tracking results in overlay." << std::endl
<< " When the name contains a folder like in the next example, the folder" << std::endl
is created if it doesn't exist." << std::endl
<< " Example: \"result/image-%04d.png\"." << std::endl
<< std::endl
#if defined(VISP_HAVE_MINIZ) && defined(VISP_HAVE_WORKING_REGEX)
<< " --save-results <e.g. tracking_results.npz>" << std::endl
<< " Name of the npz file containing cMo data estimated from MBT." << std::endl
<< " When the name contains a folder like in the next example, the folder" << std::endl
is created if it doesn't exist." << std::endl
<< " Example: \"result/tracking_results.npz\"." << std::endl
<< std::endl
#endif
<< " --display-scale-auto" << std::endl
<< " Enable display window auto scaling to ensure that the image is fully" << std::endl
<< " visible on the screen. Useful for large images." << std::endl
<< " Note that this option doesn't affect the size of the processed images." << std::endl
<< std::endl
<< " --plot" << std::endl
<< " Open a window that plots the estimated pose evolution." << std::endl
<< std::endl
<< " --verbose, -v" << std::endl
<< " Enable verbose mode." << std::endl
<< std::endl
<< " --help, -h" << std::endl
<< " Display this helper message." << std::endl
<< std::endl;
return EXIT_SUCCESS;
}
}
std::string parentname = vpIoTools::getParent(opt_modelname);
std::string objectname = vpIoTools::getNameWE(opt_modelname);
if (!parentname.empty())
  objectname = parentname + "/" + objectname;
std::cout << " *********** Tracker config ************ " << std::endl;
std::cout << "Video name : " << opt_videoname << std::endl;
std::cout << "Tracker cad model file : " << objectname << ".[cao or wrl]" << std::endl;
std::cout << "Tracker init file : " << objectname << ".init" << std::endl;
std::cout << "Tracker optional init image: " << objectname << ".[png,ppm,jpg]" << std::endl;
if (opt_downscale_img > 1) {
std::cout << "Downscale image factor : " << opt_downscale_img << std::endl;
}
std::cout << "Dof to estimate : " << opt_dof_to_estimate.t() << std::endl;
if (!opt_save.empty()) {
  std::string parent = vpIoTools::getParent(opt_save);
  if (!parent.empty()) {
    std::cout << "Create output directory: " << parent << std::endl;
    vpIoTools::makeDirectory(parent);
  }
}
#if defined(VISP_HAVE_MINIZ) && defined(VISP_HAVE_WORKING_REGEX)
if (!opt_save_results.empty()) {
  std::string parent = vpIoTools::getParent(opt_save_results);
  if (!parent.empty()) {
    std::cout << "Create output directory for the npz file: " << parent << std::endl;
    vpIoTools::makeDirectory(parent);
  }
}
#endif
// Video reader setup. Grayscale images are assumed: Ivideo receives the full-size frame,
// I the image that is actually processed (possibly downscaled).
vpImage<unsigned char> I, Ivideo;
vpVideoReader g;
g.setFileName(opt_videoname);
if (opt_video_first_frame > 0) {
  g.setFirstFrameIndex(static_cast<long>(opt_video_first_frame));
}
if (opt_downscale_img > 1) {
  g.open(Ivideo);
  Ivideo.subsample(opt_downscale_img, opt_downscale_img, I);
}
else {
  g.open(I);
}
vpImage<vpRGBa> O; // Color image used to save the display overlay when --save is given
if (!opt_save.empty()) {
  writer = std::make_shared<vpVideoWriter>();
  writer->setFileName(opt_save);
  O.resize(I.getHeight(), I.getWidth()); // Assumption: size the output frame before opening the writer
  writer->open(O);
}
#if defined(VISP_HAVE_X11)
display = std::make_shared<vpDisplayX>();
#elif defined(VISP_HAVE_GDI)
display = std::make_shared<vpDisplayGDI>();
#elif defined(HAVE_OPENCV_HIGHGUI)
display = std::make_shared<vpDisplayOpenCV>();
#endif
if (opt_display_scale_auto) {
  display->setDownScalingFactor(vpDisplay::SCALE_AUTO);
}
display->init(I, 100, 100, "Model-based tracker");
if (opt_plot) {
plot = std::make_shared<vpPlot>(2, 700, 700,
                                display->getWindowXPosition() + I.getWidth() / display->getDownScalingFactor() + 30,
                                display->getWindowYPosition(), "Estimated pose");
plot->initGraph(0, 3);
plot->setTitle(0, "Translation [m]");
plot->initGraph(1, 3);
plot->setTitle(1, "Attitude thetaU [deg]");
}
vpMbGenericTracker tracker;
if (opt_tracker == 0)
  tracker.setTrackerType(vpMbGenericTracker::EDGE_TRACKER);
#if defined(VISP_HAVE_MODULE_KLT) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
else if (opt_tracker == 1)
  tracker.setTrackerType(vpMbGenericTracker::KLT_TRACKER);
else
  tracker.setTrackerType(vpMbGenericTracker::EDGE_TRACKER | vpMbGenericTracker::KLT_TRACKER);
#else
else {
std::cout << "klt and hybrid model-based tracker are not available since visp_klt module is not available. "
"In CMakeGUI turn visp_klt module ON, configure and build ViSP again."
<< std::endl;
return EXIT_FAILURE;
}
#endif
bool usexml = false;
#if defined(VISP_HAVE_PUGIXML)
std::cout << "Tracker config file : " << objectname + ".xml" << std::endl;
usexml = true;
}
#endif
if (!usexml) {
if (opt_tracker == 0 || opt_tracker == 2) {
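// Moving-edges settings used when no XML configuration file is found.
// The values below are a sketch with typical ViSP tutorial settings; the original tuning is not in this listing.
vpMe me;
me.setMaskSize(5);
me.setMaskNumber(180);
me.setRange(8);
me.setLikelihoodThresholdType(vpMe::NORMALIZED_THRESHOLD_TYPE);
me.setThreshold(20);
me.setMu1(0.5);
me.setMu2(0.5);
me.setSampleStep(4);
tracker.setMovingEdge(me);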
}
#if defined(VISP_HAVE_MODULE_KLT) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
if (opt_tracker == 1 || opt_tracker == 2) {
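// KLT keypoint settings used when no XML configuration file is found (typical tutorial values, assumed here).
vpKltOpencv klt_settings;
klt_settings.setMaxFeatures(300);
klt_settings.setWindowSize(5);
klt_settings.setQuality(0.015);
klt_settings.setMinDistance(8);
klt_settings.setHarrisFreeParameter(0.01);
klt_settings.setBlockSize(3);
klt_settings.setPyramidLevels(3);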
tracker.setKltOpencv(klt_settings);
tracker.setKltMaskBorder(5);
}
#endif
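// Fallback intrinsics when no XML configuration file provides them.
// The values below correspond to the teabox tutorial camera and are an assumption of this sketch.
vpCameraParameters cam_fallback;
cam_fallback.initPersProjWithoutDistortion(839, 839, 325, 236);
tracker.setCameraParameters(cam_fallback);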
}
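// Load the CAD model (cao preferred, wrl as a fallback) and display the visible features.
// The <objectname>.cao / <objectname>.wrl naming convention is assumed from the messages above.
if (vpIoTools::checkFilename(objectname + ".cao"))
  tracker.loadModel(objectname + ".cao");
else if (vpIoTools::checkFilename(objectname + ".wrl"))
  tracker.loadModel(objectname + ".wrl");
tracker.setDisplayFeatures(true);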
if (opt_dof_to_estimate != 1.) {
  tracker.setEstimatedDoF(opt_dof_to_estimate);
}
std::cout << "Camera parameters: \n" << cam << std::endl;
std::cout << "Initialize tracker on image size: " << I.getWidth() << " x " << I.getHeight() << std::endl;
std::vector<double> vec_poses;
#if defined(VISP_HAVE_MINIZ) && defined(VISP_HAVE_WORKING_REGEX)
if (!opt_save_results.empty()) {
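// Assumed bookkeeping sketch: create the npz archive in "w" mode with the image size and camera
// intrinsics, so that the poses saved after the loop can be appended to the same file in "a" mode.
const unsigned int height = I.getHeight(), width = I.getWidth();
visp::cnpy::npz_save(opt_save_results, "height", &height, { 1 }, "w");
visp::cnpy::npz_save(opt_save_results, "width", &width, { 1 }, "a");
const double cam_px = cam.get_px(), cam_py = cam.get_py(), cam_u0 = cam.get_u0(), cam_v0 = cam.get_v0();
visp::cnpy::npz_save(opt_save_results, "cam_px", &cam_px, { 1 }, "a");
visp::cnpy::npz_save(opt_save_results, "cam_py", &cam_py, { 1 }, "a");
visp::cnpy::npz_save(opt_save_results, "cam_u0", &cam_u0, { 1 }, "a");
visp::cnpy::npz_save(opt_save_results, "cam_v0", &cam_v0, { 1 }, "a");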
}
#endif
tracker.initClick(I, objectname + ".init", true);
vpHomogeneousMatrix cMo; // Pose of the object frame in the camera frame
while (!g.end()) {
  if (opt_downscale_img > 1) {
    g.acquire(Ivideo);
    Ivideo.subsample(opt_downscale_img, opt_downscale_img, I);
  }
  else {
    g.acquire(I);
  }
  std::stringstream ss;
  ss << "Process image " << g.getFrameIndex();
  if (opt_verbose) {
    std::cout << "-- " << ss.str() << std::endl;
  }
  vpDisplay::display(I);
  tracker.track(I);
  tracker.getPose(cMo);
{
  std::stringstream ss;
  ss << "Features";
  if (tracker.getTrackerType() & vpMbGenericTracker::EDGE_TRACKER) {
    ss << " edge: " << tracker.getNbFeaturesEdge();
  }
#if defined(VISP_HAVE_MODULE_KLT) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
  if (tracker.getTrackerType() & vpMbGenericTracker::KLT_TRACKER) {
    ss << " klt: " << tracker.getNbFeaturesKlt();
  }
#endif
  if (opt_verbose) {
    std::cout << ss.str() << std::endl;
    std::cout << "cMo:\n" << cMo << std::endl;
  }
}
{
std::stringstream ss;
double proj_error = tracker.computeCurrentProjectionError(I, cMo, cam);
ss << "Projection error: " << std::setprecision(2) << proj_error << " deg";
if (opt_verbose) {
std::cout << ss.str() << std::endl;
}
}
if (opt_plot) {
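// Update the pose plot: translation [m] on graph 0, thetaU converted to degrees on graph 1.
// Using the video frame index as abscissa is an assumption of this sketch.
vpTranslationVector c_t_o = cMo.getTranslationVector();
vpThetaUVector c_tu_o = cMo.getThetaUVector();
for (unsigned int j = 0; j < 3; j++) {
  plot->plot(0, j, static_cast<double>(g.getFrameIndex()), c_t_o[j]);
  plot->plot(1, j, static_cast<double>(g.getFrameIndex()), vpMath::deg(c_tu_o[j]));
}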
}
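// Overlay the tracked model and the object frame on the image, then flush the display.
// Color, frame size (0.025 m) and help text are assumptions of this sketch.
tracker.display(I, cMo, cam, vpColor::red, thickness);
vpDisplay::displayFrame(I, cMo, cam, 0.025, vpColor::none, thickness);
vpDisplay::displayText(I, 20, 10, "A click to quit...", vpColor::red);
vpDisplay::flush(I);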
if (!opt_save.empty()) {
vpDisplay::getImage(I, O);
writer->saveFrame(O);
}
#if defined(VISP_HAVE_MINIZ) && defined(VISP_HAVE_WORKING_REGEX)
if (!opt_save_results.empty()) {
std::vector<double> vec_pose = poseToVec(cMo);
vec_poses.insert(vec_poses.end(), vec_pose.begin(), vec_pose.end());
}
#endif
if (vpDisplay::getClick(I, false)) { // Non-blocking check: a user click stops the tracking loop
  break;
}
}
#if defined(VISP_HAVE_MINIZ) && defined(VISP_HAVE_WORKING_REGEX)
if (!opt_save_results.empty()) {
visp::cnpy::npz_save(opt_save_results,
"vec_poses", vec_poses.data(), { static_cast<size_t>(vec_poses.size()/6), 6 },
"a");
}
#endif
}
std::cout << "Catch a ViSP exception: " << e << std::endl;
}
#ifdef VISP_HAVE_OGRE
catch (Ogre::Exception &e) {
std::cout << "Catch an Ogre exception: " << e.getDescription() << std::endl;
}
#endif
#else
(void)argc;
(void)argv;
std::cout << "Install OpenCV and rebuild ViSP to use this example." << std::endl;
#endif
return EXIT_SUCCESS;
}