42 #include <visp3/core/vpConfig.h>
44 #if defined(VISP_HAVE_MODULE_MBT) && \
45 (defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV))
47 #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
48 #include <type_traits>
51 #include <visp3/core/vpFont.h>
52 #include <visp3/core/vpImageDraw.h>
53 #include <visp3/core/vpIoTools.h>
54 #include <visp3/gui/vpDisplayD3D.h>
55 #include <visp3/gui/vpDisplayGDI.h>
56 #include <visp3/gui/vpDisplayGTK.h>
57 #include <visp3/gui/vpDisplayOpenCV.h>
58 #include <visp3/gui/vpDisplayX.h>
59 #include <visp3/io/vpImageIo.h>
60 #include <visp3/io/vpParseArgv.h>
61 #include <visp3/mbt/vpMbGenericTracker.h>
63 #define GETOPTARGS "i:dsclt:e:DmCh"
65 #ifdef ENABLE_VISP_NAMESPACE
// usage(): print the command-line help for this vpMbGenericTracker
// regression test, and report the offending option when badparam is set.
// NOTE(review): this listing is a fragmentary extraction — the embedded
// numbers are the original source line numbers and several lines (the
// fprintf opening, option descriptions for -d/-s/-D, closing braces) are
// missing from this view, so only the visible text is documented.
71 void usage(
const char *name,
const char *badparam)
74 Regression test for vpGenericTracker.\n\
77 %s [-i <test image path>] [-c] [-d] [-s] [-h] [-l] \n\
78 [-t <tracker type>] [-e <last frame index>] [-D] [-m] [-C]\n",
83 -i <input image path> \n\
84 Set image input path.\n\
85 These images come from ViSP-images-x.y.z.tar.gz available \n\
86 on the ViSP website.\n\
87 Setting the VISP_INPUT_IMAGE_PATH environment\n\
88 variable produces the same behavior than using\n\
92 Turn off the display.\n\
95 If display is turn off, tracking results are saved in a video folder.\n\
98 Disable the mouse click. Useful to automate the \n\
99 execution of this program without human intervention.\n\
102 Set tracker type (<1 (Edge)>, <2 (KLT)>, <3 (both)>) for color sensor.\n\
105 Use the scanline for visibility tests.\n\
107 -e <last frame index>\n\
108 Specify the index of the last frame. Once reached, the tracking is stopped.\n\
114 Set a tracking mask.\n\
120 Print the help.\n\n");
// Report the bad parameter on stdout — presumably guarded by
// `if (badparam)` on an elided line; confirm against the full source.
123 fprintf(stdout,
"\nERROR: Bad parameter [%s]\n", badparam);
// getOptions(): parse the command line (option string GETOPTARGS =
// "i:dsclt:e:DmCh") into the output reference parameters. Returns false
// on a parsing error so main() can abort — the getopt loop, most case
// labels and the return statements are elided in this extraction.
126 bool getOptions(
int argc,
const char **argv, std::string &ipath,
bool &click_allowed,
bool &display,
bool &save,
127 bool &useScanline,
int &trackerType,
int &lastFrame,
bool &use_depth,
bool &use_mask,
128 bool &use_color_image)
// '-c': disable mouse clicks (automated runs).
139 click_allowed =
false;
// '-t': tracker type for the color sensor (1 edge, 2 KLT, 3 both).
151 trackerType = atoi(optarg_);
// '-e': index of the last frame to process.
154 lastFrame = atoi(optarg_);
// '-C': process color (vpRGBa) images instead of grayscale.
163 use_color_image =
true;
// '-h': print help and (presumably) return false — tail elided.
166 usage(argv[0],
nullptr);
// default case: unknown option, show help with the bad argument.
171 usage(argv[0], optarg_);
// vpParseArgv convention: c == 1 / -1 signals a standalone or trailing
// argument error; report it and (presumably) fail.
177 if ((c == 1) || (c == -1)) {
179 usage(argv[0],
nullptr);
180 std::cerr <<
"ERROR: " << std::endl;
181 std::cerr <<
" Bad argument " << optarg_ << std::endl << std::endl;
// read_data<Type>() (signature line elided in this extraction — the name
// is taken from the call sites in run()): load frame `cpt` of the dataset:
// the color/gray image, the raw binary depth map converted to a point
// cloud, and the ground-truth camera pose. Restricted to unsigned char /
// vpRGBa pixel types via static_assert.
188 template <
typename Type>
192 #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
193 static_assert(std::is_same<Type, unsigned char>::value || std::is_same<Type, vpRGBa>::value,
194 "Template function supports only unsigned char and vpRGBa images!");
// Dataset >= 3.6.0 ships PNG images, older datasets PGM.
196 #if VISP_HAVE_DATASET_VERSION >= 0x030600
197 std::string ext(
"png");
199 std::string ext(
"pgm");
// Build the per-frame file names from printf-style patterns.
201 char buffer[FILENAME_MAX];
202 snprintf(buffer, FILENAME_MAX, std::string(input_directory +
"/Images/Image_%04d." + ext).c_str(), cpt);
203 std::string image_filename = buffer;
205 snprintf(buffer, FILENAME_MAX, std::string(input_directory +
"/Depth/Depth_%04d.bin").c_str(), cpt);
206 std::string depth_filename = buffer;
208 snprintf(buffer, FILENAME_MAX, std::string(input_directory +
"/CameraPose/Camera_%03d.txt").c_str(), cpt);
209 std::string pose_filename = buffer;
// Read the binary depth file; width/height are presumably read from its
// header on elided lines before the resize below — confirm in full source.
217 unsigned int depth_width = 0, depth_height = 0;
218 std::ifstream file_depth(depth_filename.c_str(), std::ios::in | std::ios::binary);
219 if (!file_depth.is_open())
224 I_depth.
resize(depth_height, depth_width);
225 pointcloud.resize(depth_height * depth_width);
// Scale factor converting raw 16-bit depth values to meters
// (0.000030518 ~= 1/32768) — TODO confirm against the dataset format.
227 const float depth_scale = 0.000030518f;
228 for (
unsigned int i = 0; i < I_depth.
getHeight(); i++) {
229 for (
unsigned int j = 0; j < I_depth.
getWidth(); j++) {
// Back-project pixel (i,j) at depth Z into a 3D point (elided lines
// presumably fill x,y via vpPixelMeterConversion and build pt3d).
231 double x = 0.0, y = 0.0, Z = I_depth[i][j] * depth_scale;
237 pointcloud[i * I_depth.
getWidth() + j] = pt3d;
// Read the ground-truth 4x4 homogeneous pose matrix, row by row.
241 std::ifstream file_pose(pose_filename.c_str());
242 if (!file_pose.is_open()) {
246 for (
unsigned int i = 0; i < 4; i++) {
247 for (
unsigned int j = 0; j < 4; j++) {
248 file_pose >> cMo[i][j];
// run<Type>(): the actual regression test. Configures a two-camera
// vpMbGenericTracker (color image + depth point cloud), tracks the
// "chateau" model over the dataset sequence, compares each estimated pose
// against the ground truth with per-configuration thresholds, and returns
// EXIT_SUCCESS/EXIT_FAILURE. Many interior lines are elided in this
// extraction; comments below only describe what is visible.
259 template <
typename Type>
260 bool run(
const std::string &input_directory,
bool opt_click_allowed,
bool opt_display,
bool useScanline,
261 int trackerType_image,
int opt_lastFrame,
bool use_depth,
bool use_mask,
bool save)
263 #if (VISP_CXX_STANDARD >= VISP_CXX_STANDARD_11)
264 static_assert(std::is_same<Type, unsigned char>::value || std::is_same<Type, vpRGBa>::value,
265 "Template function supports only unsigned char and vpRGBa images!");
// Pick a display backend for the two windows (color + depth); the
// declarations for the non-X11 branches are on elided lines.
268 #if defined(VISP_HAVE_X11)
269 vpDisplayX display1, display2;
270 #elif defined(VISP_HAVE_GDI)
272 #elif defined(HAVE_OPENCV_HIGHGUI)
274 #elif defined(VISP_HAVE_D3D9)
276 #elif defined(VISP_HAVE_GTK)
// Two sub-trackers: [0] = image tracker (edge/KLT per trackerType_image),
// [1] presumably the depth tracker (set on an elided line).
282 std::vector<int> tracker_type(2);
283 tracker_type[0] = trackerType_image;
// Load per-camera settings from XML when pugixml is available; the
// manual-configuration fallback branch is elided.
287 #if defined(VISP_HAVE_PUGIXML)
288 std::string configFileCam1 = input_directory + std::string(
"/Config/chateau.xml");
289 std::string configFileCam2 = input_directory + std::string(
"/Config/chateau_depth.xml");
290 std::cout <<
"Load config file for camera 1: " << configFileCam1 << std::endl;
291 std::cout <<
"Load config file for camera 2: " << configFileCam2 << std::endl;
292 tracker.loadConfigFile(configFileCam1, configFileCam2);
299 tracker.setCameraParameters(cam_color, cam_depth);
// Moving-edge settings (the `me` setup lines are elided).
312 tracker.setMovingEdge(me);
// KLT settings, only when the KLT module and OpenCV imgproc/video exist.
315 #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
317 tracker.setKltMaskBorder(5);
326 tracker.setKltOpencv(klt);
// Depth-normal and depth-dense feature sampling configuration.
331 tracker.setDepthNormalPclPlaneEstimationMethod(2);
332 tracker.setDepthNormalPclPlaneEstimationRansacMaxIter(200);
333 tracker.setDepthNormalPclPlaneEstimationRansacThreshold(0.001);
334 tracker.setDepthNormalSamplingStep(2, 2);
336 tracker.setDepthDenseSamplingStep(4, 4);
340 tracker.setNearClippingDistance(0.01);
341 tracker.setFarClippingDistance(2.0);
// Load the CAD model: WRL for camera 1 when Coin3D is available,
// otherwise CAO for both cameras.
345 #ifdef VISP_HAVE_COIN3D
346 tracker.loadModel(input_directory +
"/Models/chateau.wrl", input_directory +
"/Models/chateau.cao");
348 tracker.loadModel(input_directory +
"/Models/chateau.cao", input_directory +
"/Models/chateau.cao");
// Additional cube model loaded with a transformation T (T's setup is
// elided); `false` presumably means "do not reset the tracker".
359 tracker.loadModel(input_directory +
"/Models/cube.cao",
false, T);
361 tracker.getCameraParameters(cam_color, cam_depth);
362 tracker.setDisplayFeatures(
true);
363 tracker.setScanLineVisibilityTest(useScanline);
// Accuracy thresholds (translation [m], rotation [deg?]) keyed by tracker
// type, hand-tuned per Coin3D/scanline/KLT configuration — the map keys
// for several entries are on elided lines.
365 std::map<int, std::pair<double, double> > map_thresh;
367 #ifdef VISP_HAVE_COIN3D
369 useScanline ? std::pair<double, double>(0.007, 6.) : std::pair<double, double>(0.008, 3.9);
370 #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
371 map_thresh[vpMbGenericTracker::KLT_TRACKER] =
372 useScanline ? std::pair<double, double>(0.007, 1.9) : std::pair<double, double>(0.007, 1.8);
374 useScanline ? std::pair<double, double>(0.005, 3.7) : std::pair<double, double>(0.007, 3.4);
377 useScanline ? std::pair<double, double>(0.003, 1.7) : std::pair<double, double>(0.002, 0.8);
378 #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
380 std::pair<double, double>(0.002, 0.3);
383 useScanline ? std::pair<double, double>(0.002, 1.8) : std::pair<double, double>(0.002, 0.7);
387 useScanline ? std::pair<double, double>(0.015, 3.0) : std::pair<double, double>(0.009, 4.0);
388 #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
389 map_thresh[vpMbGenericTracker::KLT_TRACKER] =
390 useScanline ? std::pair<double, double>(0.006, 1.7) : std::pair<double, double>(0.005, 1.4);
392 useScanline ? std::pair<double, double>(0.004, 1.2) : std::pair<double, double>(0.004, 1.2);
395 useScanline ? std::pair<double, double>(0.002, 0.7) : std::pair<double, double>(0.001, 0.4);
396 #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
398 std::pair<double, double>(0.002, 0.3);
401 useScanline ? std::pair<double, double>(0.001, 0.5) : std::pair<double, double>(0.001, 0.4);
// Load the first frame; abort if the dataset cannot be read.
408 std::vector<vpColVector> pointcloud;
410 if (!read_data(input_directory, cpt_frame, cam_depth, I, I_depth_raw, pointcloud, cMo_truth)) {
411 std::cerr <<
"Cannot read first frame!" << std::endl;
// Optional tracking mask: a centered rectangular ROI covering rows/cols
// in [size/7, 6*size/7) of the image (mask fill lines are elided).
416 const double roi_step = 7.0;
417 const double roi_step2 = 6.0;
420 for (
unsigned int i = (
unsigned int)(I.
getRows() / roi_step);
421 i < (
unsigned int)(I.
getRows() * roi_step2 / roi_step); i++) {
422 for (
unsigned int j = (
unsigned int)(I.
getCols() / roi_step);
423 j < (
unsigned int)(I.
getCols() * roi_step2 / roi_step); j++) {
427 tracker.setMask(mask);
// Open the two display windows side by side (depth window to the right).
439 #ifdef VISP_HAVE_DISPLAY
440 display1.init(I, 0, 0,
"Image");
441 display2.init(I_depth, (
int)I.
getWidth(), 0,
"Depth");
// Fixed color->depth extrinsics: 5 cm translation along x only.
446 depth_M_color[0][3] = -0.05;
447 tracker.setCameraTransformationMatrix(
"Camera2", depth_M_color);
448 tracker.initFromPose(I, cMo_truth);
// Main tracking loop: run until the sequence ends, the user quits, or
// opt_lastFrame is reached (when it is positive).
451 bool click =
false, quit =
false, correct_accuracy =
true;
452 std::vector<double> vec_err_t, vec_err_tu;
453 std::vector<double> time_vec;
454 while (read_data(input_directory, cpt_frame, cam_depth, I, I_depth_raw, pointcloud, cMo_truth) && !quit &&
455 (opt_lastFrame > 0 ? (
int)cpt_frame <= opt_lastFrame :
true)) {
463 convert(I, resultsColor);
464 convert(I_depth, resultsDepth);
// Feed the tracker: image goes to "Camera1", point cloud to "Camera2";
// widths/heights are zeroed when depth is disabled so the depth features
// are effectively skipped.
468 std::map<std::string, const vpImage<Type> *> mapOfImages;
469 mapOfImages[
"Camera1"] = &I;
470 std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
471 mapOfPointclouds[
"Camera2"] = &pointcloud;
472 std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
474 mapOfWidths[
"Camera2"] = 0;
475 mapOfHeights[
"Camera2"] = 0;
478 mapOfWidths[
"Camera2"] = I_depth.
getWidth();
479 mapOfHeights[
"Camera2"] = I_depth.
getHeight();
// Track and record the per-frame processing time.
482 tracker.track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
485 time_vec.push_back(t);
// On-screen feedback: model overlay plus frame index / feature count.
488 tracker.display(I, I_depth, cMo, depth_M_color * cMo, cam_color, cam_depth,
vpColor::red, 3);
492 std::stringstream ss;
493 ss <<
"Frame: " << cpt_frame;
496 ss <<
"Nb features: " << tracker.getError().getRows();
// Headless path: fetch the model segments for both cameras and draw them
// manually into the result images (drawing lines are elided). A leading
// value of 0 in each row appears to tag a line primitive.
501 std::map<std::string, std::vector<std::vector<double> > > mapOfModels;
502 std::map<std::string, unsigned int> mapOfW;
504 mapOfW[
"Camera2"] = I_depth.
getWidth();
505 std::map<std::string, unsigned int> mapOfH;
508 std::map<std::string, vpHomogeneousMatrix> mapOfcMos;
509 mapOfcMos[
"Camera1"] = cMo;
510 mapOfcMos[
"Camera2"] = depth_M_color * cMo;
511 std::map<std::string, vpCameraParameters> mapOfCams;
512 mapOfCams[
"Camera1"] = cam_color;
513 mapOfCams[
"Camera2"] = cam_depth;
514 tracker.getModelForDisplay(mapOfModels, mapOfW, mapOfH, mapOfcMos, mapOfCams);
515 for (std::map<std::string, std::vector<std::vector<double> > >::const_iterator it = mapOfModels.begin();
516 it != mapOfModels.end(); ++it) {
517 for (
size_t i = 0; i < it->second.size(); i++) {
519 if (std::fabs(it->second[i][0]) <= std::numeric_limits<double>::epsilon()) {
// Draw the tracked features; field [0] selects the feature kind and
// field [3] a sub-state mapped to a color (the color assignments are on
// elided lines), fields [1]/[2] are the image coordinates.
529 std::map<std::string, std::vector<std::vector<double> > > mapOfFeatures;
530 tracker.getFeaturesForDisplay(mapOfFeatures);
531 for (std::map<std::string, std::vector<std::vector<double> > >::const_iterator it = mapOfFeatures.begin();
532 it != mapOfFeatures.end(); ++it) {
533 for (
size_t i = 0; i < it->second.size(); i++) {
534 if (std::fabs(it->second[i][0]) <=
535 std::numeric_limits<double>::epsilon()) {
537 if (std::fabs(it->second[i][3]) <= std::numeric_limits<double>::epsilon()) {
540 else if (std::fabs(it->second[i][3] - 1) <=
541 std::numeric_limits<double>::epsilon()) {
544 else if (std::fabs(it->second[i][3] - 2) <=
545 std::numeric_limits<double>::epsilon()) {
548 else if (std::fabs(it->second[i][3] - 3) <=
549 std::numeric_limits<double>::epsilon()) {
552 else if (std::fabs(it->second[i][3] - 4) <=
553 std::numeric_limits<double>::epsilon()) {
557 vpImagePoint(it->second[i][1], it->second[i][2]), 3, color, 1);
559 else if (std::fabs(it->second[i][0] - 1) <=
560 std::numeric_limits<double>::epsilon()) {
569 std::ostringstream oss;
570 oss <<
"Tracking time: " << t <<
" ms";
// Accuracy check: split estimated/truth pose vectors into translation
// and theta-u parts, then compare the error norms to the thresholds.
578 for (
unsigned int i = 0; i < 3; i++) {
579 t_est[i] = pose_est[i];
580 t_truth[i] = pose_truth[i];
581 tu_est[i] = pose_est[i + 3];
582 tu_truth[i] = pose_truth[i + 3];
585 vpColVector t_err = t_truth - t_est, tu_err = tu_truth - tu_est;
586 const double t_thresh =
588 const double tu_thresh =
591 vec_err_t.push_back(t_err2);
592 vec_err_tu.push_back(tu_err2);
// The threshold check is skipped when a mask is used (masked runs are
// expected to be less accurate).
593 if (!use_mask && (t_err2 > t_thresh || tu_err2 > tu_thresh)) {
594 std::cerr <<
"Pose estimated exceeds the threshold (t_thresh = " << t_thresh <<
" ; tu_thresh = " << tu_thresh
595 <<
")!" << std::endl;
596 std::cout <<
"t_err: " << t_err2 <<
" ; tu_err: " << tu_err2 << std::endl;
597 correct_accuracy =
false;
// Save mode: write the color+depth montage as results/image_%04d.png.
613 char buffer[FILENAME_MAX];
614 std::ostringstream oss;
615 oss <<
"results/image_%04d.png";
616 snprintf(buffer, FILENAME_MAX, oss.str().c_str(), cpt_frame);
619 results.insert(resultsDepth,
vpImagePoint(0, resultsColor.getWidth()));
625 if (opt_display && opt_click_allowed) {
// Post-run statistics and verdict.
646 if (!time_vec.empty())
651 if (!vec_err_t.empty())
652 std::cout <<
"Max translation error: " << *std::max_element(vec_err_t.begin(), vec_err_t.end()) << std::endl;
654 if (!vec_err_tu.empty())
655 std::cout <<
"Max thetau error: " << *std::max_element(vec_err_tu.begin(), vec_err_tu.end()) << std::endl;
657 std::cout <<
"Test result: " << (correct_accuracy ?
"success" :
"failure") << std::endl;
658 return correct_accuracy ? EXIT_SUCCESS : EXIT_FAILURE;
// main(): parse options, validate the tracker/KLT configuration, locate
// the dataset, then dispatch to run<vpRGBa>() or run<unsigned char>().
// The trailing #elif branches are the fallback mains printed when the
// required third-party libraries or the MBT module are missing. Several
// interior lines (env_ipath resolution, return statements, catch header)
// are elided in this extraction.
662 int main(
int argc,
const char *argv[])
665 std::string env_ipath;
666 std::string opt_ipath =
"";
667 bool opt_click_allowed =
true;
668 bool opt_display =
true;
669 bool opt_save =
false;
670 bool useScanline =
false;
// Shorter sequence on MIPS (presumably slow CI targets): 5 frames only;
// otherwise -1 means "whole sequence".
672 #if defined(__mips__) || defined(__mips) || defined(mips) || defined(__MIPS__)
674 int opt_lastFrame = 5;
676 int opt_lastFrame = -1;
678 bool use_depth =
false;
679 bool use_mask =
false;
680 bool use_color_image =
false;
687 if (!getOptions(argc, argv, opt_ipath, opt_click_allowed, opt_display, opt_save, useScanline, trackerType_image,
688 opt_lastFrame, use_depth, use_mask, use_color_image)) {
// Reject KLT-based tracker types (2 or 3) when OpenCV imgproc/video are
// not available.
692 #if ! (defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO))
693 if (trackerType_image == 2 || trackerType_image == 3) {
694 std::cout <<
"Using klt tracker is not possible without OpenCV imgproc and video modules." << std::endl;
695 std::cout <<
"Use rather command line option -t 1 to use edges." << std::endl;
// Echo the effective configuration.
699 std::cout <<
"trackerType_image: " << trackerType_image << std::endl;
700 std::cout <<
"useScanline: " << useScanline << std::endl;
701 std::cout <<
"use_depth: " << use_depth << std::endl;
702 std::cout <<
"use_mask: " << use_mask << std::endl;
703 std::cout <<
"use_color_image: " << use_color_image << std::endl;
704 #ifdef VISP_HAVE_COIN3D
705 std::cout <<
"COIN3D available." << std::endl;
// Second KLT guard using a bitmask test (trackerType_image & 2) — note
// it overlaps the check above; presumably kept for older OpenCV versions.
708 #if !defined(VISP_HAVE_MODULE_KLT) || (!defined(VISP_HAVE_OPENCV) || (VISP_HAVE_OPENCV_VERSION < 0x020100))
709 if (trackerType_image & 2) {
710 std::cout <<
"KLT features cannot be used: ViSP is not built with "
711 "KLT module or OpenCV is not available.\nTest is not run."
// Neither -i nor VISP_INPUT_IMAGE_PATH gave a dataset location: abort
// with an explanatory message.
718 if (opt_ipath.empty() && env_ipath.empty()) {
719 usage(argv[0],
nullptr);
720 std::cerr << std::endl <<
"ERROR:" << std::endl;
721 std::cerr <<
" Use -i <visp image path> option or set VISP_INPUT_IMAGE_PATH " << std::endl
722 <<
" environment variable to specify the location of the " << std::endl
723 <<
" image path where test images are located." << std::endl
729 std::string input_directory =
732 std::cerr <<
"ViSP-images does not contain the folder: " << input_directory <<
"!" << std::endl;
// Dispatch on pixel type; run() returns EXIT_SUCCESS/EXIT_FAILURE.
736 if (use_color_image) {
737 return run<vpRGBa>(input_directory, opt_click_allowed, opt_display, useScanline, trackerType_image, opt_lastFrame,
738 use_depth, use_mask, opt_save);
741 return run<unsigned char>(input_directory, opt_click_allowed, opt_display, useScanline, trackerType_image,
742 opt_lastFrame, use_depth, use_mask, opt_save);
745 std::cout <<
"Test succeed" << std::endl;
// Catch handler (header elided) reporting any vpException.
749 std::cout <<
"Catch an exception: " << e << std::endl;
// Fallback builds: missing linear-algebra backend or MBT module.
753 #elif !(defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV))
756 std::cout <<
"Cannot run this example: install Lapack, Eigen3 or OpenCV" << std::endl;
762 std::cout <<
"Enable MBT module (VISP_HAVE_MODULE_MBT) to launch this test." << std::endl;
Generic class defining intrinsic camera parameters.
void initPersProjWithoutDistortion(double px, double py, double u0, double v0)
Implementation of column vector and the associated operations.
Class to define RGB colors available for display functionalities.
static const vpColor cyan
static const vpColor none
static const vpColor blue
static const vpColor purple
static const vpColor yellow
static const vpColor green
Display for windows using Direct3D 3rd party. Thus to enable this class Direct3D should be installed....
Display for windows using GDI (available on any windows 32 platform).
The vpDisplayGTK allows to display image using the GTK 3rd party library. Thus to enable this class G...
The vpDisplayOpenCV allows to display image using the OpenCV library. Thus to enable this class OpenC...
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
static void displayRectangle(const vpImage< unsigned char > &I, const vpImagePoint &topLeft, unsigned int width, unsigned int height, const vpColor &color, bool fill=false, unsigned int thickness=1)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
Font drawing functions for image.
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void createDepthHistogram(const vpImage< uint16_t > &src_depth, vpImage< vpRGBa > &dest_rgba)
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static void drawLine(vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, unsigned char color, unsigned int thickness=1)
static void drawCross(vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, unsigned char color, unsigned int thickness=1)
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
static void write(const vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Definition of the vpImage class member functions.
unsigned int getWidth() const
void resize(unsigned int h, unsigned int w)
resize the image : Image initialization
unsigned int getCols() const
unsigned int getHeight() const
unsigned int getRows() const
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this ...
void setBlockSize(int blockSize)
void setQuality(double qualityLevel)
void setHarrisFreeParameter(double harris_k)
void setMaxFeatures(int maxCount)
void setMinDistance(double minDistance)
void setWindowSize(int winSize)
void setPyramidLevels(int pyrMaxLevel)
static double rad(double deg)
static double getMedian(const std::vector< double > &v)
static double getStdev(const std::vector< double > &v, bool useBesselCorrection=false)
static double getMean(const std::vector< double > &v)
static double deg(double rad)
Real-time 6D object pose tracking using its CAD model.
@ ROBUST_FEATURE_ESTIMATION
Robust scheme to estimate the normal of the plane.
void setMu1(const double &mu_1)
void setRange(const unsigned int &range)
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
void setMaskNumber(const unsigned int &mask_number)
void setThreshold(const double &threshold)
void setSampleStep(const double &sample_step)
void setMaskSize(const unsigned int &mask_size)
void setMu2(const double &mu_2)
static bool parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, int flags)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
Implementation of a pose vector and operations on poses.
Defines a rectangle in the plane.
VISP_EXPORT double measureTimeMs()