//! Example of tracking with vpMbGenericTracker on Castel.
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <vector>
#include <visp3/core/vpConfig.h>
#if defined(VISP_HAVE_MODULE_MBT) && defined(VISP_HAVE_DISPLAY)
#include <visp3/core/vpDebug.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpImageConvert.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpMath.h>
#include <visp3/core/vpTime.h>
#include <visp3/gui/vpDisplayD3D.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayGTK.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/io/vpParseArgv.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/mbt/vpMbtTukeyEstimator.h>
#define GETOPTARGS "x:X:m:M:i:n:dchfolwvpt:T:e:"
#define USE_XML 1
#define USE_SMALL_DATASET 1 // small depth dataset in ViSP-images
namespace
{
void usage(const char *name, const char *badparam)
{
fprintf(stdout, "\n\
Example of tracking with vpMbGenericTracker.\n\
\n\
SYNOPSIS\n\
%s [-i <test image path>] [-x <config file>] [-X <config file depth>]\n\
[-m <model name>] [-M <model name depth>] [-n <initialisation file base name>]\n\
[-f] [-c] [-d] [-h] [-o] [-w] [-l] [-v] [-p]\n\
[-t <tracker type>] [-T <tracker type>] [-e <last frame index>]\n", name);
fprintf(stdout, "\n\
OPTIONS: \n\
-i <input image path> \n\
Set image input path.\n\
These images come from ViSP-images-x.y.z.tar.gz available \n\
on the ViSP website.\n\
Setting the VISP_INPUT_IMAGE_PATH environment\n\
variable produces the same behavior as using\n\
this option.\n\
\n\
-x <config file> \n\
Set the config file (the xml file) to use.\n\
The config file is used to specify the parameters of the tracker.\n\
\n\
-X <config file> \n\
Set the config file (the xml file) to use for the depth sensor.\n\
The config file is used to specify the parameters of the tracker.\n\
\n\
-m <model name> \n\
Specify the name of the model file.\n\
The model can either be a vrml model (.wrl) or a .cao file.\n\
\n\
-M <model name> \n\
Specify the name of the model file for the depth sensor.\n\
The model can either be a vrml model (.wrl) or a .cao file.\n\
\n\
-n <initialisation file base name> \n\
Base name of the initialisation file. The file will be 'base_name'.init .\n\
This base name is also used for the optional picture specifying where to \n\
click (a .ppm picture).\n\
\n\
-f \n\
Turn off the display of the moving edges and KLT points.\n\
\n\
-d \n\
Turn off the display.\n\
\n\
-c\n\
Disable the mouse click. Useful to automate the \n\
execution of this program without human intervention.\n\
\n\
-o\n\
Use Ogre3D for visibility tests\n\
\n\
-w\n\
When Ogre3D is enabled [-o], show the Ogre3D configuration dialog that allows to select the renderer.\n\
\n\
-l\n\
Use the scanline for visibility tests.\n\
\n\
-v\n\
Compute covariance matrix.\n\
\n\
-p\n\
Compute gradient projection error.\n\
\n\
-t <tracker type>\n\
Set tracker type (<1 (Edge)>, <2 (KLT)>, <3 (both)>) for color sensor.\n\
\n\
-T <tracker type>\n\
Set tracker type (<4 (Depth normal)>, <8 (Depth dense)>, <12 (both)>) for depth sensor.\n\
\n\
-e <last frame index>\n\
Specify the index of the last frame. Once reached, the tracking is stopped.\n\
\n\
-h \n\
Print the help.\n\n");
if (badparam)
fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}
bool getOptions(int argc, const char **argv, std::string &ipath, std::string &configFile, std::string &configFile_depth,
std::string &modelFile, std::string &modelFile_depth, std::string &initFile, bool &displayFeatures,
bool &click_allowed, bool &display, bool &useOgre, bool &showOgreConfigDialog, bool &useScanline,
bool &computeCovariance, bool &projectionError, int &trackerType, int &tracker_type_depth,
int &lastFrame)
{
const char *optarg_;
int c;
while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {
  switch (c) {
case 'i':
ipath = optarg_;
break;
case 'x':
configFile = optarg_;
break;
case 'X':
configFile_depth = optarg_;
break;
case 'm':
modelFile = optarg_;
break;
case 'M':
modelFile_depth = optarg_;
break;
case 'n':
initFile = optarg_;
break;
case 'f':
displayFeatures = false;
break;
case 'c':
click_allowed = false;
break;
case 'd':
display = false;
break;
case 'o':
useOgre = true;
break;
case 'l':
useScanline = true;
break;
case 'w':
showOgreConfigDialog = true;
break;
case 'v':
computeCovariance = true;
break;
case 'p':
projectionError = true;
break;
case 't':
trackerType = atoi(optarg_);
break;
case 'T':
tracker_type_depth = atoi(optarg_);
break;
case 'e':
lastFrame = atoi(optarg_);
break;
case 'h':
usage(argv[0], NULL);
return false;
break;
default:
usage(argv[0], optarg_);
return false;
break;
}
}
if ((c == 1) || (c == -1)) {
usage(argv[0], NULL);
std::cerr << "ERROR: " << std::endl;
std::cerr << " Bad argument " << optarg_ << std::endl << std::endl;
return false;
}
return true;
}
struct rs_intrinsics {
  float ppx;       //!< Horizontal coordinate of the principal point, in pixels
  float ppy;       //!< Vertical coordinate of the principal point, in pixels
  float fx;        //!< Focal length along x, in pixels
  float fy;        //!< Focal length along y, in pixels
  float coeffs[5]; //!< Distortion coefficients
};
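// Deproject a pixel and its depth to a 3D point in the depth camera frame, undoing the
// radial/tangential (Brown-Conrady) lens distortion; modelled after the librealsense
// rs_deproject_pixel_to_point() helper.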
void rs_deproject_pixel_to_point(float point[3], const rs_intrinsics &intrin, const float pixel[2], float depth)
{
float x = (pixel[0] - intrin.ppx) / intrin.fx;
float y = (pixel[1] - intrin.ppy) / intrin.fy;
float r2 = x * x + y * y;
float f = 1 + intrin.coeffs[0] * r2 + intrin.coeffs[1] * r2 * r2 + intrin.coeffs[4] * r2 * r2 * r2;
float ux = x * f + 2 * intrin.coeffs[2] * x * y + intrin.coeffs[3] * (r2 + 2 * x * x);
float uy = y * f + 2 * intrin.coeffs[3] * x * y + intrin.coeffs[2] * (r2 + 2 * y * y);
x = ux;
y = uy;
point[0] = depth * x;
point[1] = depth * y;
point[2] = depth;
}
bool read_data(unsigned int cpt, const std::string &input_directory, vpImage<unsigned char> &I,
               vpImage<uint16_t> &I_depth_raw, std::vector<vpColVector> &pointcloud,
               unsigned int &pointcloud_width, unsigned int &pointcloud_height)
{
char buffer[256];
std::stringstream ss;
ss << input_directory << "/image_%04d.pgm";
sprintf(buffer, ss.str().c_str(), cpt);
std::string filename_image = buffer;
if (!vpIoTools::checkFilename(filename_image)) {
  std::cerr << "Cannot read: " << filename_image << std::endl;
  return false;
}
vpImageIo::read(I, filename_image);
ss.str("");
ss << input_directory << "/depth_image_%04d.bin";
sprintf(buffer, ss.str().c_str(), cpt);
std::string filename_depth = buffer;
std::ifstream file_depth(filename_depth.c_str(), std::ios::in | std::ios::binary);
if (!file_depth.is_open()) {
return false;
}
unsigned int height = 0, width = 0;
// Assumed binary layout: image height and width, then the row-major 16-bit depth values
// (read here with native endianness).
file_depth.read(reinterpret_cast<char *>(&height), sizeof(height));
file_depth.read(reinterpret_cast<char *>(&width), sizeof(width));
I_depth_raw.resize(height, width);
uint16_t depth_value = 0;
for (unsigned int i = 0; i < height; i++) {
  for (unsigned int j = 0; j < width; j++) {
    file_depth.read(reinterpret_cast<char *>(&depth_value), sizeof(depth_value));
    I_depth_raw[i][j] = depth_value;
  }
}
pointcloud_width = width;
pointcloud_height = height;
pointcloud.resize((size_t)width * height);
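// Depth scale (raw 16-bit value to meters) and intrinsics hard-coded for the sensor
// that recorded this sequence.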
const float depth_scale = 0.000124986647f;
rs_intrinsics depth_intrinsic;
depth_intrinsic.ppx = 311.484558f;
depth_intrinsic.ppy = 246.283234f;
depth_intrinsic.fx = 476.053619f;
depth_intrinsic.fy = 476.053497f;
depth_intrinsic.coeffs[0] = 0.165056542f;
depth_intrinsic.coeffs[1] = -0.0508309528f;
depth_intrinsic.coeffs[2] = 0.00435937941f;
depth_intrinsic.coeffs[3] = 0.00541406544f;
depth_intrinsic.coeffs[4] = 0.250085592f;
for (unsigned int i = 0; i < height; i++) {
for (unsigned int j = 0; j < width; j++) {
float scaled_depth = I_depth_raw[i][j] * depth_scale;
float point[3];
float pixel[2] = {(float)j, (float)i};
rs_deproject_pixel_to_point(point, depth_intrinsic, pixel, scaled_depth);
vpColVector data_3D(3);
data_3D[0] = point[0];
data_3D[1] = point[1];
data_3D[2] = point[2];
pointcloud[(size_t)(i * width + j)] = data_3D;
}
}
return true;
}
void loadConfiguration(vpMbTracker *const tracker, const std::string &
#if defined(VISP_HAVE_XML2) && USE_XML
configFile
#endif
,
const std::string &
#if defined(VISP_HAVE_XML2) && USE_XML
configFile_depth
#endif
)
{
#if defined(VISP_HAVE_XML2) && USE_XML
dynamic_cast<vpMbGenericTracker *>(tracker)->loadConfigFile(configFile, configFile_depth);
#else
#if defined(VISP_HAVE_MODULE_KLT) && (defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100))
#endif
dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthNormalPclPlaneEstimationRansacMaxIter(200);
dynamic_cast<vpMbGenericTracker *>(tracker)->setDepthNormalPclPlaneEstimationRansacThreshold(0.001);
#endif
}
}
int main(int argc, const char **argv)
{
{
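// Sanity check of the Tukey robust M-estimator on a small set of residuals, first in double
// then in float precision; the largest residual (0.5) should receive a low weight.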
{
vpMbtTukeyEstimator<double> tukey_estimator;
std::vector<double> residues;
residues.push_back(0.5);
residues.push_back(0.1);
residues.push_back(0.15);
residues.push_back(0.14);
residues.push_back(0.12);
std::vector<double> weights(5, 1);
tukey_estimator.MEstimator(residues, weights, 1e-3);
for (size_t i = 0; i < weights.size(); i++) {
std::cout << "residues[" << i << "]=" << residues[i] << " ; weights[i" << i << "]=" << weights[i] << std::endl;
}
std::cout << std::endl;
}
{
vpMbtTukeyEstimator<float> tukey_estimator;
std::vector<float> residues;
residues.push_back(0.5f);
residues.push_back(0.1f);
residues.push_back(0.15f);
residues.push_back(0.14f);
residues.push_back(0.12f);
std::vector<float> weights(5, 1);
tukey_estimator.MEstimator(residues, weights, (float)1e-3);
for (size_t i = 0; i < weights.size(); i++) {
std::cout << "residues[" << i << "]=" << residues[i] << " ; weights[i" << i << "]=" << weights[i] << std::endl;
}
std::cout << std::endl;
}
}
try {
std::string env_ipath;
std::string opt_ipath;
std::string ipath;
std::string opt_configFile;
std::string opt_configFile_depth;
std::string opt_modelFile;
std::string opt_modelFile_depth;
std::string opt_initFile;
std::string initFile;
bool displayFeatures = true;
bool opt_click_allowed = true;
bool opt_display = true;
bool useOgre = false;
bool showOgreConfigDialog = false;
bool useScanline = false;
bool computeCovariance = false;
bool projectionError = false;
#if defined(__mips__) || defined(__mips) || defined(mips) || defined(__MIPS__)
int opt_lastFrame = 5;
#else
int opt_lastFrame = -1;
#endif
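    // Containers and defaults assumed by the rest of this example: color image, raw depth map
    // and its displayable conversion, the two estimated poses, and the default tracker types
    // (edge features for the color camera, dense depth features for the depth sensor),
    // which the -t and -T options can override.
    vpImage<unsigned char> I, I_depth;
    vpImage<uint16_t> I_depth_raw;
    vpHomogeneousMatrix c1Mo, c2Mo;
    int trackerType_image = vpMbGenericTracker::EDGE_TRACKER;
    int trackerType_depth = vpMbGenericTracker::DEPTH_DENSE_TRACKER;

    // Default input path: the ViSP-images dataset pointed to by VISP_INPUT_IMAGE_PATH.
    env_ipath = vpIoTools::getViSPImagesDataPath();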
if (!env_ipath.empty())
ipath = env_ipath;
if (!getOptions(argc, argv, opt_ipath, opt_configFile, opt_configFile_depth, opt_modelFile, opt_modelFile_depth,
opt_initFile, displayFeatures, opt_click_allowed, opt_display, useOgre, showOgreConfigDialog,
useScanline, computeCovariance, projectionError, trackerType_image, trackerType_depth,
opt_lastFrame)) {
return EXIT_FAILURE;
}
#if !defined(VISP_HAVE_MODULE_KLT) || (!defined(VISP_HAVE_OPENCV) || (VISP_HAVE_OPENCV_VERSION < 0x020100))
if (trackerType_image == 2) {
std::cout << "KLT only features cannot be used: ViSP is not built with "
"KLT module or OpenCV is not available."
<< std::endl;
return EXIT_SUCCESS;
}
#endif
if (opt_ipath.empty() && env_ipath.empty()) {
usage(argv[0], NULL);
std::cerr << std::endl << "ERROR:" << std::endl;
std::cerr << " Use -i <visp image path> option or set VISP_INPUT_IMAGE_PATH " << std::endl
<< " environment variable to specify the location of the " << std::endl
<< " image path where test images are located." << std::endl
<< std::endl;
return EXIT_FAILURE;
}
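    // An explicit -i option takes precedence over the environment variable.
    if (!opt_ipath.empty())
      ipath = opt_ipath;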
std::cerr << "ViSP-images does not contain the folder: " << dir_path << "!" << std::endl;
return EXIT_SUCCESS;
}
std::string configFile, configFile_depth;
if (!opt_configFile.empty())
configFile = opt_configFile;
else
configFile =
if (!opt_configFile_depth.empty())
configFile_depth = opt_configFile_depth;
else
configFile_depth =
std::string modelFile, modelFile_depth;
if (!opt_modelFile.empty())
modelFile = opt_modelFile;
else {
#if defined(VISP_HAVE_COIN3D) && (COIN_MAJOR_VERSION == 2 || COIN_MAJOR_VERSION == 3 || COIN_MAJOR_VERSION == 4)
modelFile =
#else
#endif
}
if (!opt_modelFile_depth.empty())
modelFile_depth = opt_modelFile_depth;
else
modelFile_depth =
std::string vrml_ext = ".wrl";
bool use_vrml =
(modelFile.compare(modelFile.length() - vrml_ext.length(), vrml_ext.length(), vrml_ext) == 0) ||
(modelFile_depth.compare(modelFile_depth.length() - vrml_ext.length(), vrml_ext.length(), vrml_ext) == 0);
if (use_vrml) {
#if defined(VISP_HAVE_COIN3D) && (COIN_MAJOR_VERSION == 2 || COIN_MAJOR_VERSION == 3 || COIN_MAJOR_VERSION == 4)
std::cout << "use_vrml: " << use_vrml << std::endl;
#else
std::cerr << "Error: vrml model file is only supported if ViSP is "
"build with Coin3D 3rd party"
<< std::endl;
return EXIT_FAILURE;
#endif
}
if (!opt_initFile.empty())
initFile = opt_initFile;
else
std::vector<vpColVector> pointcloud;
unsigned int pointcloud_width, pointcloud_height;
if (!read_data(0, ipath, I, I_depth_raw, pointcloud, pointcloud_width, pointcloud_height)) {
std::cerr << "Cannot open sequence: " << ipath << std::endl;
return EXIT_FAILURE;
}
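    // Build a displayable 8-bit image from the raw 16-bit depth map.
    vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);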
#if defined VISP_HAVE_X11
    vpDisplayX display1, display2;
#elif defined VISP_HAVE_GDI
    vpDisplayGDI display1, display2;
#elif defined VISP_HAVE_OPENCV
    vpDisplayOpenCV display1, display2;
#elif defined VISP_HAVE_D3D9
    vpDisplayD3D display1, display2;
#elif defined VISP_HAVE_GTK
    vpDisplayGTK display1, display2;
#else
    opt_display = false;
#endif
if (opt_display) {
#if defined(VISP_HAVE_DISPLAY)
      display1.init(I, 100, 100, "Test tracking (Left)");
      // The right view is opened at an arbitrary offset from the left one.
      display2.init(I_depth, (int)I.getWidth() + 110, 100, "Test tracking (Right)");
#endif
}
std::vector<int> trackerTypes(2);
trackerTypes[0] = trackerType_image;
trackerTypes[1] = trackerType_depth;
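    // The generic tracker takes one tracker type per camera: index 0 drives the color camera,
    // index 1 the depth sensor.
    vpMbTracker *tracker = new vpMbGenericTracker(trackerTypes);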
loadConfiguration(tracker, configFile, configFile_depth);
std::string depth_M_color_filename =
{
      vpHomogeneousMatrix depth_M_color;
      std::ifstream depth_M_color_file(depth_M_color_filename.c_str());
      depth_M_color.load(depth_M_color_file);
std::map<std::string, vpHomogeneousMatrix> mapOfCameraTransformationMatrices;
mapOfCameraTransformationMatrices["Camera2"] = depth_M_color;
      dynamic_cast<vpMbGenericTracker *>(tracker)->setCameraTransformationMatrix(mapOfCameraTransformationMatrices);
}
    // Apply the remaining parsed options to the tracker.
    tracker->setOgreVisibilityTest(useOgre);
    if (useOgre)
      tracker->setOgreShowConfigDialog(showOgreConfigDialog);
    tracker->setScanLineVisibilityTest(useScanline);
    tracker->setDisplayFeatures(displayFeatures);
    tracker->setCovarianceComputation(computeCovariance);
    tracker->setProjectionErrorComputation(projectionError);
if (opt_display && opt_click_allowed) {
std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
mapOfImages["Camera1"] = &I;
mapOfImages["Camera2"] = &I_depth;
std::map<std::string, std::string> mapOfInitFiles;
mapOfInitFiles["Camera1"] = initFile;
      dynamic_cast<vpMbGenericTracker *>(tracker)->initClick(mapOfImages, mapOfInitFiles, true);
} else {
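      // Hard-coded initial poses (translation in meters, theta-u rotation in radians) used when
      // mouse clicks are disabled; they would typically be passed to the tracker with initFromPose().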
vpHomogeneousMatrix c1Moi(0.06846423368, 0.09062570884, 0.3401096693, -2.671882598, 0.1174275908, -0.6011935263);
vpHomogeneousMatrix c2Moi(0.04431452054, 0.09294637757, 0.3357760654, -2.677922443, 0.121297639, -0.6028463357);
}
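    // First tracking iteration on the initial frame, before entering the main loop.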
{
std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
mapOfImages["Camera1"] = &I;
std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
mapOfPointclouds["Camera2"] = &pointcloud;
std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
mapOfWidths["Camera2"] = pointcloud_width;
mapOfHeights["Camera2"] = pointcloud_height;
      dynamic_cast<vpMbGenericTracker *>(tracker)->track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
}
if (opt_display) {
}
bool quit = false, click = false;
unsigned int frame_index = 0;
std::vector<double> time_vec;
while (read_data(frame_index, ipath, I, I_depth_raw, pointcloud, pointcloud_width, pointcloud_height) && !quit &&
(opt_lastFrame > 0 ? (int)frame_index <= opt_lastFrame : true)) {
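      // Per-frame processing: optional display, reset and set-pose tests at fixed frame indices,
      // tracking on the image + point cloud, then user interaction and statistics.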
if (opt_display) {
std::stringstream ss;
ss << "Num frame: " << frame_index;
}
if (frame_index == 10) {
std::cout << "----------Test reset tracker----------" << std::endl;
if (opt_display) {
}
loadConfiguration(tracker, configFile, configFile_depth);
}
#if USE_SMALL_DATASET
if (frame_index == 20) {
        c1Mo.buildFrom(0.07734634051, 0.08993639906, 0.342344402, -2.708409543, 0.0669276477, -0.3798958303);
        c2Mo.buildFrom(0.05319520317, 0.09223511976, 0.3380095812, -2.71438192, 0.07141055397, -0.3810081638);
#else
if (frame_index == 50) {
        c1Mo.buildFrom(0.09280663035, 0.09277655672, 0.330415149, -2.724431817, 0.0293932671, 0.02027966377);
        c2Mo.buildFrom(0.06865933578, 0.09494713501, 0.3260555142, -2.730027451, 0.03498390135, 0.01989831338);
#endif
std::cout << "Test set pose" << std::endl;
}
#if USE_SMALL_DATASET
if (frame_index < 15 || frame_index >= 20) {
#else
if (frame_index < 30 || frame_index >= 50) {
#endif
        std::map<std::string, const vpImage<unsigned char> *> mapOfImages;
        mapOfImages["Camera1"] = &I;
        std::map<std::string, const std::vector<vpColVector> *> mapOfPointclouds;
        mapOfPointclouds["Camera2"] = &pointcloud;
        std::map<std::string, unsigned int> mapOfWidths, mapOfHeights;
        mapOfWidths["Camera2"] = pointcloud_width;
        mapOfHeights["Camera2"] = pointcloud_height;
        double t = vpTime::measureTimeMs();
        dynamic_cast<vpMbGenericTracker *>(tracker)->track(mapOfImages, mapOfPointclouds, mapOfWidths, mapOfHeights);
        t = vpTime::measureTimeMs() - t;
        dynamic_cast<vpMbGenericTracker *>(tracker)->getPose(c1Mo, c2Mo);
        time_vec.push_back(t);
if (opt_display) {
std::stringstream ss;
ss << "Computation time: " << t << " ms";
ss.str("");
}
}
      if (opt_click_allowed && opt_display) {
        vpMouseButton::vpMouseButtonType button;
        if (vpDisplay::getClick(I, button, click)) {
          switch (button) {
          case vpMouseButton::button1:
            quit = !click;
            break;
          case vpMouseButton::button3:
            click = !click;
            break;
          default:
            break;
          }
        }
      }
      if (computeCovariance) {
        std::cout << "Covariance matrix: \n" << tracker->getCovarianceMatrix() << std::endl << std::endl;
      }
      if (projectionError) {
        std::cout << "Projection error: " << tracker->getProjectionError() << std::endl << std::endl;
      }
if (opt_display) {
}
frame_index++;
}
std::cout << "\nFinal poses, c1Mo:\n" << c1Mo << "\nc2Mo:\n" << c2Mo << std::endl;
<< std::endl;
if (opt_click_allowed && !quit) {
}
delete tracker;
tracker = NULL;
#if defined(VISP_HAVE_XML2) && USE_XML
#endif
#if defined(VISP_HAVE_COIN3D) && (COIN_MAJOR_VERSION >= 2)
if (use_vrml)
SoDB::finish();
#endif
return EXIT_SUCCESS;
std::cout << "Catch an exception: " << e << std::endl;
return EXIT_FAILURE;
}
}
#else
int main()
{
std::cerr << "visp_mbt, visp_gui modules and OpenCV are required to run "
"this example."
<< std::endl;
return EXIT_SUCCESS;
}
#endif