/*!
  Test keypoint matching and pose estimation with mostly OpenCV function
  calls, to detect potential memory leaks in testKeyPoint-2.cpp.
*/
#include <iostream>
#include <visp3/core/vpConfig.h>
#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020301)
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpImage.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayGTK.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/io/vpParseArgv.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/mbt/vpMbEdgeTracker.h>
#include <visp3/vision/vpKeyPoint.h>
#define GETOPTARGS "cdh"
void usage(const char *name, const char *badparam);
bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display);
/*!
  Print the program synopsis and the list of accepted options on stdout.

  \param name : Program name (argv[0]), substituted into the synopsis line.
  \param badparam : Offending parameter to report, or NULL when the help
  is printed without an error.
*/
void usage(const char *name, const char *badparam)
{
  // Synopsis — single literal, identical bytes to the multi-line original.
  fprintf(stdout, "\nTest keypoints matching.\n\nSYNOPSIS\n%s [-c] [-d] [-h]\n", name);

  // Option descriptions; adjacent string literals are concatenated by the
  // compiler, so the emitted text is unchanged.
  fprintf(stdout,
          "\nOPTIONS: \n\n"
          "-c\n"
          "Disable the mouse click. Useful to automate the \n"
          "execution of this program without human intervention.\n\n"
          "-d \n"
          "Turn off the display.\n\n"
          "-h\n"
          "Print the help.\n");

  if (badparam != NULL) {
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
  }
}
bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display)
{
const char *optarg_;
int c;
switch (c) {
case 'c':
click_allowed = false;
break;
case 'd':
display = false;
break;
case 'h':
usage(argv[0], NULL);
return false;
break;
default:
usage(argv[0], optarg_);
return false;
break;
}
}
if ((c == 1) || (c == -1)) {
usage(argv[0], NULL);
std::cerr << "ERROR: " << std::endl;
std::cerr << " Bad argument " << optarg_ << std::endl << std::endl;
return false;
}
return true;
}
/*!
  NOTE(review): this function is a truncated extraction of what appears to
  be ViSP's testKeyPoint-4.cpp run_test(): the parameter list is cut off
  after `opt_display` (trailing comma before `{`), and many statements are
  missing (image loading, display construction, tracker/camera setup,
  pixel-to-meter conversion at what is now a garbled line, pose
  computation, display drawing, event loop). As a result `I`, `Iref`,
  `Imatch`, `display`, `tracker`, `cam` and `estimated_pose` are
  unresolved here and the brace structure is unbalanced. The comments
  below describe the intent of the surviving code; the body must be
  restored from the upstream source before it can compile — do not edit
  the logic in this state.
*/
template<typename Type>
void run_test(const std::string &env_ipath, bool opt_click_allowed, bool opt_display,
{
// Keep a copy of the reference image (I presumably loaded above — missing).
Iref = I;
// Display backend selection — the per-backend declarations were stripped.
#if defined VISP_HAVE_X11
#elif defined VISP_HAVE_GTK
#elif defined VISP_HAVE_GDI
#else
#endif
if (opt_display) {
display.
init(I, 0, 0,
"ORB keypoints matching");
}
// Tracker configuration: XML config when pugixml is available — bodies stripped.
#ifdef VISP_HAVE_PUGIXML
#else
#endif
if (opt_display && opt_click_allowed) {
} else {
// Hard-coded initial pose used when no interactive init is possible.
vpHomogeneousMatrix cMoi(0.02044769891, 0.1101505452, 0.5078963719, 2.063603907, 1.110231561, -0.4392789872);
}
// ORB detector/extractor + brute-force Hamming matcher, built through the
// version-dependent OpenCV factory APIs.
cv::Ptr<cv::FeatureDetector> detector;
cv::Ptr<cv::DescriptorExtractor> extractor;
cv::Ptr<cv::DescriptorMatcher> matcher;
#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
detector = cv::ORB::create(500, 1.2f, 1);
extractor = cv::ORB::create(500, 1.2f, 1);
#elif (VISP_HAVE_OPENCV_VERSION >= 0x020301)
detector = cv::FeatureDetector::create("ORB");
extractor = cv::DescriptorExtractor::create("ORB");
#endif
matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
// Single pyramid level to match the cv::ORB::create(..., 1) call above.
detector->set("nLevels", 1);
#endif
// Detect keypoints on the training image (matImg conversion from Iref is missing).
std::vector<cv::KeyPoint> trainKeyPoints;
cv::Mat matImg;
detector->detect(matImg, trainKeyPoints);
// Retrieve the model faces from the tracker to later associate 3D
// coordinates with the training keypoints.
std::vector<vpPolygon> polygons;
std::vector<std::vector<vpPoint> > roisPt;
std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.
getPolygonFaces(
false);
polygons = pair.first;
roisPt = pair.second;
// points3f should be filled from the polygons (filling code stripped).
std::vector<cv::Point3f> points3f;
cv::Mat trainDescriptors;
extractor->compute(matImg, trainKeyPoints, trainDescriptors);
// Sanity check: one descriptor and one 3D point per keypoint (the error
// handling inside the branch was stripped).
if (trainKeyPoints.size() != (size_t)trainDescriptors.rows || trainKeyPoints.size() != points3f.size()) {
}
bool opt_click = false;
// Query side: detect and describe keypoints on the current image.
std::vector<cv::KeyPoint> queryKeyPoints;
detector->detect(matImg, queryKeyPoints);
cv::Mat queryDescriptors;
extractor->compute(matImg, queryKeyPoints, queryDescriptors);
std::vector<std::vector<cv::DMatch> > knn_matches;
std::vector<cv::DMatch> matches;
// 2-NN matching followed by Lowe's ratio test (threshold 0.85) to keep
// only discriminative matches.
matcher->knnMatch(queryDescriptors, trainDescriptors, knn_matches, 2);
for (std::vector<std::vector<cv::DMatch> >::const_iterator it = knn_matches.begin(); it != knn_matches.end();
++it) {
if (it->size() > 1) {
double ratio = (*it)[0].distance / (*it)[1].distance;
if (ratio < 0.85) {
matches.push_back((*it)[0]);
}
}
}
// Build 2D/3D correspondences for pose estimation: 3D point from the
// training index, normalized image coordinates from the query keypoint.
for (std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); ++it) {
vpPoint pt(points3f[(
size_t)(it->trainIdx)].x, points3f[(
size_t)(it->trainIdx)].y,
points3f[(size_t)(it->trainIdx)].z);
double x = 0.0, y = 0.0;
// NOTE(review): the vpPixelMeterConversion::convertPoint(...) call that
// this argument list belongs to was stripped — this line is incomplete.
queryKeyPoints[(size_t)(it->queryIdx)].pt.y, x, y);
pt.set_x(x);
pt.set_y(y);
}
// Pose estimation needs at least 4 point correspondences; the actual
// computePose/ransac calls inside the try were stripped.
bool is_pose_estimated = false;
if (estimated_pose.
npt >= 4) {
try {
unsigned int nb_inliers = (
unsigned int)(0.6 * estimated_pose.
npt);
is_pose_estimated = true;
} catch (...) {
is_pose_estimated = false;
}
}
if (opt_display) {
// Draw each match as a segment between the training keypoint (left image)
// and the query keypoint shifted by the reference image width (right image);
// the vpDisplay drawing calls themselves were stripped.
for (std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); ++it) {
vpImagePoint leftPt(trainKeyPoints[(
size_t)it->trainIdx].pt.y, trainKeyPoints[(
size_t)it->trainIdx].pt.x);
vpImagePoint rightPt(queryKeyPoints[(
size_t)it->queryIdx].pt.y,
queryKeyPoints[(
size_t)it->queryIdx].pt.x + Iref.
getWidth());
}
if (is_pose_estimated) {
}
}
// Interactive click handling (the surrounding video loop and the closing
// braces below belong to stripped control flow — brace count is unbalanced).
if (opt_click_allowed && opt_display) {
if (opt_click) {
opt_click = false;
}
} else {
opt_click = true;
break;
}
}
}
}
}
}
int main(int argc, const char **argv)
{
try {
std::string env_ipath;
bool opt_click_allowed = true;
bool opt_display = true;
if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
exit(-1);
}
if (env_ipath.empty()) {
std::cerr << "Please set the VISP_INPUT_IMAGE_PATH environment "
"variable value."
<< std::endl;
return -1;
}
{
std::cout << "-- Test on gray level images" << std::endl;
run_test(env_ipath, opt_click_allowed, opt_display, I, Imatch, Iref);
}
{
std::cout << "-- Test on color images" << std::endl;
run_test(env_ipath, opt_click_allowed, opt_display, I, Imatch, Iref);
}
std::cerr << e.
what() << std::endl;
return -1;
}
std::cout << "testKeyPoint-4 is ok !" << std::endl;
return 0;
}
#else
int main()
{
  // Fallback build: ViSP was compiled without OpenCV (or with a version
  // older than 2.3.1), so this test cannot run. Report the reason and
  // return success so the test suite does not fail on such configurations.
  std::cerr << "You need OpenCV library." << std::endl;
  return 0;
}
#endif