#include <visp3/core/vpConfig.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/mbt/vpMbGenericTracker.h>
#include <visp3/vision/vpKeyPoint.h>
#ifdef ENABLE_VISP_NAMESPACE
#endif
#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D)
{
std::vector<cv::KeyPoint> trainKeyPoints;
double elapsedTime;
keypoint_learning.
detect(I, trainKeyPoints, elapsedTime);
std::vector<vpPolygon> polygons;
std::vector<std::vector<vpPoint> > roisPt;
std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.
getPolygonFaces();
polygons = pair.first;
roisPt = pair.second;
std::vector<cv::Point3f> points3f;
keypoint_learning.
buildReference(I, trainKeyPoints, points3f,
true,
id);
for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
}
}
#endif
int main(int argc, char **argv)
{
#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D)
try {
std::string videoname = "cube.mp4";
for (int i = 0; i < argc; i++) {
if (std::string(argv[i]) == "--name")
videoname = std::string(argv[i + 1]);
else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
std::cout << "\nUsage: " << argv[0] << " [--name <video name>] [--help] [-h]\n" << std::endl;
return EXIT_SUCCESS;
}
}
if (!parentname.empty())
objectname = parentname + "/" + objectname;
std::cout << "Video name: " << videoname << std::endl;
std::cout << "Tracker requested config files: " << objectname << ".[init,"
<< "xml,"
<< "cao or wrl]" << std::endl;
std::cout << "Tracker optional config files: " << objectname << ".[ppm]" << std::endl;
bool usexml = false;
#if defined(VISP_HAVE_PUGIXML)
tracker.loadConfigFile(objectname + ".xml");
tracker.getCameraParameters(cam);
usexml = true;
}
#endif
if (!usexml) {
tracker.setMovingEdge(me);
tracker.setCameraParameters(cam);
tracker.setNearClippingDistance(0.01);
tracker.setFarClippingDistance(10.0);
}
tracker.setOgreVisibilityTest(false);
tracker.loadModel(objectname + ".cao");
tracker.loadModel(objectname + ".wrl");
tracker.setDisplayFeatures(true);
vpKeyPoint keypoint_learning(
"ORB",
"ORB",
"BruteForce-Hamming");
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
keypoint_learning.setDetectorParameter("ORB", "nLevels", 1);
#else
cv::Ptr<cv::ORB> orb_learning = keypoint_learning.getDetector("ORB").dynamicCast<cv::ORB>();
if (orb_learning) {
orb_learning->setNLevels(1);
}
#endif
#if defined(VISP_HAVE_X11)
vpDisplayX display;
#elif defined(VISP_HAVE_GDI)
#elif defined(HAVE_OPENCV_HIGHGUI)
#else
std::cout << "No image viewer is available..." << std::endl;
return EXIT_FAILURE;
#endif
std::string imageName[] = { "cube0001.png", "cube0150.png", "cube0200.png" };
vpHomogeneousMatrix(0.02143385294, 0.1098083886, 0.5127439561, 2.087159614, 1.141775176, -0.4701291124),
vpHomogeneousMatrix(0.02651282185, -0.03713587374, 0.6873765919, 2.314744454, 0.3492296488, -0.1226054828),
vpHomogeneousMatrix(0.02965448956, -0.07283091786, 0.7253526051, 2.300529617, -0.4286674806, 0.1788761025) };
for (int i = 0; i < 3; i++) {
if (i == 0) {
display.init(I, 10, 10);
}
std::stringstream title;
title << "Learning cube on image: " << imageName[i];
tracker.setPose(I, initPoseTab[i]);
tracker.track(I);
tracker.getPose(cMo);
learnCube(I, tracker, keypoint_learning, i);
if (i < 2) {
}
else {
}
}
keypoint_learning.saveLearningData("cube_learning_data.bin", true);
vpKeyPoint keypoint_detection(
"ORB",
"ORB",
"BruteForce-Hamming");
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
keypoint_detection.setDetectorParameter("ORB", "nLevels", 1);
#else
cv::Ptr<cv::ORB> orb_detector = keypoint_detection.getDetector("ORB").dynamicCast<cv::ORB>();
orb_detector = keypoint_detection.getDetector("ORB").dynamicCast<cv::ORB>();
if (orb_detector) {
orb_detector->setNLevels(1);
}
#endif
keypoint_detection.loadLearningData("cube_learning_data.bin", true);
keypoint_detection.createImageMatching(I, IMatching);
#if defined(VISP_HAVE_X11)
vpDisplayX display2;
#elif defined(VISP_HAVE_GTK)
#elif defined(VISP_HAVE_GDI)
#elif defined(HAVE_OPENCV_HIGHGUI)
#endif
display2.
init(IMatching, 50, 50,
"Display matching between learned and current images");
double error;
bool click_done = false;
keypoint_detection.insertImageMatching(I, IMatching);
double elapsedTime;
if (keypoint_detection.matchPoint(I, cam, cMo, error, elapsedTime)) {
tracker.setPose(I, cMo);
keypoint_detection.displayMatching(I, IMatching);
std::vector<vpImagePoint> ransacInliers = keypoint_detection.getRansacInliers();
std::vector<vpImagePoint> ransacOutliers = keypoint_detection.getRansacOutliers();
for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
imPt.set_u(imPt.get_u() + I.
getWidth());
}
for (std::vector<vpImagePoint>::const_iterator it = ransacOutliers.begin(); it != ransacOutliers.end(); ++it) {
imPt.set_u(imPt.get_u() + I.
getWidth());
}
keypoint_detection.displayMatching(I, IMatching);
tracker.setCameraParameters(cam2);
tracker.setPose(IMatching, cMo);
}
click_done = true;
break;
}
click_done = true;
break;
}
}
if (!click_done)
}
std::cout << "Catch an exception: " << e << std::endl;
}
#else
(void)argc;
(void)argv;
std::cout << "Install OpenCV and rebuild ViSP to use this example." << std::endl;
#endif
return EXIT_SUCCESS;
}
Generic class defining intrinsic camera parameters.
void initPersProjWithoutDistortion(double px, double py, double u0, double v0)
static const vpColor none
static const vpColor green
Display for windows using GDI (available on any windows 32 platform).
The vpDisplayGTK allows to display image using the GTK 3rd party library. Thus to enable this class G...
The vpDisplayOpenCV allows to display image using the OpenCV library. Thus to enable this class OpenC...
void init(vpImage< unsigned char > &I, int winx=-1, int winy=-1, const std::string &title="") VP_OVERRIDE
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void displayCircle(const vpImage< unsigned char > &I, const vpImageCircle &circle, const vpColor &color, bool fill=false, unsigned int thickness=1)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
static void setTitle(const vpImage< unsigned char > &I, const std::string &windowtitle)
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
unsigned int getWidth() const
unsigned int getHeight() const
Class that allows keypoints detection (and descriptors extraction) and matching thanks to OpenCV libr...
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=nullptr)
unsigned int buildReference(const vpImage< unsigned char > &I)
static double rad(double deg)
Real-time 6D object pose tracking using its CAD model.
virtual std::pair< std::vector< vpPolygon >, std::vector< std::vector< vpPoint > > > getPolygonFaces(bool orderPolygons=true, bool useVisibility=true, bool clipPolygon=false) VP_OVERRIDE
virtual void getCameraParameters(vpCameraParameters &camera) const VP_OVERRIDE
virtual void getPose(vpHomogeneousMatrix &cMo) const VP_OVERRIDE
void setMu1(const double &mu_1)
void setRange(const unsigned int &range)
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
void setNbTotalSample(const int &ntotal_sample)
void setMaskNumber(const unsigned int &mask_number)
void setThreshold(const double &threshold)
void setSampleStep(const double &sample_step)
void setMaskSize(const unsigned int &mask_size)
void setMu2(const double &mu_2)
Class that enables to manipulate easily a video file or a sequence of images. As it inherits from the...
void acquire(vpImage< vpRGBa > &I)
void open(vpImage< vpRGBa > &I)
void setFileName(const std::string &filename)