Visual Servoing Platform version 3.5.0 under development (2022-02-15)
tutorial-detection-object-mbt2-deprecated.cpp
#include <visp3/core/vpConfig.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayGTK.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/mbt/vpMbEdgeTracker.h>
#include <visp3/vision/vpKeyPoint.h>

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400)
void learnCube(const vpImage<unsigned char> &I, vpMbEdgeTracker &tracker, vpKeyPoint &keypoint_learning, int id)
{
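  // Detect the ORB keypoints on the whole learning image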
  std::vector<cv::KeyPoint> trainKeyPoints;
  double elapsedTime;
  keypoint_learning.detect(I, trainKeyPoints, elapsedTime);

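  // Get the visible faces of the CAD model (polygons and their 3D corners) for the current pose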
  std::vector<vpPolygon> polygons;
  std::vector<std::vector<vpPoint> > roisPt;
  std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
  polygons = pair.first;
  roisPt = pair.second;

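  // Keep only the keypoints lying on a visible face and compute their 3D coordinates from the pose and the camera parameters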
  std::vector<cv::Point3f> points3f;
  vpHomogeneousMatrix cMo;
  tracker.getPose(cMo);
  vpCameraParameters cam;
  tracker.getCameraParameters(cam);
  vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

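  // Append the 2D keypoints and their 3D coordinates to the reference database, tagged with the image id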
  keypoint_learning.buildReference(I, trainKeyPoints, points3f, true, id);

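  // Display a red cross on each learned keypoint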
  for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
    vpDisplay::displayCross(I, (int)it->pt.y, (int)it->pt.x, 4, vpColor::red);
  }
}
#endif

int main(int argc, char **argv)
{
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400)
  try {
    std::string videoname = "cube.mp4";

    for (int i = 0; i < argc; i++) {
      if (std::string(argv[i]) == "--name" && i + 1 < argc)
        videoname = std::string(argv[i + 1]);
      else if (std::string(argv[i]) == "--help") {
        std::cout << "\nUsage: " << argv[0] << " [--name <video name>] [--help]\n" << std::endl;
        return 0;
      }
    }
    std::string parentname = vpIoTools::getParent(videoname);
    std::string objectname = vpIoTools::getNameWE(videoname);

    if (!parentname.empty())
      objectname = parentname + "/" + objectname;

    std::cout << "Video name: " << videoname << std::endl;
    std::cout << "Tracker requested config files: " << objectname << ".[init,xml,cao or wrl]" << std::endl;
    std::cout << "Tracker optional config files: " << objectname << ".[ppm]" << std::endl;

    // Declarations shared by the learning and the detection parts
    vpImage<unsigned char> I;
    vpCameraParameters cam;
    vpHomogeneousMatrix cMo;

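    // Configure the edge tracker from the xml file when it exists, otherwise use hard-coded settings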
    vpMbEdgeTracker tracker;
    bool usexml = false;
    if (vpIoTools::checkFilename(objectname + ".xml")) {
      tracker.loadConfigFile(objectname + ".xml");
      tracker.getCameraParameters(cam);
      usexml = true;
    }
    if (!usexml) {
      vpMe me;
      me.setMaskSize(5);
      me.setMaskNumber(180);
      me.setRange(7);
      me.setThreshold(5000);
      me.setMu1(0.5);
      me.setMu2(0.5);
      me.setSampleStep(4);
      me.setNbTotalSample(250);
      tracker.setMovingEdge(me);
      cam.initPersProjWithoutDistortion(547, 542, 339, 235);
      tracker.setCameraParameters(cam);
      tracker.setAngleAppear(vpMath::rad(89));
      tracker.setAngleDisappear(vpMath::rad(89));
      tracker.setNearClippingDistance(0.01);
      tracker.setFarClippingDistance(10.0);
    }

    tracker.setOgreVisibilityTest(false);
    if (vpIoTools::checkFilename(objectname + ".cao"))
      tracker.loadModel(objectname + ".cao");
    else if (vpIoTools::checkFilename(objectname + ".wrl"))
      tracker.loadModel(objectname + ".wrl");
    tracker.setDisplayFeatures(true);

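    // ORB detector/extractor with brute-force Hamming matching, used for the learning stage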
    vpKeyPoint keypoint_learning("ORB", "ORB", "BruteForce-Hamming");
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
    keypoint_learning.setDetectorParameter("ORB", "nLevels", 1);
#else
    cv::Ptr<cv::ORB> orb_learning = keypoint_learning.getDetector("ORB").dynamicCast<cv::ORB>();
    if (orb_learning) {
      orb_learning->setNLevels(1);
    }
#endif

#if defined(VISP_HAVE_X11)
    vpDisplayX display;
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI display;
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV display;
#else
    std::cout << "No image viewer is available..." << std::endl;
    return 0;
#endif

    /*
     * Start the part of the code dedicated to object learning from 3 images
     */
    std::string imageName[] = {"cube0001.png", "cube0150.png", "cube0200.png"};
    vpHomogeneousMatrix initPoseTab[] = {
        vpHomogeneousMatrix(0.02143385294, 0.1098083886, 0.5127439561, 2.087159614, 1.141775176, -0.4701291124),
        vpHomogeneousMatrix(0.02651282185, -0.03713587374, 0.6873765919, 2.314744454, 0.3492296488, -0.1226054828),
        vpHomogeneousMatrix(0.02965448956, -0.07283091786, 0.7253526051, 2.300529617, -0.4286674806, 0.1788761025)};
    for (int i = 0; i < 3; i++) {
      vpImageIo::read(I, imageName[i]);
      if (i == 0) {
        display.init(I, 10, 10);
      }
      std::stringstream title;
      title << "Learning cube on image: " << imageName[i];
      vpDisplay::setTitle(I, title.str());
      vpDisplay::display(I);

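      // Initialise the tracker with the pose known for this learning image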
      tracker.setPose(I, initPoseTab[i]);

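      // Refine the pose by tracking the model edges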
      tracker.track(I);

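      // Retrieve the refined pose and display the projected model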
      tracker.getPose(cMo);
      tracker.display(I, cMo, cam, vpColor::red);

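      // Detect and learn the keypoints lying on the visible cube faces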
      learnCube(I, tracker, keypoint_learning, i);

      vpDisplay::displayText(I, 10, 10, "Learning step: keypoints are detected on visible cube faces", vpColor::red);
      if (i < 2) {
        vpDisplay::displayText(I, 30, 10, "Click to continue the learning...", vpColor::red);
      } else {
        vpDisplay::displayText(I, 30, 10, "Click to continue with the detection...", vpColor::red);
      }

      vpDisplay::flush(I);
      vpDisplay::getClick(I, true);
    }

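    // Save the learned keypoints, descriptors and 3D coordinates in a binary file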
    keypoint_learning.saveLearningData("cube_learning_data.bin", true);

    /*
     * Start the part of the code dedicated to detection and localization
     */
    vpKeyPoint keypoint_detection("ORB", "ORB", "BruteForce-Hamming");
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
    keypoint_detection.setDetectorParameter("ORB", "nLevels", 1);
#else
    cv::Ptr<cv::ORB> orb_detector = keypoint_detection.getDetector("ORB").dynamicCast<cv::ORB>();
    if (orb_detector) {
      orb_detector->setNLevels(1);
    }
#endif

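    // Load the learning data produced during the learning stage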
    keypoint_detection.loadLearningData("cube_learning_data.bin", true);

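    // Image used to display, side by side, the learned images and the current image with their matches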
    vpImage<unsigned char> IMatching;
    keypoint_detection.createImageMatching(I, IMatching);

    vpVideoReader g;
    g.setFileName(videoname);
    g.open(I);

#if defined VISP_HAVE_X11
    vpDisplayX display2;
#elif defined VISP_HAVE_GTK
    vpDisplayGTK display2;
#elif defined VISP_HAVE_GDI
    vpDisplayGDI display2;
#else
    vpDisplayOpenCV display2;
#endif
    display2.init(IMatching, 50, 50, "Display matching between learned and current images");
    vpDisplay::setTitle(I, "Cube detection and localization");

    double error;
    bool click_done = false;

    while (!g.end()) {
      g.acquire(I);
      vpDisplay::display(I);

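      // Insert the current image into the matching image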
      keypoint_detection.insertImageMatching(I, IMatching);

      vpDisplay::display(IMatching);
      vpDisplay::displayText(I, 10, 10, "Detection and localization in process...", vpColor::red);

      double elapsedTime;
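      // Detect the object and estimate its pose from the 2D/3D keypoint matches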
      if (keypoint_detection.matchPoint(I, cam, cMo, error, elapsedTime)) {
        tracker.setPose(I, cMo);

        tracker.display(I, cMo, cam, vpColor::red, 2);
        vpDisplay::displayFrame(I, cMo, cam, 0.05, vpColor::none, 3);

        keypoint_detection.displayMatching(I, IMatching);

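        // Retrieve the RANSAC inliers and outliers used for the pose estimation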
        std::vector<vpImagePoint> ransacInliers = keypoint_detection.getRansacInliers();
        std::vector<vpImagePoint> ransacOutliers = keypoint_detection.getRansacOutliers();

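        // Display the inliers in green, shifted to the current-image part of the matching image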
        for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
          vpImagePoint imPt(*it);
          imPt.set_u(imPt.get_u() + I.getWidth());
          imPt.set_v(imPt.get_v() + I.getHeight());
          vpDisplay::displayCircle(IMatching, imPt, 4, vpColor::green);
        }

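        // Display the outliers in red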
        for (std::vector<vpImagePoint>::const_iterator it = ransacOutliers.begin(); it != ransacOutliers.end(); ++it) {
          vpImagePoint imPt(*it);
          imPt.set_u(imPt.get_u() + I.getWidth());
          imPt.set_v(imPt.get_v() + I.getHeight());
          vpDisplay::displayCircle(IMatching, imPt, 4, vpColor::red);
        }

        keypoint_detection.displayMatching(I, IMatching);

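        // Display the model and the object frame also in the current-image part of the matching image,
        // using camera parameters whose principal point is shifted accordingly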
        vpCameraParameters cam2;
        cam2.initPersProjWithoutDistortion(cam.get_px(), cam.get_py(), cam.get_u0() + I.getWidth(),
                                           cam.get_v0() + I.getHeight());
        tracker.setCameraParameters(cam2);
        tracker.setPose(IMatching, cMo);
        tracker.display(IMatching, cMo, cam2, vpColor::red, 2);
        vpDisplay::displayFrame(IMatching, cMo, cam2, 0.05, vpColor::none, 3);
      }

      vpDisplay::flush(I);
      vpDisplay::displayText(IMatching, 30, 10, "A click to exit.", vpColor::red);
      vpDisplay::flush(IMatching);
      if (vpDisplay::getClick(I, false)) {
        click_done = true;
        break;
      }
      if (vpDisplay::getClick(IMatching, false)) {
        click_done = true;
        break;
      }
    }

    if (!click_done)
      vpDisplay::getClick(IMatching);
  } catch (const vpException &e) {
    std::cout << "Caught an exception: " << e << std::endl;
  }
#else
  (void)argc;
  (void)argv;
  std::cout << "Install OpenCV and rebuild ViSP to use this example." << std::endl;
#endif

  return 0;
}