tutorial-detection-object-mbt2-deprecated.cpp
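This deprecated example first learns ORB keypoints on three reference images of a cube whose pose is obtained with the vpMbEdgeTracker model-based tracker, then uses the learned keypoints to detect the cube and estimate its pose in each frame of a video.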
#include <visp3/core/vpConfig.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayGTK.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/mbt/vpMbEdgeTracker.h>
#include <visp3/vision/vpKeyPoint.h>

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400)
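// Learn the cube appearance on one image: keypoints are detected on the whole
// image, only those lying on a visible face of the CAD model are kept, their 3D
// coordinates in the object frame are deduced from the current tracker pose, and
// the result is appended to the reference database under the given class id.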
void learnCube(const vpImage<unsigned char> &I, vpMbEdgeTracker &tracker, vpKeyPoint &keypoint_learning, int id)
{
  std::vector<cv::KeyPoint> trainKeyPoints;
  double elapsedTime;
  keypoint_learning.detect(I, trainKeyPoints, elapsedTime);

  std::vector<vpPolygon> polygons;
  std::vector<std::vector<vpPoint> > roisPt;
  std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces();
  polygons = pair.first;
  roisPt = pair.second;

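  // Keep only the keypoints that fall on a visible face of the model and compute
  // their 3D coordinates in the object frame from the current pose and camera parameters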
  std::vector<cv::Point3f> points3f;
  vpHomogeneousMatrix cMo;
  tracker.getPose(cMo);
  vpCameraParameters cam;
  tracker.getCameraParameters(cam);
  vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

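  // Append these keypoints and their 3D coordinates to the reference database,
  // tagged with the index of the learning image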
  keypoint_learning.buildReference(I, trainKeyPoints, points3f, true, id);

  for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
    vpDisplay::displayCross(I, (int)it->pt.y, (int)it->pt.x, 4, vpColor::red);
  }
}
#endif

int main(int argc, char **argv)
{
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400)
  try {
    std::string videoname = "cube.mpeg";

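    // Parse the command line: --name <video name> selects the input video, --help prints the usage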
    for (int i = 0; i < argc; i++) {
      if (std::string(argv[i]) == "--name" && i + 1 < argc)
        videoname = std::string(argv[i + 1]);
      else if (std::string(argv[i]) == "--help") {
        std::cout << "\nUsage: " << argv[0] << " [--name <video name>] [--help]\n" << std::endl;
        return 0;
      }
    }
    std::string parentname = vpIoTools::getParent(videoname);
    std::string objectname = vpIoTools::getNameWE(videoname);

    if (!parentname.empty())
      objectname = parentname + "/" + objectname;

    std::cout << "Video name: " << videoname << std::endl;
    std::cout << "Tracker requested config files: " << objectname << ".[init,"
#ifdef VISP_HAVE_PUGIXML
              << "xml,"
#endif
              << "cao or wrl]" << std::endl;
    std::cout << "Tracker optional config files: " << objectname << ".[ppm]" << std::endl;

    vpImage<unsigned char> I;
    vpHomogeneousMatrix cMo;
    vpCameraParameters cam;

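    // Model-based edge tracker used both to get the pose on the learning images
    // and to project the CAD model once the cube has been detected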
    vpMbEdgeTracker tracker;
    bool usexml = false;
#ifdef VISP_HAVE_PUGIXML
    if (vpIoTools::checkFilename(objectname + ".xml")) {
      tracker.loadConfigFile(objectname + ".xml");
      tracker.getCameraParameters(cam);
      usexml = true;
    }
#endif
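    // No xml configuration file: set the moving-edge and camera parameters by hand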
    if (!usexml) {
      vpMe me;
      me.setMaskSize(5);
      me.setMaskNumber(180);
      me.setRange(7);
      me.setThreshold(5000);
      me.setMu1(0.5);
      me.setMu2(0.5);
      me.setSampleStep(4);
      me.setNbTotalSample(250);
      tracker.setMovingEdge(me);
      cam.initPersProjWithoutDistortion(547, 542, 339, 235);
      tracker.setCameraParameters(cam);
      tracker.setAngleAppear(vpMath::rad(89));
      tracker.setAngleDisappear(vpMath::rad(89));
      tracker.setNearClippingDistance(0.01);
      tracker.setFarClippingDistance(10.0);
    }

    tracker.setOgreVisibilityTest(false);
    if (vpIoTools::checkFilename(objectname + ".cao"))
      tracker.loadModel(objectname + ".cao");
    else if (vpIoTools::checkFilename(objectname + ".wrl"))
      tracker.loadModel(objectname + ".wrl");
    tracker.setDisplayFeatures(true);

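    // ORB keypoints described with ORB and matched with the Hamming distance;
    // the detector is restricted to a single pyramid level for the learning stage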
    vpKeyPoint keypoint_learning("ORB", "ORB", "BruteForce-Hamming");
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
    keypoint_learning.setDetectorParameter("ORB", "nLevels", 1);
#else
    cv::Ptr<cv::ORB> orb_learning = keypoint_learning.getDetector("ORB").dynamicCast<cv::ORB>();
    if (orb_learning) {
      orb_learning->setNLevels(1);
    }
#endif

#if defined(VISP_HAVE_X11)
    vpDisplayX display;
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI display;
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV display;
#else
    std::cout << "No image viewer is available..." << std::endl;
    return 0;
#endif

    /*
     * Start the part of the code dedicated to object learning from 3 images
     */
    std::string imageName[] = {"cube0001.png", "cube0150.png", "cube0200.png"};
    vpHomogeneousMatrix initPoseTab[] = {
        vpHomogeneousMatrix(0.02143385294, 0.1098083886, 0.5127439561, 2.087159614, 1.141775176, -0.4701291124),
        vpHomogeneousMatrix(0.02651282185, -0.03713587374, 0.6873765919, 2.314744454, 0.3492296488, -0.1226054828),
        vpHomogeneousMatrix(0.02965448956, -0.07283091786, 0.7253526051, 2.300529617, -0.4286674806, 0.1788761025)};
    for (int i = 0; i < 3; i++) {
      vpImageIo::read(I, imageName[i]);
      if (i == 0) {
        display.init(I, 10, 10);
      }
      std::stringstream title;
      title << "Learning cube on image: " << imageName[i];
      vpDisplay::setTitle(I, title.str().c_str());

      vpDisplay::display(I);

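      // Initialize the tracker on this learning image from its known pose,
      // then run one tracking step to refine the pose on the image edges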
      tracker.setPose(I, initPoseTab[i]);

      tracker.track(I);

      tracker.getPose(cMo);
      tracker.display(I, cMo, cam, vpColor::red);

      learnCube(I, tracker, keypoint_learning, i);

      vpDisplay::displayText(I, 10, 10, "Learning step: keypoints are detected on visible cube faces", vpColor::red);
      if (i < 2) {
        vpDisplay::displayText(I, 30, 10, "Click to continue the learning...", vpColor::red);
      } else {
        vpDisplay::displayText(I, 30, 10, "Click to continue with the detection...", vpColor::red);
      }

      vpDisplay::flush(I);
      vpDisplay::getClick(I, true);
    }

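    // Save the learned keypoints and their 3D coordinates; the second argument
    // requests the binary file format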
    keypoint_learning.saveLearningData("cube_learning_data.bin", true);

    /*
     * Start the part of the code dedicated to detection and localization
     */
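    // The detection keypoints are configured exactly like the learning ones so
    // that the descriptors remain comparable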
    vpKeyPoint keypoint_detection("ORB", "ORB", "BruteForce-Hamming");
#if (VISP_HAVE_OPENCV_VERSION < 0x030000)
    keypoint_detection.setDetectorParameter("ORB", "nLevels", 1);
#else
    cv::Ptr<cv::ORB> orb_detector = keypoint_detection.getDetector("ORB").dynamicCast<cv::ORB>();
    if (orb_detector) {
      orb_detector->setNLevels(1);
    }
#endif

    keypoint_detection.loadLearningData("cube_learning_data.bin", true);

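    // Side-by-side image that holds the learning images and the current frame,
    // used to display the keypoint matches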
    vpImage<unsigned char> IMatching;
    keypoint_detection.createImageMatching(I, IMatching);

    vpVideoReader g;
    g.setFileName(videoname);
    g.open(I);

#if defined VISP_HAVE_X11
    vpDisplayX display2;
#elif defined VISP_HAVE_GTK
    vpDisplayGTK display2;
#elif defined VISP_HAVE_GDI
    vpDisplayGDI display2;
#else
    vpDisplayOpenCV display2;
#endif
    display2.init(IMatching, 50, 50, "Display matching between learned and current images");
    vpDisplay::setTitle(I, "Cube detection and localization");

    double error;
    bool click_done = false;

    while (!g.end()) {
      g.acquire(I);
      vpDisplay::display(I);

      keypoint_detection.insertImageMatching(I, IMatching);

      vpDisplay::display(IMatching);
      vpDisplay::displayText(I, 10, 10, "Detection and localization in process...", vpColor::red);

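      // Try to match the current frame against the learned keypoints; on success
      // matchPoint() fills cMo with the estimated pose and error with the residual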
      double elapsedTime;
      if (keypoint_detection.matchPoint(I, cam, cMo, error, elapsedTime)) {
        tracker.setPose(I, cMo);

        tracker.display(I, cMo, cam, vpColor::red, 2);
        vpDisplay::displayFrame(I, cMo, cam, 0.05, vpColor::none, 3);

        keypoint_detection.displayMatching(I, IMatching);

        std::vector<vpImagePoint> ransacInliers = keypoint_detection.getRansacInliers();
        std::vector<vpImagePoint> ransacOutliers = keypoint_detection.getRansacOutliers();

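        // Draw the RANSAC inliers (green) and outliers (red), shifting each point
        // to the position of the current frame inside the matching image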
        for (std::vector<vpImagePoint>::const_iterator it = ransacInliers.begin(); it != ransacInliers.end(); ++it) {
          vpImagePoint imPt(*it);
          imPt.set_u(imPt.get_u() + I.getWidth());
          imPt.set_v(imPt.get_v() + I.getHeight());
          vpDisplay::displayCircle(IMatching, imPt, 4, vpColor::green);
        }

        for (std::vector<vpImagePoint>::const_iterator it = ransacOutliers.begin(); it != ransacOutliers.end(); ++it) {
          vpImagePoint imPt(*it);
          imPt.set_u(imPt.get_u() + I.getWidth());
          imPt.set_v(imPt.get_v() + I.getHeight());
          vpDisplay::displayCircle(IMatching, imPt, 4, vpColor::red);
        }

        keypoint_detection.displayMatching(I, IMatching);

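        // Camera parameters with a principal point shifted by the current frame
        // position, so that the model and frame overlays drawn on IMatching line
        // up with the detected cube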
        vpCameraParameters cam2;
        cam2.initPersProjWithoutDistortion(cam.get_px(), cam.get_py(), cam.get_u0() + I.getWidth(),
                                           cam.get_v0() + I.getHeight());
        tracker.setCameraParameters(cam2);
        tracker.setPose(IMatching, cMo);
        tracker.display(IMatching, cMo, cam2, vpColor::red, 2);
        vpDisplay::displayFrame(IMatching, cMo, cam2, 0.05, vpColor::none, 3);
      }

      vpDisplay::flush(I);
      vpDisplay::displayText(IMatching, 30, 10, "A click to exit.", vpColor::red);
      vpDisplay::flush(IMatching);
      if (vpDisplay::getClick(I, false)) {
        click_done = true;
        break;
      }
      if (vpDisplay::getClick(IMatching, false)) {
        click_done = true;
        break;
      }
    }

    if (!click_done)
      vpDisplay::getClick(IMatching);

#if defined(VISP_HAVE_COIN3D) && (COIN_MAJOR_VERSION >= 2)
    SoDB::finish();
#endif
  } catch (const vpException &e) {
    std::cout << "Caught an exception: " << e << std::endl;
  }
#else
  (void)argc;
  (void)argv;
  std::cout << "Install OpenCV and rebuild ViSP to use this example." << std::endl;
#endif

  return 0;
}