37 #include <visp3/core/vpConfig.h>
39 #if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D)
41 #include <visp3/core/vpHomogeneousMatrix.h>
42 #include <visp3/core/vpImage.h>
43 #include <visp3/core/vpIoTools.h>
44 #include <visp3/gui/vpDisplayGDI.h>
45 #include <visp3/gui/vpDisplayGTK.h>
46 #include <visp3/gui/vpDisplayOpenCV.h>
47 #include <visp3/gui/vpDisplayX.h>
48 #include <visp3/io/vpImageIo.h>
49 #include <visp3/io/vpParseArgv.h>
50 #include <visp3/io/vpVideoReader.h>
51 #include <visp3/mbt/vpMbEdgeTracker.h>
52 #include <visp3/vision/vpKeyPoint.h>
55 #define GETOPTARGS "cdh"
57 void usage(
const char *name,
const char *badparam);
58 bool getOptions(
int argc,
const char **argv,
bool &click_allowed,
bool &display);
// Print the program's help/usage text to stdout and, when badparam is
// non-null, an error line naming the bad option.
//   name     : the program name (argv[0]).
//   badparam : the offending command-line parameter, or nullptr.
// NOTE(review): this chunk is a garbled extraction — most interior lines of
// the function are missing, and stray original line numbers (68, 71, 81, …)
// are fused into the text. Do not treat it as compilable as-is.
68 void usage(
const char *name,
const char *badparam)
71 Test keypoints matching.\n\
81 Disable the mouse click. Useful to automate the \n\
82 execution of this program without human intervention.\n\
85 Turn off the display.\n\
91 fprintf(stdout,
"\nERROR: Bad parameter [%s]\n", badparam);
// Parse the command line (option string "cdh" per the #define above):
//   -c disables mouse clicks (click_allowed), -d disables the display,
//   -h prints usage. Returns false on a bad option so main() can abort.
// NOTE(review): fragmentary extraction — the getopt loop and return
// statements are missing from this view; fused line numbers remain in text.
105 bool getOptions(
int argc,
const char **argv,
bool &click_allowed,
bool &display)
// '-c' branch: turn off interactive clicks.
113 click_allowed =
false;
// '-h' branch: print help with no offending parameter.
119 usage(argv[0],
nullptr);
// Unknown option: report which argument was bad.
124 usage(argv[0], optarg_);
130 if ((c == 1) || (c == -1)) {
132 usage(argv[0],
nullptr);
133 std::cerr <<
"ERROR: " << std::endl;
134 std::cerr <<
" Bad argument " << optarg_ << std::endl << std::endl;
// Core test routine, templated on the pixel type (unsigned char / vpRGBa).
// Reads a dataset image sequence, tracks a cube model with vpMbEdgeTracker,
// detects/describes ORB keypoints on a reference frame, matches them against
// each subsequent frame (kNN + ratio test), estimates the pose by RANSAC from
// the matched 3D/2D points, and optionally displays matches and frames.
// NOTE(review): fragmentary extraction — most statements are missing and
// original line numbers are fused into the text; comments below annotate
// only what the visible lines show.
141 template <
typename Type>
142 void run_test(
const std::string &env_ipath,
bool opt_click_allowed,
bool opt_display,
vpImage<Type> &I,
// Dataset >= 3.6.0 ships png images, older datasets pgm.
145 #if VISP_HAVE_DATASET_VERSION >= 0x030600
146 std::string ext(
"png");
148 std::string ext(
"pgm");
// Pick the first available display backend.
159 #if defined(VISP_HAVE_X11)
161 #elif defined(VISP_HAVE_GTK)
163 #elif defined(VISP_HAVE_GDI)
165 #elif defined(HAVE_OPENCV_HIGHGUI)
171 display.init(I, 0, 0,
"ORB keypoints matching");
// NOTE(review): "VISP_HAVE_PUGYXML" looks like a typo for
// VISP_HAVE_PUGIXML — as written this branch can never be enabled; confirm
// against the ViSP config header.
183 #if defined(VISP_HAVE_PUGYXML)
215 if (opt_display && opt_click_allowed) {
// Hard-coded initial pose of the object in the camera frame
// (tx, ty, tz in meters; thetaU rotation vector in radians).
219 vpHomogeneousMatrix cMoi(0.02044769891, 0.1101505452, 0.5078963719, 2.063603907, 1.110231561, -0.4392789872);
228 cv::Ptr<cv::FeatureDetector> detector;
229 cv::Ptr<cv::DescriptorExtractor> extractor;
230 cv::Ptr<cv::DescriptorMatcher> matcher;
// OpenCV >= 3.0: factory API; ORB(nfeatures=500, scaleFactor=1.2, nlevels=1).
232 #if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
233 detector = cv::ORB::create(500, 1.2f, 1);
234 extractor = cv::ORB::create(500, 1.2f, 1);
// OpenCV 2.3.1 .. 2.x: string-based factory API.
235 #elif (VISP_HAVE_OPENCV_VERSION >= 0x020301)
236 detector = cv::FeatureDetector::create(
"ORB");
237 extractor = cv::DescriptorExtractor::create(
"ORB");
// Hamming distance is the right metric for ORB's binary descriptors.
239 matcher = cv::DescriptorMatcher::create(
"BruteForce-Hamming");
// OpenCV 2.4.x only: force a single pyramid level, mirroring the
// nlevels=1 passed to cv::ORB::create above.
241 #if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
242 detector->set(
"nLevels", 1);
// Train stage: keypoints on the reference image.
246 std::vector<cv::KeyPoint> trainKeyPoints;
249 detector->detect(matImg, trainKeyPoints);
// Get the model faces from the tracker to back-project keypoints to 3D.
252 std::vector<vpPolygon> polygons;
253 std::vector<std::vector<vpPoint> > roisPt;
254 std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.
getPolygonFaces(
false);
255 polygons = pair.first;
256 roisPt = pair.second;
259 std::vector<cv::Point3f> points3f;
263 cv::Mat trainDescriptors;
264 extractor->compute(matImg, trainKeyPoints, trainDescriptors);
// Sanity check: one descriptor and one 3D point per train keypoint.
266 if (trainKeyPoints.size() != (
size_t)trainDescriptors.rows || trainKeyPoints.size() != points3f.size()) {
276 bool opt_click =
false;
// With a display, run to the end of the video; headless, stop at frame 30.
278 while ((opt_display && !g.
end()) || (!opt_display && g.
getFrameIndex() < 30)) {
// Query stage: keypoints + descriptors on the current frame.
282 std::vector<cv::KeyPoint> queryKeyPoints;
283 detector->detect(matImg, queryKeyPoints);
285 cv::Mat queryDescriptors;
286 extractor->compute(matImg, queryKeyPoints, queryDescriptors);
// 2-NN matching followed by Lowe's ratio test.
288 std::vector<std::vector<cv::DMatch> > knn_matches;
289 std::vector<cv::DMatch> matches;
290 matcher->knnMatch(queryDescriptors, trainDescriptors, knn_matches, 2);
291 for (std::vector<std::vector<cv::DMatch> >::const_iterator it = knn_matches.begin(); it != knn_matches.end();
293 if (it->size() > 1) {
294 double ratio = (*it)[0].distance / (*it)[1].distance;
296 matches.push_back((*it)[0]);
// Build the 3D (train) / 2D (query, normalized) correspondences for pose.
302 for (std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); ++it) {
303 vpPoint pt(points3f[(
size_t)(it->trainIdx)].x, points3f[(
size_t)(it->trainIdx)].y,
304 points3f[(
size_t)(it->trainIdx)].z);
306 double x = 0.0, y = 0.0;
// Convert the query pixel coordinates to normalized image coordinates.
308 queryKeyPoints[(
size_t)(it->queryIdx)].pt.y, x, y);
315 bool is_pose_estimated =
false;
// RANSAC pose needs at least 4 points; require 60% of them as inliers.
316 if (estimated_pose.
npt >= 4) {
318 unsigned int nb_inliers = (
unsigned int)(0.6 * estimated_pose.
npt);
323 is_pose_estimated =
true;
326 is_pose_estimated =
false;
// Draw match lines between the reference image (left) and the current
// frame (right, shifted by the reference image width).
335 for (std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); ++it) {
336 vpImagePoint leftPt(trainKeyPoints[(
size_t)it->trainIdx].pt.y, trainKeyPoints[(
size_t)it->trainIdx].pt.x);
337 vpImagePoint rightPt(queryKeyPoints[(
size_t)it->queryIdx].pt.y,
338 queryKeyPoints[(
size_t)it->queryIdx].pt.x + Iref.
getWidth());
342 if (is_pose_estimated) {
353 if (opt_click_allowed && opt_display) {
// Entry point: parse options, locate the dataset via VISP_INPUT_IMAGE_PATH,
// then run the keypoint-matching test once on gray-level images and once on
// color images. Prints "testKeyPoint-4 is ok !" on success.
// NOTE(review): fragmentary extraction — try/catch and returns are missing
// from this view; fused original line numbers remain in the text.
381 int main(
int argc,
const char **argv)
384 std::string env_ipath;
// Both interaction and display default to on; -c / -d turn them off.
385 bool opt_click_allowed =
true;
386 bool opt_display =
true;
389 if (getOptions(argc, argv, opt_click_allowed, opt_display) ==
false) {
// The dataset path is mandatory; bail out with a hint if unset.
397 if (env_ipath.empty()) {
398 std::cerr <<
"Please set the VISP_INPUT_IMAGE_PATH environment "
407 std::cout <<
"-- Test on gray level images" << std::endl;
408 run_test(env_ipath, opt_click_allowed, opt_display, I, Imatch, Iref);
414 std::cout <<
"-- Test on color images" << std::endl;
415 run_test(env_ipath, opt_click_allowed, opt_display, I, Imatch, Iref);
// Any vpException aborts the test with its message.
420 std::cerr << e.
what() << std::endl;
424 std::cout <<
"testKeyPoint-4 is ok !" << std::endl;
// Fallback build (no OpenCV): the test is a no-op with a notice.
431 std::cerr <<
"You need OpenCV library." << std::endl;
Generic class defining intrinsic camera parameters.
void initPersProjWithoutDistortion(double px, double py, double u0, double v0)
static const vpColor none
static const vpColor green
Display for windows using GDI (available on any windows 32 platform).
The vpDisplayGTK allows to display image using the GTK 3rd party library. Thus to enable this class G...
The vpDisplayOpenCV allows to display image using the OpenCV library. Thus to enable this class OpenC...
Use the X11 console to display images on unix-like OS. Thus to enable this class X11 should be instal...
void init(vpImage< unsigned char > &I, int win_x=-1, int win_y=-1, const std::string &win_title="") vp_override
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
virtual void setDownScalingFactor(unsigned int scale)
static void display(const vpImage< unsigned char > &I)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
unsigned int getDownScalingFactor()
Error that can be emitted by ViSP classes.
const char * what() const
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
static void read(vpImage< unsigned char > &I, const std::string &filename, int backend=IO_DEFAULT_BACKEND)
Class that defines a 2D point in an image. This class is useful for image processing and stores only ...
Definition of the vpImage class member functions.
unsigned int getWidth() const
void resize(unsigned int h, unsigned int w)
Resize the image: image initialization.
void insert(const vpImage< Type > &src, const vpImagePoint &topLeft)
unsigned int getHeight() const
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=nullptr)
static double rad(double deg)
Make the complete tracking of an object by using its CAD model.
virtual void setNearClippingDistance(const double &dist) vp_override
virtual void setFarClippingDistance(const double &dist) vp_override
virtual void loadConfigFile(const std::string &configFile, bool verbose=true) vp_override
virtual void setClipping(const unsigned int &flags) vp_override
virtual void setCameraParameters(const vpCameraParameters &cam) vp_override
virtual void display(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &col, unsigned int thickness=1, bool displayFullModel=false) vp_override
void setMovingEdge(const vpMe &me)
virtual void setPose(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cdMo) vp_override
virtual void getCameraParameters(vpCameraParameters &cam) const
virtual void getPose(vpHomogeneousMatrix &cMo) const
virtual void setAngleDisappear(const double &a)
virtual void initClick(const vpImage< unsigned char > &I, const std::string &initFile, bool displayHelp=false, const vpHomogeneousMatrix &T=vpHomogeneousMatrix())
virtual void initFromPose(const vpImage< unsigned char > &I, const std::string &initFile)
virtual void loadModel(const std::string &modelFile, bool verbose=false, const vpHomogeneousMatrix &T=vpHomogeneousMatrix())
virtual void setAngleAppear(const double &a)
virtual std::pair< std::vector< vpPolygon >, std::vector< std::vector< vpPoint > > > getPolygonFaces(bool orderPolygons=true, bool useVisibility=true, bool clipPolygon=false)
virtual unsigned int getClipping() const
void setMu1(const double &mu_1)
void setRange(const unsigned int &range)
void setLikelihoodThresholdType(const vpLikelihoodThresholdType likelihood_threshold_type)
void setNbTotalSample(const int &ntotal_sample)
void setMaskNumber(const unsigned int &mask_number)
void setThreshold(const double &threshold)
void setSampleStep(const double &sample_step)
void setMaskSize(const unsigned int &mask_size)
void setMu2(const double &mu_2)
static bool parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, int flags)
static void convertPoint(const vpCameraParameters &cam, const double &u, const double &v, double &x, double &y)
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
Class used for pose computation from N points (pose from point only). Some of the algorithms implemen...
void setRansacMaxTrials(const int &rM)
void addPoint(const vpPoint &P)
void setRansacNbInliersToReachConsensus(const unsigned int &nbC)
unsigned int npt
Number of points used in pose computation.
bool computePose(vpPoseMethodType method, vpHomogeneousMatrix &cMo, bool(*func)(const vpHomogeneousMatrix &)=nullptr)
void setRansacThreshold(const double &t)
Class that enables to manipulate easily a video file or a sequence of images. As it inherits from the...
void acquire(vpImage< vpRGBa > &I)
void open(vpImage< vpRGBa > &I)
void setFileName(const std::string &filename)
long getFrameIndex() const
void display(vpImage< unsigned char > &I, const std::string &title)
Display a gray-scale image.