#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_OPENCV) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_FEATURES2D)

#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpImage.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayGTK.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/io/vpParseArgv.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/mbt/vpMbEdgeTracker.h>
#include <visp3/vision/vpKeyPoint.h>

#define GETOPTARGS "cdh"

#ifdef ENABLE_VISP_NAMESPACE
void usage(const char *name, const char *badparam);
bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display);
void usage(const char *name, const char *badparam)

Test keypoints matching.\n\

  -c\n\
     Disable the mouse click. Useful to automate the \n\
     execution of this program without human intervention.\n\

  -d\n\
     Turn off the display.\n\

  fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display)

      click_allowed = false;

      usage(argv[0], nullptr);

      usage(argv[0], optarg_);

  if ((c == 1) || (c == -1)) {
    usage(argv[0], nullptr);
    std::cerr << "ERROR: " << std::endl;
    std::cerr << "  Bad argument " << optarg_ << std::endl << std::endl;
template <typename Type>
void run_test(const std::string &env_ipath, bool opt_click_allowed, bool opt_display, vpImage<Type> &I,
              vpImage<Type> &Imatch, vpImage<Type> &Iref)

#if VISP_HAVE_DATASET_VERSION >= 0x030600
  std::string ext("png");
#else
  std::string ext("pgm");
#endif
#if defined(VISP_HAVE_X11)
  vpDisplayX display, display2;
#elif defined(VISP_HAVE_GTK)
  vpDisplayGTK display, display2;
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI display, display2;
#elif defined(HAVE_OPENCV_HIGHGUI)
  vpDisplayOpenCV display, display2;
#endif

    display.init(I, 0, 0, "ORB keypoints matching");
#if defined(VISP_HAVE_PUGIXML)
  if (opt_display && opt_click_allowed) {
    tracker.initClick(I, init_file);
  }
  else {
    vpHomogeneousMatrix cMoi(0.02044769891, 0.1101505452, 0.5078963719, 2.063603907, 1.110231561, -0.4392789872);
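    // When the display or the mouse click is disabled, the tracker is initialized from this
    // hard-coded pose (translation in meters followed by a thetaU rotation in radians),
    // via tracker.initFromPose(), instead of the interactive initClick() above.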
  cv::Ptr<cv::FeatureDetector> detector;
  cv::Ptr<cv::DescriptorExtractor> extractor;
  cv::Ptr<cv::DescriptorMatcher> matcher;

#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
  detector = cv::ORB::create(500, 1.2f, 1);
  extractor = cv::ORB::create(500, 1.2f, 1);
#elif (VISP_HAVE_OPENCV_VERSION >= 0x020301)
  detector = cv::FeatureDetector::create("ORB");
  extractor = cv::DescriptorExtractor::create("ORB");
#endif

  matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");

  // ORB is set up with 500 features, a 1.2 scale factor and a single pyramid level; the
  // OpenCV 2.x branch enforces the single level through the "nLevels" property so that
  // both code paths detect keypoints at one scale only.
#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
  detector->set("nLevels", 1);
#endif
  std::vector<cv::KeyPoint> trainKeyPoints;

  detector->detect(matImg, trainKeyPoints);

  std::vector<vpPolygon> polygons;
  std::vector<std::vector<vpPoint> > roisPt;
  std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces(false);
  polygons = pair.first;
  roisPt = pair.second;

  std::vector<cv::Point3f> points3f;
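  // The visible CAD model faces returned by getPolygonFaces() are used, together with the
  // current pose and the camera parameters, to backproject each train keypoint lying on a
  // face to a 3D point stored in points3f; the elided lines most likely rely on
  // vpKeyPoint::compute3DForPointsInPolygons() for this step.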
  cv::Mat trainDescriptors;
  extractor->compute(matImg, trainKeyPoints, trainDescriptors);

  if (trainKeyPoints.size() != (size_t)trainDescriptors.rows || trainKeyPoints.size() != points3f.size()) {
  bool opt_click = false;

  while ((opt_display && !g.end()) || (!opt_display && g.getFrameIndex() < 30)) {
    std::vector<cv::KeyPoint> queryKeyPoints;
    detector->detect(matImg, queryKeyPoints);

    cv::Mat queryDescriptors;
    extractor->compute(matImg, queryKeyPoints, queryDescriptors);

    std::vector<std::vector<cv::DMatch> > knn_matches;
    std::vector<cv::DMatch> matches;
    matcher->knnMatch(queryDescriptors, trainDescriptors, knn_matches, 2);
    for (std::vector<std::vector<cv::DMatch> >::const_iterator it = knn_matches.begin(); it != knn_matches.end();
         ++it) {
      if (it->size() > 1) {
        double ratio = (*it)[0].distance / (*it)[1].distance;
        // Lowe's ratio test: keep the best match only when it is clearly better than the
        // second best one (the exact threshold is elided in this excerpt; 0.85 assumed here).
        if (ratio < 0.85) {
          matches.push_back((*it)[0]);
        }
      }
    }
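    // Each retained match pairs a 3D model point (points3f[trainIdx]) with a keypoint
    // detected in the current frame (queryKeyPoints[queryIdx]); the pixel coordinates are
    // converted to normalized image coordinates before feeding the pose estimator.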
    for (std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); ++it) {
      vpPoint pt(points3f[(size_t)(it->trainIdx)].x, points3f[(size_t)(it->trainIdx)].y,
                 points3f[(size_t)(it->trainIdx)].z);

      double x = 0.0, y = 0.0;
      // The camera parameter variable name is assumed in this reconstruction of the call.
      vpPixelMeterConversion::convertPoint(cam, queryKeyPoints[(size_t)(it->queryIdx)].pt.x,
                                           queryKeyPoints[(size_t)(it->queryIdx)].pt.y, x, y);
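      // In the elided lines, each (x, y) pair is stored in the vpPoint and added to the
      // vpPose estimator (addPoint); the RANSAC settings (threshold, required inliers,
      // maximum trials) are presumably configured there too before computePose() is called.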
    bool is_pose_estimated = false;
    if (estimated_pose.npt >= 4) {

      unsigned int nb_inliers = (unsigned int)(0.6 * estimated_pose.npt);

        is_pose_estimated = true;

        is_pose_estimated = false;
      for (std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); ++it) {
        vpImagePoint leftPt(trainKeyPoints[(size_t)it->trainIdx].pt.y, trainKeyPoints[(size_t)it->trainIdx].pt.x);
        vpImagePoint rightPt(queryKeyPoints[(size_t)it->queryIdx].pt.y,
                             queryKeyPoints[(size_t)it->queryIdx].pt.x + Iref.getWidth());
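        // Imatch shows the reference image and the current frame side by side, so the query
        // keypoint abscissa is shifted by the reference image width before a line is drawn
        // between the two matched points.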
      if (is_pose_estimated) {

    if (opt_click_allowed && opt_display) {
int main(int argc, const char **argv)

  std::string env_ipath;
  bool opt_click_allowed = true;
  bool opt_display = true;

  if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
  if (env_ipath.empty()) {
    std::cerr << "Please set the VISP_INPUT_IMAGE_PATH environment "
                 "variable value." << std::endl;

  std::cout << "-- Test on gray level images" << std::endl;
  run_test(env_ipath, opt_click_allowed, opt_display, I, Imatch, Iref);

  std::cout << "-- Test on color images" << std::endl;
  run_test(env_ipath, opt_click_allowed, opt_display, I, Imatch, Iref);
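  // run_test() is a template: the elided declarations presumably make I, Imatch and Iref
  // vpImage<unsigned char> instances for the gray level test and vpImage<vpRGBa> instances
  // for the color test, so the same matching code is exercised on both pixel types.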
    std::cerr << e.what() << std::endl;

  std::cout << "testKeyPoint-4 is ok !" << std::endl;

  std::cerr << "You need OpenCV library." << std::endl;