#include <visp3/core/vpConfig.h>
#include <visp3/detection/vpDetectorAprilTag.h>
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/core/vpImageConvert.h>
#include <visp3/core/vpTime.h>
#include <visp3/io/vpImageIo.h>
int main(int argc, const char **argv)
{
#if defined(VISP_HAVE_APRILTAG) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
#ifdef ENABLE_VISP_NAMESPACE
  using namespace VISP_NAMESPACE_NAME;
#endif
  std::string input_filename = "AprilTag.pgm";
  vpDetectorAprilTag::vpAprilTagFamily tagFamily = vpDetectorAprilTag::TAG_36h11;
  vpDetectorAprilTag::vpPoseEstimationMethod poseEstimationMethod = vpDetectorAprilTag::HOMOGRAPHY;
  double tagSize = 0.053;
float quad_decimate = 1.0;
int nThreads = 1;
std::string intrinsic_file = "";
std::string camera_name = "";
bool display_tag = false;
int color_id = -1;
unsigned int thickness = 2;
bool z_aligned = false;
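  // Parse the command-line options; each recognized option overrides one of the defaults above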
for (int i = 1; i < argc; i++) {
if (std::string(argv[i]) == "--pose_method" && i + 1 < argc) {
}
else if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) {
tagSize = atof(argv[i + 1]);
}
else if (std::string(argv[i]) == "--input" && i + 1 < argc) {
input_filename = std::string(argv[i + 1]);
}
else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) {
quad_decimate = static_cast<float>(atof(argv[i + 1]));
}
else if (std::string(argv[i]) == "--nthreads" && i + 1 < argc) {
nThreads = atoi(argv[i + 1]);
}
#if defined(VISP_HAVE_PUGIXML)
else if (std::string(argv[i]) == "--intrinsic" && i + 1 < argc) {
intrinsic_file = std::string(argv[i + 1]);
}
else if (std::string(argv[i]) == "--camera_name" && i + 1 < argc) {
camera_name = std::string(argv[i + 1]);
}
#endif
else if (std::string(argv[i]) == "--display_tag") {
display_tag = true;
}
else if (std::string(argv[i]) == "--color" && i + 1 < argc) {
color_id = atoi(argv[i + 1]);
}
else if (std::string(argv[i]) == "--thickness" && i + 1 < argc) {
thickness = static_cast<unsigned int>(atoi(argv[i + 1]));
}
else if (std::string(argv[i]) == "--tag_family" && i + 1 < argc) {
}
else if (std::string(argv[i]) == "--z_aligned") {
z_aligned = true;
}
else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
std::cout << "Usage: " << argv[0]
<< " [--input <input file>] [--tag_size <tag_size in m>]"
" [--quad_decimate <quad_decimate>] [--nthreads <nb>]"
" [--intrinsic <intrinsic file>] [--camera_name <camera name>]"
" [--pose_method <method> (0: HOMOGRAPHY, 1: HOMOGRAPHY_VIRTUAL_VS, "
" 2: DEMENTHON_VIRTUAL_VS, 3: LAGRANGE_VIRTUAL_VS, "
" 4: BEST_RESIDUAL_VIRTUAL_VS, 5: HOMOGRAPHY_ORTHOGONAL_ITERATION) (default: 0)]"
" [--tag_family <family> (0: TAG_36h11, 1: TAG_36h10 (DEPRECATED), 2: TAG_36ARTOOLKIT (DEPRECATED),"
" 3: TAG_25h9, 4: TAG_25h7 (DEPRECATED), 5: TAG_16h5, 6: TAG_CIRCLE21h7, 7: TAG_CIRCLE49h12,"
" 8: TAG_CUSTOM48h12, 9: TAG_STANDARD41h12, 10: TAG_STANDARD52h13) (default: 0)]"
" [--display_tag] [--color <color_id (0, 1, ...)>]"
" [--thickness <thickness>] [--z_aligned]"
" [--help]"
<< std::endl;
return EXIT_SUCCESS;
}
}
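  // Camera intrinsics: start from default parameters; when both --intrinsic and --camera_name
  // are given (and pugixml support is built in), load them from the given XML calibration file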
  vpCameraParameters cam;
  cam.initPersProjWithoutDistortion(615.1674805, 615.1675415, 312.1889954, 243.4373779);
#if defined(VISP_HAVE_PUGIXML)
  vpXmlParserCamera parser;
  if (!intrinsic_file.empty() && !camera_name.empty()) {
    parser.parse(cam, intrinsic_file, camera_name, vpCameraParameters::perspectiveProjWithoutDistortion);
  }
#endif
std::cout << cam << std::endl;
std::cout << "poseEstimationMethod: " << poseEstimationMethod << std::endl;
std::cout << "tagFamily: " << tagFamily << std::endl;
std::cout << "nThreads : " << nThreads << std::endl;
std::cout << "Z aligned: " << z_aligned << std::endl;
  try {
    // Read the grayscale input image and keep a color copy for the second display window
    vpImage<unsigned char> I;
    vpImageIo::read(I, input_filename);
    vpImage<vpRGBa> I_color;
    vpImageConvert::convert(I, I_color);

#ifdef VISP_HAVE_X11
    vpDisplayX d(I);
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI d(I);
#elif defined(HAVE_OPENCV_HIGHGUI)
    vpDisplayOpenCV d(I);
#endif

    // Create the AprilTag detector and apply the user settings
    vpDetectorAprilTag detector(tagFamily);
    detector.setAprilTagQuadDecimate(quad_decimate);
    detector.setAprilTagPoseEstimationMethod(poseEstimationMethod);
    detector.setAprilTagNbThreads(nThreads);
    detector.setDisplayTag(display_tag, color_id < 0 ? vpColor::none : vpColor::getColor(color_id), thickness);
    detector.setZAlignedWithCameraAxis(z_aligned);

    vpDisplay::display(I);

    // Detect the tags and estimate the pose of each of them
    double t = vpTime::measureTimeMs();
    std::vector<vpHomogeneousMatrix> cMo_vec;
    detector.detect(I, tagSize, cam, cMo_vec);
    t = vpTime::measureTimeMs() - t;
    std::stringstream ss;
    ss << "Detection time: " << t << " ms for " << detector.getNbObjects() << " tags";
    vpDisplay::displayText(I, 40, 20, ss.str(), vpColor::red);

    // For each detected tag, display its bounding box, its corners and its decoded id
    for (size_t i = 0; i < detector.getNbObjects(); i++) {
      std::vector<vpImagePoint> p = detector.getPolygon(i);
      vpRect bbox = detector.getBBox(i);
      vpDisplay::displayRectangle(I, bbox, vpColor::green);
      std::string message = detector.getMessage(i);
      // The message returned for each tag embeds its numeric id after the "id: " marker
      std::size_t tag_id_pos = message.find("id: ");
      if (tag_id_pos != std::string::npos) {
        int tag_id = atoi(message.substr(tag_id_pos + 4).c_str());
        ss.str("");
        ss << "Tag id: " << tag_id;
        vpDisplay::displayText(I, static_cast<int>(bbox.getTop() - 10), static_cast<int>(bbox.getLeft()), ss.str(), vpColor::red);
      }
      // Display a cross and its index over each corner of the tag polygon
      for (size_t j = 0; j < p.size(); j++) {
        vpDisplay::displayCross(I, p[j], 14, vpColor::red, 3);
        std::ostringstream number;
        number << j;
        vpDisplay::displayText(I, p[j] + vpImagePoint(15, 5), number.str(), vpColor::blue);
      }
    }

    vpDisplay::displayText(I, 20, 20, "Click to display tag poses", vpColor::red);
    vpDisplay::flush(I);
    vpDisplay::getClick(I);

    vpDisplay::display(I);
    // Display the estimated pose of each tag as a frame whose size is half the tag size
    for (size_t i = 0; i < cMo_vec.size(); i++) {
      vpDisplay::displayFrame(I, cMo_vec[i], cam, tagSize / 2, vpColor::none, 3);
    }

    vpDisplay::displayText(I, 20, 20, "Click to quit.", vpColor::red);
    vpDisplay::flush(I);
    vpDisplay::getClick(I);
#ifdef VISP_HAVE_X11
    vpDisplayX d2(I_color, 50, 50);
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI d2(I_color, 50, 50);
#elif defined(HAVE_OPENCV_HIGHGUI)
    vpDisplayOpenCV d2(I_color, 50, 50);
#endif
    vpDisplay::display(I_color);

    // Redraw the tag contours and poses on the color image, dropping the last detected tag
    // to show that a subset of the detections can be displayed
    std::vector<std::vector<vpImagePoint> > tagsCorners = detector.getTagsCorners();
    if (!tagsCorners.empty()) {
      tagsCorners.pop_back();
    }
    detector.displayTags(I_color, tagsCorners, vpColor::none, 3);

    if (!cMo_vec.empty()) {
      cMo_vec.pop_back();
    }
    detector.displayFrames(I_color, cMo_vec, cam, tagSize / 2, vpColor::none, 3);

    vpDisplay::displayText(I_color, 20, 20, "Click to quit.", vpColor::red);
    vpDisplay::flush(I_color);
    vpDisplay::getClick(I_color);
  }
  catch (const vpException &e) {
    std::cerr << "Caught an exception: " << e.getMessage() << std::endl;
  }
#else
(void)argc;
(void)argv;
#endif
return EXIT_SUCCESS;
}