// vpDetectorAprilTag.cpp (excerpt): AprilTag marker detection and pose estimation for ViSP.
#include <visp3/core/vpConfig.h>

#ifdef VISP_HAVE_APRILTAG
#include <apriltag.h>
#include <common/homography.h>
#include <tag16h5.h>
#include <tag25h7.h>
#include <tag25h9.h>
#include <tag36artoolkit.h>
#include <tag36h10.h>
#include <tag36h11.h>

#include <visp3/core/vpDisplay.h>
#include <visp3/core/vpPixelMeterConversion.h>
#include <visp3/core/vpPoint.h>
#include <visp3/detection/vpDetectorAprilTag.h>
#include <visp3/vision/vpPose.h>

#ifndef DOXYGEN_SHOULD_SKIP_THIS
class vpDetectorAprilTag::Impl
{
public:
  Impl(const vpAprilTagFamily &tagFamily, const vpPoseEstimationMethod &method)
    : m_poseEstimationMethod(method), m_tagFamily(tagFamily), m_tagSize(1.0), m_td(NULL), m_tf(NULL),
      m_detections(NULL), m_zAlignedWithCameraFrame(false)
  {
    // Create the requested tag family
    switch (m_tagFamily) {
    case TAG_36h11:       m_tf = tag36h11_create();       break;
    case TAG_36h10:       m_tf = tag36h10_create();       break;
    case TAG_36ARTOOLKIT: m_tf = tag36artoolkit_create(); break;
    case TAG_25h9:        m_tf = tag25h9_create();        break;
    case TAG_25h7:        m_tf = tag25h7_create();        break;
    case TAG_16h5:        m_tf = tag16h5_create();        break;
    default:
      break;
    }

    // Create the AprilTag detector and register the tag family to look for
    m_td = apriltag_detector_create();
    apriltag_detector_add_family(m_td, m_tf);
  }
  ~Impl()
  {
    apriltag_detector_destroy(m_td);

    // Destroy the tag family with the function matching the one used at construction
    switch (m_tagFamily) {
    case TAG_36h11:       tag36h11_destroy(m_tf);       break;
    case TAG_36h10:       tag36h10_destroy(m_tf);       break;
    case TAG_36ARTOOLKIT: tag36artoolkit_destroy(m_tf); break;
    case TAG_25h9:        tag25h9_destroy(m_tf);        break;
    case TAG_25h7:        tag25h7_destroy(m_tf);        break;
    case TAG_16h5:        tag16h5_destroy(m_tf);        break;
    default:
      break;
    }

    if (m_detections) {
      apriltag_detections_destroy(m_detections);
      m_detections = NULL;
    }
  }
  bool detect(const vpImage<unsigned char> &I, std::vector<std::vector<vpImagePoint> > &polygons,
              std::vector<std::string> &messages, const bool computePose, const bool displayTag,
              const vpColor color, const unsigned int thickness)
  {
    std::vector<vpHomogeneousMatrix> tagPosesPrev = m_tagPoses;
    m_tagPoses.clear();

    // Wrap the ViSP image into the AprilTag image structure (width, height, stride, buffer)
    image_u8_t im = {(int32_t)I.getWidth(), (int32_t)I.getHeight(), (int32_t)I.getWidth(), I.bitmap};

    if (m_detections) {
      apriltag_detections_destroy(m_detections);
      m_detections = NULL;
    }

    m_detections = apriltag_detector_detect(m_td, &im);
    int nb_detections = zarray_size(m_detections);
    bool detected = nb_detections > 0;

    polygons.resize((size_t)nb_detections);
    messages.resize((size_t)nb_detections);

    for (int i = 0; i < zarray_size(m_detections); i++) {
      apriltag_detection_t *det;
      zarray_get(m_detections, i, &det);

      // Tag corners, converted from (x, y) pixel coordinates to ViSP (row, column) image points
      std::vector<vpImagePoint> polygon;
      for (int j = 0; j < 4; j++) {
        polygon.push_back(vpImagePoint(det->p[j][1], det->p[j][0]));
      }
      polygons[i] = polygon;
      std::stringstream ss;
      // ... (the message is filled with the tag family and the decoded tag id)
      messages[i] = ss.str();

      if (displayTag) {
        // ... (the four tag edges are drawn with vpDisplay::displayLine using color and thickness)
      }

      if (computePose) {
        vpHomogeneousMatrix cMo;
        // Reuse the pose found at the previous call, if any, as initial value
        if ((int)tagPosesPrev.size() > i) {
          cMo = tagPosesPrev[i];
        }
        if (getPose(i, m_tagSize, m_cam, cMo)) {
          m_tagPoses.push_back(cMo);
        }
      }
    }

    return detected;
  }
  bool getPose(size_t tagIndex, const double tagSize, const vpCameraParameters &cam, vpHomogeneousMatrix &cMo)
  {
    if (m_detections == NULL) {
      throw(vpException(vpException::fatalError, "Cannot get tag pose: no detection has been run"));
    }

    apriltag_detection_t *det;
    zarray_get(m_detections, static_cast<int>(tagIndex), &det);

    int nb_detections = zarray_size(m_detections);
    if (tagIndex >= (size_t)nb_detections) {
      return false;
    }

    // Pose from the homography estimated by the AprilTag library
    double fx = cam.get_px(), fy = cam.get_py(), cx = cam.get_u0(), cy = cam.get_v0();
    matd_t *M = homography_to_pose(det->H, fx, fy, cx, cy, tagSize / 2);
    for (int i = 0; i < 3; i++) {
      for (int j = 0; j < 3; j++) {
        cMo[i][j] = MATD_EL(M, i, j);
      }
      cMo[i][3] = MATD_EL(M, i, 3);
    }
    matd_destroy(M);

    if (!m_zAlignedWithCameraFrame) {
      // 180 deg rotation around the x-axis to switch the tag frame convention
      vpHomogeneousMatrix oMo;
      oMo[0][0] = 1; oMo[0][1] = 0;  oMo[0][2] = 0;
      oMo[1][0] = 0; oMo[1][1] = -1; oMo[1][2] = 0;
      oMo[2][0] = 0; oMo[2][1] = 0;  oMo[2][2] = -1;
      cMo = cMo * oMo;
    }

    // Depending on m_poseEstimationMethod, the pose is re-estimated from the four corner correspondences
    double x = 0.0, y = 0.0;
    std::vector<vpPoint> pts(4);
    vpImagePoint imPt;
    vpPoint pt;

    // Corner 0: 3D coordinates in the tag frame (pt.setWorldCoordinates(), elided) and normalized
    // image coordinates obtained from the pixel coordinates
    imPt.set_uv(det->p[0][0], det->p[0][1]);
    vpPixelMeterConversion::convertPoint(cam, imPt.get_u(), imPt.get_v(), x, y);
    pt.set_x(x);
    pt.set_y(y);
    pts[0] = pt;

    // Corners 1, 2 and 3 are processed the same way
    imPt.set_uv(det->p[1][0], det->p[1][1]);
    // ...
    imPt.set_uv(det->p[2][0], det->p[2][1]);
    // ...
    imPt.set_uv(det->p[3][0], det->p[3][1]);
    // ...

    vpPose pose;
    pose.addPoints(pts);

    vpHomogeneousMatrix cMo_dementhon, cMo_lagrange, cMo_homography = cMo;
    double residual_dementhon = std::numeric_limits<double>::max(),
           residual_lagrange = std::numeric_limits<double>::max();
    double residual_homography = pose.computeResidual(cMo_homography);
    // ... (the Dementhon and Lagrange linear poses and their residuals are computed here)

    // Keep the candidate with the smallest residual to initialize the final refinement
    if (residual_dementhon < residual_lagrange) {
      if (residual_dementhon < residual_homography) {
        cMo = cMo_dementhon;
      } else {
        cMo = cMo_homography;
      }
    } else if (residual_lagrange < residual_homography) {
      cMo = cMo_lagrange;
    } else {
      cMo = cMo_homography;
    }
    // ... (the selected pose is finally refined with a virtual visual servoing step)

    return true;
  }
  void getTagPoses(std::vector<vpHomogeneousMatrix> &tagPoses) const { tagPoses = m_tagPoses; }

  void setCameraParameters(const vpCameraParameters &cam) { m_cam = cam; }
  void setNbThreads(const int nThreads) { m_td->nthreads = nThreads; }
  void setPoseEstimationMethod(const vpPoseEstimationMethod &method) { m_poseEstimationMethod = method; }
  void setQuadDecimate(const float quadDecimate) { m_td->quad_decimate = quadDecimate; }
  void setQuadSigma(const float quadSigma) { m_td->quad_sigma = quadSigma; }
  void setRefineDecode(const bool refineDecode) { m_td->refine_decode = refineDecode ? 1 : 0; }
  void setRefineEdges(const bool refineEdges) { m_td->refine_edges = refineEdges ? 1 : 0; }
  void setRefinePose(const bool refinePose) { m_td->refine_pose = refinePose ? 1 : 0; }
  void setTagSize(const double tagSize) { m_tagSize = tagSize; }
  void setZAlignedWithCameraAxis(bool zAlignedWithCameraFrame) { m_zAlignedWithCameraFrame = zAlignedWithCameraFrame; }

protected:
  vpCameraParameters m_cam;
  std::map<vpPoseEstimationMethod, vpPose::vpPoseMethodType> m_mapOfCorrespondingPoseMethods;
  vpPoseEstimationMethod m_poseEstimationMethod;
  vpAprilTagFamily m_tagFamily;
  std::vector<vpHomogeneousMatrix> m_tagPoses;
  double m_tagSize;
  apriltag_detector_t *m_td;
  apriltag_family_t *m_tf;
  zarray_t *m_detections;
  bool m_zAlignedWithCameraFrame;
};
#endif // DOXYGEN_SHOULD_SKIP_THIS

vpDetectorAprilTag::vpDetectorAprilTag(const vpAprilTagFamily &tagFamily,
                                       const vpPoseEstimationMethod &poseEstimationMethod)
  : m_displayTag(false), m_displayTagColor(vpColor::none), m_displayTagThickness(2),
    m_poseEstimationMethod(poseEstimationMethod), m_tagFamily(tagFamily),
    m_impl(new Impl(tagFamily, poseEstimationMethod))
{
}

vpDetectorAprilTag::~vpDetectorAprilTag() { delete m_impl; }
bool vpDetectorAprilTag::detect(const vpImage<unsigned char> &I, const double tagSize, const vpCameraParameters &cam,
                                std::vector<vpHomogeneousMatrix> &cMo_vec)
{
  m_message.clear();
  m_polygon.clear();
  m_nb_objects = 0;

  m_impl->setTagSize(tagSize);
  m_impl->setCameraParameters(cam);
  bool detected = m_impl->detect(I, m_polygon, m_message, true /* computePose */, m_displayTag,
                                 m_displayTagColor, m_displayTagThickness);
  m_nb_objects = m_message.size();

  cMo_vec.clear();
  m_impl->getTagPoses(cMo_vec);

  return detected;
}
bool vpDetectorAprilTag::getPose(size_t tagIndex, const double tagSize, const vpCameraParameters &cam,
                                 vpHomogeneousMatrix &cMo)
{
  return (m_impl->getPose(tagIndex, tagSize, cam, cMo));
}

void vpDetectorAprilTag::setAprilTagNbThreads(const int nThreads)
{
  if (nThreads > 0) {
    m_impl->setNbThreads(nThreads);
  }
}

void vpDetectorAprilTag::setAprilTagPoseEstimationMethod(const vpPoseEstimationMethod &poseEstimationMethod)
{
  m_poseEstimationMethod = poseEstimationMethod;
  m_impl->setPoseEstimationMethod(poseEstimationMethod);
}

// ... (setAprilTagQuadDecimate(), setAprilTagQuadSigma(), setAprilTagRefineDecode(), setAprilTagRefineEdges()
//      and setAprilTagRefinePose() delegate to the corresponding Impl setters in the same way)

void vpDetectorAprilTag::setZAlignedWithCameraAxis(bool zAlignedWithCameraFrame)
{
  m_impl->setZAlignedWithCameraAxis(zAlignedWithCameraFrame);
}
#elif !defined(VISP_BUILD_SHARED_LIBS)
// Dummy symbol that avoids a "no symbols" linker warning when AprilTag support is disabled
void dummy_vpDetectorAprilTag() {}
#endif
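
For readers coming to this file from the class documentation, here is a minimal usage sketch of the public API defined above, under stated assumptions: the image file name, the hard-coded camera intrinsics and the tag size are illustrative placeholders, and vpImageIo is only used to load a test image.

#include <iostream>
#include <vector>
#include <visp3/core/vpCameraParameters.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpImage.h>
#include <visp3/detection/vpDetectorAprilTag.h>
#include <visp3/io/vpImageIo.h>

int main()
{
#ifdef VISP_HAVE_APRILTAG
  vpImage<unsigned char> I;
  vpImageIo::read(I, "tag36h11_image.png"); // hypothetical input image

  vpCameraParameters cam(615.0, 615.0, 320.0, 240.0); // assumed intrinsics; use a real calibration
  double tagSize = 0.053;                             // physical tag side length in meters (assumed)

  vpDetectorAprilTag detector(vpDetectorAprilTag::TAG_36h11, vpDetectorAprilTag::HOMOGRAPHY_VIRTUAL_VS);
  detector.setAprilTagQuadDecimate(2.0);
  detector.setAprilTagNbThreads(2);

  std::vector<vpHomogeneousMatrix> cMo_vec;
  if (detector.detect(I, tagSize, cam, cMo_vec)) {
    for (size_t i = 0; i < cMo_vec.size(); i++) {
      std::cout << "Tag " << i << " pose (cMo):\n" << cMo_vec[i] << std::endl;
    }
  }
#endif
  return 0;
}

Both constructor arguments are optional, since the header provides TAG_36h11 and HOMOGRAPHY_VIRTUAL_VS as defaults.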