#include <cstdlib>
#include <iostream>
#include <map>
#include <sstream>
#include <string>
#include <vector>

#include <visp3/core/vpConfig.h>
#ifdef VISP_HAVE_MODULE_SENSOR
#include <visp3/sensor/vpOccipitalStructure.h>
#endif
#include <visp3/core/vpImageConvert.h>
#include <visp3/core/vpMath.h>
#include <visp3/core/vpTime.h>
#include <visp3/core/vpXmlParserCamera.h>
#include <visp3/detection/vpDetectorAprilTag.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/vision/vpPose.h>
//! AprilTag live detection on an Occipital Structure Core RGBD camera.
//! The tag pose is computed twice: from the homography (color image only)
//! and from RGBD fusion (color + depth), so the two estimates can be compared.
int main(int argc, const char **argv)
{
#if defined(VISP_HAVE_APRILTAG) && defined(VISP_HAVE_OCCIPITAL_STRUCTURE)
#ifdef ENABLE_VISP_NAMESPACE
  using namespace VISP_NAMESPACE_NAME;
#endif
  // Defaults; every value below can be overridden from the command line (see --help).
  vpDetectorAprilTag::vpAprilTagFamily tagFamily = vpDetectorAprilTag::TAG_36h11;
  vpDetectorAprilTag::vpPoseEstimationMethod poseEstimationMethod = vpDetectorAprilTag::HOMOGRAPHY;
  double tagSize = 0.053;     // Tag side length [m]; scales the estimated pose
  float quad_decimate = 1.0;  // Detector input decimation (1 = full resolution)
  int nThreads = 1;           // Threads used by the AprilTag detector
  bool display_tag = false;   // Overlay the detected tag contour
  int color_id = -1;          // -1 means default overlay color
  unsigned int thickness = 2; // Overlay line thickness [pixel]
  bool align_frame = false;   // Align the tag z-axis with the camera z-axis

#if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
  bool display_off = true;
  std::cout << "Warning: There is no 3rd party (X11, GDI or openCV) to display images..." << std::endl;
#else
  bool display_off = false;
#endif

  // Command-line parsing. Options taking a value check that the value is present.
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--pose_method" && i + 1 < argc) {
      poseEstimationMethod = static_cast<vpDetectorAprilTag::vpPoseEstimationMethod>(atoi(argv[i + 1]));
    }
    else if (std::string(argv[i]) == "--tag_size" && i + 1 < argc) {
      tagSize = atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--quad_decimate" && i + 1 < argc) {
      quad_decimate = (float)atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--nthreads" && i + 1 < argc) {
      nThreads = atoi(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--display_tag") {
      display_tag = true;
    }
    else if (std::string(argv[i]) == "--display_off") {
      display_off = true;
    }
    else if (std::string(argv[i]) == "--color" && i + 1 < argc) {
      color_id = atoi(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--thickness" && i + 1 < argc) {
      thickness = (unsigned int)atoi(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--tag_family" && i + 1 < argc) {
      tagFamily = static_cast<vpDetectorAprilTag::vpAprilTagFamily>(atoi(argv[i + 1]));
    }
    else if (std::string(argv[i]) == "--z_aligned") {
      align_frame = true;
    }
    else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << "Usage: " << argv[0]
        << " [--tag_size <tag_size in m> (default: 0.053)]"
        " [--quad_decimate <quad_decimate> (default: 1)]"
        " [--nthreads <nb> (default: 1)]"
        " [--pose_method <method> (0: HOMOGRAPHY, 1: HOMOGRAPHY_VIRTUAL_VS, "
        " 2: DEMENTHON_VIRTUAL_VS, 3: LAGRANGE_VIRTUAL_VS, "
        " 4: BEST_RESIDUAL_VIRTUAL_VS, 5: HOMOGRAPHY_ORTHOGONAL_ITERATION) (default: 0)]"
        " [--tag_family <family> (0: TAG_36h11, 1: TAG_36h10 (DEPRECATED), 2: TAG_36ARTOOLKIT (DEPRECATED),"
        " 3: TAG_25h9, 4: TAG_25h7 (DEPRECATED), 5: TAG_16h5, 6: TAG_CIRCLE21h7, 7: TAG_CIRCLE49h12,"
        " 8: TAG_CUSTOM48h12, 9: TAG_STANDARD41h12, 10: TAG_STANDARD52h13) (default: 0)]"
        " [--display_tag] [--z_aligned]";
#if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
      std::cout << " [--display_off] [--color <color id>] [--thickness <line thickness>]";
#endif
      std::cout << " [--help]" << std::endl;
      return EXIT_SUCCESS;
    }
  }

  // Owned display pointers; allocated only when a display backend is available
  // and released before leaving the try block.
  vpDisplay *d1 = nullptr;
  vpDisplay *d2 = nullptr;
  vpDisplay *d3 = nullptr;

  try {
    std::cout << "Use Occipital Structure grabber" << std::endl;
    vpOccipitalStructure g;
    ST::CaptureSessionSettings settings;
    settings.source = ST::CaptureSessionSourceId::StructureCore;
    settings.structureCore.visibleEnabled = true; // Enable the visible (color) stream
    settings.applyExpensiveCorrection = true;     // Apply depth correction on the host
    g.open(settings);

    unsigned int width = 640, height = 480;
    vpImage<vpRGBa> I_color(height, width);        // Color frame from the visible stream
    vpImage<unsigned char> I(height, width);       // Grayscale copy fed to the detector
    vpImage<uint16_t> I_depth_raw(height, width);  // Raw depth in millimeters
    vpImage<vpRGBa> I_depth;                       // Depth rendered as a histogram for display

    std::cout << "I_color: " << I_color.getWidth() << " " << I_color.getHeight() << std::endl;
    std::cout << "I_depth_raw: " << I_depth_raw.getWidth() << " " << I_depth_raw.getHeight() << std::endl;

    // Grab a first frame so that the camera parameters below match a live stream.
    g.acquire(reinterpret_cast<unsigned char *>(I_color.bitmap),
              reinterpret_cast<unsigned char *>(I_depth_raw.bitmap));

    std::cout << "Read camera parameters from Structure core device" << std::endl;
    vpCameraParameters cam =
      g.getCameraParameters(vpOccipitalStructure::visible, vpCameraParameters::perspectiveProjWithoutDistortion);

    std::cout << cam << std::endl;
    std::cout << "poseEstimationMethod: " << poseEstimationMethod << std::endl;
    std::cout << "tagFamily: " << tagFamily << std::endl;
    std::cout << "nThreads : " << nThreads << std::endl;
    std::cout << "Z aligned: " << align_frame << std::endl;

    vpImage<vpRGBa> I_color2 = I_color; // Second view used to draw the RGBD-fusion pose
    vpImage<float> depthMap;            // Depth in meters, input of computePlanarObjectPoseFromRGBD()
    vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

    if (!display_off) {
#ifdef VISP_HAVE_X11
      d1 = new vpDisplayX(I_color, 100, 30, "Pose from Homography");
      d2 = new vpDisplayX(I_color2, I_color.getWidth() + 120, 30, "Pose from RGBD fusion");
      d3 = new vpDisplayX(I_depth, 100, I_color.getHeight() + 70, "Depth");
#elif defined(VISP_HAVE_GDI)
      d1 = new vpDisplayGDI(I_color, 100, 30, "Pose from Homography");
      d2 = new vpDisplayGDI(I_color2, I_color.getWidth() + 120, 30, "Pose from RGBD fusion");
      d3 = new vpDisplayGDI(I_depth, 100, I_color.getHeight() + 70, "Depth");
#elif defined(HAVE_OPENCV_HIGHGUI)
      d1 = new vpDisplayOpenCV(I_color, 100, 30, "Pose from Homography");
      d2 = new vpDisplayOpenCV(I_color2, I_color.getWidth() + 120, 30, "Pose from RGBD fusion");
      d3 = new vpDisplayOpenCV(I_depth, 100, I_color.getHeight() + 70, "Depth");
#endif
    }

    // Configure the AprilTag detector from the command-line options.
    vpDetectorAprilTag detector(tagFamily);
    detector.setAprilTagQuadDecimate(quad_decimate);
    detector.setAprilTagPoseEstimationMethod(poseEstimationMethod);
    detector.setAprilTagNbThreads(nThreads);
    detector.setDisplayTag(display_tag, color_id < 0 ? vpColor::none : vpColor::getColor(color_id), thickness);
    detector.setZAlignedWithCameraAxis(align_frame);

    std::vector<double> time_vec; // Per-iteration processing times [ms] for the final statistics
    for (;;) {
      double t = vpTime::measureTimeMs();

      g.acquire(reinterpret_cast<unsigned char *>(I_color.bitmap),
                reinterpret_cast<unsigned char *>(I_depth_raw.bitmap));
      vpImageConvert::convert(I_color, I);
      I_color2 = I_color;
      vpImageConvert::createDepthHistogram(I_depth_raw, I_depth);

      vpDisplay::display(I_color);
      vpDisplay::display(I_color2);
      vpDisplay::display(I_depth);

      // Convert the raw depth (mm, 0 = invalid) into a metric depth map.
      depthMap.resize(I_depth_raw.getHeight(), I_depth_raw.getWidth());
#ifdef VISP_HAVE_OPENMP
#pragma omp parallel for
#endif
      for (unsigned int i = 0; i < I_depth_raw.getHeight(); i++) {
        for (unsigned int j = 0; j < I_depth_raw.getWidth(); j++) {
          if (I_depth_raw[i][j]) {
            float Z = I_depth_raw[i][j] * 0.001f; // mm -> m
            depthMap[i][j] = Z;
          }
          else {
            depthMap[i][j] = 0; // Keep invalid measurements at 0
          }
        }
      }

      // 1) Pose from the homography computed by the detector itself.
      std::vector<vpHomogeneousMatrix> cMo_vec;
      detector.detect(I, tagSize, cam, cMo_vec);
      for (size_t i = 0; i < cMo_vec.size(); i++) {
        vpDisplay::displayFrame(I_color, cMo_vec[i], cam, tagSize / 2, vpColor::none, 3);
      }

      // 2) Pose from RGBD fusion: refine each tag pose using the depth map.
      std::vector<std::vector<vpImagePoint> > tags_corners = detector.getPolygon();
      std::vector<int> tags_id = detector.getTagsId();
      std::map<int, double> tags_size;
      tags_size[-1] = tagSize; // Key -1 is the default size applied to every tag id
      std::vector<std::vector<vpPoint> > tags_points3d = detector.getTagsPoints3D(tags_id, tags_size);
      for (size_t i = 0; i < tags_corners.size(); i++) {
        vpHomogeneousMatrix cMo;
        double confidence_index;
        if (vpPose::computePlanarObjectPoseFromRGBD(depthMap, tags_corners[i], cam, tags_points3d[i], cMo,
                                                    &confidence_index)) {
          // Color-code the frame overlay by fusion confidence.
          if (confidence_index > 0.5) {
            vpDisplay::displayFrame(I_color2, cMo, cam, tagSize / 2, vpColor::none, 3);
          }
          else if (confidence_index > 0.25) {
            vpDisplay::displayFrame(I_color2, cMo, cam, tagSize / 2, vpColor::orange, 3);
          }
          else {
            vpDisplay::displayFrame(I_color2, cMo, cam, tagSize / 2, vpColor::red, 3);
          }
          std::stringstream ss;
          ss << "Tag id " << tags_id[i] << " confidence: " << confidence_index;
          vpDisplay::displayText(I_color2, 35 + static_cast<int>(i) * 15, 20, ss.str(), vpColor::red);
        }
      }

      t = vpTime::measureTimeMs() - t;
      time_vec.push_back(t);

      std::stringstream ss;
      ss << "Detection time: " << t << " ms for " << detector.getNbObjects() << " tags";
      vpDisplay::displayText(I_color, 40, 20, ss.str(), vpColor::red);
      vpDisplay::displayText(I_color, 20, 20, "Click to quit.", vpColor::red);

      vpDisplay::flush(I_color);
      vpDisplay::flush(I_color2);
      vpDisplay::flush(I_depth);

      if (vpDisplay::getClick(I_color, false)) {
        break;
      }
    }

    std::cout << "Benchmark loop processing time" << std::endl;
    std::cout << "Mean / Median / Std: " << vpMath::getMean(time_vec) << " ms"
              << " ; " << vpMath::getMedian(time_vec) << " ms"
              << " ; " << vpMath::getStdev(time_vec) << " ms" << std::endl;

    if (!display_off) {
      delete d1;
      delete d2;
      delete d3;
    }
  }
  catch (const vpException &e) {
    std::cerr << "Catch an exception: " << e.getMessage() << std::endl;
  }
  return EXIT_SUCCESS;
#else
  (void)argc;
  (void)argv;
#ifndef VISP_HAVE_APRILTAG
  std::cout << "Enable Apriltag support, configure and build ViSP to run this tutorial" << std::endl;
#else
  std::cout << "Install Structure Core SDK, configure and build ViSP again to use this example" << std::endl;
#endif
#endif
  return EXIT_SUCCESS;
}
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
static vpColor getColor(const unsigned int &i)
static const vpColor none
static const vpColor orange
void setZAlignedWithCameraAxis(bool zAlignedWithCameraFrame)
void setDisplayTag(bool display, const vpColor &color=vpColor::none, unsigned int thickness=2)
void setAprilTagQuadDecimate(float quadDecimate)
std::vector< std::vector< vpPoint > > getTagsPoints3D(const std::vector< int > &tagsId, const std::map< int, double > &tagsSize) const
bool detect(const vpImage< unsigned char > &I) VP_OVERRIDE
@ TAG_36h11
AprilTag 36h11 pattern (recommended)
void setAprilTagNbThreads(int nThreads)
void setAprilTagPoseEstimationMethod(const vpPoseEstimationMethod &poseEstimationMethod)
std::vector< int > getTagsId() const
std::vector< std::vector< vpImagePoint > > & getPolygon()
size_t getNbObjects() const
Display for windows using GDI (available on any windows 32 platform).
The vpDisplayOpenCV class allows to display images using the OpenCV library. To enable this class, OpenCV must be detected when configuring ViSP.
Class that defines generic functionalities for display.
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
Error that can be emitted by ViSP classes.
const char * getMessage() const
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void createDepthHistogram(const vpImage< uint16_t > &src_depth, vpImage< vpRGBa > &dest_rgba)
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
unsigned int getWidth() const
void resize(unsigned int h, unsigned int w)
Resize the image: image initialization.
Type * bitmap
points toward the bitmap
unsigned int getHeight() const
static bool isNaN(double value)
static double getMedian(const std::vector< double > &v)
static double getStdev(const std::vector< double > &v, bool useBesselCorrection=false)
static double getMean(const std::vector< double > &v)
vpCameraParameters getCameraParameters(const vpOccipitalStructureStream stream_type, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithoutDistortion)
void acquire(vpImage< unsigned char > &gray, bool undistorted=false, double *ts=nullptr)
bool open(const ST::CaptureSessionSettings &settings)
static bool computePlanarObjectPoseFromRGBD(const vpImage< float > &depthMap, const std::vector< vpImagePoint > &corners, const vpCameraParameters &colorIntrinsics, const std::vector< vpPoint > &point3d, vpHomogeneousMatrix &cMo, double *confidence_index=nullptr)
VISP_EXPORT double measureTimeMs()