1 #include <visp3/core/vpConfig.h>
3 #ifdef VISP_HAVE_MODULE_SENSOR
4 #include <visp3/sensor/vpOccipitalStructure.h>
7 #include <visp3/detection/vpDetectorAprilTag.h>
9 #include <visp3/core/vpImageConvert.h>
10 #include <visp3/core/vpXmlParserCamera.h>
11 #include <visp3/gui/vpDisplayGDI.h>
12 #include <visp3/gui/vpDisplayOpenCV.h>
13 #include <visp3/gui/vpDisplayX.h>
14 #include <visp3/vision/vpPose.h>
16 int main(
int argc,
const char **argv)
19 #if defined(VISP_HAVE_APRILTAG) && defined(VISP_HAVE_OCCIPITAL_STRUCTURE)
21 #ifdef ENABLE_VISP_NAMESPACE
27 double tagSize = 0.053;
28 float quad_decimate = 1.0;
30 bool display_tag =
false;
32 unsigned int thickness = 2;
33 bool align_frame =
false;
35 #if !(defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
36 bool display_off =
true;
37 std::cout <<
"Warning: There is no 3rd party (X11, GDI or openCV) to dislay images..." << std::endl;
39 bool display_off =
false;
42 for (
int i = 1; i < argc; i++) {
43 if (std::string(argv[i]) ==
"--pose_method" && i + 1 < argc) {
46 else if (std::string(argv[i]) ==
"--tag_size" && i + 1 < argc) {
47 tagSize = atof(argv[i + 1]);
49 else if (std::string(argv[i]) ==
"--quad_decimate" && i + 1 < argc) {
50 quad_decimate = (float)atof(argv[i + 1]);
52 else if (std::string(argv[i]) ==
"--nthreads" && i + 1 < argc) {
53 nThreads = atoi(argv[i + 1]);
55 else if (std::string(argv[i]) ==
"--display_tag") {
58 else if (std::string(argv[i]) ==
"--display_off") {
61 else if (std::string(argv[i]) ==
"--color" && i + 1 < argc) {
62 color_id = atoi(argv[i + 1]);
64 else if (std::string(argv[i]) ==
"--thickness" && i + 1 < argc) {
65 thickness = (
unsigned int)atoi(argv[i + 1]);
67 else if (std::string(argv[i]) ==
"--tag_family" && i + 1 < argc) {
70 else if (std::string(argv[i]) ==
"--z_aligned") {
73 else if (std::string(argv[i]) ==
"--help" || std::string(argv[i]) ==
"-h") {
74 std::cout <<
"Usage: " << argv[0]
75 <<
" [--tag_size <tag_size in m> (default: 0.053)]"
76 " [--quad_decimate <quad_decimate> (default: 1)]"
77 " [--nthreads <nb> (default: 1)]"
78 " [--pose_method <method> (0: HOMOGRAPHY, 1: HOMOGRAPHY_VIRTUAL_VS, "
79 " 2: DEMENTHON_VIRTUAL_VS, 3: LAGRANGE_VIRTUAL_VS, "
80 " 4: BEST_RESIDUAL_VIRTUAL_VS, 5: HOMOGRAPHY_ORTHOGONAL_ITERATION) (default: 0)]"
81 " [--tag_family <family> (0: TAG_36h11, 1: TAG_36h10 (DEPRECATED), 2: TAG_36ARTOOLKIT (DEPRECATED),"
82 " 3: TAG_25h9, 4: TAG_25h7 (DEPRECATED), 5: TAG_16h5, 6: TAG_CIRCLE21h7, 7: TAG_CIRCLE49h12,"
83 " 8: TAG_CUSTOM48h12, 9: TAG_STANDARD41h12, 10: TAG_STANDARD52h13) (default: 0)]"
84 " [--display_tag] [--z_aligned]";
85 #if (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI) || defined(VISP_HAVE_OPENCV))
86 std::cout <<
" [--display_off] [--color <color id>] [--thickness <line thickness>]";
88 std::cout <<
" [--help]" << std::endl;
95 std::cout <<
"Use Occipital Structure grabber" << std::endl;
97 ST::CaptureSessionSettings settings;
98 settings.source = ST::CaptureSessionSourceId::StructureCore;
99 settings.structureCore.visibleEnabled =
true;
100 settings.applyExpensiveCorrection =
true;
101 unsigned int width = 640, height = 480;
110 std::cout <<
"I_color: " << I_color.getWidth() <<
" " << I_color.getHeight() << std::endl;
111 std::cout <<
"I_depth_raw: " << I_depth_raw.getWidth() <<
" " << I_depth_raw.getHeight() << std::endl;
113 g.
acquire(
reinterpret_cast<unsigned char *
>(I_color.bitmap),
reinterpret_cast<unsigned char *
>(I_depth_raw.bitmap));
115 std::cout <<
"Read camera parameters from Structure core device" << std::endl;
120 std::cout << cam << std::endl;
121 std::cout <<
"poseEstimationMethod: " << poseEstimationMethod << std::endl;
122 std::cout <<
"tagFamily: " << tagFamily << std::endl;
123 std::cout <<
"nThreads : " << nThreads << std::endl;
124 std::cout <<
"Z aligned: " << align_frame << std::endl;
135 d1 =
new vpDisplayX(I_color, 100, 30,
"Pose from Homography");
136 d2 =
new vpDisplayX(I_color2, I_color.getWidth() + 120, 30,
"Pose from RGBD fusion");
137 d3 =
new vpDisplayX(I_depth, 100, I_color.getHeight() + 70,
"Depth");
138 #elif defined(VISP_HAVE_GDI)
139 d1 =
new vpDisplayGDI(I_color, 100, 30,
"Pose from Homography");
140 d2 =
new vpDisplayGDI(I_color2, I_color.getWidth() + 120, 30,
"Pose from RGBD fusion");
141 d3 =
new vpDisplayGDI(I_depth, 100, I_color.getHeight() + 70,
"Depth");
142 #elif defined(HAVE_OPENCV_HIGHGUI)
144 d2 =
new vpDisplayOpenCV(I_color2, I_color.getWidth() + 120, 30,
"Pose from RGBD fusion");
145 d3 =
new vpDisplayOpenCV(I_depth, 100, I_color.getHeight() + 70,
"Depth");
154 detector.setAprilTagQuadDecimate(quad_decimate);
155 detector.setAprilTagPoseEstimationMethod(poseEstimationMethod);
156 detector.setAprilTagNbThreads(nThreads);
158 detector.setZAlignedWithCameraAxis(align_frame);
160 std::vector<double> time_vec;
165 g.
acquire(
reinterpret_cast<unsigned char *
>(I_color.bitmap),
166 reinterpret_cast<unsigned char *
>(I_depth_raw.bitmap));
173 depthMap.
resize(I_depth_raw.getHeight(), I_depth_raw.getWidth());
174 #ifdef VISP_HAVE_OPENMP
175 #pragma omp parallel for
177 for (
unsigned int i = 0; i < I_depth_raw.getHeight(); i++) {
178 for (
unsigned int j = 0; j < I_depth_raw.getWidth(); j++) {
180 float Z = I_depth_raw[i][j] * 0.001;
193 std::vector<vpHomogeneousMatrix> cMo_vec;
194 detector.detect(I, tagSize, cam, cMo_vec);
197 for (
size_t i = 0; i < cMo_vec.size(); i++) {
202 std::vector<std::vector<vpImagePoint> > tags_corners = detector.getPolygon();
203 std::vector<int> tags_id = detector.getTagsId();
204 std::map<int, double> tags_size;
205 tags_size[-1] = tagSize;
206 std::vector<std::vector<vpPoint> > tags_points3d = detector.getTagsPoints3D(tags_id, tags_size);
207 for (
size_t i = 0; i < tags_corners.size(); i++) {
209 double confidence_index;
211 &confidence_index)) {
212 if (confidence_index > 0.5) {
215 else if (confidence_index > 0.25) {
221 std::stringstream ss;
222 ss <<
"Tag id " << tags_id[i] <<
" confidence: " << confidence_index;
232 time_vec.push_back(t);
234 std::stringstream ss;
235 ss <<
"Detection time: " << t <<
" ms for " << detector.getNbObjects() <<
" tags";
246 std::cout <<
"Benchmark loop processing time" << std::endl;
247 std::cout <<
"Mean / Median / Std: " <<
vpMath::getMean(time_vec) <<
" ms"
259 std::cerr <<
"Catch an exception: " << e.
getMessage() << std::endl;
266 #ifndef VISP_HAVE_APRILTAG
267 std::cout <<
"Enable Apriltag support, configure and build ViSP to run this tutorial" << std::endl;
269 std::cout <<
"Install Structure Core SDK, configure and build ViSP again to use this example" << std::endl;
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithoutDistortion
Perspective projection without distortion model.
static vpColor getColor(const unsigned int &i)
static const vpColor none
static const vpColor orange
@ TAG_36h11
AprilTag 36h11 pattern (recommended)
Display for Windows using GDI (available on any Windows 32 platform).
The vpDisplayOpenCV allows to display image using the OpenCV library. Thus to enable this class OpenC...
Use the X11 console to display images on unix-like OS. Thus to enable this class X11 should be instal...
Class that defines generic functionalities for display.
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
Error that can be emitted by ViSP classes.
const char * getMessage() const
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void createDepthHistogram(const vpImage< uint16_t > &src_depth, vpImage< vpRGBa > &dest_rgba)
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
void resize(unsigned int h, unsigned int w)
Resize the image: image initialization.
static bool isNaN(double value)
static double getMedian(const std::vector< double > &v)
static double getStdev(const std::vector< double > &v, bool useBesselCorrection=false)
static double getMean(const std::vector< double > &v)
vpCameraParameters getCameraParameters(const vpOccipitalStructureStream stream_type, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithoutDistortion)
void acquire(vpImage< unsigned char > &gray, bool undistorted=false, double *ts=nullptr)
bool open(const ST::CaptureSessionSettings &settings)
static bool computePlanarObjectPoseFromRGBD(const vpImage< float > &depthMap, const std::vector< vpImagePoint > &corners, const vpCameraParameters &colorIntrinsics, const std::vector< vpPoint > &point3d, vpHomogeneousMatrix &cMo, double *confidence_index=nullptr)
VISP_EXPORT double measureTimeMs()