Visual Servoing Platform  version 3.6.1 under development (2024-12-11)
servoAfma6AprilTagPBVS.cpp
1 /*
2  * ViSP, open source Visual Servoing Platform software.
3  * Copyright (C) 2005 - 2024 by Inria. All rights reserved.
4  *
5  * This software is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation; either version 2 of the License, or
8  * (at your option) any later version.
9  * See the file LICENSE.txt at the root directory of this source
10  * distribution for additional information about the GNU GPL.
11  *
12  * For using ViSP with software that can not be combined with the GNU
13  * GPL, please contact Inria about acquiring a ViSP Professional
14  * Edition License.
15  *
16  * See https://visp.inria.fr for more information.
17  *
18  * This software was developed at:
19  * Inria Rennes - Bretagne Atlantique
20  * Campus Universitaire de Beaulieu
21  * 35042 Rennes Cedex
22  * France
23  *
24  * If you have questions regarding the use of this file, please contact
25  * Inria at visp@inria.fr
26  *
27  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
28  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
29  *
30  * Description:
31  * Pose-based visual servoing (PBVS) on an AprilTag target with an Afma6 robot
31  * and an Intel RealSense camera.
32  */
33 
#include <iostream>
#include <memory>
#include <sstream>
#include <string>
#include <vector>
50 
51 #include <visp3/core/vpConfig.h>
52 
53 #if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_DISPLAY) && defined(VISP_HAVE_AFMA6)
54 
55 #include <visp3/core/vpCameraParameters.h>
56 #include <visp3/detection/vpDetectorAprilTag.h>
57 #include <visp3/gui/vpDisplayFactory.h>
58 #include <visp3/gui/vpPlot.h>
59 #include <visp3/io/vpImageIo.h>
60 #include <visp3/robot/vpRobotAfma6.h>
61 #include <visp3/sensor/vpRealSense2.h>
62 #include <visp3/visual_features/vpFeatureThetaU.h>
63 #include <visp3/visual_features/vpFeatureTranslation.h>
64 #include <visp3/vs/vpServo.h>
65 #include <visp3/vs/vpServoDisplay.h>
66 
67 #ifdef ENABLE_VISP_NAMESPACE
68 using namespace VISP_NAMESPACE_NAME;
69 #endif
70 
71 void display_point_trajectory(const vpImage<unsigned char> &I,
72  const std::vector<vpImagePoint> &vip,
73  std::vector<vpImagePoint> *traj_vip)
74 {
75  for (size_t i = 0; i < vip.size(); ++i) {
76  if (traj_vip[i].size()) {
77  // Add the point only if distance with the previous > 1 pixel
78  if (vpImagePoint::distance(vip[i], traj_vip[i].back()) > 1.) {
79  traj_vip[i].push_back(vip[i]);
80  }
81  }
82  else {
83  traj_vip[i].push_back(vip[i]);
84  }
85  }
86  for (size_t i = 0; i < vip.size(); ++i) {
87  for (size_t j = 1; j < traj_vip[i].size(); ++j) {
88  vpDisplay::displayLine(I, traj_vip[i][j - 1], traj_vip[i][j], vpColor::green, 2);
89  }
90  }
91 }
92 
93 int main(int argc, char **argv)
94 {
95  double opt_tagSize = 0.120;
96  int opt_quad_decimate = 2;
97  bool opt_verbose = false;
98  bool opt_plot = false;
99  bool opt_adaptive_gain = false;
100  bool opt_task_sequencing = false;
101  double opt_convergence_threshold_t = 0.0005; // Value in [m]
102  double opt_convergence_threshold_tu = 0.5; // Value in [deg]
103 
104  bool display_tag = true;
105 
106  for (int i = 1; i < argc; ++i) {
107  if ((std::string(argv[i]) == "--tag-size") && (i + 1 < argc)) {
108  opt_tagSize = std::stod(argv[i + 1]);
109  ++i;
110  }
111  else if (std::string(argv[i]) == "--verbose") {
112  opt_verbose = true;
113  }
114  else if (std::string(argv[i]) == "--plot") {
115  opt_plot = true;
116  }
117  else if (std::string(argv[i]) == "--adaptive-gain") {
118  opt_adaptive_gain = true;
119  }
120  else if (std::string(argv[i]) == "--task-sequencing") {
121  opt_task_sequencing = true;
122  }
123  else if ((std::string(argv[i]) == "--quad-decimate") && (i + 1 < argc)) {
124  opt_quad_decimate = std::stoi(argv[i + 1]);
125  ++i;
126  }
127  else if (std::string(argv[i]) == "--no-convergence-threshold") {
128  opt_convergence_threshold_t = 0.;
129  opt_convergence_threshold_tu = 0.;
130  }
131  else if ((std::string(argv[i]) == "--help") || (std::string(argv[i]) == "-h")) {
132  std::cout
133  << argv[0]
134  << " [--tag-size <marker size in meter; default " << opt_tagSize << ">]"
135  << " [--quad-decimate <decimation; default " << opt_quad_decimate << ">]"
136  << " [--adaptive-gain]"
137  << " [--plot]"
138  << " [--task-sequencing]"
139  << " [--no-convergence-threshold]"
140  << " [--verbose]"
141  << " [--help] [-h]"
142  << std::endl;;
143  return EXIT_SUCCESS;
144  }
145  }
146 
147  vpRobotAfma6 robot;
149 
150  // Load the end-effector to camera frame transformation obtained
151  // using a camera intrinsic model with distortion
152  robot.init(vpAfma6::TOOL_INTEL_D435_CAMERA, projModel);
153 
154  try {
155  std::cout << "WARNING: This example will move the robot! "
156  << "Please make sure to have the user stop button at hand!" << std::endl
157  << "Press Enter to continue..." << std::endl;
158  std::cin.ignore();
159 
160  vpRealSense2 rs;
161  rs2::config config;
162  unsigned int width = 640, height = 480, fps = 60;
163  config.enable_stream(RS2_STREAM_COLOR, width, height, RS2_FORMAT_RGBA8, fps);
164  config.enable_stream(RS2_STREAM_DEPTH, width, height, RS2_FORMAT_Z16, fps);
165  config.enable_stream(RS2_STREAM_INFRARED, width, height, RS2_FORMAT_Y8, fps);
166  rs.open(config);
167 
168  // Warm up camera
170  for (size_t i = 0; i < 10; ++i) {
171  rs.acquire(I);
172  }
173 
174  // Get camera intrinsics
175  vpCameraParameters cam;
176  robot.getCameraParameters(cam, I);
177  std::cout << "cam:\n" << cam << std::endl;
178 
179  std::shared_ptr<vpDisplay> d = vpDisplayFactory::createDisplay(I, 10, 10, "Current image");
180 
183  // vpDetectorAprilTag::vpPoseEstimationMethod poseEstimationMethod = vpDetectorAprilTag::BEST_RESIDUAL_VIRTUAL_VS;
184  vpDetectorAprilTag detector(tagFamily);
185  detector.setAprilTagPoseEstimationMethod(poseEstimationMethod);
186  detector.setDisplayTag(display_tag);
187  detector.setAprilTagQuadDecimate(opt_quad_decimate);
188  detector.setZAlignedWithCameraAxis(true);
189 
190  // Tag frame for pose estimation is the following
191  // - When using
192  // detector.setZAlignedWithCameraAxis(false);
193  // detector.detect();
194  // we consider the tag frame (o) such as z_o axis is not aligned with camera frame
195  // (meaning z_o axis is facing the camera)
196  //
197  // 3 y 2
198  // |
199  // o--x
200  //
201  // 0 1
202  //
203  // In that configuration, it is more difficult to set a desired camera pose c_M_o.
204  // To ease this step we can introduce an extra transformation matrix o'_M_o to align the axis
205  // with the camera frame:
206  //
207  // o'--x
208  // |
209  // y
210  //
211  // where
212  // | 1 0 0 0 |
213  // o'_M_o = | 0 -1 0 0 |
214  // | 0 0 -1 0 |
215  // | 0 0 0 1 |
216  //
217  // Defining the desired camera pose in frame o' becomes than easier.
218  //
219  // - When using rather
220  // detector.setZAlignedWithCameraAxis(true);
221  // detector.detect();
222  // we consider the tag frame (o) such as z_o axis is aligned with camera frame
223  //
224  // 3 2
225  //
226  // o--x
227  // |
228  // 0 y 1
229  //
230  // In that configuration, it is easier to define a desired camera pose c_M_o since all the axis
231  // (camera frame and tag frame are aligned)
232 
233  // Servo
234  vpHomogeneousMatrix cd_M_c, c_M_o, o_M_o;
235 
236  // Desired pose to reach
237  vpHomogeneousMatrix cd_M_o(vpTranslationVector(0, 0, opt_tagSize * 3.5), // 3.5 times tag with along camera z axis
239  if (!detector.isZAlignedWithCameraAxis()) {
240  vpHomogeneousMatrix oprim_M_o = { 1, 0, 0, 0,
241  0, -1, 0, 0,
242  0, 0, -1, 0,
243  0, 0, 0, 1 };
244  cd_M_o *= oprim_M_o;
245  }
246 
247  cd_M_c = cd_M_o * c_M_o.inverse();
248 
249  // Create current visual features
252  s_t.buildFrom(cd_M_c);
253  s_tu.buildFrom(cd_M_c);
254 
255  // Create desired visual features
258 
259  vpServo task;
260  task.addFeature(s_t, s_t_d);
261  task.addFeature(s_tu, s_tu_d);
264 
265  // Set the gain
266  if (opt_adaptive_gain) {
267  vpAdaptiveGain lambda(1.5, 0.4, 30); // lambda(0)=4, lambda(oo)=0.4 and lambda'(0)=30
268  task.setLambda(lambda);
269  }
270  else {
271  task.setLambda(0.5);
272  }
273 
274  vpPlot *plotter = nullptr;
275  int iter_plot = 0;
276 
277  if (opt_plot) {
278  plotter = new vpPlot(2, static_cast<int>(250 * 2), 500, static_cast<int>(I.getWidth()) + 80, 10,
279  "Real time curves plotter");
280  plotter->setTitle(0, "Visual features error");
281  plotter->setTitle(1, "Camera velocities");
282  plotter->initGraph(0, 6);
283  plotter->initGraph(1, 6);
284  plotter->setLegend(0, 0, "error_feat_tx");
285  plotter->setLegend(0, 1, "error_feat_ty");
286  plotter->setLegend(0, 2, "error_feat_tz");
287  plotter->setLegend(0, 3, "error_feat_theta_ux");
288  plotter->setLegend(0, 4, "error_feat_theta_uy");
289  plotter->setLegend(0, 5, "error_feat_theta_uz");
290  plotter->setLegend(1, 0, "vc_x");
291  plotter->setLegend(1, 1, "vc_y");
292  plotter->setLegend(1, 2, "vc_z");
293  plotter->setLegend(1, 3, "wc_x");
294  plotter->setLegend(1, 4, "wc_y");
295  plotter->setLegend(1, 5, "wc_z");
296  }
297 
298  bool final_quit = false;
299  bool has_converged = false;
300  bool send_velocities = false;
301  bool servo_started = false;
302  std::vector<vpImagePoint> *traj_vip = nullptr; // To memorize point trajectory
303 
304  static double t_init_servo = vpTime::measureTimeMs();
305 
307 
308  while (!has_converged && !final_quit) {
309  double t_start = vpTime::measureTimeMs();
310 
311  rs.acquire(I);
312 
314 
315  std::vector<vpHomogeneousMatrix> c_M_o_vec;
316  detector.detect(I, opt_tagSize, cam, c_M_o_vec);
317 
318  std::stringstream ss;
319  ss << "Left click to " << (send_velocities ? "stop the robot" : "servo the robot") << ", right click to quit.";
320  vpDisplay::displayText(I, 20, 20, ss.str(), vpColor::red);
321 
322  vpColVector v_c(6);
323 
324  // Only one tag is detected
325  if (c_M_o_vec.size() == 1) {
326  c_M_o = c_M_o_vec[0];
327 
328  static bool first_time = true;
329  if (first_time) {
330  // Introduce security wrt tag positioning in order to avoid PI rotation
331  std::vector<vpHomogeneousMatrix> v_o_M_o(2), v_cd_M_c(2);
332  v_o_M_o[1].buildFrom(0, 0, 0, 0, 0, M_PI);
333  for (size_t i = 0; i < 2; ++i) {
334  v_cd_M_c[i] = cd_M_o * v_o_M_o[i] * c_M_o.inverse();
335  }
336  if (std::fabs(v_cd_M_c[0].getThetaUVector().getTheta()) < std::fabs(v_cd_M_c[1].getThetaUVector().getTheta())) {
337  o_M_o = v_o_M_o[0];
338  }
339  else {
340  std::cout << "Desired frame modified to avoid PI rotation of the camera" << std::endl;
341  o_M_o = v_o_M_o[1]; // Introduce PI rotation
342  }
343  }
344 
345  // Update current visual features
346  cd_M_c = cd_M_o * o_M_o * c_M_o.inverse();
347  s_t.buildFrom(cd_M_c);
348  s_tu.buildFrom(cd_M_c);
349 
350  if (opt_task_sequencing) {
351  if (!servo_started) {
352  if (send_velocities) {
353  servo_started = true;
354  }
355  t_init_servo = vpTime::measureTimeMs();
356  }
357  v_c = task.computeControlLaw((vpTime::measureTimeMs() - t_init_servo) / 1000.);
358  }
359  else {
360  v_c = task.computeControlLaw();
361  }
362 
363  // Display desired and current pose features
364  vpDisplay::displayFrame(I, cd_M_o * o_M_o, cam, opt_tagSize / 1.5, vpColor::yellow, 2);
365  vpDisplay::displayFrame(I, c_M_o, cam, opt_tagSize / 2, vpColor::none, 3);
366  // Get tag corners
367  std::vector<vpImagePoint> vip = detector.getPolygon(0);
368  // Get the tag cog corresponding to the projection of the tag frame in the image
369  vip.push_back(detector.getCog(0));
370  // Display the trajectory of the points
371  if (first_time) {
372  traj_vip = new std::vector<vpImagePoint>[vip.size()];
373  }
374  display_point_trajectory(I, vip, traj_vip);
375 
376  if (opt_plot) {
377  plotter->plot(0, iter_plot, task.getError());
378  plotter->plot(1, iter_plot, v_c);
379  iter_plot++;
380  }
381 
382  if (opt_verbose) {
383  std::cout << "v_c: " << v_c.t() << std::endl;
384  }
385 
386  vpTranslationVector cd_t_c = cd_M_c.getTranslationVector();
387  vpThetaUVector cd_tu_c = cd_M_c.getThetaUVector();
388  double error_tr = sqrt(cd_t_c.sumSquare());
389  double error_tu = vpMath::deg(sqrt(cd_tu_c.sumSquare()));
390 
391  ss.str("");
392  ss << "error_t: " << error_tr;
393  vpDisplay::displayText(I, 20, static_cast<int>(I.getWidth()) - 150, ss.str(), vpColor::red);
394  ss.str("");
395  ss << "error_tu: " << error_tu;
396  vpDisplay::displayText(I, 40, static_cast<int>(I.getWidth()) - 150, ss.str(), vpColor::red);
397 
398  if (opt_verbose)
399  std::cout << "error translation: " << error_tr << " ; error rotation: " << error_tu << std::endl;
400 
401  if ((error_tr < opt_convergence_threshold_t) && (error_tu < opt_convergence_threshold_tu)) {
402  has_converged = true;
403  std::cout << "Servo task has converged" << std::endl;
404  ;
405  vpDisplay::displayText(I, 100, 20, "Servo task has converged", vpColor::red);
406  }
407 
408  if (first_time) {
409  first_time = false;
410  }
411  } // end if (c_M_o_vec.size() == 1)
412  else {
413  v_c = 0;
414  }
415 
416  if (!send_velocities) {
417  v_c = 0;
418  }
419 
420  // Send to the robot
422 
423  ss.str("");
424  ss << "Loop time: " << vpTime::measureTimeMs() - t_start << " ms";
425  vpDisplay::displayText(I, 40, 20, ss.str(), vpColor::red);
426  vpDisplay::flush(I);
427 
429  if (vpDisplay::getClick(I, button, false)) {
430  switch (button) {
432  send_velocities = !send_velocities;
433  break;
434 
436  final_quit = true;
437  break;
438 
439  default:
440  break;
441  }
442  }
443  }
444  std::cout << "Stop the robot " << std::endl;
446 
447  if (opt_plot && plotter != nullptr) {
448  delete plotter;
449  plotter = nullptr;
450  }
451 
452  if (!final_quit) {
453  while (!final_quit) {
454  rs.acquire(I);
456 
457  vpDisplay::displayText(I, 20, 20, "Click to quit the program.", vpColor::red);
458  vpDisplay::displayText(I, 40, 20, "Visual servo converged.", vpColor::red);
459 
460  if (vpDisplay::getClick(I, false)) {
461  final_quit = true;
462  }
463 
464  vpDisplay::flush(I);
465  }
466  }
467 
468  if (traj_vip) {
469  delete[] traj_vip;
470  }
471  }
472  catch (const vpException &e) {
473  std::cout << "ViSP exception: " << e.what() << std::endl;
474  std::cout << "Stop the robot " << std::endl;
476  return EXIT_FAILURE;
477  }
478 
479  return EXIT_SUCCESS;
480 }
481 #else
/*
 * Fallback entry point when a required third-party dependency is missing.
 * The enclosing guard tests VISP_HAVE_REALSENSE2, VISP_HAVE_DISPLAY and
 * VISP_HAVE_AFMA6; a message is now printed for each missing one (the
 * original stayed silent when only the display library was absent).
 */
int main()
{
#if !defined(VISP_HAVE_REALSENSE2)
  std::cout << "Install librealsense-2.x" << std::endl;
#endif
#if !defined(VISP_HAVE_DISPLAY)
  std::cout << "ViSP is not built with a display library (X11, GDI, OpenCV...)..." << std::endl;
#endif
#if !defined(VISP_HAVE_AFMA6)
  std::cout << "ViSP is not built with Afma-6 robot support..." << std::endl;
#endif
  return EXIT_SUCCESS;
}
492 #endif
Adaptive gain computation.
@ TOOL_INTEL_D435_CAMERA
Definition: vpAfma6.h:131
Generic class defining intrinsic camera parameters.
@ perspectiveProjWithDistortion
Perspective projection with distortion model.
Implementation of column vector and the associated operations.
Definition: vpColVector.h:191
static const vpColor red
Definition: vpColor.h:217
static const vpColor none
Definition: vpColor.h:229
static const vpColor yellow
Definition: vpColor.h:225
static const vpColor green
Definition: vpColor.h:220
@ TAG_36h11
AprilTag 36h11 pattern (recommended)
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
static void display(const vpImage< unsigned char > &I)
static void displayLine(const vpImage< unsigned char > &I, const vpImagePoint &ip1, const vpImagePoint &ip2, const vpColor &color, unsigned int thickness=1, bool segment=true)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0), const std::string &frameName="", const vpColor &textColor=vpColor::black, const vpImagePoint &textOffset=vpImagePoint(15, 15))
static void flush(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
error that can be emitted by ViSP classes.
Definition: vpException.h:60
const char * what() const
Definition: vpException.cpp:71
Class that defines a 3D visual feature from a axis/angle parametrization that represent the rotatio...
Class that defines the translation visual feature .
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpThetaUVector getThetaUVector() const
vpHomogeneousMatrix & buildFrom(const vpTranslationVector &t, const vpRotationMatrix &R)
vpHomogeneousMatrix inverse() const
vpTranslationVector getTranslationVector() const
static double distance(const vpImagePoint &iP1, const vpImagePoint &iP2)
unsigned int getWidth() const
Definition: vpImage.h:242
static double rad(double deg)
Definition: vpMath.h:129
static double deg(double rad)
Definition: vpMath.h:119
This class enables real time drawing of 2D or 3D graphics. An instance of the class open a window whi...
Definition: vpPlot.h:112
void initGraph(unsigned int graphNum, unsigned int curveNbr)
Definition: vpPlot.cpp:203
void setLegend(unsigned int graphNum, unsigned int curveNum, const std::string &legend)
Definition: vpPlot.cpp:552
void plot(unsigned int graphNum, unsigned int curveNum, double x, double y)
Definition: vpPlot.cpp:270
void setTitle(unsigned int graphNum, const std::string &title)
Definition: vpPlot.cpp:510
void acquire(vpImage< unsigned char > &grey, double *ts=nullptr)
bool open(const rs2::config &cfg=rs2::config())
Control of Irisa's gantry robot named Afma6.
Definition: vpRobotAfma6.h:212
void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) VP_OVERRIDE
@ CAMERA_FRAME
Definition: vpRobot.h:84
@ STATE_VELOCITY_CONTROL
Initialize the velocity controller.
Definition: vpRobot.h:67
@ STATE_STOP
Stops robot motion especially in velocity and acceleration control.
Definition: vpRobot.h:66
virtual vpRobotStateType setRobotState(const vpRobot::vpRobotStateType newState)
Definition: vpRobot.cpp:202
double sumSquare() const
void setInteractionMatrixType(const vpServoIteractionMatrixType &interactionMatrixType, const vpServoInversionType &interactionMatrixInversion=PSEUDO_INVERSE)
Definition: vpServo.cpp:380
@ EYEINHAND_CAMERA
Definition: vpServo.h:161
void addFeature(vpBasicFeature &s_cur, vpBasicFeature &s_star, unsigned int select=vpBasicFeature::FEATURE_ALL)
Definition: vpServo.cpp:331
void setLambda(double c)
Definition: vpServo.h:986
void setServo(const vpServoType &servo_type)
Definition: vpServo.cpp:134
vpColVector getError() const
Definition: vpServo.h:510
vpColVector computeControlLaw()
Definition: vpServo.cpp:705
@ CURRENT
Definition: vpServo.h:202
Implementation of a rotation vector as axis-angle minimal representation.
Class that consider the case of a translation vector.
std::shared_ptr< vpDisplay > createDisplay()
Return a smart pointer vpDisplay specialization if a GUI library is available or nullptr otherwise.
VISP_EXPORT double measureTimeMs()