// Visual Servoing Platform version 3.6.1 under development (2024-11-15)
// servoSimuPoint2DhalfCamVelocity3.cpp
1 /****************************************************************************
2  *
3  * ViSP, open source Visual Servoing Platform software.
4  * Copyright (C) 2005 - 2023 by Inria. All rights reserved.
5  *
6  * This software is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  * See the file LICENSE.txt at the root directory of this source
11  * distribution for additional information about the GNU GPL.
12  *
13  * For using ViSP with software that can not be combined with the GNU
14  * GPL, please contact Inria about acquiring a ViSP Professional
15  * Edition License.
16  *
17  * See https://visp.inria.fr for more information.
18  *
19  * This software was developed at:
20  * Inria Rennes - Bretagne Atlantique
21  * Campus Universitaire de Beaulieu
22  * 35042 Rennes Cedex
23  * France
24  *
25  * If you have questions regarding the use of this file, please contact
26  * Inria at visp@inria.fr
27  *
28  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
29  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
30  *
31  * Description:
32  * Simulation of a 2 1/2 D visual servoing using theta U visual features.
33  *
34 *****************************************************************************/
35 
#include <stdio.h>
#include <stdlib.h>

#include <visp3/core/vpConfig.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpMath.h>
#include <visp3/core/vpPoint.h>
#include <visp3/io/vpParseArgv.h>
#include <visp3/robot/vpSimulatorCamera.h>
#include <visp3/visual_features/vpFeatureBuilder.h>
#include <visp3/visual_features/vpFeaturePoint.h>
#include <visp3/visual_features/vpFeatureThetaU.h>
#include <visp3/visual_features/vpFeatureTranslation.h>
#include <visp3/visual_features/vpGenericFeature.h>
#include <visp3/vs/vpServo.h>
59 
// List of allowed command line options
#define GETOPTARGS "h"

#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif

// Forward declarations
void usage(const char *name, const char *badparam);
bool getOptions(int argc, const char **argv);
69 
/*!
  Print the program options.

  \param name : Program name.
  \param badparam : Bad parameter name, or nullptr if none.
*/
void usage(const char *name, const char *badparam)
{
  fprintf(stdout, "\n\
Simulation of a 2 1/2 D visual servoing (x,y,logZ, theta U):\n\
- eye-in-hand control law,\n\
- velocity computed in the camera frame,\n\
- without display.\n\
          \n\
SYNOPSIS\n\
  %s [-h]\n",
          name);

  fprintf(stdout, "\n\
OPTIONS:                                               Default\n\
          \n\
  -h\n\
     Print the help.\n");

  if (badparam)
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}
99 
109 bool getOptions(int argc, const char **argv)
110 {
111  const char *optarg_;
112  int c;
113  while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {
114 
115  switch (c) {
116  case 'h':
117  usage(argv[0], nullptr);
118  return false;
119 
120  default:
121  usage(argv[0], optarg_);
122  return false;
123  }
124  }
125 
126  if ((c == 1) || (c == -1)) {
127  // standalone param or error
128  usage(argv[0], nullptr);
129  std::cerr << "ERROR: " << std::endl;
130  std::cerr << " Bad argument " << optarg_ << std::endl << std::endl;
131  return false;
132  }
133 
134  return true;
135 }
136 
/*!
  Simulation of a 2 1/2 D visual servoing: eye-in-hand control law, with the
  velocity computed in the camera frame, without display.
*/
int main(int argc, const char **argv)
{
#if (defined(VISP_HAVE_LAPACK) || defined(VISP_HAVE_EIGEN3) || defined(VISP_HAVE_OPENCV))
  try {
    // Read the command line options
    if (getOptions(argc, argv) == false) {
      return EXIT_FAILURE;
    }

    std::cout << std::endl;
    std::cout << "-------------------------------------------------------" << std::endl;
    std::cout << " simulation of a 2 1/2 D visual servoing " << std::endl;
    std::cout << "-------------------------------------------------------" << std::endl;
    std::cout << std::endl;

    // In this example we will simulate a visual servoing task.
    // In simulation, we have to define the scene frame Ro and the
    // camera frame Rc.
    // The camera location is given by an homogeneous matrix cMo that
    // describes the position of the scene or object frame in the camera frame.

    vpServo task;

    // Sets the initial camera location.
    // We give the camera location as a size 6 vector (3 translations in meter
    // and 3 rotations, theta U representation).
    vpPoseVector c_r_o(0.1, 0.2, 2, vpMath::rad(20), vpMath::rad(10), vpMath::rad(50));

    // This pose vector is then transformed in a 4x4 homogeneous matrix
    vpHomogeneousMatrix cMo(c_r_o);

    // We define a robot.
    // The vpSimulatorCamera implements a simple moving camera that is just
    // defined by its location cMo.
    vpSimulatorCamera robot;

    // Compute the position of the object in the world frame
    vpHomogeneousMatrix wMc, wMo;
    robot.getPosition(wMc);
    wMo = wMc * cMo;

    // Now that the current camera position has been defined,
    // let us define the desired camera location.
    // It is defined by cdMo.
    vpPoseVector cd_r_o(0, 0, 1, vpMath::rad(0), vpMath::rad(0), vpMath::rad(0));
    vpHomogeneousMatrix cdMo(cd_r_o);

    //----------------------------------------------------------------------
    // A 2 1/2 D visual servoing can be defined by
    // - the position of a point x,y
    // - the difference between this point depth and a desired depth
    //   modeled by log Z/Zd to be regulated to 0
    // - the rotation that the camera has to realize cdMc
    //
    // Let us now define the current value of these features.
    //
    // Since we simulate, we have to define a 3D point that will be
    // forward-projected to define the current position x,y of the
    // reference point.

    //------------------------------------------------------------------
    // First feature (x,y)
    //------------------------------------------------------------------
    // A vpPoint class has three main members:
    // .oP : 3D coordinates in the scene frame
    // .cP : 3D coordinates in the camera frame
    // .p  : 2D coordinates

    // Sets the point coordinates in the world frame
    vpPoint P(0, 0, 0);
    // Computes the point coordinates in the camera frame (cP) and its
    // 2D coordinates (p)
    P.track(cMo);

    // We also define (again by forward projection) the desired position
    // of this point according to the desired camera position
    vpPoint Pd(0, 0, 0);
    Pd.track(cdMo);

    // Nevertheless, a vpPoint is not a feature, this is just a "tracker"
    // from which the features are built.
    // A feature is just defined by a vector s, a way to compute the
    // interaction matrix and the error, and if required a (or a vector of)
    // 3D information.

    // For a point (x,y) ViSP implements the vpFeaturePoint class.
    // We now define a feature for (x,y) and for (x*,y*).
    vpFeaturePoint p, pd;

    // We initialize the vector s=(x,y) of p from the tracker P.
    // The Z coordinate in p is also initialized, it will be used to compute
    // the interaction matrix.
    vpFeatureBuilder::create(p, P);
    vpFeatureBuilder::create(pd, Pd);

    // This visual feature has to be regulated to its desired value pd.

    //------------------------------------------------------------------
    // 2nd feature ThetaUz and 3rd feature t
    // The thetaU feature represents the rotation that the camera has to
    // realize, t the translation; the complete displacement is cdMc.
    //------------------------------------------------------------------
    vpHomogeneousMatrix cdMc;
    // Compute the displacement that the camera has to achieve
    cdMc = cdMo * cMo.inverse();

    // From this displacement, we extract the rotation cdRc represented by
    // the angle theta and the rotation axis u
    vpFeatureThetaU tuz(vpFeatureThetaU::cdRc);
    tuz.buildFrom(cdMc);
    // And the translation
    vpFeatureTranslation t(vpFeatureTranslation::cdMc);
    t.buildFrom(cdMc);

    // These visual features have to be regulated to zero:
    // the desired rotation/translation are always zero since s is the
    // displacement that the camera has to achieve.

    //------------------------------------------------------------------
    // Let us now build the task itself
    //------------------------------------------------------------------

    // Define the task:
    // - we want an eye-in-hand control law
    // - robot is controlled in the camera frame
    task.setServo(vpServo::EYEINHAND_CAMERA);
    // Interaction matrix is computed with the current value of s
    task.setInteractionMatrixType(vpServo::CURRENT);

    // We build the task by "stacking" the visual features
    // previously defined
    task.addFeature(t);
    task.addFeature(p, pd);
    task.addFeature(tuz, vpFeatureThetaU::TUz); // selection of TUz

    // addFeature(X,Xd) means X should be regulated to Xd
    // addFeature(X) means that X should be regulated to 0
    // Some features such as vpFeatureThetaU MUST be regulated to zero
    // (otherwise, it will result in an error at execution level)

    // Set the gain
    task.setLambda(1);

    // Display task information
    task.print();

    //------------------------------------------------------------------
    // And now the closed loop

    unsigned int iter = 0;
    while (iter++ < 200) {
      std::cout << "---------------------------------------------" << iter << std::endl;
      vpColVector v;

      // Get the robot position
      robot.getPosition(wMc);
      // Compute the position of the object frame in the camera frame
      cMo = wMc.inverse() * wMo;

      // Update the features from the new camera pose
      P.track(cMo);
      vpFeatureBuilder::create(p, P);

      cdMc = cdMo * cMo.inverse();
      tuz.buildFrom(cdMc);
      t.buildFrom(cdMc);

      // Compute the control law: v = -lambda L^+(s-sd)
      v = task.computeControlLaw();

      // Send the camera velocity to the controller
      robot.setVelocity(vpRobot::CAMERA_FRAME, v);

      std::cout << "|| s - s* || = " << (task.getError()).sumSquare() << std::endl;
    }

    // Display task information
    task.print();
    // Final camera location
    std::cout << "Final camera location: \n" << cMo << std::endl;
    return EXIT_SUCCESS;
  }
  catch (const vpException &e) {
    std::cout << "Catch a ViSP exception: " << e << std::endl;
    // An exception means the simulation failed
    return EXIT_FAILURE;
  }
#else
  (void)argc;
  (void)argv;
  std::cout << "Cannot run this example: install Lapack, Eigen3 or OpenCV" << std::endl;
  return EXIT_SUCCESS;
#endif
}
Implementation of column vector and the associated operations.
Definition: vpColVector.h:191
Error that can be emitted by ViSP classes.
Definition: vpException.h:60
static void create(vpFeaturePoint &s, const vpCameraParameters &cam, const vpImagePoint &t)
Class that defines a 2D point visual feature which is composed by two parameters that are the cartesian coordinates x and y of the point.
Class that defines a 3D visual feature from an axis/angle parametrization that represents the rotation between two frames.
Class that defines the translation visual feature .
Implementation of an homogeneous matrix and operations on such kind of matrices.
vpHomogeneousMatrix & buildFrom(const vpTranslationVector &t, const vpRotationMatrix &R)
vpHomogeneousMatrix inverse() const
static double rad(double deg)
Definition: vpMath.h:129
static bool parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, int flags)
Definition: vpParseArgv.cpp:70
Class that defines a 3D point in the object frame and allows forward projection of a 3D point in the ...
Definition: vpPoint.h:79
Implementation of a pose vector and operations on poses.
Definition: vpPoseVector.h:203
void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel) VP_OVERRIDE
@ CAMERA_FRAME
Definition: vpRobot.h:84
void setInteractionMatrixType(const vpServoIteractionMatrixType &interactionMatrixType, const vpServoInversionType &interactionMatrixInversion=PSEUDO_INVERSE)
Definition: vpServo.cpp:380
@ EYEINHAND_CAMERA
Definition: vpServo.h:161
void addFeature(vpBasicFeature &s_cur, vpBasicFeature &s_star, unsigned int select=vpBasicFeature::FEATURE_ALL)
Definition: vpServo.cpp:331
void print(const vpServo::vpServoPrintType display_level=ALL, std::ostream &os=std::cout)
Definition: vpServo.cpp:171
void setLambda(double c)
Definition: vpServo.h:986
void setServo(const vpServoType &servo_type)
Definition: vpServo.cpp:134
vpColVector getError() const
Definition: vpServo.h:510
vpColVector computeControlLaw()
Definition: vpServo.cpp:705
@ CURRENT
Definition: vpServo.h:202
Class that defines the simplest robot: a free flying camera.