// servoSimuPoint2DhalfCamVelocity2.cpp — example from ViSP 2.6.2
/****************************************************************************
 *
 * $Id: servoSimuPoint2DhalfCamVelocity2.cpp 2457 2010-01-07 10:41:18Z nmelchio $
 *
 * This file is part of the ViSP software.
 * Copyright (C) 2005 - 2012 by INRIA. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * ("GPL") version 2 as published by the Free Software Foundation.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact INRIA about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://www.irisa.fr/lagadic/visp/visp.html for more information.
 *
 * This software was developed at:
 * INRIA Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 * http://www.irisa.fr/lagadic
 *
 * If you have questions regarding the use of this file, please contact
 * INRIA at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *
 * Description:
 * Simulation of a 2 1/2 D visual servoing using theta U visual features.
 *
 * Authors:
 * Eric Marchand
 * Fabien Spindler
 *
 *****************************************************************************/

#include <stdio.h>
#include <stdlib.h>

#include <visp/vpDebug.h>
#include <visp/vpFeatureBuilder.h>
#include <visp/vpFeaturePoint.h>
#include <visp/vpFeatureThetaU.h>
#include <visp/vpGenericFeature.h>
#include <visp/vpHomogeneousMatrix.h>
#include <visp/vpMath.h>
#include <visp/vpParseArgv.h>
#include <visp/vpPoint.h>
#include <visp/vpRobotCamera.h>
#include <visp/vpServo.h>

// List of allowed command line options
#define GETOPTARGS "h"

88 void usage(const char *name, const char *badparam)
89 {
90  fprintf(stdout, "\n\
91 Simulation of a 2 1/2 D visual servoing (x,y,log Z, theta U):\n\
92 - eye-in-hand control law,\n\
93 - velocity computed in the camera frame,\n\
94 - without display.\n\
95  \n\
96 SYNOPSIS\n\
97  %s [-h]\n", name);
98 
99  fprintf(stdout, "\n\
100 OPTIONS: Default\n\
101  \n\
102  -h\n\
103  Print the help.\n");
104 
105  if (badparam)
106  fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
107 }
108 
118 bool getOptions(int argc, const char **argv)
119 {
120  const char *optarg;
121  int c;
122  while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg)) > 1) {
123 
124  switch (c) {
125  case 'h': usage(argv[0], NULL); return false; break;
126 
127  default:
128  usage(argv[0], optarg);
129  return false; break;
130  }
131  }
132 
133  if ((c == 1) || (c == -1)) {
134  // standalone param or error
135  usage(argv[0], NULL);
136  std::cerr << "ERROR: " << std::endl;
137  std::cerr << " Bad argument " << optarg << std::endl << std::endl;
138  return false;
139  }
140 
141  return true;
142 }
143 
144 int
145 main(int argc, const char ** argv)
146 {
147  // Read the command line options
148  if (getOptions(argc, argv) == false) {
149  exit (-1);
150  }
151 
152  std::cout << std::endl ;
153  std::cout << "-------------------------------------------------------" << std::endl ;
154  std::cout << " simulation of a 2 1/2 D visual servoing " << std::endl ;
155  std::cout << "-------------------------------------------------------" << std::endl ;
156  std::cout << std::endl ;
157 
158  // In this example we will simulate a visual servoing task.
159  // In simulation, we have to define the scene frane Ro and the
160  // camera frame Rc.
161  // The camera location is given by an homogenous matrix cMo that
162  // describes the position of the camera in the scene frame.
163 
164 
165  vpServo task ;
166 
167 
168  vpTRACE("sets the initial camera location " ) ;
169  // we give the camera location as a size 6 vector (3 translations in meter
170  // and 3 rotation (theta U representation) )
171  vpPoseVector c_r_o(0.1,0.2,2,
172  vpMath::rad(20), vpMath::rad(10), vpMath::rad(50)
173  ) ;
174 
175  // this pose vector is then transformed in a 4x4 homogeneous matrix
176  vpHomogeneousMatrix cMo(c_r_o) ;
177 
178  // We define a robot
179  // The vpRobotCamera implements a simple moving that is juste defined
180  // by its location cMo
181  vpRobotCamera robot ;
182 
183  // the robot position is set to the defined cMo position
184  robot.setPosition(cMo) ;
185 
186  // Now that the current camera position has been defined,
187  // let us defined the defined camera location.
188  // It is defined by cdMo
189  vpTRACE("sets the desired camera location " ) ;
190  vpPoseVector cd_r_o(0,0,1,
192  vpHomogeneousMatrix cdMo(cd_r_o) ;
193 
194 
195 
196  //----------------------------------------------------------------------
197  // A 2 1/2 D visual servoing can be defined by
198  // - the position of a point x,y
199  // - the difference between this point depth and a desire depth
200  // modeled by log Z/Zd to be regulated to 0
201  // - the rotation that the camera has to realized cdMc
202 
203  // Let us now defined the current value of these features
204 
205 
206  // since we simulate we have to define a 3D point that will
207  // forward-projected to define the current position x,y of the
208  // reference point
209 
210  //------------------------------------------------------------------
211  // First feature (x,y)
212  //------------------------------------------------------------------
213  vpTRACE("1st feature (x,y)");
214  // Let oP be this ... point,
215  // a vpPoint class has three main member
216  // .oP : 3D coordinates in scene frame
217  // .cP : 3D coordinates in camera frame
218  // .p : 2D
219 
220  //------------------------------------------------------------------
221  vpTRACE("\tsets the point coordinates in the world frame " ) ;
222  vpPoint point ;
223  // defined point coordinates in the scene frame : oP
224  point.setWorldCoordinates(0,0,0) ;
225  // computes the point coordinates in the camera frame and its
226  // 2D coordinates cP and then p
227  vpTRACE("\tproject : computes the point coordinates in the camera frame and its 2D coordinates" ) ;
228  point.track(cMo) ;
229 
230  // We also defined (again by forward projection) the desired position
231  // of this point according to the desired camera position
232  vpPoint pointd ;
233  pointd.setWorldCoordinates(0,0,0) ;
234  pointd.track(cdMo) ;
235 
236  // Nevertheless, a vpPoint is not a feature, this is just a "tracker"
237  // from which the feature are built
238  // a feature is juste defined by a vector s, a way to compute the
239  // interaction matrix and the error, and if required a (or a vector of)
240  // 3D information
241 
242  // for a point (x,y) Visp implements the vpFeaturePoint class.
243  // we no defined a feature for x,y (and for (x*,y*))
244  vpFeaturePoint p,pd ;
245 
246  // and we initialized the vector s=(x,y) of p from the tracker P
247  // Z coordinates in p is also initialized, it will be used to compute
248  // the interaction matrix
249  vpFeatureBuilder::create(p,point) ;
250  vpFeatureBuilder::create(pd,pointd) ;
251 
252 
253  //------------------------------------------------------------------
254  vpTRACE("2nd feature (logZ)") ;
255  vpTRACE("\tnot necessary to project twice (reuse p)") ;
256  //------------------------------------------------------------------
257  // Second feature log (Z/Zd)
258  //
259 
260  // This case in intersting since this visual feature has not
261  // been predefined in VisP
262  // In such case we have a generic feature class vpGenericFeature
263  // We will have to defined
264  // the vector s : .set_s(...)
265  // the interaction matrix Ls : .setInteractionMatrix(...)
266 
267  // log(Z/Zd) is then a size 1 vector logZ
268  vpGenericFeature logZ(1) ;
269  // initialized to s = log(Z/Zd)
270  // Let us note that here we use the point P and Pd, it's not necessary
271  // to forward project twice (it's already done)
272  logZ.set_s(log(point.get_Z()/pointd.get_Z())) ;
273 
274 
275  // This visual has to be regulated to zero
276  vpTRACE("3rd feature ThetaU") ;
277 
278  //------------------------------------------------------------------
279  // 3rd feature ThetaU
280  // The thetaU feature is defined, tu represents the rotation that the camera
281  // has to realized.
282  // the complete displacement is then defined by:
283  //------------------------------------------------------------------
284  vpHomogeneousMatrix cdMc ;
285  vpTRACE("\tcompute the rotation that the camera has to realize " ) ;
286  cdMc = cdMo*cMo.inverse() ;
287 
288  // from this displacement, we extract the rotation cdRc represented by
289  // the angle theta and the rotation axis u
291  tu.buildFrom(cdMc) ;
292  // This visual has to be regulated to zero
293 
294  vpTRACE("\tsets the desired rotation (always zero !) ") ;
295  vpTRACE("\tsince s is the rotation that the camera has to realize ") ;
296 
297 
298  //------------------------------------------------------------------
299  // Let us now the task itself
300  //------------------------------------------------------------------
301 
302  vpTRACE("define the task") ;
303  vpTRACE("\t we want an eye-in-hand control law") ;
304  vpTRACE("\t robot is controlled in the camera frame") ;
305  // we choose to control the robot in the camera frame
307  // Interaction matrix is computed with the current value of s
309 
310  // we build the task by "stacking" the visual feature
311  // previously defined
312  task.addFeature(p,pd) ;
313  task.addFeature(logZ) ;
314  task.addFeature(tu) ;
315  // addFeature(X,Xd) means X should be regulated to Xd
316  // addFeature(X) means that X should be regulated to 0
317  // some features such as vpFeatureThetaU MUST be regulated to zero
318  // (otherwise, it will results in an error at exectution level)
319 
320  vpTRACE("\t set the gain") ;
321  task.setLambda(1) ;
322 
323 
324  vpTRACE("Display task information " ) ;
325  task.print() ;
326  //------------------------------------------------------------------
327  // An now the closed loop
328 
329  unsigned int iter=0 ;
330  vpTRACE("\t loop") ;
331  while(iter++<200)
332  {
333  std::cout << "---------------------------------------------" << iter <<std::endl ;
334  vpColVector v ;
335 
336  if (iter==1) vpTRACE("\t\t get the robot position ") ;
337  robot.getPosition(cMo) ;
338 
339  if (iter==1) vpTRACE("\t\t update the feature ") ;
340  point.track(cMo) ;
341  vpFeatureBuilder::create(p,point) ;
342 
343  cdMc = cdMo*cMo.inverse() ;
344  tu.buildFrom(cdMc) ;
345 
346  if (iter==1) vpTRACE("\t\t there is no feature for logZ, we explicitely "
347  "build the related interaction matrix") ;
348  logZ.set_s(log(point.get_Z()/pointd.get_Z())) ;
349  vpMatrix LlogZ(1,6) ;
350  LlogZ[0][0] = LlogZ[0][1] = LlogZ[0][5] = 0 ;
351  LlogZ[0][2] = -1/p.get_Z() ;
352  LlogZ[0][3] = -p.get_y() ;
353  LlogZ[0][4] = p.get_x() ;
354 
355  logZ.setInteractionMatrix(LlogZ) ;
356 
357 
358  if (iter==1) vpTRACE("\t\t compute the control law ") ;
359  v = task.computeControlLaw() ;
360 
361  if (iter==1) task.print() ;
362 
363  if (iter==1) vpTRACE("\t\t send the camera velocity to the controller ") ;
365  // Note that for vpRobotCamera, camera position cMo, is updated using the
366  // exponential map.
367 
368 
369  std::cout << ( task.getError() ).sumSquare() <<std::endl ; ;
370  }
371 
372  vpTRACE("Display task information " ) ;
373  task.print() ;
374  task.kill();
375  vpTRACE("Final camera location " ) ;
376  std::cout << cMo << std::endl ;
377 }
378 
Definition of the vpMatrix class.
Definition: vpMatrix.h:96
The class provides a data structure for the homogeneous matrices as well as a set of operations on th...
#define vpTRACE
Definition: vpDebug.h:401
void addFeature(vpBasicFeature &s, vpBasicFeature &s_star, const unsigned int select=vpBasicFeature::FEATURE_ALL)
create a new set of two visual features
Definition: vpServo.cpp:444
void setLambda(double _lambda)
set the gain lambda
Definition: vpServo.h:250
void track(const vpHomogeneousMatrix &cMo)
Class that defines a 2D point visual feature which is composed by two parameters that are the cartes...
void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel)
static bool parse(int *argcPtr, const char **argv, vpArgvInfo *argTable, int flags)
Definition: vpParseArgv.cpp:79
Class that defines what is a point.
Definition: vpPoint.h:65
void kill()
destruction (memory deallocation if required)
Definition: vpServo.cpp:177
vpColVector getError() const
Definition: vpServo.h:298
vpColVector computeControlLaw()
compute the desired control law
Definition: vpServo.cpp:883
Class that defines the simplest robot: a free flying camera.
Definition: vpRobotCamera.h:65
double get_Z() const
void getPosition(vpColVector &q)
void setPosition(const vpRobot::vpControlFrameType, const vpColVector &)
Set a displacement (frame has to be specified) in position control.
void buildFrom(const vpTranslationVector &t, const vpRotationMatrix &R)
Construction from translation vector and rotation matrix.
void setInteractionMatrixType(const vpServoIteractionMatrixType &interactionMatrixType, const vpServoInversionType &interactionMatrixInversion=PSEUDO_INVERSE)
Set the type of the interaction matrix (current, mean, desired, user).
Definition: vpServo.cpp:509
static double rad(double deg)
Definition: vpMath.h:100
double get_Z() const
Get the point Z coordinate in the camera frame.
Definition: vpPoint.h:129
Class that provides a data structure for the column vectors as well as a set of operations on these v...
Definition: vpColVector.h:72
double get_y() const
double get_x() const
The pose is a complete representation of every rigid motion in the euclidian space.
Definition: vpPoseVector.h:92
vpHomogeneousMatrix inverse() const
Class that defines a 3D visual feature from a axis/angle parametrization that represent the rotatio...
void print(const vpServo::vpServoPrintType display_level=ALL, std::ostream &os=std::cout)
Definition: vpServo.cpp:258
Class that enables to define a feature or a set of features which are not implemented in ViSP as a sp...
static void create(vpFeaturePoint &s, const vpCameraParameters &cam, const vpDot &d)
Class required to compute the visual servoing control law.
Definition: vpServo.h:150
void setServo(vpServoType _servo_type)
Choice of the visual servoing control law.
Definition: vpServo.cpp:214
void setWorldCoordinates(const double ox, const double oy, const double oz)
Set the point world coordinates. We mean here the coordinates of the point in the object frame...
Definition: vpPoint.cpp:74