// ViSP 2.9.0 example: servoSimuPoint2DhalfCamVelocity2.cpp
// (header text recovered from a Doxygen page capture)
2 /****************************************************************************
3  *
4  * $Id: servoSimuPoint2DhalfCamVelocity2.cpp 2457 2010-01-07 10:41:18Z nmelchio $
5  *
6  * This file is part of the ViSP software.
7  * Copyright (C) 2005 - 2014 by INRIA. All rights reserved.
8  *
9  * This software is free software; you can redistribute it and/or
10  * modify it under the terms of the GNU General Public License
11  * ("GPL") version 2 as published by the Free Software Foundation.
12  * See the file LICENSE.txt at the root directory of this source
13  * distribution for additional information about the GNU GPL.
14  *
15  * For using ViSP with software that can not be combined with the GNU
16  * GPL, please contact INRIA about acquiring a ViSP Professional
17  * Edition License.
18  *
19  * See http://www.irisa.fr/lagadic/visp/visp.html for more information.
20  *
21  * This software was developed at:
22  * INRIA Rennes - Bretagne Atlantique
23  * Campus Universitaire de Beaulieu
24  * 35042 Rennes Cedex
25  * France
26  * http://www.irisa.fr/lagadic
27  *
28  * If you have questions regarding the use of this file, please contact
29  * INRIA at visp@inria.fr
30  *
31  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
32  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
33  *
34  *
35  * Description:
36  * Simulation of a 2 1/2 D visual servoing using theta U visual features.
37  *
38  * Authors:
39  * Eric Marchand
40  * Fabien Spindler
41  *
42  *****************************************************************************/
43 
44 
55 #include <stdlib.h>
56 #include <stdio.h>
57 
58 #include <visp/vpFeatureBuilder.h>
59 #include <visp/vpFeaturePoint.h>
60 #include <visp/vpFeatureThetaU.h>
61 #include <visp/vpGenericFeature.h>
62 #include <visp/vpHomogeneousMatrix.h>
63 #include <visp/vpMath.h>
64 #include <visp/vpParseArgv.h>
65 #include <visp/vpPoint.h>
66 #include <visp/vpServo.h>
67 #include <visp/vpSimulatorCamera.h>
68 
69 // List of allowed command line options
70 #define GETOPTARGS "h"
71 
72 void usage(const char *name, const char *badparam);
73 bool getOptions(int argc, const char **argv);
74 
/*!
  Print the help message describing the command line options.

  \param name : Program name (argv[0]), substituted into the synopsis.
  \param badparam : Name of an invalid parameter to report, or NULL when
  the help is printed without an error.
*/
void usage(const char *name, const char *badparam)
{
  fprintf(stdout, "\n\
Simulation of a 2 1/2 D visual servoing (x,y,log Z, theta U):\n\
- eye-in-hand control law,\n\
- velocity computed in the camera frame,\n\
- without display.\n\
 \n\
SYNOPSIS\n\
 %s [-h]\n", name);

  fprintf(stdout, "\n\
OPTIONS: Default\n\
 \n\
 -h\n\
 Print the help.\n");

  // Only mention a bad parameter when one was actually given.
  if (badparam != NULL) {
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
  }
}
103 
113 bool getOptions(int argc, const char **argv)
114 {
115  const char *optarg_;
116  int c;
117  while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {
118 
119  switch (c) {
120  case 'h': usage(argv[0], NULL); return false; break;
121 
122  default:
123  usage(argv[0], optarg_);
124  return false; break;
125  }
126  }
127 
128  if ((c == 1) || (c == -1)) {
129  // standalone param or error
130  usage(argv[0], NULL);
131  std::cerr << "ERROR: " << std::endl;
132  std::cerr << " Bad argument " << optarg_ << std::endl << std::endl;
133  return false;
134  }
135 
136  return true;
137 }
138 
139 int
140 main(int argc, const char ** argv)
141 {
142  try {
143  // Read the command line options
144  if (getOptions(argc, argv) == false) {
145  exit (-1);
146  }
147 
148  std::cout << std::endl ;
149  std::cout << "-------------------------------------------------------" << std::endl ;
150  std::cout << " simulation of a 2 1/2 D visual servoing " << std::endl ;
151  std::cout << "-------------------------------------------------------" << std::endl ;
152  std::cout << std::endl ;
153 
154  // In this example we will simulate a visual servoing task.
155  // In simulation, we have to define the scene frane Ro and the
156  // camera frame Rc.
157  // The camera location is given by an homogenous matrix cMo that
158  // describes the position of the camera in the scene frame.
159 
160  vpServo task ;
161 
162  // sets the initial camera location
163  // we give the camera location as a size 6 vector (3 translations in meter
164  // and 3 rotation (theta U representation)
165  vpPoseVector c_r_o(0.1,0.2,2,
166  vpMath::rad(20), vpMath::rad(10), vpMath::rad(50)
167  ) ;
168 
169  // this pose vector is then transformed in a 4x4 homogeneous matrix
170  vpHomogeneousMatrix cMo(c_r_o) ;
171 
172  // We define a robot
173  // The vpSimulatorCamera implements a simple moving that is juste defined
174  // by its location cMo
175  vpSimulatorCamera robot ;
176 
177  // Compute the position of the object in the world frame
178  vpHomogeneousMatrix wMc, wMo;
179  robot.getPosition(wMc) ;
180  wMo = wMc * cMo;
181 
182  // Now that the current camera position has been defined,
183  // let us defined the defined camera location.
184  // It is defined by cdMo
185  // sets the desired camera location
186  vpPoseVector cd_r_o(0,0,1,
188  vpHomogeneousMatrix cdMo(cd_r_o) ;
189 
190 
191  //----------------------------------------------------------------------
192  // A 2 1/2 D visual servoing can be defined by
193  // - the position of a point x,y
194  // - the difference between this point depth and a desire depth
195  // modeled by log Z/Zd to be regulated to 0
196  // - the rotation that the camera has to realized cdMc
197 
198  // Let us now defined the current value of these features
199 
200 
201  // since we simulate we have to define a 3D point that will
202  // forward-projected to define the current position x,y of the
203  // reference point
204 
205  //------------------------------------------------------------------
206  // First feature (x,y)
207  //------------------------------------------------------------------
208  // Let oP be this ... point,
209  // a vpPoint class has three main member
210  // .oP : 3D coordinates in scene frame
211  // .cP : 3D coordinates in camera frame
212  // .p : 2D
213 
214  //------------------------------------------------------------------
215  // sets the point coordinates in the world frame
216  vpPoint point ;
217  // defined point coordinates in the scene frame : oP
218  point.setWorldCoordinates(0,0,0) ;
219  // computes the point coordinates in the camera frame and its
220  // 2D coordinates cP and then p
221  // computes the point coordinates in the camera frame and its 2D coordinates" ) ;
222  point.track(cMo) ;
223 
224  // We also defined (again by forward projection) the desired position
225  // of this point according to the desired camera position
226  vpPoint pointd ;
227  pointd.setWorldCoordinates(0,0,0) ;
228  pointd.track(cdMo) ;
229 
230  // Nevertheless, a vpPoint is not a feature, this is just a "tracker"
231  // from which the feature are built
232  // a feature is juste defined by a vector s, a way to compute the
233  // interaction matrix and the error, and if required a (or a vector of)
234  // 3D information
235 
236  // for a point (x,y) Visp implements the vpFeaturePoint class.
237  // we no defined a feature for x,y (and for (x*,y*))
238  vpFeaturePoint p,pd ;
239 
240  // and we initialized the vector s=(x,y) of p from the tracker P
241  // Z coordinates in p is also initialized, it will be used to compute
242  // the interaction matrix
243  vpFeatureBuilder::create(p,point) ;
244  vpFeatureBuilder::create(pd,pointd) ;
245 
246  //------------------------------------------------------------------
247  // Second feature log (Z/Zd)
248  // not necessary to project twice (reuse p)
249 
250  // This case in intersting since this visual feature has not
251  // been predefined in VisP
252  // In such case we have a generic feature class vpGenericFeature
253  // We will have to defined
254  // the vector s : .set_s(...)
255  // the interaction matrix Ls : .setInteractionMatrix(...)
256 
257  // log(Z/Zd) is then a size 1 vector logZ
258  vpGenericFeature logZ(1) ;
259  // initialized to s = log(Z/Zd)
260  // Let us note that here we use the point P and Pd, it's not necessary
261  // to forward project twice (it's already done)
262  logZ.set_s(log(point.get_Z()/pointd.get_Z())) ;
263 
264  // This visual has to be regulated to zero
265 
266  //------------------------------------------------------------------
267  // 3rd feature ThetaU
268  // The thetaU feature is defined, tu represents the rotation that the camera
269  // has to realized.
270  // the complete displacement is then defined by:
271  //------------------------------------------------------------------
272  vpHomogeneousMatrix cdMc ;
273  // compute the rotation that the camera has to achieve
274  cdMc = cdMo*cMo.inverse() ;
275 
276  // from this displacement, we extract the rotation cdRc represented by
277  // the angle theta and the rotation axis u
279  tu.buildFrom(cdMc) ;
280  // This visual has to be regulated to zero
281 
282  // sets the desired rotation (always zero !)
283  // since s is the rotation that the camera has to realize
284 
285  //------------------------------------------------------------------
286  // Let us now the task itself
287  //------------------------------------------------------------------
288 
289  // define the task
290  // - we want an eye-in-hand control law
291  // - robot is controlled in the camera frame
292  // we choose to control the robot in the camera frame
294  // Interaction matrix is computed with the current value of s
296 
297  // we build the task by "stacking" the visual feature
298  // previously defined
299  task.addFeature(p,pd) ;
300  task.addFeature(logZ) ;
301  task.addFeature(tu) ;
302  // addFeature(X,Xd) means X should be regulated to Xd
303  // addFeature(X) means that X should be regulated to 0
304  // some features such as vpFeatureThetaU MUST be regulated to zero
305  // (otherwise, it will results in an error at exectution level)
306 
307  // set the gain
308  task.setLambda(1) ;
309 
310  // Display task information
311  task.print() ;
312  //------------------------------------------------------------------
313  // An now the closed loop
314 
315  unsigned int iter=0 ;
316  // loop
317  while(iter++<200)
318  {
319  std::cout << "---------------------------------------------" << iter <<std::endl ;
320  vpColVector v ;
321 
322  // get the robot position
323  robot.getPosition(wMc) ;
324  // Compute the position of the camera wrt the object frame
325  cMo = wMc.inverse() * wMo;
326 
327  // update the feature
328  point.track(cMo) ;
329  vpFeatureBuilder::create(p,point) ;
330 
331  cdMc = cdMo*cMo.inverse() ;
332  tu.buildFrom(cdMc) ;
333 
334  // there is no feature for logZ, we explicitely build
335  // the related interaction matrix") ;
336  logZ.set_s(log(point.get_Z()/pointd.get_Z())) ;
337  vpMatrix LlogZ(1,6) ;
338  LlogZ[0][0] = LlogZ[0][1] = LlogZ[0][5] = 0 ;
339  LlogZ[0][2] = -1/p.get_Z() ;
340  LlogZ[0][3] = -p.get_y() ;
341  LlogZ[0][4] = p.get_x() ;
342 
343  logZ.setInteractionMatrix(LlogZ) ;
344 
345  // compute the control law
346  v = task.computeControlLaw() ;
347 
348  // send the camera velocity to the controller ") ;
350 
351  std::cout << "|| s - s* || = " << ( task.getError() ).sumSquare() <<std::endl ;
352  }
353 
354  // Display task information
355  task.print() ;
356  task.kill();
357  // Final camera location
358  std::cout << cMo << std::endl ;
359  return 0;
360  }
361  catch(vpException e) {
362  std::cout << "Catch a ViSP exception: " << e << std::endl;
363  return 1;
364  }
365 }
366 
/* ---------------------------------------------------------------------------
 * The following text is Doxygen cross-reference tooltip residue captured
 * together with this example page. It is kept here as a comment so the file
 * remains compilable; it is not part of the example source.
 *
 * Definition of the vpMatrix class.                       (vpMatrix.h:98)
 * void setVelocity(const vpRobot::vpControlFrameType frame, const vpColVector &vel)
 * The class provides a data structure for the homogeneous matrices as well
 * as a set of operations on them.
 * Class that defines the simplest robot: a free flying camera.
 * void addFeature(vpBasicFeature &s, vpBasicFeature &s_star,
 *                 const unsigned int select=vpBasicFeature::FEATURE_ALL)
 *                                                          (vpServo.cpp:449)
 * error that can be emitted by ViSP classes.               (vpException.h:76)
 * void track(const vpHomogeneousMatrix &cMo)
 * Class that defines a 2D point visual feature which is composed by two
 * parameters that are the cartesian coordinates.
 * static bool parse(int *argcPtr, const char **argv, vpArgvInfo *argTable,
 *                   int flags)                             (vpParseArgv.cpp:79)
 * Class that defines what is a point.                      (vpPoint.h:65)
 * void kill()                                              (vpServo.cpp:189)
 * vpColVector getError() const                             (vpServo.h:257)
 * vpColVector computeControlLaw()                          (vpServo.cpp:902)
 * double get_Z() const
 * void getPosition(vpHomogeneousMatrix &wMc) const
 * void setLambda(double c)                                 (vpServo.h:370)
 * void buildFrom(const vpTranslationVector &t, const vpRotationMatrix &R)
 * Construction from translation vector and rotation matrix.
 * void setInteractionMatrixType(
 *     const vpServoIteractionMatrixType &interactionMatrixType,
 *     const vpServoInversionType &interactionMatrixInversion=PSEUDO_INVERSE)
 *                                                          (vpServo.cpp:522)
 * static double rad(double deg)                            (vpMath.h:100)
 * double get_Z() const — Get the point Z coordinate in the camera frame.
 *                                                          (vpPoint.h:122)
 * Class that provides a data structure for the column vectors as well as a
 * set of operations on them.                               (vpColVector.h:72)
 * double get_y() const
 * double get_x() const
 * The pose is a complete representation of every rigid motion in the
 * euclidean space.                                         (vpPoseVector.h:92)
 * vpHomogeneousMatrix inverse() const
 * Class that defines a 3D visual feature from an axis/angle parametrization
 * that represents the rotation.
 * void print(const vpServo::vpServoPrintType display_level=ALL,
 *            std::ostream &os=std::cout)                   (vpServo.cpp:251)
 * Class that enables to define a feature or a set of features which are not
 * implemented in ViSP as a specific feature.
 * static void create(vpFeaturePoint &s, const vpCameraParameters &cam,
 *                    const vpDot &d)
 * void setServo(const vpServoType &servo_type)             (vpServo.cpp:220)
 * void setWorldCoordinates(const double ox, const double oy, const double oz)
 * Set the point world coordinates (coordinates of the point in the object
 * frame).                                                  (vpPoint.cpp:74)
 * ------------------------------------------------------------------------- */