// ViSP 2.8.0 example: servoSimuPoint2DhalfCamVelocity3.cpp

/****************************************************************************
 *
 * $Id: servoSimuPoint2DhalfCamVelocity3.cpp 2457 2010-01-07 10:41:18Z nmelchio $
 *
 * This file is part of the ViSP software.
 * Copyright (C) 2005 - 2013 by INRIA. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * ("GPL") version 2 as published by the Free Software Foundation.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact INRIA about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://www.irisa.fr/lagadic/visp/visp.html for more information.
 *
 * This software was developed at:
 * INRIA Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 * http://www.irisa.fr/lagadic
 *
 * If you have questions regarding the use of this file, please contact
 * INRIA at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *
 * Description:
 * Simulation of a 2 1/2 D visual servoing using theta U visual features.
 *
 * Authors:
 * Eric Marchand
 * Fabien Spindler
 *
 *****************************************************************************/

#include <stdio.h>
#include <stdlib.h>

#include <visp/vpFeatureBuilder.h>
#include <visp/vpFeaturePoint.h>
#include <visp/vpFeatureThetaU.h>
#include <visp/vpFeatureTranslation.h>
#include <visp/vpGenericFeature.h>
#include <visp/vpHomogeneousMatrix.h>
#include <visp/vpMath.h>
#include <visp/vpParseArgv.h>
#include <visp/vpPoint.h>
#include <visp/vpServo.h>
#include <visp/vpSimulatorCamera.h>

// List of allowed command line options
#define GETOPTARGS "h"
/*!
  Print the program usage on stdout.

  \param name : Program name (argv[0]).
  \param badparam : Bad parameter name to report, or NULL if none.
*/
void usage(const char *name, const char *badparam)
{
  fprintf(stdout, "\n\
Simulation of a 2 1/2 D visual servoing (x,y,logZ, theta U):\n\
- eye-in-hand control law,\n\
- velocity computed in the camera frame,\n\
- without display.\n\
\n\
SYNOPSIS\n\
  %s [-h]\n", name);

  fprintf(stdout, "\n\
OPTIONS: Default\n\
\n\
  -h\n\
     Print the help.\n");

  if (badparam)
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}
110 bool getOptions(int argc, const char **argv)
111 {
112  const char *optarg;
113  int c;
114  while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg)) > 1) {
115 
116  switch (c) {
117  case 'h': usage(argv[0], NULL); return false; break;
118 
119  default:
120  usage(argv[0], optarg);
121  return false; break;
122  }
123  }
124 
125  if ((c == 1) || (c == -1)) {
126  // standalone param or error
127  usage(argv[0], NULL);
128  std::cerr << "ERROR: " << std::endl;
129  std::cerr << " Bad argument " << optarg << std::endl << std::endl;
130  return false;
131  }
132 
133  return true;
134 }
135 
136 int
137 main(int argc, const char ** argv)
138 {
139  // Read the command line options
140  if (getOptions(argc, argv) == false) {
141  exit (-1);
142  }
143 
144  std::cout << std::endl ;
145  std::cout << "-------------------------------------------------------" << std::endl ;
146  std::cout << " simulation of a 2 1/2 D visual servoing " << std::endl ;
147  std::cout << "-------------------------------------------------------" << std::endl ;
148  std::cout << std::endl ;
149 
150  // In this example we will simulate a visual servoing task.
151  // In simulation, we have to define the scene frane Ro and the
152  // camera frame Rc.
153  // The camera location is given by an homogenous matrix cMo that
154  // describes the position of the camera in the scene frame.
155 
156  vpServo task ;
157 
158  // sets the initial camera location
159  // we give the camera location as a size 6 vector (3 translations in meter
160  // and 3 rotation (theta U representation)
161  vpPoseVector c_r_o(0.1,0.2,2,
162  vpMath::rad(20), vpMath::rad(10), vpMath::rad(50)
163  ) ;
164 
165  // this pose vector is then transformed in a 4x4 homogeneous matrix
166  vpHomogeneousMatrix cMo(c_r_o) ;
167 
168  // We define a robot
169  // The vpSimulatorCamera implements a simple moving that is juste defined
170  // by its location cMo
171  vpSimulatorCamera robot ;
172 
173  // Compute the position of the object in the world frame
174  vpHomogeneousMatrix wMc, wMo;
175  robot.getPosition(wMc) ;
176  wMo = wMc * cMo;
177 
178  // Now that the current camera position has been defined,
179  // let us defined the defined camera location.
180  // It is defined by cdMo
181  // sets the desired camera location " ) ;
182  vpPoseVector cd_r_o(0,0,1,
184  vpHomogeneousMatrix cdMo(cd_r_o) ;
185 
186  //----------------------------------------------------------------------
187  // A 2 1/2 D visual servoing can be defined by
188  // - the position of a point x,y
189  // - the difference between this point depth and a desire depth
190  // modeled by log Z/Zd to be regulated to 0
191  // - the rotation that the camera has to realized cdMc
192 
193  // Let us now defined the current value of these features
194 
195 
196  // since we simulate we have to define a 3D point that will
197  // forward-projected to define the current position x,y of the
198  // reference point
199 
200  //------------------------------------------------------------------
201  // First feature (x,y)
202  //------------------------------------------------------------------
203  // Let oP be this ... point,
204  // a vpPoint class has three main member
205  // .oP : 3D coordinates in scene frame
206  // .cP : 3D coordinates in camera frame
207  // .p : 2D
208 
209  //------------------------------------------------------------------
210  // sets the point coordinates in the world frame
211  vpPoint P ;
212  // defined point coordinates in the scene frame : oP
213  P.setWorldCoordinates(0,0,0) ;
214  // computes the P coordinates in the camera frame and its
215  // 2D coordinates cP and then p
216  // computes the point coordinates in the camera frame and its 2D coordinates
217  P.track(cMo) ;
218 
219  // We also defined (again by forward projection) the desired position
220  // of this point according to the desired camera position
221  vpPoint Pd ;
222  Pd.setWorldCoordinates(0,0,0) ;
223  Pd.track(cdMo) ;
224 
225  // Nevertheless, a vpPoint is not a feature, this is just a "tracker"
226  // from which the feature are built
227  // a feature is juste defined by a vector s, a way to compute the
228  // interaction matrix and the error, and if required a (or a vector of)
229  // 3D information
230 
231  // for a point (x,y) Visp implements the vpFeaturePoint class.
232  // we no defined a feature for x,y (and for (x*,y*))
233  vpFeaturePoint p,pd ;
234 
235  // and we initialized the vector s=(x,y) of p from the tracker P
236  // Z coordinates in p is also initialized, it will be used to compute
237  // the interaction matrix
239  vpFeatureBuilder::create(pd,Pd) ;
240 
241  // This visual has to be regulated to zero
242 
243  //------------------------------------------------------------------
244  // 2nd feature ThetaUz and 3rd feature t
245  // The thetaU feature is defined, tu represents the rotation that the camera
246  // has to realized. t the translation.
247  // the complete displacement is then defined by:
248  //------------------------------------------------------------------
249  vpHomogeneousMatrix cdMc ;
250  // compute the rotation that the camera has to achieve
251  cdMc = cdMo*cMo.inverse() ;
252 
253  // from this displacement, we extract the rotation cdRc represented by
254  // the angle theta and the rotation axis u
256  tuz.buildFrom(cdMc) ;
257  // And the translations
259  t.buildFrom(cdMc) ;
260 
261  // This visual has to be regulated to zero
262 
263  // sets the desired rotation (always zero !)
264  // since s is the rotation that the camera has to achieve
265 
266  //------------------------------------------------------------------
267  // Let us now the task itself
268  //------------------------------------------------------------------
269 
270  // define the task
271  // - we want an eye-in-hand control law
272  // - robot is controlled in the camera frame
273  // we choose to control the robot in the camera frame
275  // Interaction matrix is computed with the current value of s
277 
278  // we build the task by "stacking" the visual feature
279  // previously defined
280  task.addFeature(t) ;
281  task.addFeature(p,pd) ;
282  task.addFeature(tuz,vpFeatureThetaU::TUz) ; //selection of TUz
283 
284  // addFeature(X,Xd) means X should be regulated to Xd
285  // addFeature(X) means that X should be regulated to 0
286  // some features such as vpFeatureThetaU MUST be regulated to zero
287  // (otherwise, it will results in an error at exectution level)
288 
289  // set the gain
290  task.setLambda(1) ;
291 
292  // Display task information " ) ;
293  task.print() ;
294  //------------------------------------------------------------------
295  // An now the closed loop
296 
297  unsigned int iter=0 ;
298  // loop
299  while(iter++<200)
300  {
301  std::cout << "---------------------------------------------" << iter <<std::endl ;
302  vpColVector v ;
303 
304  // get the robot position
305  robot.getPosition(wMc) ;
306  // Compute the position of the camera wrt the object frame
307  cMo = wMc.inverse() * wMo;
308 
309  // update the feature
310  P.track(cMo) ;
312 
313  cdMc = cdMo*cMo.inverse() ;
314  tuz.buildFrom(cdMc) ;
315  t.buildFrom(cdMc) ;
316 
317  // compute the control law: v = -lambda L^+(s-sd)
318  v = task.computeControlLaw() ;
319 
320  // send the camera velocity to the controller
322 
323  std::cout << "|| s - s* || = " << ( task.getError() ).sumSquare() <<std::endl ;
324  }
325 
326  // Display task information
327  task.print() ;
328  task.kill();
329  // Final camera location
330  std::cout << "Final camera location: \n" << cMo << std::endl ;
331 }
332 
/*
 * Reference notes (doxygen cross-references extracted with this example):
 * - vpFeatureTranslation: class that defines the translation visual feature.
 * - vpSimulatorCamera: class that defines the simplest robot, a free-flying
 *   camera; void setVelocity(const vpRobot::vpControlFrameType frame,
 *   const vpColVector &vel); void getPosition(vpHomogeneousMatrix &wMc) const.
 * - vpHomogeneousMatrix: data structure for homogeneous matrices and a set
 *   of operations on them; vpHomogeneousMatrix inverse() const;
 *   void buildFrom(const vpTranslationVector &t, const vpRotationMatrix &R)
 *   constructs from a translation vector and a rotation matrix.
 * - vpServo (vpServo.h:153): class required to compute the visual servoing
 *   control law.
 *   - void addFeature(vpBasicFeature &s, vpBasicFeature &s_star,
 *     const unsigned int select = vpBasicFeature::FEATURE_ALL): create a new
 *     set of two visual features (vpServo.cpp:444).
 *   - void setLambda(double _lambda): set the gain lambda (vpServo.h:253).
 *   - void setServo(vpServoType _servo_type): choice of the visual servoing
 *     control law (vpServo.cpp:214).
 *   - void setInteractionMatrixType(const vpServoIteractionMatrixType
 *     &interactionMatrixType, const vpServoInversionType
 *     &interactionMatrixInversion = PSEUDO_INVERSE): set the type of the
 *     interaction matrix (current, mean, desired, user) (vpServo.cpp:509).
 *   - vpColVector computeControlLaw(): compute the desired control law
 *     (vpServo.cpp:883).
 *   - vpColVector getError() const (vpServo.h:301).
 *   - void kill(): destruction, memory deallocation if required
 *     (vpServo.cpp:177).
 *   - void print(const vpServo::vpServoPrintType display_level = ALL,
 *     std::ostream &os = std::cout) (vpServo.cpp:258).
 * - vpPoint (vpPoint.h:65): class that defines what is a point;
 *   void track(const vpHomogeneousMatrix &cMo);
 *   void setWorldCoordinates(const double ox, const double oy,
 *   const double oz): set the point coordinates in the object frame
 *   (vpPoint.cpp:74).
 * - vpFeaturePoint: class that defines a 2D point visual feature composed of
 *   two parameters, the cartesian coordinates (x,y).
 * - vpFeatureThetaU: class that defines a 3D visual feature from an
 *   axis/angle parametrization representing a rotation.
 * - vpFeatureBuilder: static void create(vpFeaturePoint &s,
 *   const vpCameraParameters &cam, const vpDot &d).
 * - vpParseArgv: static bool parse(int *argcPtr, const char **argv,
 *   vpArgvInfo *argTable, int flags) (vpParseArgv.cpp:79).
 * - vpMath: static double rad(double deg) (vpMath.h:100).
 * - vpColVector (vpColVector.h:72): data structure for column vectors and a
 *   set of operations on them.
 * - vpPoseVector (vpPoseVector.h:92): a complete representation of every
 *   rigid motion in the euclidean space.
 */