ViSP  2.6.2
servoAfma6Points2DCamVelocityEyeToHand.cpp
/****************************************************************************
 *
 * $Id: servoAfma6Points2DCamVelocityEyeToHand.cpp 3668 2012-04-04 09:07:10Z fspindle $
 *
 * This file is part of the ViSP software.
 * Copyright (C) 2005 - 2012 by INRIA. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * ("GPL") version 2 as published by the Free Software Foundation.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact INRIA about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://www.irisa.fr/lagadic/visp/visp.html for more information.
 *
 * This software was developed at:
 * INRIA Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 * http://www.irisa.fr/lagadic
 *
 * If you have questions regarding the use of this file, please contact
 * INRIA at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * tests the control law
 * eye-to-hand control
 * velocity computed in the camera frame
 *
 * Authors:
 * Eric Marchand
 *
 *****************************************************************************/
#include <visp/vpConfig.h>
#include <visp/vpDebug.h> // Debug trace
#include <stdio.h>  // sprintf
#include <stdlib.h>
#include <cmath>    // std::fabs
#include <limits>   // numeric_limits
#include <list>
#include <iostream> // std::cout, std::cin
#include <fstream>  // std::ifstream, std::ofstream
#if (defined (VISP_HAVE_AFMA6) && defined (VISP_HAVE_DC1394_2))

#define SAVE 0

#include <visp/vp1394TwoGrabber.h>
#include <visp/vpImage.h>
#include <visp/vpImagePoint.h>
#include <visp/vpMath.h>
#include <visp/vpHomogeneousMatrix.h>
#include <visp/vpFeaturePoint.h>
#include <visp/vpPoint.h>
#include <visp/vpServo.h>
#include <visp/vpFeatureBuilder.h>
#include <visp/vpRobotAfma6.h>
#include <visp/vpException.h>
#include <visp/vpMatrixException.h>
#include <visp/vpServoDisplay.h>
#include <visp/vpDot.h>
#include <visp/vpPose.h>
#include <visp/vpImageIo.h>
#include <visp/vpDisplay.h>
#include <visp/vpDisplayX.h>
#include <visp/vpCameraParameters.h>     // vpCameraParameters
#include <visp/vpPixelMeterConversion.h> // vpPixelMeterConversion::convertPoint()

// Geometry of the dot pattern (in meters) used to build the 3D model of the object
#define L 0.006
#define D 0
int main()
{
  try
  {
    vpServo task ;

    vpCameraParameters cam ;
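    // Image declaration missing from the extracted listing; a grey-level image is
    // clearly required by g.open(I), g.acquire(I), the display and the dot trackers below.
    vpImage<unsigned char> I ;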
    int i ;

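    // Firewire grabber setup missing from the extracted listing; the video mode and
    // framerate below are assumptions, only the grabber itself is required by g.open(I).
    vp1394TwoGrabber g ;
    g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_MONO8) ;
    g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_60) ;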
    g.open(I) ;

    g.acquire(I) ;

    vpDisplayX display(I, 100, 100, "Servo") ;

    vpDisplay::display(I) ;
    vpDisplay::flush(I) ;

    std::cout << std::endl ;
    std::cout << "-------------------------------------------------------" << std::endl ;
    std::cout << " Test program for vpServo " << std::endl ;
    std::cout << " Eye-to-hand task control" << std::endl ;
    std::cout << " task : servo a point " << std::endl ;
    std::cout << "-------------------------------------------------------" << std::endl ;
    std::cout << std::endl ;

    const int nbPoint = 7 ; // const so that it can size the arrays below

    vpDot dot[nbPoint] ;
    vpImagePoint cog ;

    // Initialize the tracking of the nbPoint dots (the user clicks on each dot)
    for (i = 0 ; i < nbPoint ; i++)
    {
      dot[i].initTracking(I) ;
      dot[i].setGraphics(true) ;
      dot[i].track(I) ;
      vpDisplay::flush(I) ;
      dot[i].setGraphics(false) ;
    }

    // 3D model of the object (coordinates of the dots in the object frame),
    // used to compute the pose
    vpPoint point[nbPoint] ;
    point[0].setWorldCoordinates(-2*L, D, -3*L) ;
    point[1].setWorldCoordinates(   0, D, -3*L) ;
    point[2].setWorldCoordinates( 2*L, D, -3*L) ;

    point[3].setWorldCoordinates(-L, D, -L) ;
    point[4].setWorldCoordinates( L, D, -L) ;
    point[5].setWorldCoordinates( L, D,  L) ;
    point[6].setWorldCoordinates(-L, D,  L) ;

    vpRobotAfma6 robot ;
    // Update camera parameters
    robot.getCameraParameters(cam, I) ;

    vpHomogeneousMatrix cMo, cdMo ;
    vpPose pose ;
    pose.clearPoint() ;
    for (i = 0 ; i < nbPoint ; i++)
    {
      cog = dot[i].getCog() ;
      double x = 0, y = 0 ;
      vpPixelMeterConversion::convertPoint(cam, cog, x, y) ;
      point[i].set_x(x) ;
      point[i].set_y(y) ;
      pose.addPoint(point[i]) ;
    }

    // Compute the initial pose using Dementhon's method followed by a
    // non-linear minimisation method
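    // The call below is missing from the extracted listing; vpPose::DEMENTHON_LOWE is
    // assumed here since it matches the comment above (Dementhon initialisation
    // refined by Lowe's non-linear minimisation).
    pose.computePose(vpPose::DEMENTHON_LOWE, cMo) ;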

    std::cout << cMo << std::endl ;
    cMo.print() ;

    /*------------------------------------------------------------------
      -- Learning the desired position
      -- or reading the desired position
      ------------------------------------------------------------------
    */
    std::cout << " Learning 0/1 " << std::endl ;
    char name[FILENAME_MAX] ;
    sprintf(name, "cdMo.dat") ;
    int learning ;
    std::cin >> learning ;
    if (learning == 1)
    {
      // save the object position
      vpTRACE("Save the location of the object in a file cdMo.dat") ;
      std::ofstream f(name) ;
      cMo.save(f) ;
      f.close() ;
      exit(1) ;
    }

    {
      vpTRACE("Loading desired location from cdMo.dat") ;
      std::ifstream f("cdMo.dat") ;
      cdMo.load(f) ;
      f.close() ;
    }

    vpFeaturePoint p[nbPoint], pd[nbPoint] ;

    // Set the desired position of the points by forward projection using
    // the desired pose cdMo
    for (i = 0 ; i < nbPoint ; i++)
    {
      vpColVector cP, p ; // note: this local vector p shadows the feature array p above
      point[i].changeFrame(cdMo, cP) ;
      point[i].projection(cP, p) ;

      pd[i].set_x(p[0]) ;
      pd[i].set_y(p[1]) ;
    }

    //------------------------------------------------------------------

    vpTRACE("define the task") ;
    vpTRACE("\t we want an eye-to-hand control law") ;
    vpTRACE("\t robot is controlled in the camera frame") ;
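    // These two configuration calls are missing from the extracted listing; the servo
    // type and interaction matrix choice below are assumptions consistent with the
    // eye-to-hand cVe / eJe usage further down.
    task.setServo(vpServo::EYETOHAND_L_cVe_eJe) ;
    task.setInteractionMatrixType(vpServo::DESIRED) ;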

    for (i = 0 ; i < nbPoint ; i++)
    {
      task.addFeature(p[i], pd[i]) ;
    }

    vpTRACE("Display task information") ;
    task.print() ;

    //------------------------------------------------------------------

    double convergence_threshold = 0.00 ; //025 ;
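    // A call is missing from the listing at this point; the page's cross references
    // include vpDisplay::getClick(), so a "click to start the servo" pause is assumed.
    vpDisplay::getClick(I) ;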

    //-------------------------------------------------------------
    double error = 1 ;
    unsigned int iter = 0 ;
    vpTRACE("\t loop") ;
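    // Missing from the extracted listing: the robot has to be switched to velocity
    // control before the servo loop; vpRobot::STATE_VELOCITY_CONTROL is assumed.
    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL) ;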
    vpColVector v ; // computed robot velocity

    // position of the object in the effector frame
    vpHomogeneousMatrix oMcamrobot ;
    oMcamrobot[0][3] = -0.05 ;

    vpImage<vpRGBa> Ic ;
    int it = 0 ;

    // Parameters of the adaptive gain: lambda = alpha * exp(-beta * ||e||^2) + lambda_av
    double lambda_av = 0.1 ;
    double alpha = 1 ; //1 ;
    double beta = 3 ;  //3 ;

    std::cout << "alpha 0.7" << std::endl ;
    std::cin >> alpha ;
    std::cout << "beta 5" << std::endl ;
    std::cin >> beta ;

    std::list<vpImagePoint> Lcog ;
    vpImagePoint ip ;
    while (error > convergence_threshold)
    {
      std::cout << "---------------------------------------------" << iter++ << std::endl ;

      g.acquire(I) ;
      vpDisplay::display(I) ;
      // Overlay some text in the image (the two vpDisplay::displayCharString()
      // calls were truncated in this listing and are reconstructed here)
      ip.set_i( 265 ) ;
      ip.set_j( 150 ) ;
      vpDisplay::displayCharString(I, ip,
                                   "Eye-To-Hand Visual Servoing",
                                   vpColor::green) ;
      ip.set_i( 280 ) ;
      ip.set_j( 150 ) ;
      vpDisplay::displayCharString(I, ip,
                                   "IRISA-INRIA Rennes, Lagadic project",
                                   vpColor::green) ;
      try
      {
        for (i = 0 ; i < nbPoint ; i++)
        {
          dot[i].track(I) ;
          Lcog.push_back( dot[i].getCog() ) ;
        }
      }
      catch(...)
      {
        vpTRACE("Error detected while tracking visual features") ;
        robot.stopMotion() ;
        exit(1) ;
      }

      // Update the pose of the object using a non-linear minimisation method (Lowe),
      // starting from the previous estimate cMo
      pose.clearPoint() ;

      for (i = 0 ; i < nbPoint ; i++)
      {
        double x = 0, y = 0 ;
        cog = dot[i].getCog() ;
        vpPixelMeterConversion::convertPoint(cam, cog, x, y) ;
        point[i].set_x(x) ;
        point[i].set_y(y) ;

        vpColVector cP ;
        point[i].changeFrame(cdMo, cP) ;

        p[i].set_x(x) ;
        p[i].set_y(y) ;
        p[i].set_Z(cP[2]) ; // depth taken from the desired pose cdMo

        pose.addPoint(point[i]) ;

        point[i].display(I, cMo,  cam, vpColor::green) ;
        point[i].display(I, cdMo, cam, vpColor::blue) ;
      }
      pose.computePose(vpPose::LOWE, cMo) ;
      vpDisplay::flush(I) ;

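      // Chain the estimated object pose up to the robot end-effector to get the
      // camera-to-end-effector transformation needed by the eye-to-hand control law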
      vpHomogeneousMatrix cMe, camrobotMe ;
      robot.get_cMe(camrobotMe) ;
      cMe = cMo * oMcamrobot * camrobotMe ;

      task.set_cVe(cMe) ;

      vpMatrix eJe ;
      robot.get_eJe(eJe) ;
      task.set_eJe(eJe) ;

      // Compute the adaptive gain (speeds up the convergence)
      double gain ;
      if (iter > 2)
      {
        if (std::fabs(alpha) <= std::numeric_limits<double>::epsilon())
          gain = lambda_av ;
        else
        {
          gain = alpha * exp(-beta * ( task.getError() ).sumSquare()) + lambda_av ;
        }
      }
      else gain = lambda_av ;
      if (SAVE == 1)
        gain = gain / 5 ;

      vpTRACE("%f %f %f %f %f", alpha, beta, lambda_av, ( task.getError() ).sumSquare(), gain) ;
      task.setLambda(gain) ;

      v = task.computeControlLaw() ;

      // display points trajectory
      for (std::list<vpImagePoint>::const_iterator it_cog = Lcog.begin() ; it_cog != Lcog.end() ; ++it_cog)
      {
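        // The body of this loop is missing from the extracted listing; drawing each
        // stored centre of gravity with vpDisplay::displayPoint() is assumed here.
        vpDisplay::displayPoint(I, *it_cog, vpColor::green) ;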
      }
      vpServoDisplay::display(task, cam, I) ;
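      // Missing from the extracted listing: send the computed velocity to the robot,
      // expressed in the camera frame as stated in the file description.
      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;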

      error = ( task.getError() ).sumSquare() ;
      std::cout << "|| s - s* || = " << error << std::endl ;

      if (error > 7)
      {
        vpTRACE("Error is too high: the task seems to diverge") ;
        robot.stopMotion() ;
        exit(1) ;
      }

      // display the current pose
      // pose.display(I, cMo, cam, 0.04, vpColor::red) ;
      // display the desired pose
      // pose.display(I, cdMo, cam, 0.04, vpColor::blue) ;
      if ((SAVE == 1) && (iter % 3 == 0))
      {
        vpDisplay::getImage(I, Ic) ;
        sprintf(name, "/tmp/marchand/image.%04d.ppm", it++) ;
        vpImageIo::writePPM(Ic, name) ;
      }
    }
    v = 0 ;
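    // Missing from the extracted listing: a zero velocity is presumably sent to stop
    // the robot once the loop is over, before killing the task.
    robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;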
    task.kill() ;
  }
  catch (...)
  {
    vpERROR_TRACE(" Test failed") ;
    return 0 ;
  }
}

#else
int main()
{
  vpERROR_TRACE("You do not have an Afma6 robot or a firewire framegrabber connected to your computer...") ;
  return 0 ;
}

#endif