ViSP  2.8.0
servoPioneerPoint2DDepth.cpp
/****************************************************************************
 *
 * $Id: servoPioneerPoint2DDepth.cpp 4056 2013-01-05 13:04:42Z fspindle $
 *
 * This file is part of the ViSP software.
 * Copyright (C) 2005 - 2013 by INRIA. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * ("GPL") version 2 as published by the Free Software Foundation.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact INRIA about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://www.irisa.fr/lagadic/visp/visp.html for more information.
 *
 * This software was developed at:
 * INRIA Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 * http://www.irisa.fr/lagadic
 *
 * If you have questions regarding the use of this file, please contact
 * INRIA at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *
 * Description:
 * IBVS on Pioneer P3DX mobile platform
 *
 * Authors:
 * Fabien Spindler
 *
 *****************************************************************************/
#include <cmath>    // for sqrt() and log()
#include <iostream>

#include <visp/vpConfig.h>

#include <visp/vpRobotPioneer.h>
#include <visp/vpCameraParameters.h>
#include <visp/vpDisplayGDI.h>
#include <visp/vpDisplayX.h>
#include <visp/vpDot2.h>
#include <visp/vpFeatureBuilder.h>
#include <visp/vpFeatureDepth.h>
#include <visp/vpFeaturePoint.h>
#include <visp/vpHomogeneousMatrix.h>
#include <visp/vpImage.h>
#include <visp/vpImageConvert.h>
#include <visp/vp1394TwoGrabber.h>
#include <visp/vp1394CMUGrabber.h>
#include <visp/vpV4l2Grabber.h>
#include <visp/vpOpenCVGrabber.h>
#include <visp/vpServo.h>
#include <visp/vpVelocityTwistMatrix.h>

#if defined(VISP_HAVE_DC1394_2) || defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_OPENCV)
#if defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)
#if defined(VISP_HAVE_PIONEER)
# define TEST_COULD_BE_ACHIEVED
#endif
#endif
#endif

#undef VISP_HAVE_OPENCV // To use a firewire camera
#undef VISP_HAVE_V4L2   // To use a firewire camera

#ifdef TEST_COULD_BE_ACHIEVED
int main(int argc, char **argv)
{
  vpImage<unsigned char> I; // Create a gray level image container
  double depth = 1.;
  double lambda = 0.6;
  double coef = 1./6.77; // Scale parameter used to estimate the depth Z of the blob from its surface

  vpRobotPioneer robot;
  ArArgumentParser parser(&argc, argv);
  parser.loadDefaultArguments();

  // ArRobotConnector connects to the robot, gets some initial data from it such as type and name,
  // and then loads parameter files for this robot.
  ArRobotConnector robotConnector(&parser, &robot);
  if (!robotConnector.connectRobot())
  {
    ArLog::log(ArLog::Terse, "Could not connect to the robot.");
    if (parser.checkHelpAndWarnUnparsed())
    {
      Aria::logOptions();
      Aria::exit(1);
    }
  }
  if (!Aria::parseArgs())
  {
    Aria::logOptions();
    Aria::shutdown();
    return -1;
  }

  // Wait 3 sec to be sure that the low level Aria thread used to control
  // the robot is started. Without this delay we experienced a delay (around 2.2 sec)
  // between the velocity sent to the robot and the velocity that is really applied
  // to the wheels.
  vpTime::sleepMs(3000);

  std::cout << "Robot connected" << std::endl;

  // Camera parameters. In this experiment we don't need a precise calibration of the camera
  vpCameraParameters cam;

  // Create the camera framegrabber
#if defined(VISP_HAVE_OPENCV)
  int device = 1;
  std::cout << "Use device: " << device << std::endl;
  cv::VideoCapture g(device); // open the camera device
  g.set(CV_CAP_PROP_FRAME_WIDTH, 640);
  g.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
  if (!g.isOpened()) // check if we succeeded
    return -1;
  cv::Mat frame;
  g >> frame; // get a new frame from camera
  vpImageConvert::convert(frame, I);

  // Logitech Sphere camera parameters
  cam.initPersProjWithoutDistortion(558, 555, 312, 210);
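  // Note (added for clarity): in ViSP, initPersProjWithoutDistortion(px, py, u0, v0)
  // sets the focal lengths in pixels (px, py) and the principal point coordinates
  // (u0, v0). Rough values are sufficient here since, as noted above, this
  // experiment does not require a precise camera calibration.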
#elif defined(VISP_HAVE_V4L2)
  // Create a grabber based on v4l2 third party lib (for usb cameras under Linux)
  vpV4l2Grabber g;
  g.setScale(1);
  g.setInput(0);
  g.setDevice("/dev/video1");
  g.open(I);
  // Logitech Sphere camera parameters
  cam.initPersProjWithoutDistortion(558, 555, 312, 210);
#elif defined(VISP_HAVE_DC1394_2)
  // Create a grabber based on libdc1394-2.x third party lib (for firewire cameras under Linux)
  vp1394TwoGrabber g(false);
  // AVT Pike 032C parameters
  cam.initPersProjWithoutDistortion(800, 795, 320, 216);
#elif defined(VISP_HAVE_CMU1394)
  // Create a grabber based on CMU 1394 third party lib (for firewire cameras under Windows)
  vp1394CMUGrabber g;
  g.setVideoMode(0, 5); // 640x480 MONO8
  g.setFramerate(4);    // 30 Hz
  g.open(I);
  // AVT Pike 032C parameters
  cam.initPersProjWithoutDistortion(800, 795, 320, 216);
#endif

  // Acquire an image from the grabber
#if defined(VISP_HAVE_OPENCV)
  g >> frame; // get a new frame from camera
  vpImageConvert::convert(frame, I);
#else
  g.acquire(I);
#endif

  // Create an image viewer
#if defined(VISP_HAVE_X11)
  vpDisplayX d(I, 10, 10, "Current frame");
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI d(I, 10, 10, "Current frame");
#endif
  vpDisplay::display(I);
  vpDisplay::flush(I);

  // Create a blob tracker
  vpDot2 dot;
  dot.setGraphics(true);
  dot.setComputeMoments(true);
  dot.setEllipsoidShapePrecision(0.); // to track a blob without any constraint on the shape
  dot.setGrayLevelPrecision(0.9);     // to set the blob gray level bounds for binarisation
  dot.setEllipsoidBadPointsPercentage(0.5); // to accept up to 50% of inner and outside points with a bad gray level
  dot.initTracking(I);
  vpDisplay::flush(I);

  vpServo task;
  task.setServo(vpServo::EYEINHAND_L_cVe_eJe);
  task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE);
  task.setLambda(lambda);
  vpVelocityTwistMatrix cVe;
  cVe = robot.get_cVe();
  task.set_cVe(cVe);

  std::cout << "cVe: \n" << cVe << std::endl;

  vpMatrix eJe;
  robot.get_eJe(eJe);
  task.set_eJe(eJe);
  std::cout << "eJe: \n" << eJe << std::endl;

  // Current and desired visual feature associated to the x coordinate of the point
  vpFeaturePoint s_x, s_xd;

  // Create the current x visual feature
  vpFeatureBuilder::create(s_x, cam, dot);

  // Create the desired x* visual feature
  s_xd.buildFrom(0, 0, depth);
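  // Note: the desired feature is x* = 0, y* = 0, i.e. the principal point, so the
  // servo tends to bring the blob center of gravity back to the middle of the
  // image (see the vertical red line drawn in the loop below).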

  // Add the feature
  task.addFeature(s_x, s_xd);

  // Create the current log(Z/Z*) visual feature
  vpFeatureDepth s_Z, s_Zd;
  // Surface of the blob estimated from the image moment m00 and converted into meters
  double surface = 1./sqrt(dot.m00/(cam.get_px()*cam.get_py()));
  double Z, Zd;
  // Initial depth of the blob in front of the camera
  Z = coef * surface;
  // Desired depth Z* of the blob. This depth is learned and set equal to the initial depth
  Zd = Z;
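  // Note on the depth model used above and in the loop below: m00 is the blob
  // area in pixels; dividing by px*py expresses it in normalized image units.
  // Since the apparent area of the blob decreases roughly as 1/Z^2, the quantity
  // 1./sqrt(dot.m00/(cam.get_px()*cam.get_py())) is proportional to the depth Z,
  // and coef = 1/6.77 is the proportionality factor, presumably tuned beforehand
  // for the particular blob used in this setup.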

  std::cout << "Z " << Z << std::endl;
  s_Z.buildFrom(s_x.get_x(), s_x.get_y(), Z, 0);   // log(Z/Z*) = 0, that's why the last parameter is 0
  s_Zd.buildFrom(s_x.get_x(), s_x.get_y(), Zd, 0); // log(Z/Z*) = 0, that's why the last parameter is 0

  // Add the feature
  task.addFeature(s_Z, s_Zd);
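  // Note: the error vector stacks the point feature and the log(Z/Z*) depth
  // feature, while the robot only has two degrees of freedom (translational and
  // rotational velocity); the pseudo-inverse used in the control law solves this
  // in a least-squares sense. Roughly, the x coordinate of the blob drives the
  // rotation while the depth feature drives the forward motion. The cVe twist
  // transformation and the eJe Jacobian let the task express the control law
  // directly in terms of the mobile robot velocities.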

  vpColVector v; // vz, wx: the robot translational velocity v[0] in m/s and rotational velocity v[1] in rad/s

  try
  {
    while(1)
    {
      // Acquire a new image
#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100)
      g >> frame; // get a new frame from camera
      vpImageConvert::convert(frame, I);
#else
      g.acquire(I);
#endif
      // Set the image as background of the viewer
      vpDisplay::display(I);

      // Track the blob
      dot.track(I);
      // Update the current x feature
      vpFeatureBuilder::create(s_x, cam, dot);

      // Update the log(Z/Z*) feature. Since the depth Z changes, we need to update the interaction matrix
      surface = 1./sqrt(dot.m00/(cam.get_px()*cam.get_py()));
      Z = coef * surface;
      s_Z.buildFrom(s_x.get_x(), s_x.get_y(), Z, log(Z/Zd));

      robot.get_cVe(cVe);
      task.set_cVe(cVe);

      robot.get_eJe(eJe);
      task.set_eJe(eJe);

      // Compute the control law. Velocities are computed in the mobile robot reference frame
      v = task.computeControlLaw();
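      // For this eye-in-hand configuration the control law is, in short,
      // v = -lambda * (L * cVe * eJe)^+ * (s - s*), where L is the interaction
      // matrix of the stacked features and ^+ denotes the pseudo-inverse, so the
      // result is directly the robot velocity vector.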

      std::cout << "Send velocity to the Pioneer: " << v[0] << " m/s "
                << vpMath::deg(v[1]) << " deg/s" << std::endl;

      // Send the velocity to the robot
      robot.setVelocity(vpRobot::REFERENCE_FRAME, v);

      // Draw a vertical line which corresponds to the desired x coordinate of the dot cog
      vpDisplay::displayLine(I, 0, 320, 479, 320, vpColor::red);
      vpDisplay::flush(I);

      // A click in the viewer to exit
      if (vpDisplay::getClick(I, false))
        break;
    }
  }
  catch(...)
  {
  }

  std::cout << "Ending robot thread..." << std::endl;
  robot.stopRunning();

  // wait for the thread to stop
  robot.waitForRunExit();

  // Kill the servo task
  task.print();
  task.kill();
}
#else
int main()
{
  std::cout << "You don't have the right 3rd party libraries to run this example..." << std::endl;
}
#endif