ViSP  2.7.0
servoPioneerPoint2DDepthWithoutVpServo.cpp

Example that shows how to control the Pioneer mobile robot by IBVS visual servoing with respect to a blob. The current visual features that are used are s = (x, log(Z/Z*)). The desired ones are s* = (x*, 0), where x* is the desired abscissa of the blob in the image and log(Z/Z*) = 0 means that the desired depth equals the learned depth Z*.

The degrees of freedom that are controlled are (vx, wz), where wz is the rotational velocity and vx the translational velocity of the mobile platform at point M located at the middle between the two wheels.

The feature x is used to control wy, while log(Z/Z*) is used to control vz. The value of x is measured by a blob tracker. The value of Z is estimated from the apparent surface of the blob in the image, which decreases as the depth Z increases.

/****************************************************************************
*
* $Id: servoPioneerPoint2DDepthWithoutVpServo.cpp 4056 2013-01-05 13:04:42Z fspindle $
*
* This file is part of the ViSP software.
* Copyright (C) 2005 - 2013 by INRIA. All rights reserved.
*
* This software is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* ("GPL") version 2 as published by the Free Software Foundation.
* See the file LICENSE.txt at the root directory of this source
* distribution for additional information about the GNU GPL.
*
* For using ViSP with software that can not be combined with the GNU
* GPL, please contact INRIA about acquiring a ViSP Professional
* Edition License.
*
* See http://www.irisa.fr/lagadic/visp/visp.html for more information.
*
* This software was developed at:
* INRIA Rennes - Bretagne Atlantique
* Campus Universitaire de Beaulieu
* 35042 Rennes Cedex
* France
* http://www.irisa.fr/lagadic
*
* If you have questions regarding the use of this file, please contact
* INRIA at visp@inria.fr
*
* This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
* WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
*
*
* Description:
* IBVS on Pioneer P3DX mobile platform
*
* Authors:
* Fabien Spindler
*
*****************************************************************************/
#include <cmath>
#include <iostream>

#include <visp/vpConfig.h>
#include <visp/vpRobotPioneer.h>
#include <visp/vpCameraParameters.h>
#include <visp/vpDisplayGDI.h>
#include <visp/vpDisplayX.h>
#include <visp/vpDot2.h>
#include <visp/vpFeatureBuilder.h>
#include <visp/vpFeatureDepth.h>
#include <visp/vpFeaturePoint.h>
#include <visp/vpHomogeneousMatrix.h>
#include <visp/vpImage.h>
#include <visp/vpImageConvert.h>
#include <visp/vp1394TwoGrabber.h>
#include <visp/vp1394CMUGrabber.h>
#include <visp/vpOpenCVGrabber.h>
#include <visp/vpV4l2Grabber.h>
#include <visp/vpVelocityTwistMatrix.h>
#if defined(VISP_HAVE_DC1394_2) || defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_OPENCV)
#if defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)
#if defined(VISP_HAVE_PIONEER)
# define TEST_COULD_BE_ACHIEVED
#endif
#endif
#endif
#undef VISP_HAVE_OPENCV // To use a firewire camera
#undef VISP_HAVE_V4L2 // To use a firewire camera
#ifdef TEST_COULD_BE_ACHIEVED
int main(int argc, char **argv)
{
vpImage<unsigned char> I; // Create a gray level image container
double depth = 1.;
double lambda = 0.6;
double coef = 1./6.77; // Scale parameter used to estimate the depth Z of the blob from its surface
ArArgumentParser parser(&argc, argv);
parser.loadDefaultArguments();
// ArRobotConnector connects to the robot, get some initial data from it such as type and name,
// and then loads parameter files for this robot.
ArRobotConnector robotConnector(&parser, &robot);
if(!robotConnector.connectRobot())
{
ArLog::log(ArLog::Terse, "Could not connect to the robot.");
if(parser.checkHelpAndWarnUnparsed())
{
Aria::logOptions();
Aria::exit(1);
}
}
if (!Aria::parseArgs())
{
Aria::logOptions();
Aria::shutdown();
return false;
}
// Wait 3 sec to be sure that the low level Aria thread used to control
// the robot is started. Without this delay we experienced a delay (arround 2.2 sec)
// between the velocity send to the robot and the velocity that is really applied
// to the wheels.
std::cout << "Robot connected" << std::endl;
// Camera parameters. In this experiment we don't need a precise calibration of the camera
// Create the camera framegrabber
#if defined(VISP_HAVE_OPENCV)
int device = 1;
std::cout << "Use device: " << device << std::endl;
cv::VideoCapture g(device); // open the default camera
g.set(CV_CAP_PROP_FRAME_WIDTH, 640);
g.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
if(!g.isOpened()) // check if we succeeded
return -1;
cv::Mat frame;
g >> frame; // get a new frame from camera
// Logitec sphere parameters
cam.initPersProjWithoutDistortion(558, 555, 312, 210);
#elif defined(VISP_HAVE_V4L2)
// Create a grabber based on v4l2 third party lib (for usb cameras under Linux)
g.setScale(1);
g.setInput(0);
g.setDevice("/dev/video1");
g.open(I);
// Logitec sphere parameters
cam.initPersProjWithoutDistortion(558, 555, 312, 210);
#elif defined(VISP_HAVE_DC1394_2)
// Create a grabber based on libdc1394-2.x third party lib (for firewire cameras under Linux)
vp1394TwoGrabber g(false);
// AVT Pike 032C parameters
cam.initPersProjWithoutDistortion(800, 795, 320, 216);
#elif defined(VISP_HAVE_CMU1394)
// Create a grabber based on CMU 1394 third party lib (for firewire cameras under windows)
g.setVideoMode(0, 5); // 640x480 MONO8
g.setFramerate(4); // 30 Hz
g.open(I);
// AVT Pike 032C parameters
cam.initPersProjWithoutDistortion(800, 795, 320, 216);
#endif
// Acquire an image from the grabber
#if defined(VISP_HAVE_OPENCV)
g >> frame; // get a new frame from camera
#else
g.acquire(I);
#endif
// Create an image viewer
#if defined(VISP_HAVE_X11)
vpDisplayX d(I, 10, 10, "Current frame");
#elif defined(VISP_HAVE_GDI)
vpDisplayGDI d(I, 10, 10, "Current frame");
#endif
// Create a blob tracker
vpDot2 dot;
dot.setGraphics(true);
dot.setComputeMoments(true);
dot.setEllipsoidShapePrecision(0.); // to track a blob without any constraint on the shape
dot.setGrayLevelPrecision(0.9); // to set the blob gray level bounds for binarisation
dot.setEllipsoidBadPointsPercentage(0.5); // to be accept 50% of bad inner and outside points with bad gray level
dot.initTracking(I);
// Current and desired visual feature associated to the x coordinate of the point
vpFeaturePoint s_x, s_xd;
// Create the current x visual feature
vpFeatureBuilder::create(s_x, cam, dot);
// Create the desired x* visual feature
s_xd.buildFrom(0, 0, depth);
// Create the current log(Z/Z*) visual feature
// Surface of the blob estimated from the image moment m00 and converted in meters
double surface = 1./sqrt(dot.m00/(cam.get_px()*cam.get_py()));
double Z, Zd;
// Initial depth of the blob in from of the camera
Z = coef * surface ;
// Desired depth Z* of the blob. This depth is learned and equal to the initial depth
Zd = Z;
s_Z.buildFrom(s_x.get_x(), s_x.get_y(), Z , 0); // log(Z/Z*) = 0 that's why the last parameter is 0
vpMatrix L_Z = s_Z.interaction();
vpMatrix eJe; // pioneer jacobian
robot.get_eJe(eJe);
vpMatrix L; // Interaction matrix
L.stackMatrices(L_x); // constant since build with the desired feature
L.stackMatrices(L_Z); // not constant since it corresponds to log(Z/Z*) that evolves at each iteration
vpColVector v; // vz, wx
s_Zd.buildFrom(0, 0, 1, 0); // The value of s* is 0 with Z=1 meter.
try
{
while(1)
{
// Acquire a new image
#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100)
g >> frame; // get a new frame from camera
#else
g.acquire(I);
#endif
// Set the image as background of the viewer
// Does the blob tracking
dot.track(I);
// Update the current x feature
vpFeatureBuilder::create(s_x, cam, dot);
// Update log(Z/Z*) feature. Since the depth Z change, we need to update the intection matrix
surface = 1./sqrt(dot.m00/(cam.get_px()*cam.get_py()));
Z = coef * surface ;
s_Z.buildFrom(s_x.get_x(), s_x.get_y(), Z, log(Z/Zd)) ;
L_Z = s_Z.interaction();
// Update the global interaction matrix
L.stackMatrices(L_x); // constant since build with the desired feature
L.stackMatrices(L_Z); // not constant since it corresponds to log(Z/Z*) that evolves at each iteration
// Update the global error s-s*
vpColVector error;
error.stack( s_x.error( s_xd, vpFeaturePoint::selectX() ) );
error.stack( s_Z.error( s_Zd ) );
// Compute the control law. Velocities are computed in the mobile robot reference frame
v = -lambda * (L * cVe * eJe).pseudoInverse() * error;
std::cout << "Send velocity to the pionner: " << v[0] << " m/s "
<< vpMath::deg(v[1]) << " deg/s" << std::endl;
// Send the velocity to the robot
// Draw a vertical line which corresponds to the desired x coordinate of the dot cog
vpDisplay::displayLine(I, 0, 320, 479, 320, vpColor::red);
// A click in the viewer to exit
if ( vpDisplay::getClick(I, false) )
break;
}
}
catch(...)
{
}
std::cout << "Ending robot thread..." << std::endl;
robot.stopRunning();
// wait for the thread to stop
robot.waitForRunExit();
}
#else
int main()
{
  // Fallback entry point: built when the required third party libraries
  // (framegrabber, display, Aria/Pioneer) are not available.
  const char *msg = "You don't have the right 3rd party libraries to run this example...";
  std::cout << msg << std::endl;
  return 0;
}
#endif