ViSP  2.8.0
tutorial-ibvs-4pts-ogre-tracking.cpp
#include <visp/vpDisplayX.h>
#include <visp/vpDisplayGDI.h>
#include <visp/vpAROgre.h>
#include <visp/vpFeatureBuilder.h>
#include <visp/vpPose.h>
#include <visp/vpServo.h>
#include <visp/vpServoDisplay.h>
#include <visp/vpSimulatorCamera.h>

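// Display the trajectory followed by each of the four dots in the image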
void display_trajectory(const vpImage<unsigned char> &I, const std::vector<vpDot2> &dot, unsigned int thickness)
{
  static std::vector<vpImagePoint> traj[4];
  for (unsigned int i=0; i<4; i++) {
    traj[i].push_back(dot[i].getCog());
  }
  for (unsigned int i=0; i<4; i++) {
    for (unsigned int j=1; j<traj[i].size(); j++) {
      vpDisplay::displayLine(I, traj[i][j-1], traj[i][j], vpColor::green, thickness);
    }
  }
}

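// Render the Ogre scene seen from the camera pose cMo over the given background image,
// and return the result as a binarised grey-level image suitable for dot tracking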
#if defined(VISP_HAVE_OGRE)
void ogre_get_render_image(vpAROgre &ogre, const vpImage<unsigned char> &background,
#if VISP_VERSION_INT > VP_VERSION_INT(2,7,0)
                           const
#endif
                           vpHomogeneousMatrix &cMo, vpImage<unsigned char> &I)
{
  static vpImage<vpRGBa> Irender; // Image from ogre scene rendering
  ogre.display(background, cMo);
  ogre.getRenderingOutput(Irender, cMo);

  vpImageConvert::convert(Irender, I);
  // Due to the light that was added to the scene, we need to threshold the image
  vpImageTools::binarise(I, (unsigned char)254, (unsigned char)255, (unsigned char)0, (unsigned char)255, (unsigned char)255);
}
#endif

int main()
{
#if defined(VISP_HAVE_OGRE) && (defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI))
  unsigned int thickness = 3;

  vpHomogeneousMatrix cdMo(0, 0, 0.75, 0, 0, 0);
  vpHomogeneousMatrix cMo(0.15, -0.1, 1., vpMath::rad(10), vpMath::rad(-10), vpMath::rad(50));

  // Grey-level image used as background texture
  vpImage<unsigned char> background(480, 640, 255);

  // Parameters of our camera
  vpCameraParameters cam(840, 840, background.getWidth()/2, background.getHeight()/2);

  // Define the target as 4 points
  std::vector<vpPoint> point(4);
  point[0].setWorldCoordinates(-0.1,-0.1, 0);
  point[1].setWorldCoordinates( 0.1,-0.1, 0);
  point[2].setWorldCoordinates( 0.1, 0.1, 0);
  point[3].setWorldCoordinates(-0.1, 0.1, 0);

  // Our Ogre simulator, configured with the camera parameters defined above
  // and the background image size
  vpAROgre ogre;
  ogre.setCameraParameters(cam);
  ogre.setShowConfigDialog(false);
  ogre.addResource("./"); // Add the path to the Sphere.mesh resource
  ogre.init(background, false, true);
  //ogre.setWindowPosition(680, 400);

  // Create the scene that contains 4 spheres
  // Sphere.mesh contains a sphere with 1 meter radius
  std::vector<std::string> name(4);
  for (int i=0; i<4; i++) {
    std::ostringstream s; s << "Sphere" << i; name[i] = s.str();
    ogre.load(name[i], "Sphere.mesh");
    ogre.setScale(name[i], 0.02f, 0.02f, 0.02f); // Rescale the sphere to 2 cm radius
    // Set the position of each sphere in the object frame
    ogre.setPosition(name[i], vpTranslationVector(point[i].get_oX(), point[i].get_oY(), point[i].get_oZ()));
    ogre.setRotation(name[i], vpRotationMatrix(M_PI/2, 0, 0));
  }

  // Add an optional point light source
  Ogre::Light * light = ogre.getSceneManager()->createLight();
  light->setDiffuseColour(1, 1, 1); // scaled RGB values
  light->setSpecularColour(1, 1, 1); // scaled RGB values
  light->setPosition((Ogre::Real)cdMo[0][3], (Ogre::Real)cdMo[1][3], (Ogre::Real)(-cdMo[2][3]));
  light->setType(Ogre::Light::LT_POINT);

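  // Build the visual servo task: the error between the current and desired point
  // features will be regulated to zero by the computed camera velocity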
  vpServo task;
  task.setServo(vpServo::EYEINHAND_CAMERA);
  task.setInteractionMatrixType(vpServo::CURRENT);
  task.setLambda(0.5);

  // Image used for the image processing
  vpImage<unsigned char> I;

  // Render the scene at the desired position
  ogre_get_render_image(ogre, background, cdMo, I);

  // Display the image in which we will do the tracking
#if defined(VISP_HAVE_X11)
  vpDisplayX d(I, 0, 0, "Camera view at desired position");
#elif defined(VISP_HAVE_GDI)
  vpDisplayGDI d(I, 0, 0, "Camera view at desired position");
#else
  std::cout << "No image viewer is available..." << std::endl;
#endif

  vpDisplay::display(I);
  vpDisplay::displayCharString(I, 10, 10, "Click in the 4 dots to learn their positions", vpColor::red);
  vpDisplay::flush(I);

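  // One dot tracker per target point, with the corresponding current (p) and desired (pd) visual features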
  std::vector<vpDot2> dot(4);
  vpFeaturePoint p[4], pd[4];

  for (int i = 0 ; i < 4 ; i++) {
    // Compute the desired feature at the desired position
    dot[i].setGraphics(true);
    dot[i].setGraphicsThickness(thickness);
    dot[i].initTracking(I);
    vpDisplay::flush(I);
    vpFeatureBuilder::create(pd[i], cam, dot[i].getCog());
  }

  // Render the scene at the initial position
  ogre_get_render_image(ogre, background, cMo, I);

  vpDisplay::display(I);
  vpDisplay::setTitle(I, "Current camera view");
  vpDisplay::displayCharString(I, 10, 10, "Click in the 4 dots to initialise the tracking and start the servo", vpColor::red);
  vpDisplay::flush(I);

  for (int i = 0 ; i < 4 ; i++) {
    // When the scene is rendered at a given pose, the pose that would be estimated from the
    // rendered image differs slightly from it. That is why we do not simply compute the
    // desired features from the desired pose using the next two lines, but rather learn
    // them from the image rendered at the desired position (see the previous loop).
    // point[i].project(cdMo);
    // vpFeatureBuilder::create(pd[i], point[i]);

    // Compute the current feature at the initial position
    dot[i].setGraphics(true);
    dot[i].initTracking(I);
    vpDisplay::flush(I);
    vpFeatureBuilder::create(p[i], cam, dot[i].getCog());
  }

  for (int i = 0 ; i < 4 ; i++) {
    // Set the feature Z coordinate from the pose
    vpColVector cP;
    point[i].changeFrame(cMo, cP);
    p[i].set_Z(cP[2]);

    task.addFeature(p[i], pd[i]);
  }

  vpHomogeneousMatrix wMc, wMo;
  vpSimulatorCamera robot;
  robot.setSamplingTime(0.040);
  robot.getPosition(wMc);
  wMo = wMc * cMo;

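  // Servo loop: render the scene from the current camera pose, track the dots,
  // update the current features and send the computed velocity to the simulated camera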
  for (; ; ) {
    // From the camera position in the world frame we retrieve the object position
    robot.getPosition(wMc);
    cMo = wMc.inverse() * wMo;

    // Update the scene from the new camera position
    ogre_get_render_image(ogre, background, cMo, I);

    vpDisplay::display(I);

    for (int i = 0 ; i < 4 ; i++) {
      dot[i].track(I);
      vpFeatureBuilder::create(p[i], cam, dot[i].getCog());
    }

    for (int i = 0 ; i < 4 ; i++) {
      // Set the feature Z coordinate from the pose
      vpColVector cP;
      point[i].changeFrame(cMo, cP);
      p[i].set_Z(cP[2]);
    }

    // Compute the camera velocity that makes the current features converge to the desired ones
    vpColVector v = task.computeControlLaw();

    display_trajectory(I, dot, thickness);
    vpServoDisplay::display(task, cam, I, vpColor::green, vpColor::red, thickness+2);
    robot.setVelocity(vpRobot::CAMERA_FRAME, v);

    vpDisplay::flush(I);
    if (vpDisplay::getClick(I, false))
      break;

    // Wait for the duration of the sampling period (in ms) to slow down the loop
    vpTime::wait(robot.getSamplingTime() * 1000);
  }
  task.kill();
#endif
}