Visual Servoing Platform version 3.2.0 under development (2019-01-22)
vpRealSense2.cpp
/****************************************************************************
 *
 * ViSP, open source Visual Servoing Platform software.
 * Copyright (C) 2005 - 2019 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * librealSense2 interface.
 *
 *****************************************************************************/

#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_CPP11_COMPATIBILITY)
#include <iomanip>
#include <map>
#include <set>
#include <cstring>
#include <visp3/core/vpImageConvert.h>
#include <visp3/sensor/vpRealSense2.h>

#define MANUAL_POINTCLOUD 1

vpRealSense2::vpRealSense2()
  : m_colorIntrinsics(), m_depth2ColorExtrinsics(), m_depthIntrinsics(), m_depthScale(0.0f), m_invalidDepthValue(0.0f),
    m_max_Z(8.0f), m_pipe(), m_pipelineProfile(), m_pointcloud(), m_points()
{
}

vpRealSense2::~vpRealSense2() { close(); }

void vpRealSense2::acquire(vpImage<unsigned char> &grey)
{
  auto data = m_pipe.wait_for_frames();
  auto color_frame = data.get_color_frame();
  getGreyFrame(color_frame, grey);
}

void vpRealSense2::acquire(vpImage<vpRGBa> &color)
{
  auto data = m_pipe.wait_for_frames();
  auto color_frame = data.get_color_frame();
  getColorFrame(color_frame, color);
}
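
/*
  Illustrative usage sketch (not part of the original file): grabbing color and
  greyscale images with the two overloads above. It assumes a RealSense device
  is plugged in and that open() succeeds with the default configuration.

  #include <visp3/core/vpImage.h>
  #include <visp3/sensor/vpRealSense2.h>

  int main()
  {
    vpRealSense2 rs;
    rs.open();                  // start streaming with the default rs2::config

    vpImage<vpRGBa> color;
    vpImage<unsigned char> grey;
    rs.acquire(color);          // blocks until a color frame is available
    rs.acquire(grey);           // same color frame, converted to greyscale
    return 0;
  }
*/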

void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const data_depth,
                           std::vector<vpColVector> *const data_pointCloud, unsigned char *const data_infrared,
                           rs2::align *const align_to)
{
  auto data = m_pipe.wait_for_frames();
  if (align_to != NULL)
#if (RS2_API_VERSION > ((2 * 10000) + (9 * 100) + 0))
    data = align_to->process(data);
#else
    // rs2::align in librealsense releases up to 2.9.0 exposed this method under the misspelled name proccess()
    data = align_to->proccess(data);
#endif

  if (data_image != NULL) {
    auto color_frame = data.get_color_frame();
    getNativeFrameData(color_frame, data_image);
  }

  if (data_depth != NULL || data_pointCloud != NULL) {
    auto depth_frame = data.get_depth_frame();
    if (data_depth != NULL)
      getNativeFrameData(depth_frame, data_depth);

    if (data_pointCloud != NULL)
      getPointcloud(depth_frame, *data_pointCloud);
  }

  if (data_infrared != NULL) {
    auto infrared_frame = data.first(RS2_STREAM_INFRARED);
    getNativeFrameData(infrared_frame, data_infrared);
  }
}
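
/*
  Illustrative usage sketch (not part of the original file): raw acquisition of
  color, depth, infrared and a point cloud, with all frames aligned to the
  color stream. The stream resolutions, formats and rates below are assumptions
  and must match whatever configuration is passed to open().

  vpRealSense2 rs;
  rs2::config config;
  config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
  config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30);
  config.enable_stream(RS2_STREAM_INFRARED, 640, 480, RS2_FORMAT_Y8, 30);
  rs.open(config);

  vpImage<vpRGBa> color(480, 640);
  vpImage<uint16_t> depth_raw(480, 640);
  vpImage<unsigned char> infrared(480, 640);
  std::vector<vpColVector> pointcloud;

  rs2::align align_to(RS2_STREAM_COLOR);
  rs.acquire((unsigned char *)color.bitmap, (unsigned char *)depth_raw.bitmap, &pointcloud,
             (unsigned char *)infrared.bitmap, &align_to);
*/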

#ifdef VISP_HAVE_PCL

void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const data_depth,
                           std::vector<vpColVector> *const data_pointCloud,
                           pcl::PointCloud<pcl::PointXYZ>::Ptr &pointcloud, unsigned char *const data_infrared,
                           rs2::align *const align_to)
{
  auto data = m_pipe.wait_for_frames();
  if (align_to != NULL)
#if (RS2_API_VERSION > ((2 * 10000) + (9 * 100) + 0))
    data = align_to->process(data);
#else
    data = align_to->proccess(data);
#endif

  if (data_image != NULL) {
    auto color_frame = data.get_color_frame();
    getNativeFrameData(color_frame, data_image);
  }

  if (data_depth != NULL || data_pointCloud != NULL || pointcloud != NULL) {
    auto depth_frame = data.get_depth_frame();
    if (data_depth != NULL)
      getNativeFrameData(depth_frame, data_depth);

    if (data_pointCloud != NULL)
      getPointcloud(depth_frame, *data_pointCloud);

    if (pointcloud != NULL)
      getPointcloud(depth_frame, pointcloud);
  }

  if (data_infrared != NULL) {
    auto infrared_frame = data.first(RS2_STREAM_INFRARED);
    getNativeFrameData(infrared_frame, data_infrared);
  }
}

void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const data_depth,
                           std::vector<vpColVector> *const data_pointCloud,
                           pcl::PointCloud<pcl::PointXYZRGB>::Ptr &pointcloud, unsigned char *const data_infrared,
                           rs2::align *const align_to)
{
  auto data = m_pipe.wait_for_frames();
  if (align_to != NULL)
#if (RS2_API_VERSION > ((2 * 10000) + (9 * 100) + 0))
    data = align_to->process(data);
#else
    data = align_to->proccess(data);
#endif

  auto color_frame = data.get_color_frame();
  if (data_image != NULL) {
    getNativeFrameData(color_frame, data_image);
  }

  if (data_depth != NULL || data_pointCloud != NULL || pointcloud != NULL) {
    auto depth_frame = data.get_depth_frame();
    if (data_depth != NULL)
      getNativeFrameData(depth_frame, data_depth);

    if (data_pointCloud != NULL)
      getPointcloud(depth_frame, *data_pointCloud);

    if (pointcloud != NULL)
      getPointcloud(depth_frame, color_frame, pointcloud);
  }

  if (data_infrared != NULL) {
    auto infrared_frame = data.first(RS2_STREAM_INFRARED);
    getNativeFrameData(infrared_frame, data_infrared);
  }
}
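
/*
  Illustrative usage sketch (not part of the original file): retrieving a
  colored PCL point cloud with the overload above. It assumes ViSP was built
  with PCL support, that color and depth streams are enabled, and that the
  trailing infrared/alignment parameters have default NULL values in the class
  declaration.

  vpRealSense2 rs;
  rs.open();
  pcl::PointCloud<pcl::PointXYZRGB>::Ptr pointcloud(new pcl::PointCloud<pcl::PointXYZRGB>);
  rs.acquire(NULL, NULL, NULL, pointcloud);   // only the colored point cloud is requested
*/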
#endif

void vpRealSense2::close() { m_pipe.stop(); }

vpCameraParameters vpRealSense2::getCameraParameters(const rs2_stream &stream,
                                                     vpCameraParameters::vpCameraParametersProjType type) const
{
  auto rs_stream = m_pipelineProfile.get_stream(stream).as<rs2::video_stream_profile>();
  auto intrinsics = rs_stream.get_intrinsics();

  vpCameraParameters cam;
  double u0 = intrinsics.ppx;
  double v0 = intrinsics.ppy;
  double px = intrinsics.fx;
  double py = intrinsics.fy;

  if (type == vpCameraParameters::perspectiveProjWithDistortion) {
    double kdu = intrinsics.coeffs[0];
    cam.initPersProjWithDistortion(px, py, u0, v0, -kdu, kdu);
  } else {
    cam.initPersProjWithoutDistortion(px, py, u0, v0);
  }

  return cam;
}
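
/*
  Illustrative usage sketch (not part of the original file): retrieving ViSP
  intrinsics for the color stream once the device is streaming.

  vpRealSense2 rs;
  rs.open();
  vpCameraParameters cam = rs.getCameraParameters(RS2_STREAM_COLOR);
  std::cout << cam << std::endl;
*/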

rs2_intrinsics vpRealSense2::getIntrinsics(const rs2_stream &stream) const
{
  auto vsp = m_pipelineProfile.get_stream(stream).as<rs2::video_stream_profile>();
  return vsp.get_intrinsics();
}

void vpRealSense2::getColorFrame(const rs2::frame &frame, vpImage<vpRGBa> &color)
{
  auto vf = frame.as<rs2::video_frame>();
  const unsigned int width = (unsigned int)vf.get_width();
  const unsigned int height = (unsigned int)vf.get_height();
  color.resize(height, width);

  if (frame.get_profile().format() == RS2_FORMAT_RGB8) {
    vpImageConvert::RGBToRGBa((unsigned char *)frame.get_data(), (unsigned char *)color.bitmap, width, height);
  } else if (frame.get_profile().format() == RS2_FORMAT_RGBA8) {
    memcpy((unsigned char *)color.bitmap, (unsigned char *)frame.get_data(), width * height * sizeof(vpRGBa));
  } else if (frame.get_profile().format() == RS2_FORMAT_BGR8) {
    vpImageConvert::BGRToRGBa((unsigned char *)frame.get_data(), (unsigned char *)color.bitmap, width, height);
  } else {
    throw vpException(vpException::fatalError, "RealSense Camera - color stream not supported!");
  }
}

float vpRealSense2::getDepthScale()
{
  return m_depthScale;
}

void vpRealSense2::getGreyFrame(const rs2::frame &frame, vpImage<unsigned char> &grey)
{
  auto vf = frame.as<rs2::video_frame>();
  const unsigned int width = (unsigned int)vf.get_width();
  const unsigned int height = (unsigned int)vf.get_height();
  grey.resize(height, width);

  if (frame.get_profile().format() == RS2_FORMAT_RGB8) {
    vpImageConvert::RGBToGrey((unsigned char *)frame.get_data(), (unsigned char *)grey.bitmap, width, height);
  } else if (frame.get_profile().format() == RS2_FORMAT_RGBA8) {
    vpImageConvert::RGBaToGrey((unsigned char *)frame.get_data(), (unsigned char *)grey.bitmap, width * height);
  } else if (frame.get_profile().format() == RS2_FORMAT_BGR8) {
    vpImageConvert::BGRToGrey((unsigned char *)frame.get_data(), (unsigned char *)grey.bitmap, width, height);
  } else {
    throw vpException(vpException::fatalError, "RealSense Camera - color stream not supported!");
  }
}

void vpRealSense2::getNativeFrameData(const rs2::frame &frame, unsigned char *const data)
{
  auto vf = frame.as<rs2::video_frame>();
  int size = vf.get_width() * vf.get_height();

  switch (frame.get_profile().format()) {
  case RS2_FORMAT_RGB8:
  case RS2_FORMAT_BGR8:
    memcpy(data, frame.get_data(), size * 3);
    break;

  case RS2_FORMAT_RGBA8:
  case RS2_FORMAT_BGRA8:
    memcpy(data, frame.get_data(), size * 4);
    break;

  case RS2_FORMAT_Y16:
  case RS2_FORMAT_Z16:
    memcpy(data, frame.get_data(), size * 2);
    break;

  case RS2_FORMAT_Y8:
    memcpy(data, frame.get_data(), size);
    break;

  default:
    break;
  }
}

void vpRealSense2::getPointcloud(const rs2::depth_frame &depth_frame, std::vector<vpColVector> &pointcloud)
{
  if (m_depthScale <= std::numeric_limits<float>::epsilon()) {
    std::stringstream ss;
    ss << "Error, depth scale <= 0: " << m_depthScale;
    throw vpException(vpException::fatalError, ss.str());
  }

  auto vf = depth_frame.as<rs2::video_frame>();
  const int width = vf.get_width();
  const int height = vf.get_height();
  pointcloud.resize((size_t)(width * height));

  const uint16_t *p_depth_frame = reinterpret_cast<const uint16_t *>(depth_frame.get_data());

  // Multi-threading if OpenMP
  // Concurrent writes at different locations are safe
#pragma omp parallel for schedule(dynamic)
  for (int i = 0; i < height; i++) {
    auto depth_pixel_index = i * width;

    for (int j = 0; j < width; j++, depth_pixel_index++) {
      if (p_depth_frame[depth_pixel_index] == 0) {
        pointcloud[(size_t)depth_pixel_index].resize(4, false);
        pointcloud[(size_t)depth_pixel_index][0] = m_invalidDepthValue;
        pointcloud[(size_t)depth_pixel_index][1] = m_invalidDepthValue;
        pointcloud[(size_t)depth_pixel_index][2] = m_invalidDepthValue;
        pointcloud[(size_t)depth_pixel_index][3] = 1.0;
        continue;
      }

      // Get the depth value of the current pixel
      auto pixels_distance = m_depthScale * p_depth_frame[depth_pixel_index];

      float points[3];
      const float pixel[] = {(float)j, (float)i};
      rs2_deproject_pixel_to_point(points, &m_depthIntrinsics, pixel, pixels_distance);

      if (pixels_distance > m_max_Z)
        points[0] = points[1] = points[2] = m_invalidDepthValue;

      pointcloud[(size_t)depth_pixel_index].resize(4, false);
      pointcloud[(size_t)depth_pixel_index][0] = points[0];
      pointcloud[(size_t)depth_pixel_index][1] = points[1];
      pointcloud[(size_t)depth_pixel_index][2] = points[2];
      pointcloud[(size_t)depth_pixel_index][3] = 1.0;
    }
  }
}

#ifdef VISP_HAVE_PCL
void vpRealSense2::getPointcloud(const rs2::depth_frame &depth_frame, pcl::PointCloud<pcl::PointXYZ>::Ptr &pointcloud)
{
  if (m_depthScale <= std::numeric_limits<float>::epsilon()) {
    std::stringstream ss;
    ss << "Error, depth scale <= 0: " << m_depthScale;
    throw vpException(vpException::fatalError, ss.str());
  }

  auto vf = depth_frame.as<rs2::video_frame>();
  const int width = vf.get_width();
  const int height = vf.get_height();
  pointcloud->width = (uint32_t)width;
  pointcloud->height = (uint32_t)height;
  pointcloud->resize((size_t)(width * height));

#if MANUAL_POINTCLOUD // faster to compute manually when tested
  const uint16_t *p_depth_frame = reinterpret_cast<const uint16_t *>(depth_frame.get_data());

  // Multi-threading if OpenMP
  // Concurrent writes at different locations are safe
#pragma omp parallel for schedule(dynamic)
  for (int i = 0; i < height; i++) {
    auto depth_pixel_index = i * width;

    for (int j = 0; j < width; j++, depth_pixel_index++) {
      if (p_depth_frame[depth_pixel_index] == 0) {
        pointcloud->points[(size_t)(depth_pixel_index)].x = m_invalidDepthValue;
        pointcloud->points[(size_t)(depth_pixel_index)].y = m_invalidDepthValue;
        pointcloud->points[(size_t)(depth_pixel_index)].z = m_invalidDepthValue;
        continue;
      }

      // Get the depth value of the current pixel
      auto pixels_distance = m_depthScale * p_depth_frame[depth_pixel_index];

      float points[3];
      const float pixel[] = {(float)j, (float)i};
      rs2_deproject_pixel_to_point(points, &m_depthIntrinsics, pixel, pixels_distance);

      if (pixels_distance > m_max_Z)
        points[0] = points[1] = points[2] = m_invalidDepthValue;

      pointcloud->points[(size_t)(depth_pixel_index)].x = points[0];
      pointcloud->points[(size_t)(depth_pixel_index)].y = points[1];
      pointcloud->points[(size_t)(depth_pixel_index)].z = points[2];
    }
  }
#else
  m_points = m_pointcloud.calculate(depth_frame);
  auto vertices = m_points.get_vertices();

  for (size_t i = 0; i < m_points.size(); i++) {
    if (vertices[i].z <= 0.0f || vertices[i].z > m_max_Z) {
      pointcloud->points[i].x = m_invalidDepthValue;
      pointcloud->points[i].y = m_invalidDepthValue;
      pointcloud->points[i].z = m_invalidDepthValue;
    } else {
      pointcloud->points[i].x = vertices[i].x;
      pointcloud->points[i].y = vertices[i].y;
      pointcloud->points[i].z = vertices[i].z;
    }
  }
#endif
}

void vpRealSense2::getPointcloud(const rs2::depth_frame &depth_frame, const rs2::frame &color_frame,
                                 pcl::PointCloud<pcl::PointXYZRGB>::Ptr &pointcloud)
{
  if (m_depthScale <= std::numeric_limits<float>::epsilon()) {
    std::stringstream ss;
    ss << "Error, depth scale <= 0: " << m_depthScale;
    throw vpException(vpException::fatalError, ss.str());
  }

  auto vf = depth_frame.as<rs2::video_frame>();
  const int width = vf.get_width();
  const int height = vf.get_height();
  pointcloud->width = (uint32_t)width;
  pointcloud->height = (uint32_t)height;
  pointcloud->resize((size_t)(width * height));

  const uint16_t *p_depth_frame = reinterpret_cast<const uint16_t *>(depth_frame.get_data());
  const int color_width = m_colorIntrinsics.width;
  const int color_height = m_colorIntrinsics.height;
  auto color_format = color_frame.as<rs2::video_frame>().get_profile().format();
  bool swap_rgb = color_format == RS2_FORMAT_BGR8 || color_format == RS2_FORMAT_BGRA8;
  unsigned int nb_color_pixel = (color_format == RS2_FORMAT_RGB8 || color_format == RS2_FORMAT_BGR8) ? 3 : 4;
  const unsigned char *p_color_frame = reinterpret_cast<const unsigned char *>(color_frame.get_data());

  // Multi-threading if OpenMP
  // Concurrent writes at different locations are safe
#pragma omp parallel for schedule(dynamic)
  for (int i = 0; i < height; i++) {
    auto depth_pixel_index = i * width;

    for (int j = 0; j < width; j++, depth_pixel_index++) {
      if (p_depth_frame[depth_pixel_index] == 0) {
        pointcloud->points[(size_t)depth_pixel_index].x = m_invalidDepthValue;
        pointcloud->points[(size_t)depth_pixel_index].y = m_invalidDepthValue;
        pointcloud->points[(size_t)depth_pixel_index].z = m_invalidDepthValue;

        // For out of bounds color data, default to a shade of blue in order to
        // visually distinguish holes. This color value is same as the librealsense
        // out of bounds color value.
#if PCL_VERSION_COMPARE(<, 1, 1, 0)
        unsigned int r = 96, g = 157, b = 198;
        uint32_t rgb = (static_cast<uint32_t>(r) << 16 | static_cast<uint32_t>(g) << 8 | static_cast<uint32_t>(b));

        pointcloud->points[(size_t)depth_pixel_index].rgb = *reinterpret_cast<float *>(&rgb);
#else
        pointcloud->points[(size_t)depth_pixel_index].r = (uint8_t)96;
        pointcloud->points[(size_t)depth_pixel_index].g = (uint8_t)157;
        pointcloud->points[(size_t)depth_pixel_index].b = (uint8_t)198;
#endif
        continue;
      }

      // Get the depth value of the current pixel
      auto pixels_distance = m_depthScale * p_depth_frame[depth_pixel_index];

      float depth_point[3];
      const float pixel[] = {(float)j, (float)i};
      rs2_deproject_pixel_to_point(depth_point, &m_depthIntrinsics, pixel, pixels_distance);

      if (pixels_distance > m_max_Z)
        depth_point[0] = depth_point[1] = depth_point[2] = m_invalidDepthValue;

      pointcloud->points[(size_t)depth_pixel_index].x = depth_point[0];
      pointcloud->points[(size_t)depth_pixel_index].y = depth_point[1];
      pointcloud->points[(size_t)depth_pixel_index].z = depth_point[2];

      float color_point[3];
      rs2_transform_point_to_point(color_point, &m_depth2ColorExtrinsics, depth_point);
      float color_pixel[2];
      rs2_project_point_to_pixel(color_pixel, &m_colorIntrinsics, color_point);

      if (color_pixel[1] < 0 || color_pixel[1] >= color_height || color_pixel[0] < 0 || color_pixel[0] >= color_width) {
        // For out of bounds color data, default to a shade of blue in order to
        // visually distinguish holes. This color value is same as the librealsense
        // out of bounds color value.
#if PCL_VERSION_COMPARE(<, 1, 1, 0)
        unsigned int r = 96, g = 157, b = 198;
        uint32_t rgb = (static_cast<uint32_t>(r) << 16 | static_cast<uint32_t>(g) << 8 | static_cast<uint32_t>(b));

        pointcloud->points[(size_t)depth_pixel_index].rgb = *reinterpret_cast<float *>(&rgb);
#else
        pointcloud->points[(size_t)depth_pixel_index].r = (uint8_t)96;
        pointcloud->points[(size_t)depth_pixel_index].g = (uint8_t)157;
        pointcloud->points[(size_t)depth_pixel_index].b = (uint8_t)198;
#endif
      } else {
        unsigned int i_ = (unsigned int)color_pixel[1];
        unsigned int j_ = (unsigned int)color_pixel[0];

#if PCL_VERSION_COMPARE(<, 1, 1, 0)
        uint32_t rgb = 0;
        if (swap_rgb) {
          rgb =
              (static_cast<uint32_t>(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]) |
               static_cast<uint32_t>(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]) << 8 |
               static_cast<uint32_t>(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2]) << 16);
        } else {
          rgb = (static_cast<uint32_t>(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]) << 16 |
                 static_cast<uint32_t>(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]) << 8 |
                 static_cast<uint32_t>(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2]));
        }

        pointcloud->points[(size_t)depth_pixel_index].rgb = *reinterpret_cast<float *>(&rgb);
#else
        if (swap_rgb) {
          pointcloud->points[(size_t)depth_pixel_index].b =
              (uint32_t)p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel];
          pointcloud->points[(size_t)depth_pixel_index].g =
              (uint32_t)p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1];
          pointcloud->points[(size_t)depth_pixel_index].r =
              (uint32_t)p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2];
        } else {
          pointcloud->points[(size_t)depth_pixel_index].r =
              (uint32_t)p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel];
          pointcloud->points[(size_t)depth_pixel_index].g =
              (uint32_t)p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1];
          pointcloud->points[(size_t)depth_pixel_index].b =
              (uint32_t)p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2];
        }
#endif
      }
    }
  }
}

#endif

vpHomogeneousMatrix vpRealSense2::getTransformation(const rs2_stream &from, const rs2_stream &to) const
{
  auto from_stream = m_pipelineProfile.get_stream(from);
  auto to_stream = m_pipelineProfile.get_stream(to);
  rs2_extrinsics extrinsics = from_stream.get_extrinsics_to(to_stream);

  vpTranslationVector t;
  vpRotationMatrix R;
  for (unsigned int i = 0; i < 3; i++) {
    t[i] = extrinsics.translation[i];
    for (unsigned int j = 0; j < 3; j++)
      R[i][j] = extrinsics.rotation[j * 3 + i]; // rotation is column-major order
  }

  vpHomogeneousMatrix to_M_from(t, R);
  return to_M_from;
}
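
/*
  Illustrative usage sketch (not part of the original file): getting the
  depth-to-color extrinsics as a ViSP homogeneous matrix.
  getTransformation(from, to) returns the pose of the "from" stream frame
  expressed in the "to" stream frame.

  vpRealSense2 rs;
  rs.open();
  vpHomogeneousMatrix color_M_depth = rs.getTransformation(RS2_STREAM_DEPTH, RS2_STREAM_COLOR);
*/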

void vpRealSense2::open(const rs2::config &cfg)
{
  m_pipelineProfile = m_pipe.start(cfg);

  rs2::device dev = m_pipelineProfile.get_device();

  // Go over the device's sensors
  for (rs2::sensor &sensor : dev.query_sensors()) {
    // Check if the sensor is a depth sensor
    if (rs2::depth_sensor dpt = sensor.as<rs2::depth_sensor>()) {
      m_depthScale = dpt.get_depth_scale();
    }
  }

  try {
    auto depth_stream = m_pipelineProfile.get_stream(RS2_STREAM_DEPTH).as<rs2::video_stream_profile>();
    m_depthIntrinsics = depth_stream.get_intrinsics();

    try {
      auto color_stream = m_pipelineProfile.get_stream(RS2_STREAM_COLOR);
      m_depth2ColorExtrinsics = depth_stream.get_extrinsics_to(color_stream);
    } catch (const std::runtime_error &e) {
      std::cout << "Error getting depth to color extrinsics: " << e.what() << std::endl;
    }
  } catch (const std::runtime_error &e) {
    std::cout << "Error getting depth intrinsics: " << e.what() << std::endl;
  }

  try {
    m_colorIntrinsics = m_pipelineProfile.get_stream(RS2_STREAM_COLOR).as<rs2::video_stream_profile>().get_intrinsics();
  } catch (const std::runtime_error &e) {
    std::cout << "Error getting color intrinsics: " << e.what() << std::endl;
  }
}
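
/*
  Illustrative usage sketch (not part of the original file): opening a specific
  device with an explicit stream configuration instead of the default one. The
  serial number, resolution and frame rate below are hypothetical values.

  vpRealSense2 rs;
  rs2::config config;
  config.enable_device("746112060001");                          // hypothetical serial number
  config.enable_stream(RS2_STREAM_DEPTH, 640, 480, RS2_FORMAT_Z16, 30);
  rs.open(config);
*/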

namespace
{
// Helper functions to print information about the RealSense device
void print(const rs2_extrinsics &extrinsics, std::ostream &os)
{
  std::stringstream ss;
  ss << "Rotation Matrix:\n";

  for (auto i = 0; i < 3; ++i) {
    for (auto j = 0; j < 3; ++j) {
      ss << std::left << std::setw(15) << std::setprecision(5) << extrinsics.rotation[j * 3 + i];
    }
    ss << std::endl;
  }

  ss << "\nTranslation Vector: ";
  for (size_t i = 0; i < sizeof(extrinsics.translation) / sizeof(extrinsics.translation[0]); ++i)
    ss << std::setprecision(15) << extrinsics.translation[i] << " ";

  os << ss.str() << "\n\n";
}

void print(const rs2_intrinsics &intrinsics, std::ostream &os)
{
  std::stringstream ss;
  ss << std::left << std::setw(14) << "Width: "
     << "\t" << intrinsics.width << "\n"
     << std::left << std::setw(14) << "Height: "
     << "\t" << intrinsics.height << "\n"
     << std::left << std::setw(14) << "PPX: "
     << "\t" << std::setprecision(15) << intrinsics.ppx << "\n"
     << std::left << std::setw(14) << "PPY: "
     << "\t" << std::setprecision(15) << intrinsics.ppy << "\n"
     << std::left << std::setw(14) << "Fx: "
     << "\t" << std::setprecision(15) << intrinsics.fx << "\n"
     << std::left << std::setw(14) << "Fy: "
     << "\t" << std::setprecision(15) << intrinsics.fy << "\n"
     << std::left << std::setw(14) << "Distortion: "
     << "\t" << rs2_distortion_to_string(intrinsics.model) << "\n"
     << std::left << std::setw(14) << "Coeffs: ";

  for (size_t i = 0; i < sizeof(intrinsics.coeffs) / sizeof(intrinsics.coeffs[0]); ++i)
    ss << "\t" << std::setprecision(15) << intrinsics.coeffs[i] << " ";

  os << ss.str() << "\n\n";
}

void safe_get_intrinsics(const rs2::video_stream_profile &profile, rs2_intrinsics &intrinsics)
{
  try {
    intrinsics = profile.get_intrinsics();
  } catch (...) {
  }
}

bool operator==(const rs2_intrinsics &lhs, const rs2_intrinsics &rhs)
{
  return lhs.width == rhs.width && lhs.height == rhs.height && lhs.ppx == rhs.ppx && lhs.ppy == rhs.ppy &&
         lhs.fx == rhs.fx && lhs.fy == rhs.fy && lhs.model == rhs.model &&
         !std::memcmp(lhs.coeffs, rhs.coeffs, sizeof(rhs.coeffs));
}

std::string get_str_formats(const std::set<rs2_format> &formats)
{
  std::stringstream ss;
  for (auto format = formats.begin(); format != formats.end(); ++format) {
    ss << *format << ((format != formats.end()) && (next(format) == formats.end()) ? "" : "/");
  }
  return ss.str();
}

struct stream_and_resolution {
  rs2_stream stream;
  int stream_index;
  int width;
  int height;
  std::string stream_name;

  bool operator<(const stream_and_resolution &obj) const
  {
    return (std::make_tuple(stream, stream_index, width, height) <
            std::make_tuple(obj.stream, obj.stream_index, obj.width, obj.height));
  }
};

struct stream_and_index {
  rs2_stream stream;
  int stream_index;

  bool operator<(const stream_and_index &obj) const
  {
    return (std::make_tuple(stream, stream_index) < std::make_tuple(obj.stream, obj.stream_index));
  }
};
} // anonymous namespace

std::ostream &operator<<(std::ostream &os, const vpRealSense2 &rs)
{
  os << std::left << std::setw(30) << "Device Name" << std::setw(20) << "Serial Number" << std::setw(20)
     << "Firmware Version" << std::endl;

  rs2::device dev = rs.m_pipelineProfile.get_device();
  os << std::left << std::setw(30) << dev.get_info(RS2_CAMERA_INFO_NAME) << std::setw(20)
     << dev.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER) << std::setw(20) << dev.get_info(RS2_CAMERA_INFO_FIRMWARE_VERSION)
     << std::endl;

  // Show which options are supported by this device
  os << " Device info: \n";
  for (auto j = 0; j < RS2_CAMERA_INFO_COUNT; ++j) {
    auto param = static_cast<rs2_camera_info>(j);
    if (dev.supports(param))
      os << " " << std::left << std::setw(30) << rs2_camera_info_to_string(rs2_camera_info(param)) << ": \t"
         << dev.get_info(param) << "\n";
  }

  os << "\n";

  for (auto &&sensor : dev.query_sensors()) {
    os << "Options for " << sensor.get_info(RS2_CAMERA_INFO_NAME) << std::endl;

    os << std::setw(55) << " Supported options:" << std::setw(10) << "min" << std::setw(10) << " max" << std::setw(6)
       << " step" << std::setw(10) << " default" << std::endl;
    for (auto j = 0; j < RS2_OPTION_COUNT; ++j) {
      auto opt = static_cast<rs2_option>(j);
      if (sensor.supports(opt)) {
        auto range = sensor.get_option_range(opt);
        os << " " << std::left << std::setw(50) << opt << " : " << std::setw(5) << range.min << "... "
           << std::setw(12) << range.max << std::setw(6) << range.step << std::setw(10) << range.def << "\n";
      }
    }

    os << "\n";
  }

  for (auto &&sensor : dev.query_sensors()) {
    os << "Stream Profiles supported by " << sensor.get_info(RS2_CAMERA_INFO_NAME) << "\n";

    os << std::setw(55) << " Supported modes:" << std::setw(10) << "stream" << std::setw(10) << " resolution"
       << std::setw(6) << " fps" << std::setw(10) << " format"
       << "\n";
    // Show which streams are supported by this device
    for (auto &&profile : sensor.get_stream_profiles()) {
      if (auto video = profile.as<rs2::video_stream_profile>()) {
        os << " " << profile.stream_name() << "\t " << video.width() << "x" << video.height() << "\t@ "
           << profile.fps() << "Hz\t" << profile.format() << "\n";
      } else {
        os << " " << profile.stream_name() << "\t@ " << profile.fps() << "Hz\t" << profile.format() << "\n";
      }
    }

    os << "\n";
  }

  std::map<stream_and_index, rs2::stream_profile> streams;
  std::map<stream_and_resolution, std::vector<std::pair<std::set<rs2_format>, rs2_intrinsics> > > intrinsics_map;
  for (auto &&sensor : dev.query_sensors()) {
    // Intrinsics
    for (auto &&profile : sensor.get_stream_profiles()) {
      if (auto video = profile.as<rs2::video_stream_profile>()) {
        if (streams.find(stream_and_index{profile.stream_type(), profile.stream_index()}) == streams.end()) {
          streams[stream_and_index{profile.stream_type(), profile.stream_index()}] = profile;
        }

        rs2_intrinsics intrinsics{};
        stream_and_resolution stream_res{profile.stream_type(), profile.stream_index(), video.width(), video.height(),
                                         profile.stream_name()};
        safe_get_intrinsics(video, intrinsics);
        auto it = std::find_if(
            (intrinsics_map[stream_res]).begin(), (intrinsics_map[stream_res]).end(),
            [&](const std::pair<std::set<rs2_format>, rs2_intrinsics> &kvp) { return intrinsics == kvp.second; });
        if (it == (intrinsics_map[stream_res]).end()) {
          (intrinsics_map[stream_res]).push_back({{profile.format()}, intrinsics});
        } else {
          it->first.insert(profile.format()); // If the intrinsics are equals,
                                              // add the profile format to
                                              // format set
        }
      }
    }
  }

  os << "Provided Intrinsic:\n";
  for (auto &kvp : intrinsics_map) {
    auto stream_res = kvp.first;
    for (auto &intrinsics : kvp.second) {
      auto formats = get_str_formats(intrinsics.first);
      os << "Intrinsic of \"" << stream_res.stream_name << "\"\t " << stream_res.width << "x" << stream_res.height
         << "\t " << formats << "\n";
      if (intrinsics.second == rs2_intrinsics{}) {
        os << "Intrinsic NOT available!\n\n";
      } else {
        print(intrinsics.second, os);
      }
    }
  }

  // Print Extrinsics
  os << "\nProvided Extrinsic:\n";
  for (auto kvp1 = streams.begin(); kvp1 != streams.end(); ++kvp1) {
    for (auto kvp2 = streams.begin(); kvp2 != streams.end(); ++kvp2) {
      os << "Extrinsic from \"" << kvp1->second.stream_name() << "\"\t "
         << "To"
         << "\t \"" << kvp2->second.stream_name() << "\"\n";
      auto extrinsics = kvp1->second.get_extrinsics_to(kvp2->second);
      print(extrinsics, os);
    }
  }

  return os;
}
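
/*
  Illustrative usage sketch (not part of the original file): printing the
  device, option and stream-profile summary produced by the operator above.

  vpRealSense2 rs;
  rs.open();
  std::cout << rs << std::endl;
*/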

#elif !defined(VISP_BUILD_SHARED_LIBS)
// Work around to avoid warning: libvisp_sensor.a(vpRealSense2.cpp.o) has no
// symbols
void dummy_vpRealSense2(){};
#endif