Visual Servoing Platform  version 3.1.0
vpRealSense2.cpp
1 /****************************************************************************
2  *
3  * This file is part of the ViSP software.
4  * Copyright (C) 2005 - 2017 by Inria. All rights reserved.
5  *
6  * This software is free software; you can redistribute it and/or modify
7  * it under the terms of the GNU General Public License as published by
8  * the Free Software Foundation; either version 2 of the License, or
9  * (at your option) any later version.
10  * See the file LICENSE.txt at the root directory of this source
11  * distribution for additional information about the GNU GPL.
12  *
13  * For using ViSP with software that can not be combined with the GNU
14  * GPL, please contact Inria about acquiring a ViSP Professional
15  * Edition License.
16  *
17  * See http://visp.inria.fr for more information.
18  *
19  * This software was developed at:
20  * Inria Rennes - Bretagne Atlantique
21  * Campus Universitaire de Beaulieu
22  * 35042 Rennes Cedex
23  * France
24  *
25  * If you have questions regarding the use of this file, please contact
26  * Inria at visp@inria.fr
27  *
28  * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
29  * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
30  *
31  * Description:
32  * librealSense2 interface.
33  *
34  *****************************************************************************/
35 
36 #include <visp3/core/vpConfig.h>
37 
38 #if defined(VISP_HAVE_REALSENSE2) && defined(VISP_HAVE_CPP11_COMPATIBILITY)
39 #include <iomanip>
40 #include <map>
41 #include <set>
42 #include <visp3/core/vpImageConvert.h>
43 #include <visp3/sensor/vpRealSense2.h>
44 
45 #define MANUAL_POINTCLOUD 1
46 
// Default constructor: zero-initialize intrinsics/extrinsics, the depth scale
// and the invalid-depth sentinel, and cap the usable depth range at 8 m.
// NOTE(review): the constructor signature line is missing from this extract;
// presumably `vpRealSense2::vpRealSense2()` — confirm against the header.
  : m_colorIntrinsics(), m_depth2ColorExtrinsics(), m_depthIntrinsics(), m_depthScale(0.0f), m_invalidDepthValue(0.0f),
    m_max_Z(8.0f), m_pipe(), m_pipelineProfile(), m_pointcloud(), m_points()
{
}
55 
61 
{
  // Block until a coherent frameset is available, then convert its color
  // frame to an 8-bit grey image.
  // NOTE(review): the signature line is missing from this extract; presumably
  // `void vpRealSense2::acquire(vpImage<unsigned char> &grey)` — confirm.
  auto data = m_pipe.wait_for_frames();
  auto color_frame = data.get_color_frame();
  getGreyFrame(color_frame, grey);
}
72 
{
  // Block until a coherent frameset is available, then convert its color
  // frame to an RGBa image.
  // NOTE(review): the signature line is missing from this extract; presumably
  // `void vpRealSense2::acquire(vpImage<vpRGBa> &color)` — confirm.
  auto data = m_pipe.wait_for_frames();
  auto color_frame = data.get_color_frame();
  getColorFrame(color_frame, color);
}
83 
/*!
  Acquire raw native data from the camera.
  \param data_image : Color image buffer, or NULL if not wanted.
  \param data_depth : Depth image buffer, or NULL if not wanted.
  \param data_pointCloud : Point cloud (homogeneous coordinates), or NULL if not wanted.
  \param data_infrared : Infrared image buffer, or NULL if not wanted.
  \param align_to : Optional align processing block (e.g. depth-to-color), or NULL.
*/
void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const data_depth,
                           std::vector<vpColVector> *const data_pointCloud, unsigned char *const data_infrared,
                           rs2::align *const align_to)
{
  auto data = m_pipe.wait_for_frames();
  // NOTE(review): `proccess` is the rs2::align method spelling in early
  // librealsense2 releases — verify against the SDK version pinned by the build.
  if (align_to != NULL)
    data = align_to->proccess(data);

  if (data_image != NULL) {
    auto color_frame = data.get_color_frame();
    getNativeFrameData(color_frame, data_image);
  }

  // Fetch the depth frame once; it serves both the raw buffer and the cloud.
  if (data_depth != NULL || data_pointCloud != NULL) {
    auto depth_frame = data.get_depth_frame();
    if (data_depth != NULL)
      getNativeFrameData(depth_frame, data_depth);

    if (data_pointCloud != NULL)
      getPointcloud(depth_frame, *data_pointCloud);
  }

  if (data_infrared != NULL) {
    auto infrared_frame = data.first(RS2_STREAM_INFRARED);
    getNativeFrameData(infrared_frame, data_infrared);
  }
}
119 
120 #ifdef VISP_HAVE_PCL
121 
/*!
  Acquire raw native data plus an (uncolored) PCL point cloud.
  \param data_image : Color image buffer, or NULL if not wanted.
  \param data_depth : Depth image buffer, or NULL if not wanted.
  \param data_pointCloud : Point cloud (homogeneous coordinates), or NULL if not wanted.
  \param pointcloud : PCL XYZ point cloud output.
  \param data_infrared : Infrared image buffer, or NULL if not wanted.
  \param align_to : Optional align processing block, or NULL.
*/
void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const data_depth,
                           std::vector<vpColVector> *const data_pointCloud,
                           pcl::PointCloud<pcl::PointXYZ>::Ptr &pointcloud, unsigned char *const data_infrared,
                           rs2::align *const align_to)
{
  auto data = m_pipe.wait_for_frames();
  // NOTE(review): `proccess` spelling — see note in the raw acquire() overload.
  if (align_to != NULL)
    data = align_to->proccess(data);

  if (data_image != NULL) {
    auto color_frame = data.get_color_frame();
    getNativeFrameData(color_frame, data_image);
  }

  // One depth frame feeds the raw buffer and both cloud representations.
  if (data_depth != NULL || data_pointCloud != NULL || pointcloud != NULL) {
    auto depth_frame = data.get_depth_frame();
    if (data_depth != NULL)
      getNativeFrameData(depth_frame, data_depth);

    if (data_pointCloud != NULL)
      getPointcloud(depth_frame, *data_pointCloud);

    if (pointcloud != NULL)
      getPointcloud(depth_frame, pointcloud);
  }

  if (data_infrared != NULL) {
    auto infrared_frame = data.first(RS2_STREAM_INFRARED);
    getNativeFrameData(infrared_frame, data_infrared);
  }
}
162 
/*!
  Acquire raw native data plus a colored PCL point cloud.
  \param data_image : Color image buffer, or NULL if not wanted.
  \param data_depth : Depth image buffer, or NULL if not wanted.
  \param data_pointCloud : Point cloud (homogeneous coordinates), or NULL if not wanted.
  \param pointcloud : PCL XYZRGB point cloud output.
  \param data_infrared : Infrared image buffer, or NULL if not wanted.
  \param align_to : Optional align processing block, or NULL.
*/
void vpRealSense2::acquire(unsigned char *const data_image, unsigned char *const data_depth,
                           std::vector<vpColVector> *const data_pointCloud,
                           pcl::PointCloud<pcl::PointXYZRGB>::Ptr &pointcloud, unsigned char *const data_infrared,
                           rs2::align *const align_to)
{
  auto data = m_pipe.wait_for_frames();
  // NOTE(review): `proccess` spelling — see note in the raw acquire() overload.
  if (align_to != NULL)
    data = align_to->proccess(data);

  // The color frame is fetched unconditionally: it is needed both for the
  // optional raw color buffer and to texture the colored point cloud below.
  auto color_frame = data.get_color_frame();
  if (data_image != NULL) {
    getNativeFrameData(color_frame, data_image);
  }

  if (data_depth != NULL || data_pointCloud != NULL || pointcloud != NULL) {
    auto depth_frame = data.get_depth_frame();
    if (data_depth != NULL)
      getNativeFrameData(depth_frame, data_depth);

    if (data_pointCloud != NULL)
      getPointcloud(depth_frame, *data_pointCloud);

    if (pointcloud != NULL)
      getPointcloud(depth_frame, color_frame, pointcloud);
  }

  if (data_infrared != NULL) {
    auto infrared_frame = data.first(RS2_STREAM_INFRARED);
    getNativeFrameData(infrared_frame, data_infrared);
  }
}
204 #endif
205 
217 void vpRealSense2::close() { m_pipe.stop(); }
218 
{
  // Build ViSP camera parameters from the librealsense2 intrinsics of the
  // requested stream.
  // NOTE(review): the function signature line is missing from this extract;
  // presumably `vpCameraParameters vpRealSense2::getCameraParameters(...)`.
  auto rs_stream = m_pipelineProfile.get_stream(stream).as<rs2::video_stream_profile>();
  auto intrinsics = rs_stream.get_intrinsics();

  vpCameraParameters cam;
  double u0 = intrinsics.ppx; // principal point, x
  double v0 = intrinsics.ppy; // principal point, y
  double px = intrinsics.fx;  // focal length in pixels, x
  double py = intrinsics.fy;  // focal length in pixels, y

  // NOTE(review): the `if (...) {` condition line is missing from this
  // extract; presumably it selects the with-distortion projection model.
    double kdu = intrinsics.coeffs[0];
    cam.initPersProjWithDistortion(px, py, u0, v0, -kdu, kdu);
  } else {
    cam.initPersProjWithoutDistortion(px, py, u0, v0);
  }

  return cam;
}
248 
256 rs2_intrinsics vpRealSense2::getIntrinsics(const rs2_stream &stream) const
257 {
258  auto vsp = m_pipelineProfile.get_stream(stream).as<rs2::video_stream_profile>();
259  return vsp.get_intrinsics();
260 }
261 
262 void vpRealSense2::getColorFrame(const rs2::frame &frame, vpImage<vpRGBa> &color)
263 {
264  auto vf = frame.as<rs2::video_frame>();
265  const unsigned int width = (unsigned int)vf.get_width();
266  const unsigned int height = (unsigned int)vf.get_height();
267  color.resize(height, width);
268 
269  if (frame.get_profile().format() == RS2_FORMAT_RGB8) {
270  vpImageConvert::RGBToRGBa((unsigned char *)frame.get_data(), (unsigned char *)color.bitmap, width, height);
271  } else if (frame.get_profile().format() == RS2_FORMAT_RGBA8) {
272  memcpy((unsigned char *)color.bitmap, (unsigned char *)frame.get_data(), width * height * sizeof(vpRGBa));
273  } else if (frame.get_profile().format() == RS2_FORMAT_BGR8) {
274  vpImageConvert::BGRToRGBa((unsigned char *)frame.get_data(), (unsigned char *)color.bitmap, width, height);
275  } else {
276  throw vpException(vpException::fatalError, "RealSense Camera - color stream not supported!");
277  }
278 }
279 
280 void vpRealSense2::getGreyFrame(const rs2::frame &frame, vpImage<unsigned char> &grey)
281 {
282  auto vf = frame.as<rs2::video_frame>();
283  const unsigned int width = (unsigned int)vf.get_width();
284  const unsigned int height = (unsigned int)vf.get_height();
285  grey.resize(height, width);
286 
287  if (frame.get_profile().format() == RS2_FORMAT_RGB8) {
288  vpImageConvert::RGBToGrey((unsigned char *)frame.get_data(), (unsigned char *)grey.bitmap, width, height);
289  } else if (frame.get_profile().format() == RS2_FORMAT_RGBA8) {
290  vpImageConvert::RGBaToGrey((unsigned char *)frame.get_data(), (unsigned char *)grey.bitmap, width * height);
291  } else if (frame.get_profile().format() == RS2_FORMAT_BGR8) {
292  vpImageConvert::BGRToGrey((unsigned char *)frame.get_data(), (unsigned char *)grey.bitmap, width, height);
293  } else {
294  throw vpException(vpException::fatalError, "RealSense Camera - color stream not supported!");
295  }
296 }
297 
298 void vpRealSense2::getNativeFrameData(const rs2::frame &frame, unsigned char *const data)
299 {
300  auto vf = frame.as<rs2::video_frame>();
301  int size = vf.get_width() * vf.get_height();
302 
303  switch (frame.get_profile().format()) {
304  case RS2_FORMAT_RGB8:
305  case RS2_FORMAT_BGR8:
306  memcpy(data, frame.get_data(), size * 3);
307  break;
308 
309  case RS2_FORMAT_RGBA8:
310  case RS2_FORMAT_BGRA8:
311  memcpy(data, frame.get_data(), size * 4);
312  break;
313 
314  case RS2_FORMAT_Y16:
315  case RS2_FORMAT_Z16:
316  memcpy(data, frame.get_data(), size * 2);
317  break;
318 
319  case RS2_FORMAT_Y8:
320  memcpy(data, frame.get_data(), size);
321  break;
322 
323  default:
324  break;
325  }
326 }
327 
328 void vpRealSense2::getPointcloud(const rs2::depth_frame &depth_frame, std::vector<vpColVector> &pointcloud)
329 {
330  auto vf = depth_frame.as<rs2::video_frame>();
331  const int width = vf.get_width();
332  const int height = vf.get_height();
333  pointcloud.resize((size_t)(width * height));
334 
335  const uint16_t *p_depth_frame = reinterpret_cast<const uint16_t *>(depth_frame.get_data());
336 
337 #pragma omp parallel for schedule(dynamic)
338  for (int i = 0; i < height; i++) {
339  auto depth_pixel_index = i * width;
340 
341  for (int j = 0; j < width; j++, depth_pixel_index++) {
342  // Get the depth value of the current pixel
343  auto pixels_distance = m_depthScale * p_depth_frame[depth_pixel_index];
344 
345  float points[3];
346  const float pixel[] = {(float)j, (float)i};
347  rs2_deproject_pixel_to_point(points, &m_depthIntrinsics, pixel, pixels_distance);
348 
349  if (pixels_distance <= 0 || pixels_distance > m_max_Z)
350  points[0] = points[1] = points[2] = m_invalidDepthValue;
351 
352  vpColVector v(4);
353  v[0] = points[0];
354  v[1] = points[1];
355  v[2] = points[2];
356  v[3] = 1.0;
357  pointcloud[(size_t)depth_pixel_index] = v;
358  }
359  }
360 }
361 
362 #ifdef VISP_HAVE_PCL
// Fill an organized PCL XYZ cloud (width x height) from a depth frame.
// Invalid pixels (depth <= 0 or > m_max_Z) get m_invalidDepthValue on all
// three coordinates.
void vpRealSense2::getPointcloud(const rs2::depth_frame &depth_frame, pcl::PointCloud<pcl::PointXYZ>::Ptr &pointcloud)
{
  auto vf = depth_frame.as<rs2::video_frame>();
  const int width = vf.get_width();
  const int height = vf.get_height();
  pointcloud->width = (uint32_t)width;
  pointcloud->height = (uint32_t)height;
  pointcloud->resize((size_t)(width * height));

#if MANUAL_POINTCLOUD // faster when tested
  // Manual per-pixel deprojection, parallelized over rows.
  const uint16_t *p_depth_frame = reinterpret_cast<const uint16_t *>(depth_frame.get_data());

#pragma omp parallel for schedule(dynamic)
  for (int i = 0; i < height; i++) {
    auto depth_pixel_index = i * width;

    for (int j = 0; j < width; j++, depth_pixel_index++) {
      // Get the depth value of the current pixel (meters).
      auto pixels_distance = m_depthScale * p_depth_frame[depth_pixel_index];

      float points[3];
      const float pixel[] = {(float)j, (float)i};
      rs2_deproject_pixel_to_point(points, &m_depthIntrinsics, pixel, pixels_distance);

      if (pixels_distance <= 0 || pixels_distance > m_max_Z)
        points[0] = points[1] = points[2] = m_invalidDepthValue;

      pointcloud->points[(size_t)(depth_pixel_index)].x = points[0];
      pointcloud->points[(size_t)(depth_pixel_index)].y = points[1];
      pointcloud->points[(size_t)(depth_pixel_index)].z = points[2];
    }
  }
#else
  // Alternative path: let librealsense compute the vertices, then copy them,
  // invalidating out-of-range points.
  m_points = m_pointcloud.calculate(depth_frame);
  auto vertices = m_points.get_vertices();

  for (size_t i = 0; i < m_points.size(); i++) {
    if (vertices[i].z <= 0.0f || vertices[i].z > m_max_Z) {
      pointcloud->points[i].x = m_invalidDepthValue;
      pointcloud->points[i].y = m_invalidDepthValue;
      pointcloud->points[i].z = m_invalidDepthValue;
    } else {
      pointcloud->points[i].x = vertices[i].x;
      pointcloud->points[i].y = vertices[i].y;
      pointcloud->points[i].z = vertices[i].z;
    }
  }
#endif
}
412 
413 void vpRealSense2::getPointcloud(const rs2::depth_frame &depth_frame, const rs2::frame &color_frame,
414  pcl::PointCloud<pcl::PointXYZRGB>::Ptr &pointcloud)
415 {
416  auto vf = depth_frame.as<rs2::video_frame>();
417  const int width = vf.get_width();
418  const int height = vf.get_height();
419  pointcloud->width = (uint32_t)width;
420  pointcloud->height = (uint32_t)height;
421  pointcloud->resize((size_t)(width * height));
422 
423  const uint16_t *p_depth_frame = reinterpret_cast<const uint16_t *>(depth_frame.get_data());
424  const int color_width = m_colorIntrinsics.width;
425  const int color_height = m_colorIntrinsics.height;
426  auto color_format = color_frame.as<rs2::video_frame>().get_profile().format();
427  bool swap_rgb = color_format == RS2_FORMAT_BGR8 || color_format == RS2_FORMAT_BGRA8;
428  unsigned int nb_color_pixel = (color_format == RS2_FORMAT_RGB8 || color_format == RS2_FORMAT_BGR8) ? 3 : 4;
429  const unsigned char *p_color_frame = reinterpret_cast<const unsigned char *>(color_frame.get_data());
430 
431 #pragma omp parallel for schedule(dynamic)
432  for (int i = 0; i < height; i++) {
433  auto depth_pixel_index = i * width;
434 
435  for (int j = 0; j < width; j++, depth_pixel_index++) {
436  // Get the depth value of the current pixel
437  auto pixels_distance = m_depthScale * p_depth_frame[depth_pixel_index];
438 
439  float depth_point[3];
440  const float pixel[] = {(float)j, (float)i};
441  rs2_deproject_pixel_to_point(depth_point, &m_depthIntrinsics, pixel, pixels_distance);
442 
443  if (pixels_distance <= 0 || pixels_distance > m_max_Z)
444  depth_point[0] = depth_point[1] = depth_point[2] = m_invalidDepthValue;
445 
446  pointcloud->points[(size_t)depth_pixel_index].x = depth_point[0];
447  pointcloud->points[(size_t)depth_pixel_index].y = depth_point[1];
448  pointcloud->points[(size_t)depth_pixel_index].z = depth_point[2];
449 
450  float color_point[3];
451  rs2_transform_point_to_point(color_point, &m_depth2ColorExtrinsics, depth_point);
452  float color_pixel[2];
453  rs2_project_point_to_pixel(color_pixel, &m_colorIntrinsics, color_point);
454 
455  if (color_pixel[1] < 0 || color_pixel[1] >= color_height || color_pixel[0] < 0 || color_pixel[0] >= color_width) {
456 // For out of bounds color data, default to a shade of blue in order to
457 // visually distinguish holes. This color value is same as the librealsense
458 // out of bounds color value.
459 #if PCL_VERSION_COMPARE(<, 1, 1, 0)
460  unsigned int r = 96, g = 157, b = 198;
461  uint32_t rgb = (static_cast<uint32_t>(r) << 16 | static_cast<uint32_t>(g) << 8 | static_cast<uint32_t>(b));
462 
463  pointcloud->points[(size_t)depth_pixel_index].rgb = *reinterpret_cast<float *>(&rgb);
464 #else
465  pointcloud->points[(size_t)depth_pixel_index].r = (uint8_t)96;
466  pointcloud->points[(size_t)depth_pixel_index].g = (uint8_t)157;
467  pointcloud->points[(size_t)depth_pixel_index].b = (uint8_t)198;
468 #endif
469  } else {
470  unsigned int i_ = (unsigned int)color_pixel[1];
471  unsigned int j_ = (unsigned int)color_pixel[0];
472 
473 #if PCL_VERSION_COMPARE(<, 1, 1, 0)
474  uint32_t rgb = 0;
475  if (swap_rgb) {
476  rgb =
477  (static_cast<uint32_t>(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]) |
478  static_cast<uint32_t>(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]) << 8 |
479  static_cast<uint32_t>(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2]) << 16);
480  } else {
481  rgb = (static_cast<uint32_t>(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel]) << 16 |
482  static_cast<uint32_t>(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1]) << 8 |
483  static_cast<uint32_t>(p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2]));
484  }
485 
486  pointcloud->points[(size_t)(i * depth_width + j)].rgb = *reinterpret_cast<float *>(&rgb);
487 #else
488  if (swap_rgb) {
489  pointcloud->points[(size_t)depth_pixel_index].b =
490  (uint32_t)p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel];
491  pointcloud->points[(size_t)depth_pixel_index].g =
492  (uint32_t)p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1];
493  pointcloud->points[(size_t)depth_pixel_index].r =
494  (uint32_t)p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2];
495  } else {
496  pointcloud->points[(size_t)depth_pixel_index].r =
497  (uint32_t)p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel];
498  pointcloud->points[(size_t)depth_pixel_index].g =
499  (uint32_t)p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 1];
500  pointcloud->points[(size_t)depth_pixel_index].b =
501  (uint32_t)p_color_frame[(i_ * (unsigned int)color_width + j_) * nb_color_pixel + 2];
502  }
503 #endif
504  }
505  }
506  }
507 }
508 
509 #endif
510 
/*!
  Get the extrinsic transformation between two streams as a ViSP
  homogeneous matrix (rotation + translation from \e from to \e to).
  \param from : Source stream.
  \param to : Destination stream.
*/
vpHomogeneousMatrix vpRealSense2::getTransformation(const rs2_stream &from, const rs2_stream &to) const
{
  auto from_stream = m_pipelineProfile.get_stream(from);
  auto to_stream = m_pipelineProfile.get_stream(to);
  rs2_extrinsics extrinsics = from_stream.get_extrinsics_to(to_stream);

  // NOTE(review): the declarations of `t` and `R` are missing from this
  // extract; presumably `vpTranslationVector t; vpRotationMatrix R;` —
  // confirm against the repository source.
  for (unsigned int i = 0; i < 3; i++) {
    t[i] = extrinsics.translation[i];
    for (unsigned int j = 0; j < 3; j++)
      R[i][j] = extrinsics.rotation[i * 3 + j];
  }

  vpHomogeneousMatrix to_M_from(t, R);
  return to_M_from;
}
533 
537 void vpRealSense2::open(const rs2::config &cfg)
538 {
539  m_pipelineProfile = m_pipe.start(cfg);
540 
541  rs2::device dev = m_pipelineProfile.get_device();
542 
543  // Go over the device's sensors
544  for (rs2::sensor &sensor : dev.query_sensors()) {
545  // Check if the sensor if a depth sensor
546  if (rs2::depth_sensor dpt = sensor.as<rs2::depth_sensor>()) {
547  m_depthScale = dpt.get_depth_scale();
548  }
549  }
550 
551  auto depth_stream = m_pipelineProfile.get_stream(RS2_STREAM_DEPTH).as<rs2::video_stream_profile>();
552  m_depthIntrinsics = depth_stream.get_intrinsics();
553 
554  auto color_stream = m_pipelineProfile.get_stream(RS2_STREAM_COLOR);
555  m_colorIntrinsics = m_pipelineProfile.get_stream(RS2_STREAM_COLOR).as<rs2::video_stream_profile>().get_intrinsics();
556  m_depth2ColorExtrinsics = depth_stream.get_extrinsics_to(color_stream);
557 }
558 
namespace
{
// Helper functions to print information about the RealSense device

// Pretty-print an extrinsics struct (3x3 rotation + translation) to a stream.
void print(const rs2_extrinsics &extrinsics, std::ostream &os)
{
  std::stringstream ss;
  ss << "Rotation Matrix:\n";

  // NOTE(review): indexing [j * 3 + i] presumably transposes the storage
  // order for display (librealsense stores rotation column-major) — confirm
  // against the rs2_extrinsics documentation.
  for (auto i = 0; i < 3; ++i) {
    for (auto j = 0; j < 3; ++j) {
      ss << std::left << std::setw(15) << std::setprecision(5) << extrinsics.rotation[j * 3 + i];
    }
    ss << std::endl;
  }

  ss << "\nTranslation Vector: ";
  for (size_t i = 0; i < sizeof(extrinsics.translation) / sizeof(extrinsics.translation[0]); ++i)
    ss << std::setprecision(15) << extrinsics.translation[i] << " ";

  os << ss.str() << "\n\n";
}

// Pretty-print an intrinsics struct (resolution, principal point, focal
// lengths, distortion model and coefficients) to a stream.
void print(const rs2_intrinsics &intrinsics, std::ostream &os)
{
  std::stringstream ss;
  ss << std::left << std::setw(14) << "Width: "
     << "\t" << intrinsics.width << "\n"
     << std::left << std::setw(14) << "Height: "
     << "\t" << intrinsics.height << "\n"
     << std::left << std::setw(14) << "PPX: "
     << "\t" << std::setprecision(15) << intrinsics.ppx << "\n"
     << std::left << std::setw(14) << "PPY: "
     << "\t" << std::setprecision(15) << intrinsics.ppy << "\n"
     << std::left << std::setw(14) << "Fx: "
     << "\t" << std::setprecision(15) << intrinsics.fx << "\n"
     << std::left << std::setw(14) << "Fy: "
     << "\t" << std::setprecision(15) << intrinsics.fy << "\n"
     << std::left << std::setw(14) << "Distortion: "
     << "\t" << rs2_distortion_to_string(intrinsics.model) << "\n"
     << std::left << std::setw(14) << "Coeffs: ";

  for (size_t i = 0; i < sizeof(intrinsics.coeffs) / sizeof(intrinsics.coeffs[0]); ++i)
    ss << "\t" << std::setprecision(15) << intrinsics.coeffs[i] << " ";

  os << ss.str() << "\n\n";
}

// Fetch intrinsics, swallowing any exception (some profiles expose none);
// on failure the output argument is left unchanged.
void safe_get_intrinsics(const rs2::video_stream_profile &profile, rs2_intrinsics &intrinsics)
{
  try {
    intrinsics = profile.get_intrinsics();
  } catch (...) {
  }
}

// Field-wise equality for intrinsics; coefficient array compared bytewise.
bool operator==(const rs2_intrinsics &lhs, const rs2_intrinsics &rhs)
{
  return lhs.width == rhs.width && lhs.height == rhs.height && lhs.ppx == rhs.ppx && lhs.ppy == rhs.ppy &&
         lhs.fx == rhs.fx && lhs.fy == rhs.fy && lhs.model == rhs.model &&
         !std::memcmp(lhs.coeffs, rhs.coeffs, sizeof(rhs.coeffs));
}

// Join a set of formats into a "fmt1/fmt2/..." string (no trailing slash).
std::string get_str_formats(const std::set<rs2_format> &formats)
{
  std::stringstream ss;
  for (auto format = formats.begin(); format != formats.end(); ++format) {
    // Append "/" after every element except the last one.
    ss << *format << ((format != formats.end()) && (next(format) == formats.end()) ? "" : "/");
  }
  return ss.str();
}

// Key type identifying a stream at a given resolution (used as map key below).
struct stream_and_resolution {
  rs2_stream stream;
  int stream_index;
  int width;
  int height;
  std::string stream_name;

  // Lexicographic ordering so the struct can key a std::map.
  bool operator<(const stream_and_resolution &obj) const
  {
    return (std::make_tuple(stream, stream_index, width, height) <
            std::make_tuple(obj.stream, obj.stream_index, obj.width, obj.height));
  }
};

// Key type identifying a stream by type and index (used as map key below).
struct stream_and_index {
  rs2_stream stream;
  int stream_index;

  // Lexicographic ordering so the struct can key a std::map.
  bool operator<(const stream_and_index &obj) const
  {
    return (std::make_tuple(stream, stream_index) < std::make_tuple(obj.stream, obj.stream_index));
  }
};
} // anonymous namespace
654 
/*!
  Dump a human-readable description of the RealSense device behind \e rs:
  identification, supported info fields and options, stream profiles, and
  all available intrinsics/extrinsics.
*/
std::ostream &operator<<(std::ostream &os, const vpRealSense2 &rs)
{
  // Device identification header.
  os << std::left << std::setw(30) << "Device Name" << std::setw(20) << "Serial Number" << std::setw(20)
     << "Firmware Version" << std::endl;

  rs2::device dev = rs.m_pipelineProfile.get_device();
  os << std::left << std::setw(30) << dev.get_info(RS2_CAMERA_INFO_NAME) << std::setw(20)
     << dev.get_info(RS2_CAMERA_INFO_SERIAL_NUMBER) << std::setw(20) << dev.get_info(RS2_CAMERA_INFO_FIRMWARE_VERSION)
     << std::endl;

  // Show which options are supported by this device
  os << " Device info: \n";
  for (auto j = 0; j < RS2_CAMERA_INFO_COUNT; ++j) {
    auto param = static_cast<rs2_camera_info>(j);
    if (dev.supports(param))
      os << "    " << std::left << std::setw(30) << rs2_camera_info_to_string(rs2_camera_info(param)) << ": \t"
         << dev.get_info(param) << "\n";
  }

  os << "\n";

  // Per-sensor option ranges (min / max / step / default).
  for (auto &&sensor : dev.query_sensors()) {
    os << "Options for " << sensor.get_info(RS2_CAMERA_INFO_NAME) << std::endl;

    os << std::setw(55) << " Supported options:" << std::setw(10) << "min" << std::setw(10) << " max" << std::setw(6)
       << " step" << std::setw(10) << " default" << std::endl;
    for (auto j = 0; j < RS2_OPTION_COUNT; ++j) {
      auto opt = static_cast<rs2_option>(j);
      if (sensor.supports(opt)) {
        auto range = sensor.get_option_range(opt);
        os << "    " << std::left << std::setw(50) << opt << " : " << std::setw(5) << range.min << "... "
           << std::setw(12) << range.max << std::setw(6) << range.step << std::setw(10) << range.def << "\n";
      }
    }

    os << "\n";
  }

  // Per-sensor supported stream profiles (resolution, fps, format).
  for (auto &&sensor : dev.query_sensors()) {
    os << "Stream Profiles supported by " << sensor.get_info(RS2_CAMERA_INFO_NAME) << "\n";

    os << std::setw(55) << " Supported modes:" << std::setw(10) << "stream" << std::setw(10) << " resolution"
       << std::setw(6) << " fps" << std::setw(10) << " format"
       << "\n";
    // Show which streams are supported by this device
    for (auto &&profile : sensor.get_stream_profiles()) {
      if (auto video = profile.as<rs2::video_stream_profile>()) {
        os << "    " << profile.stream_name() << "\t  " << video.width() << "x" << video.height() << "\t@ "
           << profile.fps() << "Hz\t" << profile.format() << "\n";
      } else {
        os << "    " << profile.stream_name() << "\t@ " << profile.fps() << "Hz\t" << profile.format() << "\n";
      }
    }

    os << "\n";
  }

  // Collect, per stream+resolution, the distinct intrinsics and the set of
  // formats that share each of them.
  std::map<stream_and_index, rs2::stream_profile> streams;
  std::map<stream_and_resolution, std::vector<std::pair<std::set<rs2_format>, rs2_intrinsics> > > intrinsics_map;
  for (auto &&sensor : dev.query_sensors()) {
    // Intrinsics
    for (auto &&profile : sensor.get_stream_profiles()) {
      if (auto video = profile.as<rs2::video_stream_profile>()) {
        if (streams.find(stream_and_index{profile.stream_type(), profile.stream_index()}) == streams.end()) {
          streams[stream_and_index{profile.stream_type(), profile.stream_index()}] = profile;
        }

        rs2_intrinsics intrinsics{};
        stream_and_resolution stream_res{profile.stream_type(), profile.stream_index(), video.width(), video.height(),
                                         profile.stream_name()};
        safe_get_intrinsics(video, intrinsics);
        auto it = std::find_if(
            (intrinsics_map[stream_res]).begin(), (intrinsics_map[stream_res]).end(),
            [&](const std::pair<std::set<rs2_format>, rs2_intrinsics> &kvp) { return intrinsics == kvp.second; });
        if (it == (intrinsics_map[stream_res]).end()) {
          (intrinsics_map[stream_res]).push_back({{profile.format()}, intrinsics});
        } else {
          it->first.insert(profile.format()); // If the intrinsics are equals,
                                              // add the profile format to
                                              // format set
        }
      }
    }
  }

  os << "Provided Intrinsic:\n";
  for (auto &kvp : intrinsics_map) {
    auto stream_res = kvp.first;
    for (auto &intrinsics : kvp.second) {
      auto formats = get_str_formats(intrinsics.first);
      os << "Intrinsic of \"" << stream_res.stream_name << "\"\t  " << stream_res.width << "x" << stream_res.height
         << "\t  " << formats << "\n";
      // A default-constructed (all-zero) intrinsics means the profile
      // exposed none (see safe_get_intrinsics above).
      if (intrinsics.second == rs2_intrinsics{}) {
        os << "Intrinsic NOT available!\n\n";
      } else {
        print(intrinsics.second, os);
      }
    }
  }

  // Print Extrinsics: every ordered pair of discovered streams.
  os << "\nProvided Extrinsic:\n";
  for (auto kvp1 = streams.begin(); kvp1 != streams.end(); ++kvp1) {
    for (auto kvp2 = streams.begin(); kvp2 != streams.end(); ++kvp2) {
      os << "Extrinsic from \"" << kvp1->second.stream_name() << "\"\t  "
         << "To"
         << "\t  \"" << kvp2->second.stream_name() << "\"\n";
      auto extrinsics = kvp1->second.get_extrinsics_to(kvp2->second);
      print(extrinsics, os);
    }
  }

  return os;
}
789 
790 #elif !defined(VISP_BUILD_SHARED_LIBS)
791 // Work arround to avoid warning: libvisp_sensor.a(vpRealSense2.cpp.o) has no
792 // symbols
793 void dummy_vpRealSense2(){};
794 #endif
static void BGRToRGBa(unsigned char *bgr, unsigned char *rgba, unsigned int width, unsigned int height, bool flip=false)
rs2_intrinsics getIntrinsics(const rs2_stream &stream) const
static void RGBToGrey(unsigned char *rgb, unsigned char *grey, unsigned int size)
Implementation of an homogeneous matrix and operations on such kind of matrices.
static void RGBToRGBa(unsigned char *rgb, unsigned char *rgba, unsigned int size)
static void BGRToGrey(unsigned char *bgr, unsigned char *grey, unsigned int width, unsigned int height, bool flip=false)
Type * bitmap
points toward the bitmap
Definition: vpImage.h:133
virtual ~vpRealSense2()
error that can be emited by ViSP classes.
Definition: vpException.h:71
float m_depthScale
Definition: vpRealSense2.h:341
void open(const rs2::config &cfg=rs2::config())
rs2_intrinsics m_depthIntrinsics
Definition: vpRealSense2.h:340
Definition: vpRGBa.h:66
Implementation of a rotation matrix and operations on such kind of matrices.
void initPersProjWithoutDistortion(const double px, const double py, const double u0, const double v0)
rs2_intrinsics m_colorIntrinsics
Definition: vpRealSense2.h:338
rs2::pipeline_profile m_pipelineProfile
Definition: vpRealSense2.h:345
void getGreyFrame(const rs2::frame &frame, vpImage< unsigned char > &grey)
Generic class defining intrinsic camera parameters.
rs2_extrinsics m_depth2ColorExtrinsics
Definition: vpRealSense2.h:339
void resize(const unsigned int h, const unsigned int w)
resize the image : Image initialization
Definition: vpImage.h:856
void acquire(vpImage< unsigned char > &grey)
rs2::pointcloud m_pointcloud
Definition: vpRealSense2.h:346
void getPointcloud(const rs2::depth_frame &depth_frame, std::vector< vpColVector > &pointcloud)
rs2::pipeline m_pipe
Definition: vpRealSense2.h:344
Implementation of column vector and the associated operations.
Definition: vpColVector.h:72
rs2::points m_points
Definition: vpRealSense2.h:347
vpCameraParameters getCameraParameters(const rs2_stream &stream, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithDistortion) const
float m_invalidDepthValue
Definition: vpRealSense2.h:342
void getNativeFrameData(const rs2::frame &frame, unsigned char *const data)
friend VISP_EXPORT std::ostream & operator<<(std::ostream &os, const vpRealSense2 &rs)
vpHomogeneousMatrix getTransformation(const rs2_stream &from, const rs2_stream &to) const
static void RGBaToGrey(unsigned char *rgba, unsigned char *grey, unsigned int size)
void getColorFrame(const rs2::frame &frame, vpImage< vpRGBa > &color)
Class that consider the case of a translation vector.
void initPersProjWithDistortion(const double px, const double py, const double u0, const double v0, const double kud, const double kdu)