Visual Servoing Platform  version 3.6.1 under development (2025-01-24)
tutorial-dnn-object-detection-live.cpp
#include <iostream>

#include <visp3/core/vpConfig.h>

// Check if std::c++17 or higher
#if defined(HAVE_OPENCV_DNN) && defined(HAVE_OPENCV_VIDEOIO) && \
  ((__cplusplus >= 201703L) || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L)))

#include <visp3/core/vpIoTools.h>
#include <visp3/detection/vpDetectorDNNOpenCV.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/gui/vpDisplayX.h>

#include <opencv2/videoio.hpp>

#ifdef VISP_HAVE_NLOHMANN_JSON
#include VISP_NLOHMANN_JSON(json.hpp)
using json = nlohmann::json;
#endif

#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif

typedef enum
{
  DETECTION_CONTAINER_MAP = 0,
  DETECTION_CONTAINER_VECTOR = 1,
  DETECTION_CONTAINER_BOTH = 2,
  DETECTION_CONTAINER_COUNT = 3
} ChosenDetectionContainer;

std::string chosenDetectionContainerToString(const ChosenDetectionContainer &choice)
{
  switch (choice) {
  case DETECTION_CONTAINER_MAP:
    return "map";
  case DETECTION_CONTAINER_VECTOR:
    return "vector";
  case DETECTION_CONTAINER_BOTH:
    return "both";
  default:
    break;
  }
  return "unknown";
}

ChosenDetectionContainer chosenDetectionContainerFromString(const std::string &choiceStr)
{
  ChosenDetectionContainer choice(DETECTION_CONTAINER_COUNT);
  bool hasFoundMatch = false;
  for (unsigned int i = 0; i < DETECTION_CONTAINER_COUNT && !hasFoundMatch; i++) {
    ChosenDetectionContainer candidate = (ChosenDetectionContainer)i;
    hasFoundMatch = (chosenDetectionContainerToString(candidate) == vpIoTools::toLowerCase(choiceStr));
    if (hasFoundMatch) {
      choice = candidate;
    }
  }
  return choice;
}

std::string getAvailableDetectionContainer()
{
  std::string availableContainers("< ");
  for (unsigned int i = 0; i < DETECTION_CONTAINER_COUNT - 1; i++) {
    std::string name = chosenDetectionContainerToString((ChosenDetectionContainer)i);
    availableContainers += name + " , ";
  }
  availableContainers +=
    chosenDetectionContainerToString((ChosenDetectionContainer)(DETECTION_CONTAINER_COUNT - 1)) + " >";
  return availableContainers;
}
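// Note: with the three containers defined above, getAvailableDetectionContainer() builds the
// string "< map , vector , both >" that is reused in the help message of main() below.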

int main(int argc, const char *argv[])
{
  try {
    std::string opt_device("0");
    std::string opt_dnn_model = "opencv_face_detector_uint8.pb";
    std::string opt_dnn_config = "opencv_face_detector.pbtxt";
    std::string opt_dnn_framework = "none";
    std::string opt_dnn_label_file = "";
    vpDetectorDNNOpenCV::DNNResultsParsingType opt_dnn_type = vpDetectorDNNOpenCV::RESNET_10;
    int opt_dnn_width = 300, opt_dnn_height = 300;
    double opt_dnn_meanR = 104.0, opt_dnn_meanG = 177.0, opt_dnn_meanB = 123.0;
    double opt_dnn_scale_factor = 1.0;
    bool opt_dnn_swapRB = false;
    bool opt_step_by_step = false;
    float opt_dnn_confThresh = 0.5f;
    float opt_dnn_nmsThresh = 0.4f;
    double opt_dnn_filterThresh = 0.25;
    ChosenDetectionContainer opt_dnn_containerType = DETECTION_CONTAINER_MAP;
    bool opt_verbose = false;
    std::string opt_input_json = "";
    std::string opt_output_json = "";

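    // The mean, scale and swapRB values above follow the usual OpenCV blobFromImage convention,
    // i.e. the network input is presumably computed as (pixel - mean) * scale, with an optional
    // swap of the R and B channels and a resize to width x height.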
    for (int i = 1; i < argc; i++) {
      if (std::string(argv[i]) == "--device" && i + 1 < argc) {
        opt_device = std::string(argv[++i]);
      }
      else if (std::string(argv[i]) == "--step-by-step") {
        opt_step_by_step = true;
      }
      else if (std::string(argv[i]) == "--model" && i + 1 < argc) {
        opt_dnn_model = std::string(argv[++i]);
      }
      else if (std::string(argv[i]) == "--type" && i + 1 < argc) {
        opt_dnn_type = vpDetectorDNNOpenCV::dnnResultsParsingTypeFromString(std::string(argv[++i]));
      }
      else if (std::string(argv[i]) == "--config" && i + 1 < argc) {
        opt_dnn_config = std::string(argv[++i]);
        if (opt_dnn_config.find("none") != std::string::npos) {
          opt_dnn_config = std::string();
        }
      }
      else if (std::string(argv[i]) == "--framework" && i + 1 < argc) {
        opt_dnn_framework = std::string(argv[++i]);
        if (opt_dnn_framework.find("none") != std::string::npos) {
          opt_dnn_framework = std::string();
        }
      }
      else if (std::string(argv[i]) == "--width" && i + 1 < argc) {
        opt_dnn_width = atoi(argv[++i]);
      }
      else if (std::string(argv[i]) == "--height" && i + 1 < argc) {
        opt_dnn_height = atoi(argv[++i]);
      }
      else if (std::string(argv[i]) == "--mean" && i + 3 < argc) {
        opt_dnn_meanR = atof(argv[++i]);
        opt_dnn_meanG = atof(argv[++i]);
        opt_dnn_meanB = atof(argv[++i]);
      }
      else if (std::string(argv[i]) == "--scale" && i + 1 < argc) {
        opt_dnn_scale_factor = atof(argv[++i]);
      }
      else if (std::string(argv[i]) == "--swapRB") {
        opt_dnn_swapRB = true;
      }
      else if (std::string(argv[i]) == "--confThresh" && i + 1 < argc) {
        opt_dnn_confThresh = (float)atof(argv[++i]);
      }
      else if (std::string(argv[i]) == "--nmsThresh" && i + 1 < argc) {
        opt_dnn_nmsThresh = (float)atof(argv[++i]);
      }
      else if (std::string(argv[i]) == "--filterThresh" && i + 1 < argc) {
        opt_dnn_filterThresh = atof(argv[++i]);
      }
      else if (std::string(argv[i]) == "--labels" && i + 1 < argc) {
        opt_dnn_label_file = std::string(argv[++i]);
      }
      else if (std::string(argv[i]) == "--container" && i + 1 < argc) {
        opt_dnn_containerType = chosenDetectionContainerFromString(std::string(argv[++i]));
      }
      else if (std::string(argv[i]) == "--input-json" && i + 1 < argc) {
        opt_input_json = std::string(argv[++i]);
      }
      else if (std::string(argv[i]) == "--output-json" && i + 1 < argc) {
        opt_output_json = std::string(argv[++i]);
      }
      else if (std::string(argv[i]) == "--verbose" || std::string(argv[i]) == "-v") {
        opt_verbose = true;
      }
      else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
        std::cout << "\nSYNOPSIS " << std::endl
          << argv[0] << " [--device <video>]"
          << " [--model <dnn weights file>]"
          << " [--type <dnn type>]"
          << " [--config <dnn config file>]"
          << " [--framework <name>]"
          << " [--width <blob width>] [--height <blob height>]"
          << " [--mean <meanR meanG meanB>]"
          << " [--scale <scale factor>]"
          << " [--swapRB]"
          << " [--confThresh <threshold>]"
          << " [--nmsThresh <threshold>]"
          << " [--filterThresh <threshold>]"
          << " [--labels <file>]"
          << " [--container <type>]"
          << " [--input-json <path_to_input_json>]"
          << " [--output-json <path_to_output_json>]"
          << " [--step-by-step]"
          << " [--verbose, -v]"
          << " [--help, -h]" << std::endl;
        std::cout << "\nOPTIONS " << std::endl
          << " --device <video>" << std::endl
          << " Camera device number or video name used to stream images." << std::endl
          << " To use the first camera found on the bus, set 0. On Ubuntu setting 0" << std::endl
          << " will use /dev/video0 device. To use a video simply put the name of" << std::endl
          << " the video, like \"path/my-video.mp4\" or \"path/image-%04d.png\"" << std::endl
          << " if your video is a sequence of images." << std::endl
          << " Default: " << opt_device << std::endl
          << std::endl
          << " --model <dnn weights file>" << std::endl
          << " Path to dnn network trained weights." << std::endl
          << " Default: " << opt_dnn_model << std::endl
          << std::endl
          << " --type <dnn type>" << std::endl
          << " Type of dnn network. Admissible values are in " << std::endl
          << " " << vpDetectorDNNOpenCV::getAvailableDnnResultsParsingTypes() << std::endl
          << " Default: " << opt_dnn_type << std::endl
          << std::endl
          << " --config <dnn config file>" << std::endl
          << " Path to dnn network config file or \"none\" not to use one. " << std::endl
          << " Default: " << opt_dnn_config << std::endl
          << std::endl
          << " --framework <name>" << std::endl
          << " Framework name or \"none\" not to specify one. " << std::endl
          << " Default: " << opt_dnn_framework << std::endl
          << std::endl
          << " --width <blob width>" << std::endl
          << " Input images will be resized to this width. " << std::endl
          << " Default: " << opt_dnn_width << std::endl
          << std::endl
          << " --height <blob height>" << std::endl
          << " Input images will be resized to this height. " << std::endl
          << " Default: " << opt_dnn_height << std::endl
          << std::endl
          << " --mean <meanR meanG meanB>" << std::endl
          << " Mean RGB subtraction values. " << std::endl
          << " Default: " << opt_dnn_meanR << " " << opt_dnn_meanG << " " << opt_dnn_meanB << std::endl
          << std::endl
          << " --scale <scale factor>" << std::endl
          << " Scale factor used to normalize the range of pixel values. " << std::endl
          << " Default: " << opt_dnn_scale_factor << std::endl
          << std::endl
          << " --swapRB" << std::endl
          << " When used, this option swaps the Red and Blue channels. " << std::endl
          << std::endl
          << " --confThresh <threshold>" << std::endl
          << " Confidence threshold. " << std::endl
          << " Default: " << opt_dnn_confThresh << std::endl
          << std::endl
          << " --nmsThresh <threshold>" << std::endl
          << " Non maximum suppression threshold. " << std::endl
          << " Default: " << opt_dnn_nmsThresh << std::endl
          << std::endl
          << " --filterThresh <threshold>" << std::endl
          << " Filter threshold. Set 0. to disable." << std::endl
          << " Default: " << opt_dnn_filterThresh << std::endl
          << std::endl
          << " --labels <file>" << std::endl
          << " Path to label file either in txt or yaml format. Keep empty if unknown." << std::endl
          << " Default: \"" << opt_dnn_label_file << "\"" << std::endl
          << std::endl
          << " --container <type>" << std::endl
          << " Container type in " << getAvailableDetectionContainer() << std::endl
          << " Default: " << chosenDetectionContainerToString(opt_dnn_containerType) << std::endl
          << std::endl
          << " --input-json <path_to_input_json>" << std::endl
          << " Input JSON file used to configure the DNN. If set, the other arguments override the values read from the JSON file." << std::endl
          << " Default: empty" << std::endl
          << std::endl
          << " --output-json <path_to_output_json>" << std::endl
          << " Output JSON file where the DNN configuration will be saved. If empty, the configuration is not saved." << std::endl
          << " Default: empty" << std::endl
          << std::endl
          << " --step-by-step" << std::endl
          << " Enable step by step mode, waiting for a user click to process the next image." << std::endl
          << std::endl
          << " --verbose, -v" << std::endl
          << " Enable verbose mode." << std::endl
          << std::endl
          << " --help, -h" << std::endl
          << " Display this help message." << std::endl
          << std::endl;
        return EXIT_SUCCESS;
      }
    }
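    // Illustrative invocation combining the options parsed above (binary name, paths and values
    // are placeholders, not a prescribed setup):
    //   ./tutorial-dnn-object-detection-live --device 0 \
    //     --model opencv_face_detector_uint8.pb --config opencv_face_detector.pbtxt \
    //     --width 300 --height 300 --mean 104 177 123 --confThresh 0.5 --nmsThresh 0.4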

    std::cout << "Video device : " << opt_device << std::endl;
    std::cout << "Label file (optional): " << (opt_dnn_label_file.empty() ? "None" : opt_dnn_label_file) << std::endl;

    cv::VideoCapture capture;
    bool hasCaptureOpeningSucceeded;
    if (vpMath::isNumber(opt_device)) {
      hasCaptureOpeningSucceeded = capture.open(std::atoi(opt_device.c_str()));
    }
    else {
      hasCaptureOpeningSucceeded = capture.open(opt_device);
    }
    if (!hasCaptureOpeningSucceeded) {
      std::cout << "Capture from camera: " << opt_device << " didn't work" << std::endl;
      return EXIT_FAILURE;
    }

    vpImage<vpRGBa> I;
#if defined(VISP_HAVE_X11)
    vpDisplayX d;
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI d;
#elif defined(HAVE_OPENCV_HIGHGUI)
    vpDisplayOpenCV d;
#endif
    d.setDownScalingFactor(vpDisplay::SCALE_AUTO);

    if (!opt_dnn_label_file.empty() && !vpIoTools::checkFilename(opt_dnn_label_file)) {
      throw(vpException(vpException::fatalError,
                        "The file containing the classes labels \"" + opt_dnn_label_file + "\" does not exist !"));
    }

    vpDetectorDNNOpenCV dnn;
#ifdef VISP_HAVE_NLOHMANN_JSON
    if (!opt_input_json.empty()) {
      dnn.initFromJSON(opt_input_json);
    }
#else
    if (!opt_input_json.empty()) {
      std::cerr << "Error: NLOHMANN JSON library is not installed, please install it following ViSP documentation to configure the vpDetectorDNNOpenCV from a JSON file." << std::endl;
      return EXIT_FAILURE;
    }
#endif
    else {
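      // NetConfig groups every DNN setting in one object. As passed below, the arguments are:
      // detection confidence threshold, NMS threshold, label file, input blob size, filter
      // threshold, mean subtraction values, scale factor, swapRB flag, parsing type, and the
      // model / config / framework paths given on the command line.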
      vpDetectorDNNOpenCV::NetConfig netConfig(opt_dnn_confThresh, opt_dnn_nmsThresh, opt_dnn_label_file,
                                               cv::Size(opt_dnn_width, opt_dnn_height), opt_dnn_filterThresh,
                                               cv::Scalar(opt_dnn_meanR, opt_dnn_meanG, opt_dnn_meanB),
                                               opt_dnn_scale_factor, opt_dnn_swapRB, opt_dnn_type,
                                               opt_dnn_model, opt_dnn_config, opt_dnn_framework);
      dnn.setNetConfig(netConfig);
    }

    std::cout << dnn.getNetConfig() << std::endl;

#ifdef VISP_HAVE_NLOHMANN_JSON
    if (!opt_output_json.empty()) {
      dnn.saveConfigurationInJSON(opt_output_json);
    }
#else
    if (!opt_output_json.empty()) {
      std::cerr << "Error: NLOHMANN JSON library is not installed, please install it following ViSP documentation to save the configuration in a JSON file." << std::endl;
    }
#endif
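    // Note: a configuration saved with --output-json can presumably be fed back later through
    // --input-json to restore the same DNN settings without retyping the command line options.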

    cv::Mat frame;
    while (true) {
      capture >> frame;
      if (frame.type() == CV_8UC4) {
        // RGBa format is not supported by the class, converting to BGR format
        cv::Mat cpy = frame;
        cv::cvtColor(cpy, frame, cv::COLOR_RGBA2BGR);
      }
      if (frame.empty())
        break;

      if (I.getSize() == 0) {
        vpImageConvert::convert(frame, I);
        d.init(I);
        vpDisplay::setTitle(I, "DNN object detection");
        if (opt_verbose) {
          std::cout << "Process image: " << I.getWidth() << " x " << I.getHeight() << std::endl;
        }
      }
      else {
        vpImageConvert::convert(frame, I);
      }
      if (opt_verbose) {
        std::cout << "Process new image" << std::endl;
      }

      if (opt_dnn_containerType == DETECTION_CONTAINER_MAP || opt_dnn_containerType == DETECTION_CONTAINER_BOTH) {
        double t = vpTime::measureTimeMs();
        std::map<std::string, std::vector<vpDetectorDNNOpenCV::DetectedFeatures2D> > detections;
        dnn.detect(frame, detections);
        t = vpTime::measureTimeMs() - t;

        for (auto key_val : detections) {
          if (opt_verbose) {
            std::cout << " Class name : " << key_val.first << std::endl;
          }
          for (vpDetectorDNNOpenCV::DetectedFeatures2D detection : key_val.second) {
            if (opt_verbose) {
              std::cout << " Bounding box : " << detection.getBoundingBox() << std::endl;
              std::cout << " Class Id : " << detection.getClassId() << std::endl;
              if (detection.getClassName())
                std::cout << " Class name : " << detection.getClassName().value() << std::endl;
              std::cout << " Confidence score: " << detection.getConfidenceScore() << std::endl;
            }
            detection.display(I);
          }
        }

        std::ostringstream oss_map;
        oss_map << "Detection time (map): " << t << " ms";
        if (opt_verbose) {
          // Displaying timing result in console
          std::cout << " " << oss_map.str() << std::endl;
        }
        // Displaying timing result on the image
        vpDisplay::displayText(I, 60, 20, oss_map.str(), vpColor::red);
      }
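      // With the map container the detections are grouped by class name, whereas the vector
      // container below returns a single flat list of detections.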

      if (opt_dnn_containerType == DETECTION_CONTAINER_VECTOR || opt_dnn_containerType == DETECTION_CONTAINER_BOTH) {
        double t_vector = vpTime::measureTimeMs();
        std::vector<vpDetectorDNNOpenCV::DetectedFeatures2D> detections_vec;
        dnn.detect(frame, detections_vec);
        t_vector = vpTime::measureTimeMs() - t_vector;

        for (auto detection : detections_vec) {
          if (opt_verbose) {
            std::cout << " Bounding box : " << detection.getBoundingBox() << std::endl;
            std::cout << " Class Id : " << detection.getClassId() << std::endl;
            std::optional<std::string> classname_opt = detection.getClassName();
            std::cout << " Class name : " << (classname_opt ? *classname_opt : "Not known") << std::endl;
            std::cout << " Confidence score: " << detection.getConfidenceScore() << std::endl;
          }
          detection.display(I);
        }

        std::ostringstream oss_vec;
        oss_vec << "Detection time (vector): " << t_vector << " ms";
        if (opt_verbose) {
          // Displaying timing result in console
          std::cout << " " << oss_vec.str() << std::endl;
        }
        // Displaying timing result on the image
        vpDisplay::displayText(I, 80, 20, oss_vec.str(), vpColor::red);
      }

      // UI display
      if (opt_step_by_step) {
        vpDisplay::displayText(I, 20, 20, "Left click to display next image", vpColor::red);
      }
      vpDisplay::displayText(I, 40, 20, "Right click to quit", vpColor::red);

      vpDisplay::flush(I);
      vpMouseButton::vpMouseButtonType button;

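      // The third argument of getClick() is the blocking flag: in step-by-step mode the loop
      // waits here for a click, otherwise the click state is only polled and the loop goes on.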
      if (vpDisplay::getClick(I, button, opt_step_by_step)) {
        if (button == vpMouseButton::button1) {
          // Left click => next image
          continue;
        }
        else if (button == vpMouseButton::button3) {
          // Right click => stop the program
          break;
        }
      }
    }

  }
  catch (const vpException &e) {
    std::cout << e.what() << std::endl;
  }
}

#else

int main()
{
#if !defined(HAVE_OPENCV_DNN)
  std::cout << "This tutorial needs OpenCV dnn module that is missing." << std::endl;
#endif
#if !defined(HAVE_OPENCV_VIDEOIO)
  std::cout << "This tutorial needs OpenCV videoio module that is missing." << std::endl;
#endif
#if !((__cplusplus >= 201703L) || (defined(_MSVC_LANG) && (_MSVC_LANG >= 201703L)))
  std::cout << "This tutorial needs std::c++17 standard enabled." << std::endl;
#endif
  return EXIT_SUCCESS;
}

#endif