#include <cstdio>
#include <cstdlib>
#include <cstring>
#include <fstream>
#include <iostream>
#include <memory>
#include <string>

#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_TENSORRT) && defined(VISP_HAVE_OPENCV)

#if defined(HAVE_OPENCV_CUDEV) && defined(HAVE_OPENCV_CUDAWARPING) && defined(HAVE_OPENCV_CUDAARITHM) && \
  defined(HAVE_OPENCV_DNN) && defined(HAVE_OPENCV_VIDEOIO)
#include <visp3/core/vpImageConvert.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpTime.h>
#include <visp3/gui/vpDisplayX.h>

#include <opencv2/videoio.hpp>

// OpenCV CUDA modules used for GPU pre-processing
#include <opencv2/core/cuda.hpp>
#include <opencv2/cudaarithm.hpp>
#include <opencv2/cudawarping.hpp>
#include <opencv2/dnn.hpp>

// CUDA runtime
#include <cuda_runtime_api.h>

// TensorRT and its ONNX parser
#include <NvInfer.h>
#include <NvOnnxParser.h>

#include <sys/stat.h>

#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif
void preprocessImage(cv::Mat &img, float *gpu_input, const nvinfer1::Dims &dims, float meanR, float meanG, float meanB)
{
  if (img.empty()) {
    std::cerr << "Image is empty." << std::endl;
    return;
  }
  cv::cuda::GpuMat gpu_frame;
  // Upload the image to the GPU
  gpu_frame.upload(img);

  // Input dimensions are in NCHW order
  auto input_width = dims.d[3];
  auto input_height = dims.d[2];
  auto channels = dims.d[1];
  auto input_size = cv::Size(input_width, input_height);

  // Resize to the network input size
  cv::cuda::GpuMat resized;
  cv::cuda::resize(gpu_frame, resized, input_size, 0, 0, cv::INTER_NEAREST);

  // Normalize: subtract the mean and scale to [-1, 1]
  cv::cuda::GpuMat flt_image;
  resized.convertTo(flt_image, CV_32FC3);
  cv::cuda::subtract(flt_image, cv::Scalar(meanR, meanG, meanB), flt_image, cv::noArray(), -1);
  cv::cuda::divide(flt_image, cv::Scalar(127.5f, 127.5f, 127.5f), flt_image, 1, -1);

  // Split the interleaved image into planar (CHW) layout, directly into the GPU input buffer
  std::vector<cv::cuda::GpuMat> chw;
  for (int i = 0; i < channels; ++i)
    chw.emplace_back(cv::cuda::GpuMat(input_size, CV_32FC1, gpu_input + i * input_width * input_height));
  cv::cuda::split(flt_image, chw);
}
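// Note on the normalization above: with the default meanR = meanG = meanB = 127.5 and the division
// by 127.5, an 8-bit pixel value p is mapped to (p - 127.5) / 127.5, i.e. into [-1, 1]
// (p = 255 maps to 1.0, p = 0 to -1.0), which SSD-MobileNet-style detection models typically expect.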
// Number of elements described by a nvinfer1::Dims, i.e. the product of its dimensions
size_t getSizeByDim(const nvinfer1::Dims &dims)
{
  size_t size = 1;
  for (int i = 0; i < dims.nbDims; ++i)
    size *= dims.d[i];
  return size;
}
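// Example: for a binding of dimensions {1, 3, 300, 300} (a hypothetical 300x300 RGB input),
// getSizeByDim() returns 1 * 3 * 300 * 300 = 270000 elements.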
std::vector<cv::Rect> postprocessResults(std::vector<void *> buffers, const std::vector<nvinfer1::Dims> &output_dims,
                                         int batch_size, int image_width, int image_height, float confThresh,
                                         float nmsThresh, std::vector<int> &classIds)
{
  std::vector<cv::Rect> m_boxes, m_boxesNMS;
  std::vector<int> m_classIds;
  std::vector<float> m_confidences;
  std::vector<int> m_indices;

  // Copy the network outputs from the GPU to the CPU
  std::vector<std::vector<float> > cpu_outputs;
  for (size_t i = 0; i < output_dims.size(); i++) {
    cpu_outputs.push_back(std::vector<float>(getSizeByDim(output_dims[i]) * batch_size));
    cudaMemcpy(cpu_outputs[i].data(), (float *)buffers[1 + i], cpu_outputs[i].size() * sizeof(float),
               cudaMemcpyDeviceToHost);
  }
  // Detection output has shape (1 x N x C): N candidate boxes, C class scores per box
  int N = output_dims[0].d[1], C = output_dims[0].d[2];

  for (int i = 0; i < N; i++) // for every candidate box
  {
    uint32_t maxClass = 0;
    float maxScore = -1000.0f;

    for (int j = 1; j < C; j++) // skip the background class (classId = 0)
    {
      const float score = cpu_outputs[0][i * C + j];

      if (score < confThresh)
        continue;

      if (score > maxScore) {
        maxScore = score;
        maxClass = j;
      }
    }
    if (maxScore > confThresh) {
      // Box coordinates are normalized; scale them back to the image size
      int left = (int)(cpu_outputs[1][4 * i] * image_width);
      int top = (int)(cpu_outputs[1][4 * i + 1] * image_height);
      int right = (int)(cpu_outputs[1][4 * i + 2] * image_width);
      int bottom = (int)(cpu_outputs[1][4 * i + 3] * image_height);
      int width = right - left + 1;
      int height = bottom - top + 1;

      m_boxes.push_back(cv::Rect(left, top, width, height));
      m_classIds.push_back(maxClass);
      m_confidences.push_back(maxScore);
    }
  }
  // Non-Maximum Suppression to remove overlapping detections
  cv::dnn::NMSBoxes(m_boxes, m_confidences, confThresh, nmsThresh, m_indices);
  m_boxesNMS.resize(m_indices.size());
  for (size_t i = 0; i < m_indices.size(); ++i) {
    int idx = m_indices[i];
    m_boxesNMS[i] = m_boxes[idx];
  }

  classIds = m_classIds;
  return m_boxesNMS;
}
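// Note: postprocessResults() assumes an SSD-style detection head with two output bindings:
// buffers[1] holds the class scores (1 x N x C) and buffers[2] the box coordinates (1 x N x 4),
// where each box is a normalized [left, top, right, bottom]. A model with a different output
// layout would need a different post-processing step.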
// Logger required by the TensorRT API: report errors, internal errors and verbose messages
class Logger : public nvinfer1::ILogger
{
public:
  void log(Severity severity, const char *msg) noexcept
  {
    if ((severity == Severity::kERROR) || (severity == Severity::kINTERNAL_ERROR) || (severity == Severity::kVERBOSE))
      std::cout << msg << std::endl;
  }
} gLogger;
// Deleter that destroys TensorRT objects when the owning smart pointer goes out of scope
struct TRTDestroy
{
  template <class T> void operator()(T *obj) const
  {
    if (obj)
      obj->destroy();
  }
};

template <class T> using TRTUniquePtr = std::unique_ptr<T, TRTDestroy>;
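// TRTUniquePtr wraps the TensorRT objects used below (builder, network, parser, engine, context)
// so that they are released automatically, including on the early-return error paths.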
// Parse the ONNX model and create the TensorRT engine and execution context.
// A serialized engine is cached on disk next to the model to avoid rebuilding it on every run.
bool parseOnnxModel(const std::string &model_path, TRTUniquePtr<nvinfer1::ICudaEngine> &engine,
                    TRTUniquePtr<nvinfer1::IExecutionContext> &context)
{
  char cache_prefix[FILENAME_MAX];
  char cache_path[FILENAME_MAX];

  snprintf(cache_prefix, FILENAME_MAX, "%s", model_path.c_str());
  snprintf(cache_path, FILENAME_MAX, "%s.engine", cache_prefix);

  std::cout << "attempting to open engine cache file " << cache_path << std::endl;
  // If a serialized engine already exists for this model, load and deserialize it
  if (vpIoTools::checkFilename(cache_path)) {
    char *engineStream = nullptr;
    size_t engineSize = 0;

    // Determine the size of the serialized engine
    struct stat filestat;
    stat(cache_path, &filestat);
    engineSize = filestat.st_size;

    // Allocate memory to hold the serialized engine
    engineStream = (char *)malloc(engineSize);

    // Open the engine cache file from disk
    FILE *cacheFile = nullptr;
    cacheFile = fopen(cache_path, "rb");

    // Read the serialized engine into memory
    const size_t bytesRead = fread(engineStream, 1, engineSize, cacheFile);

    if (bytesRead != engineSize) {
      std::cerr << "Error reading serialized engine into memory." << std::endl;
      return false;
    }

    fclose(cacheFile);

    // Deserialize the engine
    TRTUniquePtr<nvinfer1::IRuntime> infer { nvinfer1::createInferRuntime(gLogger) };
    engine.reset(infer->deserializeCudaEngine(engineStream, engineSize, nullptr));
    context.reset(engine->createExecutionContext());

    return true;
  }
  // Otherwise, build the engine from the ONNX model
  if (!vpIoTools::checkFilename(model_path)) {
    std::cerr << "Could not parse ONNX model. File not found" << std::endl;
    return false;
  }
  TRTUniquePtr<nvinfer1::IBuilder> builder { nvinfer1::createInferBuilder(gLogger) };
  TRTUniquePtr<nvinfer1::INetworkDefinition> network {
    builder->createNetworkV2(1U << (uint32_t)nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH) };
  TRTUniquePtr<nvonnxparser::IParser> parser { nvonnxparser::createParser(*network, gLogger) };

  // Parse the ONNX model
  if (!parser->parseFromFile(model_path.c_str(), static_cast<int>(nvinfer1::ILogger::Severity::kINFO))) {
    std::cerr << "ERROR: could not parse the model." << std::endl;
    return false;
  }
  TRTUniquePtr<nvinfer1::IBuilderConfig> config { builder->createBuilderConfig() };
  // Allow TensorRT to use up to 32 MB of GPU memory for tactic selection
  config->setMaxWorkspaceSize(32 << 20);
  // Use FP16 mode if the platform supports it
  if (builder->platformHasFastFp16()) {
    config->setFlag(nvinfer1::BuilderFlag::kFP16);
  }
  // A single image per batch
  builder->setMaxBatchSize(1);

  engine.reset(builder->buildEngineWithConfig(*network, *config));
  context.reset(engine->createExecutionContext());

  TRTUniquePtr<nvinfer1::IHostMemory> serMem { engine->serialize() };
  if (!serMem) {
    std::cout << "Failed to serialize CUDA engine." << std::endl;
    return false;
  }

  const char *serData = (char *)serMem->data();
  const size_t serSize = serMem->size();

  // Allocate memory to store the serialized engine
  char *engineMemory = (char *)malloc(serSize);

  if (!engineMemory) {
    std::cout << "Failed to allocate memory to store CUDA engine." << std::endl;
    return false;
  }

  memcpy(engineMemory, serData, serSize);

  // Write the engine cache file to disk
  FILE *cacheFile = nullptr;
  cacheFile = fopen(cache_path, "wb");

  fwrite(engineMemory, 1, serSize, cacheFile);
  fclose(cacheFile);

  return true;
}
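// Note: the serialized engine is cached next to the ONNX file as "<model>.engine" (see cache_path).
// TensorRT engines are specific to the TensorRT version and the GPU they were built on, so the
// cache file has to be deleted to force a rebuild after upgrading TensorRT or changing GPU.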
int main(int argc, char **argv)
{
  int opt_device = 0;
  unsigned int opt_scale = 1;
  std::string input = "";
  // Paths to the ONNX model and to the class label file; both can be set from the command line
  std::string modelFile = "";
  std::string labelFile = "";
  std::string config = "";
  float meanR = 127.5f, meanG = 127.5f, meanB = 127.5f;
  float confThresh = 0.5f;
  float nmsThresh = 0.4f;
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--device" && i + 1 < argc) {
      opt_device = atoi(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--input" && i + 1 < argc) {
      input = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--model" && i + 1 < argc) {
      modelFile = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--config" && i + 1 < argc) {
      config = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--input-scale" && i + 1 < argc) {
      opt_scale = atoi(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--mean" && i + 3 < argc) {
      meanR = atof(argv[i + 1]);
      meanG = atof(argv[i + 2]);
      meanB = atof(argv[i + 3]);
    }
    else if (std::string(argv[i]) == "--confThresh" && i + 1 < argc) {
      confThresh = (float)atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--nmsThresh" && i + 1 < argc) {
      nmsThresh = (float)atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--labels" && i + 1 < argc) {
      labelFile = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << argv[0]
        << " [--device <camera device number>] [--input <path to image or video>"
           " (camera is used if input is empty)] [--model <path to net trained weights>]"
           " [--config <path to net config file>]"
           " [--input-scale <input scale factor>] [--mean <meanR meanG meanB>]"
           " [--confThresh <confidence threshold>]"
           " [--nmsThresh <NMS threshold>] [--labels <path to label file>]"
        << std::endl;
      return EXIT_SUCCESS;
    }
  }
  std::string model_path(modelFile);

  // Read the class labels, one label per line
  std::vector<std::string> labels;
  if (!labelFile.empty()) {
    std::ifstream f_label(labelFile);
    std::string line;
    while (std::getline(f_label, line)) {
      labels.push_back(line);
    }
  }
  // Build or load the TensorRT engine from the ONNX model
  TRTUniquePtr<nvinfer1::ICudaEngine> engine { nullptr };
  TRTUniquePtr<nvinfer1::IExecutionContext> context { nullptr };
  if (!parseOnnxModel(model_path, engine, context)) {
    std::cout << "Make sure the model file exists. To see available models, please visit: "
                 "\n\twww.github.com/lagadic/visp-images/dnn/object_detection/"
              << std::endl;
    return EXIT_FAILURE;
  }
  std::vector<nvinfer1::Dims> input_dims;
  std::vector<nvinfer1::Dims> output_dims;
  std::vector<void *> buffers(engine->getNbBindings()); // buffers for input and output data

  // Allocate one GPU buffer per binding and record the input/output dimensions
  int batch_size = 1;
  for (int i = 0; i < engine->getNbBindings(); ++i) {
    auto binding_size = getSizeByDim(engine->getBindingDimensions(i)) * batch_size * sizeof(float);
    cudaMalloc(&buffers[i], binding_size);

    if (engine->bindingIsInput(i)) {
      input_dims.emplace_back(engine->getBindingDimensions(i));
    }
    else {
      output_dims.emplace_back(engine->getBindingDimensions(i));
    }
  }
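  // Note: the rest of the code assumes binding 0 is the network input (filled by preprocessImage())
  // and the remaining bindings are outputs (read as buffers[1 + i] in postprocessResults()).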
  if (input_dims.empty() || output_dims.empty()) {
    std::cerr << "Expect at least one input and one output for network" << std::endl;
    return EXIT_FAILURE;
  }
  cv::VideoCapture capture;

  // Open the camera if no input file is given, otherwise open the image or video file
  if (input.empty())
    capture.open(opt_device);
  else
    capture.open(input);

  if (!capture.isOpened()) {
    std::cout << "Failed to open the camera" << std::endl;
    return EXIT_FAILURE;
  }

  // Optionally reduce the capture resolution by the requested scale factor
  int cap_width = (int)capture.get(cv::CAP_PROP_FRAME_WIDTH);
  int cap_height = (int)capture.get(cv::CAP_PROP_FRAME_HEIGHT);
  capture.set(cv::CAP_PROP_FRAME_WIDTH, cap_width / opt_scale);
  capture.set(cv::CAP_PROP_FRAME_HEIGHT, cap_height / opt_scale);
  // Warm up the camera by skipping up to 20 unread frames
  cv::Mat frame;
  int i = 0;
  while ((i++ < 20) && !capture.read(frame)) {
  };

  int height = frame.rows, width = frame.cols;
  std::cout << "Image size: " << width << " x " << height << std::endl;

  std::vector<cv::Rect> boxesNMS;
  std::vector<int> classIds;

  vpImage<vpRGBa> I_color(height, width);
  vpImageConvert::convert(frame, I_color);
  vpDisplayX d(I_color, 10, 10, "TensorRT object detection");

  double start_time, stop_time;
  // Detection loop: exit on a mouse click in the display window
  while (!vpDisplay::getClick(I_color, false)) {
    capture >> frame;
    vpImageConvert::convert(frame, I_color);
    vpDisplay::display(I_color);

    start_time = vpTime::measureTimeMs();
    // Pre-process the frame directly into the GPU input buffer
    preprocessImage(frame, (float *)buffers[0], input_dims[0], meanR, meanG, meanB);

    // Run inference
    context->enqueue(batch_size, buffers.data(), 0, nullptr);

    // Copy the outputs back to the CPU, threshold on confidence and apply NMS
    boxesNMS = postprocessResults(buffers, output_dims, batch_size, width, height, confThresh, nmsThresh, classIds);

    stop_time = vpTime::measureTimeMs();
    vpDisplay::displayText(I_color, 10, 10, std::to_string(stop_time - start_time) + " ms", vpColor::red);
    // Display each detection with its class label
    for (unsigned int i = 0; i < boxesNMS.size(); i++) {
      vpDisplay::displayRectangle(I_color, vpImagePoint(boxesNMS[i].y, boxesNMS[i].x), boxesNMS[i].width,
                                  boxesNMS[i].height, vpColor::red, false, 2);
      vpDisplay::displayText(I_color, vpImagePoint(boxesNMS[i].y - 10, boxesNMS[i].x),
                             labels.empty() ? std::to_string(classIds[i]) : labels[classIds[i]], vpColor::red);
    }
    vpDisplay::flush(I_color);
  }

  // Free the GPU buffers allocated for the network bindings
  for (void *buf : buffers)
    cudaFree(buf);

  return EXIT_SUCCESS;
}
#else
int main()
{
  std::cout << "OpenCV is not built with CUDA." << std::endl;
  return EXIT_SUCCESS;
}
#endif

#else
int main()
{
  std::cout << "ViSP is not built with TensorRT." << std::endl;
  return EXIT_SUCCESS;
}
#endif