#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_TENSORRT) && defined(VISP_HAVE_OPENCV)

#if defined(HAVE_OPENCV_CUDEV) && defined(HAVE_OPENCV_CUDAWARPING) && defined(HAVE_OPENCV_CUDAARITHM) && \
    defined(HAVE_OPENCV_DNN) && defined(HAVE_OPENCV_VIDEOIO)
#include <visp3/core/vpImageConvert.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/gui/vpDisplayX.h>

#include <opencv2/videoio.hpp>

#include <opencv2/core/cuda.hpp>
#include <opencv2/cudaarithm.hpp>
#include <opencv2/cudawarping.hpp>
#include <opencv2/dnn.hpp>

#include <cstdio>
#include <cstring>
#include <fstream>
#include <sys/stat.h>

#include <cuda_runtime_api.h>

#include <NvInfer.h>
#include <NvOnnxParser.h>
#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif
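// Preprocess an image directly on the GPU: upload the frame, resize it to the network input
// resolution, convert it to float, subtract the per-channel mean, scale by 127.5 and split the
// result into planar (CHW) channels written into the TensorRT input buffer gpu_input.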
void preprocessImage(cv::Mat &img, float *gpu_input, const nvinfer1::Dims &dims, float meanR, float meanG,
                     float meanB)
{
  if (img.empty()) {
    std::cerr << "Image is empty." << std::endl;
    return;
  }
  cv::cuda::GpuMat gpu_frame;
  gpu_frame.upload(img);
  auto input_width = dims.d[3];
  auto input_height = dims.d[2];
  auto channels = dims.d[1];
  auto input_size = cv::Size(input_width, input_height);
  cv::cuda::GpuMat resized;
  cv::cuda::resize(gpu_frame, resized, input_size, 0, 0, cv::INTER_NEAREST);
  cv::cuda::GpuMat flt_image;
  resized.convertTo(flt_image, CV_32FC3);
  cv::cuda::subtract(flt_image, cv::Scalar(meanR, meanG, meanB), flt_image, cv::noArray(), -1);
  cv::cuda::divide(flt_image, cv::Scalar(127.5f, 127.5f, 127.5f), flt_image, 1, -1);
  std::vector<cv::cuda::GpuMat> chw;
  for (int i = 0; i < channels; ++i)
    chw.emplace_back(cv::cuda::GpuMat(input_size, CV_32FC1, gpu_input + i * input_width * input_height));
  cv::cuda::split(flt_image, chw);
}
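// Return the number of elements described by a nvinfer1::Dims structure (product of its dimensions).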
size_t getSizeByDim(const nvinfer1::Dims &dims)
{
  size_t size = 1;
  for (int i = 0; i < dims.nbDims; ++i)
    size *= dims.d[i];
  return size;
}
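// Copy the raw network outputs back to the host, keep for each candidate detection the best class
// whose score exceeds the confidence threshold, and filter the resulting boxes with Non-Maximum
// Suppression. The kept boxes are returned and their class ids are written to classIds.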
std::vector<cv::Rect> postprocessResults(std::vector<void *> buffers, const std::vector<nvinfer1::Dims> &output_dims,
                                         int batch_size, int image_width, int image_height, float confThresh,
                                         float nmsThresh, std::vector<int> &classIds)
{
  std::vector<cv::Rect> m_boxes, m_boxesNMS;
  std::vector<int> m_classIds;
  std::vector<float> m_confidences;
  std::vector<int> m_indices;
  std::vector<std::vector<float> > cpu_outputs;
  for (size_t i = 0; i < output_dims.size(); i++) {
    cpu_outputs.push_back(std::vector<float>(getSizeByDim(output_dims[i]) * batch_size));
    cudaMemcpy(cpu_outputs[i].data(), (float *)buffers[1 + i], cpu_outputs[i].size() * sizeof(float),
               cudaMemcpyDeviceToHost);
  }
  int N = output_dims[0].d[1], C = output_dims[0].d[2]; // N candidate detections, C class scores per detection
  for (int i = 0; i < N; i++) // for each detection
  {
    uint32_t maxClass = 0;
    float maxScore = -1000.0f;

    for (int j = 1; j < C; j++) // for each class id (index 0 is skipped)
    {
      const float score = cpu_outputs[0][i * C + j];

      if (score < confThresh)
        continue;

      if (score > maxScore) {
        maxScore = score;
        maxClass = j;
      }
    }

    if (maxScore > confThresh) {
      int left = (int)(cpu_outputs[1][4 * i] * image_width);
      int top = (int)(cpu_outputs[1][4 * i + 1] * image_height);
      int right = (int)(cpu_outputs[1][4 * i + 2] * image_width);
      int bottom = (int)(cpu_outputs[1][4 * i + 3] * image_height);
      int width = right - left + 1;
      int height = bottom - top + 1;

      m_boxes.push_back(cv::Rect(left, top, width, height));
      m_classIds.push_back(maxClass);
      m_confidences.push_back(maxScore);
    }
  }
  // Non-Maximum Suppression
  cv::dnn::NMSBoxes(m_boxes, m_confidences, confThresh, nmsThresh, m_indices);
  m_boxesNMS.resize(m_indices.size());
  for (size_t i = 0; i < m_indices.size(); ++i) {
    int idx = m_indices[i];
    m_boxesNMS[i] = m_boxes[idx];
  }

  classIds = m_classIds;
  return m_boxesNMS;
}
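// TensorRT logger passed to the builder and the runtime: only error, internal-error and verbose
// messages are printed.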
class Logger : public nvinfer1::ILogger
{
public:
  void log(Severity severity, const char *msg) noexcept
  {
    if ((severity == Severity::kERROR) || (severity == Severity::kINTERNAL_ERROR) || (severity == Severity::kVERBOSE))
      std::cout << msg << std::endl;
  }
} gLogger;
// Destroy TensorRT objects owned by a TRTUniquePtr
struct TRTDestroy
{
  template <class T> void operator()(T *obj) const
  {
    if (obj)
      obj->destroy();
  }
};

template <class T> using TRTUniquePtr = std::unique_ptr<T, TRTDestroy>;
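// Initialize the TensorRT engine and execution context for the given ONNX model: if a serialized
// "<model>.engine" cache file exists next to the model it is deserialized, otherwise the ONNX file
// is parsed, the engine is built (FP16 when supported) and serialized back to disk for later runs.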
bool parseOnnxModel(const std::string &model_path, TRTUniquePtr<nvinfer1::ICudaEngine> &engine,
                    TRTUniquePtr<nvinfer1::IExecutionContext> &context)
{
  // Build the path of the serialized engine cache file: "<model>.engine"
  char cache_prefix[FILENAME_MAX];
  char cache_path[FILENAME_MAX];

  snprintf(cache_prefix, FILENAME_MAX, "%s", model_path.c_str());
  snprintf(cache_path, FILENAME_MAX, "%s.engine", cache_prefix);

  std::cout << "attempting to open engine cache file " << cache_path << std::endl;
  if (vpIoTools::checkFilename(cache_path)) // an engine cache already exists: deserialize it
  {
    char *engineStream = nullptr;
    size_t engineSize = 0;

    // determine the file size of the serialized engine
    struct stat filestat;
    stat(cache_path, &filestat);
    engineSize = filestat.st_size;

    // allocate memory to hold the engine
    engineStream = (char *)malloc(engineSize);

    // open the engine cache file from disk
    FILE *cacheFile = nullptr;
    cacheFile = fopen(cache_path, "rb");

    // read the serialized engine into memory
    const size_t bytesRead = fread(engineStream, 1, engineSize, cacheFile);

    if (bytesRead != engineSize) {
      std::cerr << "Error reading serialized engine into memory." << std::endl;
      fclose(cacheFile);
      free(engineStream);
      return false;
    }
    fclose(cacheFile);

    // deserialize the engine and create the execution context
    TRTUniquePtr<nvinfer1::IRuntime> infer { nvinfer1::createInferRuntime(gLogger) };
    engine.reset(infer->deserializeCudaEngine(engineStream, engineSize, nullptr));
    context.reset(engine->createExecutionContext());
    free(engineStream);

    return true;
  }
  if (!vpIoTools::checkFilename(model_path)) { // the ONNX model itself must exist
    std::cerr << "Could not parse ONNX model. File not found" << std::endl;
    return false;
  }
  TRTUniquePtr<nvinfer1::IBuilder> builder { nvinfer1::createInferBuilder(gLogger) };
  TRTUniquePtr<nvinfer1::INetworkDefinition> network {
      builder->createNetworkV2(1U << (uint32_t)nvinfer1::NetworkDefinitionCreationFlag::kEXPLICIT_BATCH) };
  TRTUniquePtr<nvonnxparser::IParser> parser { nvonnxparser::createParser(*network, gLogger) };

  // parse the ONNX model
  if (!parser->parseFromFile(model_path.c_str(), static_cast<int>(nvinfer1::ILogger::Severity::kINFO))) {
    std::cerr << "ERROR: could not parse the model." << std::endl;
    return false;
  }
  TRTUniquePtr<nvinfer1::IBuilderConfig> config { builder->createBuilderConfig() };
  // allow TensorRT to use up to 32 MB of GPU memory for tactic selection
  config->setMaxWorkspaceSize(32 << 20);
  // use FP16 mode if possible
  if (builder->platformHasFastFp16()) {
    config->setFlag(nvinfer1::BuilderFlag::kFP16);
  }

  builder->setMaxBatchSize(1);

  engine.reset(builder->buildEngineWithConfig(*network, *config));
  context.reset(engine->createExecutionContext());
  // serialize the engine and cache it on disk so that the next run can skip the build step
  TRTUniquePtr<nvinfer1::IHostMemory> serMem { engine->serialize() };

  if (!serMem) {
    std::cout << "Failed to serialize CUDA engine." << std::endl;
    return false;
  }

  const char *serData = (char *)serMem->data();
  const size_t serSize = serMem->size();

  // allocate memory to store the bitstream
  char *engineMemory = (char *)malloc(serSize);

  if (!engineMemory) {
    std::cout << "Failed to allocate memory to store CUDA engine." << std::endl;
    return false;
  }

  memcpy(engineMemory, serData, serSize);

  // write the engine cache file
  FILE *cacheFile = nullptr;
  cacheFile = fopen(cache_path, "wb");
  fwrite(engineMemory, 1, serSize, cacheFile);
  fclose(cacheFile);
  free(engineMemory);

  return true;
}
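// Example invocation (the executable and model / label file names below are only illustrative,
// not taken from this source):
//   ./tutorial-dnn-tensorrt --model ssd-mobilenet.onnx --labels pascal-voc-labels.txt \
//     --confThresh 0.5 --nmsThresh 0.4 --device 0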
int main(int argc, char **argv)
{
  int opt_device = 0;
  unsigned int opt_scale = 1;
  std::string input = "";
  std::string modelFile, labelFile; // set via --model and --labels (default paths not shown in this snippet)
  std::string config = "";
  float meanR = 127.5f, meanG = 127.5f, meanB = 127.5f;
  float confThresh = 0.5f;
  float nmsThresh = 0.4f;
  for (int i = 1; i < argc; i++) {
    if (std::string(argv[i]) == "--device" && i + 1 < argc) {
      opt_device = atoi(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--input" && i + 1 < argc) {
      input = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--model" && i + 1 < argc) {
      modelFile = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--config" && i + 1 < argc) {
      config = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--input-scale" && i + 1 < argc) {
      opt_scale = atoi(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--mean" && i + 3 < argc) {
      meanR = atof(argv[i + 1]);
      meanG = atof(argv[i + 2]);
      meanB = atof(argv[i + 3]);
    }
    else if (std::string(argv[i]) == "--confThresh" && i + 1 < argc) {
      confThresh = (float)atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--nmsThresh" && i + 1 < argc) {
      nmsThresh = (float)atof(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--labels" && i + 1 < argc) {
      labelFile = std::string(argv[i + 1]);
    }
    else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
      std::cout << argv[0]
        << " [--device <camera device number>] [--input <path to image or video>"
           " (camera is used if input is empty)] [--model <path to net trained weights>]"
           " [--config <path to net config file>]"
           " [--input-scale <input scale factor>] [--mean <meanR meanG meanB>]"
           " [--confThresh <confidence threshold>]"
           " [--nmsThresh <NMS threshold>] [--labels <path to label file>]"
        << std::endl;
      return EXIT_SUCCESS;
    }
  }
  std::string model_path(modelFile);
  // Read class names from the label file if one was provided
  std::vector<std::string> labels;
  if (!labelFile.empty()) {
    std::ifstream f_label(labelFile);
    std::string line;
    while (std::getline(f_label, line)) {
      labels.push_back(line);
    }
  }
  // Parse the ONNX model: initialize the engine and the execution context
  TRTUniquePtr<nvinfer1::ICudaEngine> engine { nullptr };
  TRTUniquePtr<nvinfer1::IExecutionContext> context { nullptr };
  if (!parseOnnxModel(model_path, engine, context)) {
    std::cout << "Make sure the model file exists. To see available models, please visit: "
                 "\n\twww.github.com/lagadic/visp-images/dnn/object_detection/"
              << std::endl;
    return EXIT_FAILURE;
  }
  std::vector<nvinfer1::Dims> input_dims;
  std::vector<nvinfer1::Dims> output_dims;
  std::vector<void *> buffers(engine->getNbBindings()); // buffers for input and output data
  int batch_size = 1; // a single image is processed at a time
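  // Allocate a CUDA buffer for every engine binding and record the input and output dimensions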
  for (int i = 0; i < engine->getNbBindings(); ++i) {
    auto binding_size = getSizeByDim(engine->getBindingDimensions(i)) * batch_size * sizeof(float);
    cudaMalloc(&buffers[i], binding_size);

    if (engine->bindingIsInput(i)) {
      input_dims.emplace_back(engine->getBindingDimensions(i));
    }
    else {
      output_dims.emplace_back(engine->getBindingDimensions(i));
    }
  }
  if (input_dims.empty() || output_dims.empty()) {
    std::cerr << "Expect at least one input and one output for network" << std::endl;
    return EXIT_FAILURE;
  }
  cv::VideoCapture capture;
  if (input.empty()) {
    capture.open(opt_device);
  }
  else {
    capture.open(input);
  }

  if (!capture.isOpened()) { // check if we succeeded
    std::cout << "Failed to open the camera" << std::endl;
    return EXIT_FAILURE;
  }

  int cap_width = (int)capture.get(cv::CAP_PROP_FRAME_WIDTH);
  int cap_height = (int)capture.get(cv::CAP_PROP_FRAME_HEIGHT);
  capture.set(cv::CAP_PROP_FRAME_WIDTH, cap_width / opt_scale);
  capture.set(cv::CAP_PROP_FRAME_HEIGHT, cap_height / opt_scale);
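  // Grab a first frame, retrying a few times while the camera warms up, to get the image size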
  cv::Mat frame;
  int i = 0;
  while ((i++ < 20) && !capture.read(frame)) {
  }

  int height = frame.rows, width = frame.cols;
  std::cout << "Image size: " << width << " x " << height << std::endl;
  std::vector<cv::Rect> boxesNMS;
  std::vector<int> classIds;
  // Preprocess the frame and write it to the input binding buffer
  preprocessImage(frame, (float *)buffers[0], input_dims[0], meanR, meanG, meanB);

  // Run inference
  context->enqueue(batch_size, buffers.data(), 0, nullptr);

  // Turn the raw outputs into NMS-filtered bounding boxes and class ids
  boxesNMS = postprocessResults(buffers, output_dims, batch_size, width, height, confThresh, nmsThresh, classIds);
  for (unsigned int i = 0; i < boxesNMS.size(); i++) {
    // Each detection boxesNMS[i] and its label labels[classIds[i]] is drawn on the display (not shown here)
  }
  for (void *buf : buffers)
    cudaFree(buf); // release the CUDA buffers allocated for the engine bindings

  return EXIT_SUCCESS;
}

#else
int main() { std::cout << "OpenCV is not built with CUDA." << std::endl; }
#endif
#else
int main() { std::cout << "ViSP is not built with TensorRT." << std::endl; }
#endif