KltOpencv

class KltOpencv(*args, **kwargs)

Bases: pybind11_object

Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. To use this class, OpenCV must be installed. Installation instructions are provided at https://visp.inria.fr/3rd_opencv .

The following example available in tutorial-klt-tracker.cpp shows how to use the main functions of the class.

#include <visp3/core/vpImageConvert.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/klt/vpKltOpencv.h>

int main(int argc, const char *argv[])
{
#if (defined(HAVE_OPENCV_HIGHGUI) && defined(HAVE_OPENCV_VIDEOIO) || defined(VISP_HAVE_V4L2)) && defined(HAVE_OPENCV_IMGPROC) && defined(HAVE_OPENCV_VIDEO)
  try {
    std::string opt_videoname = "video-postcard.mp4";
    bool opt_init_by_click = false;
    unsigned int opt_subsample = 1;
    for (int i = 0; i < argc; i++) {
      if (std::string(argv[i]) == "--videoname")
        opt_videoname = std::string(argv[i + 1]);
      else if (std::string(argv[i]) == "--init-by-click")
        opt_init_by_click = true;
      else if (std::string(argv[i]) == "--subsample")
        opt_subsample = static_cast<unsigned int>(std::atoi(argv[i + 1]));
      else if (std::string(argv[i]) == "--help" || std::string(argv[i]) == "-h") {
        std::cout << "Usage: " << argv[0]
          << " [--videoname <video name>] [--subsample <scale factor>] [--init-by-click]"
          << " [--help] [-h]" << std::endl;
        return EXIT_SUCCESS;
      }
    }

    vpVideoReader reader;
    reader.setFileName(opt_videoname);

    vpImage<unsigned char> I, Iacq;
    reader.acquire(Iacq);
    Iacq.subsample(opt_subsample, opt_subsample, I);

    cv::Mat cvI;
    vpImageConvert::convert(I, cvI);

    vpDisplayOpenCV d(I, 0, 0, "Klt tracking");
    vpDisplay::display(I);
    vpDisplay::flush(I);

    vpKltOpencv tracker;
    tracker.setMaxFeatures(200);
    tracker.setWindowSize(10);
    tracker.setQuality(0.01);
    tracker.setMinDistance(15);
    tracker.setHarrisFreeParameter(0.04);
    tracker.setBlockSize(9);
    tracker.setUseHarris(1);
    tracker.setPyramidLevels(3);

    // Initialise the tracking
    if (opt_init_by_click) {
      vpMouseButton::vpMouseButtonType button = vpMouseButton::button1;
      std::vector<cv::Point2f> feature;
      vpImagePoint ip;
      do {
        vpDisplay::displayText(I, 10, 10, "Left click to select a point, right to start tracking", vpColor::red);
        if (vpDisplay::getClick(I, ip, button, false)) {
          if (button == vpMouseButton::button1) {
            feature.push_back(cv::Point2f((float)ip.get_u(), (float)ip.get_v()));
            vpDisplay::displayCross(I, ip, 12, vpColor::green);
          }
        }
        vpDisplay::flush(I);
        vpTime::wait(20);
      } while (button != vpMouseButton::button3);
      tracker.initTracking(cvI, feature);
    }
    else {
      tracker.initTracking(cvI);
    }

    std::cout << "Tracker initialized with " << tracker.getNbFeatures() << " features" << std::endl;

    while (!reader.end()) {
      double t = vpTime::measureTimeMs();
      reader.acquire(Iacq);
      Iacq.subsample(opt_subsample, opt_subsample, I);
      vpDisplay::display(I);

      vpImageConvert::convert(I, cvI);

      if (opt_init_by_click && reader.getFrameIndex() == reader.getFirstFrameIndex() + 20) {
        vpMouseButton::vpMouseButtonType button = vpMouseButton::button1;
        std::vector<cv::Point2f> feature;
        vpImagePoint ip;
        do {
          vpDisplay::displayText(I, 10, 10, "Left click to select a point, right to start tracking", vpColor::red);
          if (vpDisplay::getClick(I, ip, button, false)) {
            if (button == vpMouseButton::button1) {
              feature.push_back(cv::Point2f((float)ip.get_u(), (float)ip.get_v()));
              vpDisplay::displayCross(I, ip, 12, vpColor::green);
            }
          }
          vpDisplay::flush(I);
          vpTime::wait(20);
        } while (button != vpMouseButton::button3);
        tracker.initTracking(cvI, feature);
      }

      tracker.track(cvI);

      tracker.display(I, vpColor::red);

      vpDisplay::displayText(I, 10, 10, "Click to quit", vpColor::red);
      if (vpDisplay::getClick(I, false))
        break;

      vpDisplay::flush(I);
      if (!reader.isVideoFormat()) {
        vpTime::wait(t, 40);
      }
    }

    vpDisplay::getClick(I);
  }
  catch (const vpException  &e) {
    std::cout << "Catch an exception: " << e << std::endl;
    return EXIT_FAILURE;
  }
#else
  (void)argc;
  (void)argv;
#endif
  return EXIT_SUCCESS;
}

A line-by-line explanation is provided in tutorial-tracking-keypoint.
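The equivalent workflow with the Python bindings is sketched below. This is a minimal sketch, not part of the original tutorial: it assumes that the bindings accept NumPy arrays (as produced by OpenCV's cv2 module) wherever a cv::Mat parameter appears, and that a video file named video-postcard.mp4 is available; adapt the acquisition and drawing code to your setup.

# Minimal sketch of KLT tracking with the Python bindings (see assumptions above).
import cv2
from visp.klt import KltOpencv

capture = cv2.VideoCapture("video-postcard.mp4")   # example input video
ok, frame = capture.read()
if not ok:
    raise RuntimeError("cannot read the first frame")
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)     # the tracker expects a 1-channel image

tracker = KltOpencv()
tracker.setMaxFeatures(200)
tracker.setWindowSize(10)
tracker.setQuality(0.01)
tracker.setMinDistance(15)
tracker.setHarrisFreeParameter(0.04)
tracker.setBlockSize(9)
tracker.setUseHarris(1)
tracker.setPyramidLevels(3)

# Detect the initial keypoints. In C++ the mask argument defaults to an empty
# cv::Mat; pass an explicit mask here if your version of the binding requires one.
tracker.initTracking(gray)
print(f"Tracker initialized with {tracker.getNbFeatures()} features")

while True:
    ok, frame = capture.read()
    if not ok:
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    tracker.track(gray)                            # track the keypoints in the new frame
    for i in range(tracker.getNbFeatures()):       # draw the tracked features
        fid, x, y = tracker.getFeature(i)
        cv2.circle(frame, (int(x), int(y)), 3, (0, 0, 255), -1)
    cv2.imshow("Klt tracking", frame)
    if cv2.waitKey(20) & 0xFF == ord('q'):
        break

If your build exposes the ViSP display classes, displaySelf() can be used on a ViSP image instead of the cv2 drawing calls shown here.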

Overloaded function.

  1. __init__(self: visp._visp.klt.KltOpencv) -> None

Default constructor.

  2. __init__(self: visp._visp.klt.KltOpencv, copy: visp._visp.klt.KltOpencv) -> None

Copy constructor.

Methods

__init__

Overloaded function.

addFeature

Overloaded function.

display

Overloaded function.

displaySelf

Display features position and id.

getBlockSize

Get the size of the averaging block used to track the features.

getFeature

Get the 'index'th feature image coordinates.

getFeatures

Get the list of current features.

getFeaturesId

Get the unique id of each feature.

getHarrisFreeParameter

Get the free parameter of the Harris detector.

getMaxFeatures

Get the maximum number of features to track in the image.

getMinDistance

Get the minimal Euclidean distance between detected corners during initialization.

getNbFeatures

Get the number of current features.

getNbPrevFeatures

Get the number of previous features.

getPrevFeatures

Get the list of previous features.

getPyramidLevels

Get the maximal pyramid level.

getQuality

Get the parameter characterizing the minimal accepted quality of image corners.

getWindowSize

Get the window size used to refine the corner locations.

initTracking

Overloaded function.

setBlockSize

Set the size of the averaging block used to track the features.

setHarrisFreeParameter

Set the free parameter of the Harris detector.

setInitialGuess

Overloaded function.

setMaxFeatures

Set the maximum number of features to track in the image.

setMinDistance

Set the minimal Euclidean distance between detected corners during initialization.

setMinEigThreshold

Set the minimal eigen value threshold used to reject a point during the tracking.

setPyramidLevels

Set the maximal pyramid level.

setQuality

Set the parameter characterizing the minimal accepted quality of image corners.

setTrackerId

Does nothing.

setUseHarris

Set the parameter indicating whether to use a Harris detector or the minimal eigenvalue of gradient matrices for corner detection.

setWindowSize

Set the window size used to refine the corner locations.

suppressFeature

Remove the feature with the given index as parameter.

track

Track KLT keypoints using the iterative Lucas-Kanade method with pyramids.

Inherited Methods

Operators

__doc__

__init__

Overloaded function.

__module__

Attributes

__annotations__

__init__(*args, **kwargs)

Overloaded function.

  1. __init__(self: visp._visp.klt.KltOpencv) -> None

Default constructor.

  2. __init__(self: visp._visp.klt.KltOpencv, copy: visp._visp.klt.KltOpencv) -> None

Copy constructor.

addFeature(*args, **kwargs)

Overloaded function.

  1. addFeature(self: visp._visp.klt.KltOpencv, x: float, y: float) -> None

Add a keypoint at the end of the feature list. The id of the feature is set to ensure that it is unique.

Parameters:
x

Coordinates of the feature in the image.

y

Coordinates of the feature in the image.

  2. addFeature(self: visp._visp.klt.KltOpencv, id: int, x: float, y: float) -> None

Add a keypoint at the end of the feature list.

Warning

This function doesn’t ensure that the id of the feature is unique. You should rather use addFeature(const float &, const float &) or addFeature(const cv::Point2f &).

Parameters:
id

Feature id. Should be unique

x

Coordinates of the feature in the image.

y

Coordinates of the feature in the image.

  3. addFeature(self: visp._visp.klt.KltOpencv, f: cv::Point_<float>) -> None

Add a keypoint at the end of the feature list. The id of the feature is set to ensure that it is unique.

Parameters:
f

Coordinates of the feature in the image.
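As a hedged illustration of the first overload, the snippet below appends one manually chosen point to the feature list of an already initialized tracker (for instance the tracker built in the sketch above); the pixel coordinates are arbitrary example values.

# Append a manually selected keypoint; its id is generated to stay unique.
n_before = tracker.getNbFeatures()
tracker.addFeature(120.0, 85.0)    # (x, y) in pixels, example values
print(f"{tracker.getNbFeatures() - n_before} feature added")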

static display(*args, **kwargs)

Overloaded function.

  1. display(I: visp._visp.core.ImageGray, features: list[cv::Point_<float>], color: visp._visp.core.Color = vpColor::green, thickness: int = 1) -> None

Display features list.

Parameters:
I

The image used as background.

features

Vector of features.

color

Color used to display the points.

thickness

Thickness of the points.

  2. display(I: visp._visp.core.ImageRGBa, features: list[cv::Point_<float>], color: visp._visp.core.Color = vpColor::green, thickness: int = 1) -> None

Display features list.

Parameters:
I

The image used as background.

features

Vector of features.

color

Color used to display the points.

thickness

Thickness of the points.

  3. display(I: visp._visp.core.ImageGray, features: list[cv::Point_<float>], featuresid: list[int], color: visp._visp.core.Color = vpColor::green, thickness: int = 1) -> None

Display features list with ids.

Parameters:
I

The image used as background.

features

Vector of features.

featuresid

Vector of ids corresponding to the features.

color

Color used to display the points.

thickness

Thickness of the points.

  4. display(I: visp._visp.core.ImageRGBa, features: list[cv::Point_<float>], featuresid: list[int], color: visp._visp.core.Color = vpColor::green, thickness: int = 1) -> None

Display features list with ids.

Parameters:
I

The image used as background.

features

Vector of features.

featuresid

Vector of ids corresponding to the features.

color

Color used to display the points.

thickness

Thickness of the points.

displaySelf(self: visp._visp.klt.KltOpencv, I: visp._visp.core.ImageGray, color: visp._visp.core.Color = vpColor::red, thickness: int = 1) → None

Display features position and id.

Parameters:
I

Image used as background. Display should be initialized on it.

color

Color used to display the features.

thickness

Thickness of the drawings.

getBlockSize(self) → int

Get the size of the averaging block used to track the features.

getFeature(self, index: int) → tuple[int, float, float]

Get the ‘index’th feature image coordinates. Beware that getFeature(i,…) may not represent the same feature before and after a tracking iteration (if a feature is lost, features are shifted in the array).

Parameters:
index: int

Index of feature.

Returns:

A tuple containing:

  • id: id of the feature.

  • x: x coordinate.

  • y: y coordinate.
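Since the id and the coordinates come back as a tuple, a typical way to inspect the current features after a call to track() is sketched below (reusing the tracker from the first sketch). Remember that the index of a feature can change between iterations, so rely on the returned id when you need a stable reference.

# Iterate over the current features; indices may shift when features are lost,
# so the returned id is the only stable way to follow a given point over time.
for i in range(tracker.getNbFeatures()):
    fid, x, y = tracker.getFeature(i)
    print(f"feature {fid}: u={x:.1f}, v={y:.1f}")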

getFeatures(self) → list[cv::Point_<float>]

Get the list of current features.

getFeaturesId(self) → list[int]

Get the unique id of each feature.

getHarrisFreeParameter(self) → float

Get the free parameter of the Harris detector.

getMaxFeatures(self) → int

Get the maximum number of features to track in the image.

getMinDistance(self) → float

Get the minimal Euclidean distance between detected corners during initialization.

getNbFeatures(self) → int

Get the number of current features.

getNbPrevFeatures(self) → int

Get the number of previous features.

getPrevFeatures(self) → list[cv::Point_<float>]

Get the list of previous features.

getPyramidLevels(self) → int

Get the maximal pyramid level.

getQuality(self) → float

Get the parameter characterizing the minimal accepted quality of image corners.

getWindowSize(self) → int

Get the window size used to refine the corner locations.

initTracking(*args, **kwargs)

Overloaded function.

  1. initTracking(self: visp._visp.klt.KltOpencv, I: cv::Mat, mask: cv::Mat) -> None

Initialise the tracking by extracting KLT keypoints on the provided image.

Parameters:
I

Grey level image used as input. This image should have only 1 channel.

mask

Image mask used to restrict the keypoint detection area. If the mask is empty, the whole image is considered.

  2. initTracking(self: visp._visp.klt.KltOpencv, I: cv::Mat, pts: list[cv::Point_<float>]) -> None

Set the points that will be used as initialization during the next call to track() .

Parameters:
I

Input image.

pts

Vector of points that should be tracked.

  3. initTracking(self: visp._visp.klt.KltOpencv, I: cv::Mat, pts: list[cv::Point_<float>], ids: list[int]) -> None

Set the points that will be used as initialization during the next call to track() .

Parameters:
I

Input image.

pts

Vector of points that should be tracked.

ids

Corresponding point ids.
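A short sketch of the overloads taking explicit points is given below; the coordinates are illustrative, gray stands for the same 1-channel image used in the first sketch, and plain (x, y) pairs are assumed to be accepted where cv::Point2f is expected (adapt if your binding requires another point type).

# Initialize the tracker from hand-picked points instead of automatic detection.
pts = [(100.0, 50.0), (200.0, 80.0), (150.0, 120.0)]   # illustrative coordinates
tracker.initTracking(gray, pts)                        # ids are generated automatically
# ...or keep control over the ids yourself:
# tracker.initTracking(gray, pts, [10, 11, 12])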

setBlockSize(self, blockSize: int) → None

Set the size of the averaging block used to track the features.

Warning

The input is a signed integer to be compatible with OpenCV. However, it must be a positive integer.

Parameters:
blockSize: int

Size of an average block for computing a derivative covariation matrix over each pixel neighborhood. Default value is set to 3.

setHarrisFreeParameter(self, harris_k: float) → None

Set the free parameter of the Harris detector.

Parameters:
harris_k: float

Free parameter of the Harris detector. Default value is set to 0.04.

setInitialGuess(*args, **kwargs)

Overloaded function.

  1. setInitialGuess(self: visp._visp.klt.KltOpencv, guess_pts: list[cv::Point_<float>]) -> None

Set the points that will be used as initial guess during the next call to track() . A typical usage of this function is to predict the position of the features before the next call to track() .

Note

See initTracking()

Parameters:
guess_pts

Vector of points that should be tracked. The size of this vector should be the same as the one returned by getFeatures(). If this is not the case, an exception is thrown. Note also that the ids of the points are not modified.

  2. setInitialGuess(self: visp._visp.klt.KltOpencv, init_pts: list[cv::Point_<float>], guess_pts: list[cv::Point_<float>], fid: list[int]) -> None

Set the points that will be used as initial guess during the next call to track() . A typical usage of this function is to predict the position of the features before the next call to track() .

Note

See getPrevFeatures(), getPrevFeaturesId()

Note

See getFeatures(), getFeaturesId()

Note

See initTracking()

Parameters:
init_pts

Initial points (could be obtained from getPrevFeatures() or getFeatures() ).

guess_pts

Prediction of the new position of the initial points. The size of this vector must be the same as the size of the vector of initial points.

fid

Identifiers of the initial points.
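The sketch below shifts every current feature by a constant offset before the next call to track(), as one might do when an external motion estimate is available. The 5-pixel offset is purely illustrative, and the cv::Point2f elements returned by getFeatures() are assumed to expose x and y attributes; adapt the access if your binding converts them to plain tuples instead.

# Predict the position of the features before tracking the next frame.
current = tracker.getFeatures()
guess = [(p.x + 5.0, p.y) for p in current]   # hypothetical motion prediction
tracker.setInitialGuess(guess)                # guess must have the same size as current
tracker.track(next_gray)                      # next_gray: the next 1-channel frame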

setMaxFeatures(self, maxCount: int) → None

Set the maximum number of features to track in the image.

Parameters:
maxCount: int

Maximum number of features to detect and track. Default value is set to 500.

setMinDistance(self, minDistance: float) → None

Set the minimal Euclidean distance between detected corners during initialization.

Parameters:
minDistance: float

Minimal possible Euclidean distance between the detected corners. Default value is set to 15.

setMinEigThreshold(self, minEigThreshold: float) → None

Set the minimal eigen value threshold used to reject a point during the tracking.

Parameters:
minEigThreshold: float

Minimal eigen value threshold. Default value is set to 1e-4.

setPyramidLevels(self, pyrMaxLevel: int) → None

Set the maximal pyramid level. If the level is zero, then no pyramid is computed for the optical flow.

Parameters:
pyrMaxLevel: int

0-based maximal pyramid level number; if set to 0, pyramids are not used (single level), if set to 1, two levels are used, and so on. Default value is set to 3.

setQuality(self, qualityLevel: float) → None

Set the parameter characterizing the minimal accepted quality of image corners.

Parameters:
qualityLevel: float

Quality level parameter. Default value is set to 0.01. The parameter value is multiplied by the best corner quality measure, which is the minimal eigenvalue or the Harris function response. The corners with the quality measure less than the product are rejected. For example, if the best corner has the quality measure = 1500, and the qualityLevel=0.01, then all the corners with the quality measure less than 15 are rejected.

setTrackerId(self, tid: int) → None

Does nothing. Kept for compatibility with previous releases that used the OpenCV C API to do the tracking.

setUseHarris(self, useHarrisDetector: int) → None

Set the parameter indicating whether to use a Harris detector or the minimal eigenvalue of gradient matrices for corner detection.

Parameters:
useHarrisDetector: int

If 1 (default value), use the Harris detector. If 0, use the minimal eigenvalue instead.

setWindowSize(self, winSize: int) → None

Set the window size used to refine the corner locations.

Parameters:
winSize: int

Half of the side length of the search window. Default value is set to 10. For example, if winSize=5, then a (5*2+1) × (5*2+1) = 11 × 11 search window is used.

suppressFeature(self, index: int) → None

Remove the feature with the given index as parameter.

Parameters:
index: int

Index of the feature to remove.
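Because removing a feature shifts the indices of the remaining ones, it is safest to iterate backwards, as in the sketch below, which drops every feature that has left an illustrative 640 x 480 region of interest (reusing the tracker from the first sketch).

# Remove features outside an example 640x480 region of interest.
# Iterating from the last index down ensures removals do not shift the
# indices of the features still to be examined.
for i in range(tracker.getNbFeatures() - 1, -1, -1):
    fid, x, y = tracker.getFeature(i)
    if not (0.0 <= x < 640.0 and 0.0 <= y < 480.0):
        tracker.suppressFeature(i)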

track(self: visp._visp.klt.KltOpencv, I: cv::Mat) → None

Track KLT keypoints using the iterative Lucas-Kanade method with pyramids.

Parameters:
I

Input image.