#include <cstdint>
#include <iomanip>
#include <iostream>
#include <sstream>

#include <visp3/core/vpConfig.h>
#include <visp3/core/vpException.h>
#include <visp3/core/vpImageException.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/core/vpRGBa.h>
#include <visp3/core/vpTime.h>
#include <visp3/gui/vpDisplayFactory.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/io/vpVideoWriter.h>
#include <visp3/rbt/vpRBTracker.h>
#ifdef ENABLE_VISP_NAMESPACE
using namespace VISP_NAMESPACE_NAME;
#endif
#include "render-based-tutorial-utils.h"
struct CmdArguments
{
  CmdArguments() : startFrame(0), frameStep(1), stepByStep(false)
  { }
  void registerArguments(vpJsonArgumentParser &parser)
  {
    parser
      .addArgument("--color", colorSequence, true,
                   "The color sequence (in video reader format, e.g., /path/to/I%04d.png)")
      .addArgument("--depth", depthFolder, false,
                   "The depth images associated with the color sequence. Frames should be aligned")
      .addArgument("--start", startFrame, false, "The first frame of the sequence")
      .addArgument("--step", frameStep, false, "How many frames should be read between calls to the tracker")
      .addFlag("--step-by-step", stepByStep, "Go through the sequence interactively, frame by frame");
  }
  void postProcessArguments()
  {
    if (colorSequence.empty()) {
      throw vpException(vpException::badValue, "The color sequence should not be empty");
    }
  }
  std::string colorSequence;
  std::string depthFolder;
  unsigned int startFrame;
  unsigned int frameStep;
  bool stepByStep;
};
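
// Example invocation (binary name and paths are placeholders):
//   ./tutorial-rbt-sequence --config tracker.json --color data/color/I%04d.png --depth data/depth --step-by-step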
int main(int argc, const char **argv)
{
  // Arguments shared by the render-based tracking tutorials, plus the sequence-specific ones above
  vpRBTrackerTutorial::BaseArguments baseArgs;
  CmdArguments sequenceArgs;
  vpRBTrackerTutorial::vpRBExperimentLogger logger;
  vpRBTrackerTutorial::vpRBExperimentPlotter plotter;
  vpJsonArgumentParser parser(
    "Tutorial showing how to use the Render-Based Tracker on an offline sequence", "--config", "/");
  baseArgs.registerArguments(parser);
  sequenceArgs.registerArguments(parser);
  logger.registerArguments(parser);
  plotter.registerArguments(parser);

  parser.parse(argc, argv);

  baseArgs.postProcessArguments();
  sequenceArgs.postProcessArguments();
  plotter.postProcessArguments(baseArgs.display);

  if (baseArgs.enableRenderProfiling) {
    vpRBTrackerTutorial::enableRendererProfiling();
  }

  baseArgs.display = true;
  logger.startLog();
  // Create the tracker. The full tutorial configures it from a JSON file given on the
  // command line; the argument member used below is assumed for this listing.
  vpRBTracker tracker;
  tracker.loadConfigurationFile(baseArgs.trackerConfiguration);
  const vpCameraParameters cam = tracker.getCameraParameters();

  // Open the color sequence and grab the first frame
  vpVideoReader readerRGB;
  readerRGB.setFileName(sequenceArgs.colorSequence);
  readerRGB.setFirstFrameIndex(sequenceArgs.startFrame);
  vpImage<vpRGBa> Icol;
  readerRGB.open(Icol);

  const unsigned int height = Icol.getHeight(), width = Icol.getWidth();
  vpImage<unsigned char> Id(height, width);            // grayscale version of the color frame
  vpImage<float> depth;                                // depth in meters; stays empty if no depth folder is given
  vpImage<unsigned char> depthDisplay(height, width);  // depth visualization
  vpImage<unsigned char> IProbaDisplay(height, width); // tracker mask visualization
  // Debug views; the exact image types are assumed for this listing
  vpImage<vpRGBa> InormDisplay(height, width);         // rendered object normals
  vpImage<unsigned char> ICannyDisplay(height, width); // depth canny
  vpImage<vpRGBa> IRender(height, width);              // color render
  vpImageConvert::convert(Icol, Id);

  std::vector<std::shared_ptr<vpDisplay>> displays, debugDisplays;
  if (baseArgs.display) {
    displays = vpRBTrackerTutorial::createDisplays(Id, Icol, depthDisplay, IProbaDisplay);
    if (baseArgs.debugDisplay) {
      debugDisplays = vpDisplayFactory::makeDisplayGrid(
        1, 3,
        0, 0,
        20, 20,
        "Normals in object frame", InormDisplay,
        "Depth canny", ICannyDisplay,
        "Color render", IRender);
    }
    plotter.init(displays);
  }
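
  // Estimated poses are collected in a JSON array, one entry per tracked frame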
  nlohmann::json result = nlohmann::json::array();

  // Initialize the tracker pose: either from an inline pose given on the command line,
  // or interactively by clicking the model points in the first image.
  vpHomogeneousMatrix cMo;
  std::cout << "Starting init" << std::endl;
  if (baseArgs.hasInlineInit()) {
    tracker.setPose(baseArgs.cMoInit); // assumed: BaseArguments member holding the inline initial pose
  }
  else if (baseArgs.display) {
    tracker.initClick(Id, baseArgs.initFile, true);
    tracker.getPose(cMo);
  }
  else {
    throw vpException(vpException::notImplementedError,
                      "Cannot initialize tracking: no display and no inline initialization available");
  }
  if (baseArgs.display) {
    vpDisplay::flush(Id);
  }
  unsigned int iter = 1;
  unsigned int im = sequenceArgs.startFrame;
  const double expStart = vpTime::measureTimeMs();

  while (true) {
    const double frameStart = vpTime::measureTimeMs();
    // Read the next color frame(s) and, if available, the matching depth map
    for (unsigned int sp = 0; sp < sequenceArgs.frameStep; ++sp) {
      readerRGB.acquire(Icol);
      vpImageConvert::convert(Icol, Id);
      if (!sequenceArgs.depthFolder.empty()) {
        std::stringstream depthName;
        depthName << sequenceArgs.depthFolder << "/" << std::setfill('0') << std::setw(6) << im << ".npy";
        // Depth frames are assumed to be stored as 16-bit .npy arrays
        visp::cnpy::NpyArray npzData = visp::cnpy::npy_load(depthName.str());
        vpImage<uint16_t> dataArray(npzData.data<uint16_t>(),
                                    static_cast<unsigned int>(npzData.shape[0]),
                                    static_cast<unsigned int>(npzData.shape[1]), false);
        float scale = 9.999999747378752e-05f; // about 1e-4: raw depth is in tenths of a millimeter, converted to meters
        depth.resize(dataArray.getHeight(), dataArray.getWidth());
        depthDisplay.resize(dataArray.getHeight(), dataArray.getWidth());
#ifdef VISP_HAVE_OPENMP
#pragma omp parallel for
#endif
        for (unsigned int i = 0; i < dataArray.getSize(); ++i) {
          float value = static_cast<float>(dataArray.bitmap[i]) * scale;
          depth.bitmap[i] = value;
          // Values beyond maxDepthDisplay are shown as black in the visualization image
          depthDisplay.bitmap[i] = value > baseArgs.maxDepthDisplay
            ? 0
            : static_cast<unsigned char>((depth.bitmap[i] / baseArgs.maxDepthDisplay) * 255.f);
        }
      }
    }
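
    // Feed the tracker the grayscale and color images, plus the depth map when one is available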
    if (depth.getSize() == 0) {
      tracker.track(Id, Icol);
    }
    else {
      tracker.track(Id, Icol, depth);
    }
    tracker.getPose(cMo);
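
    // Refresh the display windows: tracker overlays, estimated object frame, probability mask and depth view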
    if (baseArgs.display) {
      if (baseArgs.debugDisplay) {
        const vpRBFeatureTrackerInput &lastFrame = tracker.getMostRecentFrame();
        vpRBTrackerTutorial::displayNormals(lastFrame.renders.normals, InormDisplay);
      }
      vpDisplay::display(Id);
      vpDisplay::display(Icol);
      if (depth.getSize() > 0) {
        vpDisplay::display(depthDisplay);
      }
      tracker.display(Id, Icol, depthDisplay);
      vpDisplay::displayFrame(Icol, cMo, cam, 0.05, vpColor::none, 2); // 5 cm axes; length chosen for illustration
      tracker.displayMask(IProbaDisplay);
      vpDisplay::display(IProbaDisplay);
      vpDisplay::flush(Id);
      vpDisplay::flush(Icol);
      vpDisplay::flush(IProbaDisplay);
      if (depth.getSize() > 0) {
        vpDisplay::flush(depthDisplay);
      }
    }
    result.push_back(cMo);
    logger.logFrame(tracker, iter, Id, Icol, depthDisplay, IProbaDisplay);
    if (sequenceArgs.stepByStep && baseArgs.display) {
      // In step-by-step mode, block until the user clicks in the grayscale view
      vpDisplay::getClick(Id, true);
    }
std::cout << "Iter: " << iter << std::endl;
++im;
++iter;
break;
}
std::cout << "Frame took: " << frameEnd - frameStart << "ms" << std::endl;
plotter.plot(tracker, (frameEnd - expStart) / 1000.0);
}
  logger.close();
  return EXIT_SUCCESS;
}