Opening the camera, in tracker_run.cpp:
bool TrackerRun::init()
{
    ImgAcqParas imgAcqParas;
    imgAcqParas.device = 1; //_paras.device;
    imgAcqParas.expansionStr = _paras.expansion;
    ...
}
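Here device is the camera index handed to the image acquisition layer. A minimal sketch of what opening that device amounts to, assuming the acquisition ultimately wraps cv::VideoCapture (the wrapper itself is not shown in the snippet above):

// Sketch (assumption): the acquisition layer behaves like cv::VideoCapture,
// where 'device' selects the camera index (0 = default camera, 1 = second camera).
#include <opencv2/opencv.hpp>

bool openCamera(int device)
{
    cv::VideoCapture cap(device);   // open the camera with the given index
    if (!cap.isOpened())
        return false;               // no camera found at this index

    cv::Mat frame;
    cap >> frame;                   // grab one frame to verify the stream works
    return !frame.empty();
}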
dsst_tracker.hpp includes gradientMex:
#include "gradientMex.hpp"
gradientMex.hpp defines two fhog variants, a transposed one and a direct one:
namespace piotr {
    void fhog(float * const M, float * const O,
        ...);
}
The fhogToCvCol function pointer is selected in scale_estimator.hpp:
if (paras.useFhogTranspose)
{
    fhogToCvCol = &piotr::fhogToCvColT;
    printf("useFhogTranspose \n");
}
else
{
    fhogToCvCol = &piotr::fhogToCol;
    printf("useFhogTranspose no \n");
}

if (paras.useFhogTranspose)
    cvFhog = &piotr::cvFhogT<T, DFC>;
else
    cvFhog = &piotr::cvFhog<T, DFC>;
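Both choices are stored in plain function pointers, and all later feature extraction goes through them, so the transpose/no-transpose decision is made exactly once. A tiny self-contained sketch of that dispatch pattern; the function names below are illustrative, not the tracker's own:

// Minimal sketch of the selection pattern used above: pick one of two
// implementations once, then call through the pointer. Names are illustrative.
#include <cstdio>

static void fhogDirect(int cells)     { std::printf("direct fhog, %d cells\n", cells); }
static void fhogTransposed(int cells) { std::printf("transposed fhog, %d cells\n", cells); }

int main()
{
    bool useFhogTranspose = false;                // corresponds to paras.useFhogTranspose
    void (*computeFhog)(int) = useFhogTranspose ? &fhogTransposed : &fhogDirect;
    computeFhog(31);                              // every later call goes through the pointer
    return 0;
}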
Tracking workflow:
bool updateAtScalePos(const cv::Mat& image, const Point& oldPos, const T oldScale, Rect& boundingBox)
{
    ++_frameIdx;

    if (!_isInitialized)
        return false;

    T newScale = oldScale;
    Point newPos = oldPos;
    cv::Point2i maxResponseIdx;
    cv::Mat response;

    // in case of error return the last box
    boundingBox = _lastBoundingBox;

    if (detectModel(image, response, maxResponseIdx, newPos, newScale) == false)
        return false;

    // return box
    Rect tempBoundingBox;
    tempBoundingBox.width = _baseTargetSz.width * newScale;
    tempBoundingBox.height = _baseTargetSz.height * newScale;
    tempBoundingBox.x = newPos.x - tempBoundingBox.width / 2;
    tempBoundingBox.y = newPos.y - tempBoundingBox.height / 2;

    if (_ENABLE_TRACKING_LOSS_DETECTION)
    {
        if (evalReponse(image, response, maxResponseIdx, tempBoundingBox) == false)
            return false;
    }

    if (updateModel(image, newPos, newScale) == false)
        return false;

    boundingBox &= Rect(0, 0, static_cast<T>(image.cols), static_cast<T>(image.rows));
    boundingBox = tempBoundingBox;
    _lastBoundingBox = tempBoundingBox;
    return true;
}
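The returned box is simply the base target size scaled by newScale and centered on newPos. A small self-contained illustration of that arithmetic (all numbers are made up):

// Illustration only: converting the detected center and scale into the returned
// box, mirroring the arithmetic in updateAtScalePos.
#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    cv::Size2d baseTargetSz(40, 80);   // target size at scale 1.0
    double newScale = 1.25;            // scale found by the scale estimator
    cv::Point2d newPos(320, 240);      // center found by the translation filter

    cv::Rect2d box;
    box.width  = baseTargetSz.width * newScale;    // 50
    box.height = baseTargetSz.height * newScale;   // 100
    box.x = newPos.x - box.width / 2;              // 295
    box.y = newPos.y - box.height / 2;             // 190
    std::printf("box: %.0f %.0f %.0f %.0f\n", box.x, box.y, box.width, box.height);
    return 0;
}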
Detection step (detectModel):

bool detectModel(const cv::Mat& image, cv::Mat& response, cv::Point2i& maxResponseIdx, Point& newPos, T& newScale) const
{
    // find translation
    std::shared_ptr<DFC> xt(0);
    if (getTranslationFeatures(image, xt, newPos, newScale) == false)
        return false;

    std::shared_ptr<DFC> xtf;
    if (_USE_CCS)
        xtf = DFC::dftFeatures(xt);
    else
        xtf = DFC::dftFeatures(xt, cv::DFT_COMPLEX_OUTPUT); // passing DFT_COMPLEX_OUTPUT to dft yields the full complex matrix directly

    std::shared_ptr<DFC> sampleSpec = DFC::mulSpectrumsFeatures(_hfNumerator, xtf, false);
    cv::Mat sumXtf = DFC::sumFeatures(sampleSpec);
    cv::Mat hfDenLambda = addRealToSpectrum<T>(_LAMBDA, _hfDenominator);
    cv::Mat responseTf;

    if (_USE_CCS)
        divSpectrums(sumXtf, hfDenLambda, responseTf, 0, false);
    else
        divideSpectrumsNoCcs<T>(sumXtf, hfDenLambda, responseTf);

    cv::Mat translationResponse;
    idft(responseTf, translationResponse, cv::DFT_REAL_OUTPUT | cv::DFT_SCALE);

    cv::Point delta;
    double maxResponse;
    cv::Point_<T> subDelta;
    minMaxLoc(translationResponse, 0, &maxResponse, 0, &delta);
    subDelta = delta;

    if (_CELL_SIZE != 1)
        subDelta = subPixelDelta<T>(translationResponse, delta);

    T posDeltaX = (subDelta.x + 1 - floor(translationResponse.cols / consts::c2_0)) * newScale;
    T posDeltaY = (subDelta.y + 1 - floor(translationResponse.rows / consts::c2_0)) * newScale;
    newPos.x += round(posDeltaX * _CELL_SIZE);
    newPos.y += round(posDeltaY * _CELL_SIZE);

    if (_debug != 0)
        _debug->showResponse(translationResponse, maxResponse);

    if (_scaleEstimator)
    {
        // find scale
        T tempScale = newScale * _templateScaleFactor;
        if (_scaleEstimator->detectScale(image, newPos, tempScale) == false)
            return false;

        newScale = tempScale / _templateScaleFactor;
    }

    response = translationResponse;
    maxResponseIdx = delta;
    return true;
}
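The peak of translationResponse is measured in HOG cells relative to the top-left corner of the response map; detectModel re-centers it, scales it by the current scale and multiplies by _CELL_SIZE to get a pixel displacement. A small worked illustration (numbers are made up):

// Illustration only: a 50x30 response map with its peak at column 27, row 14,
// tracked at scale 1.1 with a HOG cell size of 4.
#include <cmath>
#include <cstdio>

int main()
{
    double cols = 50, rows = 30;       // response map size (in HOG cells)
    double peakX = 27, peakY = 14;     // (sub-pixel) location of the maximum
    double scale = 1.1, cellSize = 4;

    // Same arithmetic as detectModel: shift the peak relative to the map center,
    // then convert from cells back to pixels at the current scale.
    double dx = (peakX + 1 - std::floor(cols / 2.0)) * scale;  // (27 + 1 - 25) * 1.1 = 3.3
    double dy = (peakY + 1 - std::floor(rows / 2.0)) * scale;  // (14 + 1 - 15) * 1.1 = 0.0
    std::printf("shift in pixels: (%ld, %ld)\n",
                std::lround(dx * cellSize), std::lround(dy * cellSize));   // (13, 0)
    return 0;
}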
Extracting the translation features (getTranslationFeatures):

bool getTranslationFeatures(const cv::Mat& image, std::shared_ptr<DFC>& features, const Point& pos, T scale) const
{
    cv::Mat patch;
    Size patchSize = _templateSz * scale;

    if (getSubWindow(image, patch, patchSize, pos) == false)
        return false;

    if (_ORIGINAL_VERSION)
        depResize(patch, patch, _templateSz);
    else
        resize(patch, patch, _templateSz, 0, 0, _RESIZE_TYPE);

    if (_debug != 0)
        _debug->showPatch(patch);

    cv::Mat floatPatch;
    patch.convertTo(floatPatch, CV_32FC(3));

    features.reset(new DFC());
    cvFhog(floatPatch, features, _CELL_SIZE, DFC::numberOfChannels() - 1);

    // append gray-scale image
    if (patch.channels() == 1)
    {
        if (_CELL_SIZE != 1)
            resize(patch, patch, features->channels[0].size(), 0, 0, _RESIZE_TYPE);

        features->channels[DFC::numberOfChannels() - 1] = patch / 255.0 - 0.5;
    }
    else
    {
        if (_CELL_SIZE != 1)
            resize(patch, patch, features->channels[0].size(), 0, 0, _RESIZE_TYPE);

        cv::Mat grayFrame;
        cvtColor(patch, grayFrame, cv::COLOR_BGR2GRAY);
        grayFrame.convertTo(grayFrame, CV_TYPE);
        grayFrame = grayFrame / 255.0 - 0.5;
        features->channels[DFC::numberOfChannels() - 1] = grayFrame;
    }

    DFC::mulFeatures(features, _cosWindow);
    return true;
}

Computing cvFhog:
void cvFhog(const cv::Mat& img, std::shared_ptr<OUT>& cvFeatures, int binSize, int fhogChannelsToCopy = 31)
{
    const int orientations = 9;
    // ensure array is continuous
    const cv::Mat& image = (img.isContinuous() ? img : img.clone());
    int channels = image.channels();
    int computeChannels = 32;
    int width = image.cols;
    int height = image.rows;
    int widthBin = width / binSize;
    int heightBin = height / binSize;

    float* const I = (float*)wrCalloc(static_cast<size_t>(width * height * channels), sizeof(float));
    float* const H = (float*)wrCalloc(static_cast<size_t>(widthBin * heightBin * computeChannels), sizeof(float));
    float* const M = (float*)wrCalloc(static_cast<size_t>(width * height), sizeof(float));
    float* const O = (float*)wrCalloc(static_cast<size_t>(width * height), sizeof(float));

    // row major (interleaved) to col major (non interleaved; clustered)
    float* imageData = reinterpret_cast<float*>(image.data);
    float* const redChannel = I;
    float* const greenChannel = I + width * height;
    float* const blueChannel = I + 2 * width * height;
    int colMajorPos = 0, rowMajorPos = 0;

    for (int row = 0; row < height; ++row)
    {
        for (int col = 0; col < width; ++col)
        {
            colMajorPos = col * height + row;
            rowMajorPos = row * channels * width + col * channels;

            blueChannel[colMajorPos] = imageData[rowMajorPos];
            greenChannel[colMajorPos] = imageData[rowMajorPos + 1];
            redChannel[colMajorPos] = imageData[rowMajorPos + 2];
        }
    }

    // calc fhog in col major
    gradMag(I, M, O, height, width, channels, true);

    if (fhogChannelsToCopy == 27)
        fhog(M, O, H, height, width, binSize, orientations, -1, 0.2f, false);
    else
        fhog(M, O, H, height, width, binSize, orientations, -1, 0.2f);

    // only copy the amount of the channels the user wants
    // or the amount that fits into the output array
    int channelsToCopy = std::min(fhogChannelsToCopy, OUT::numberOfChannels());

    for (int c = 0; c < channelsToCopy; ++c)
    {
        cv::Mat_<PRIMITIVE_TYPE> m(heightBin, widthBin);
        cvFeatures->channels[c] = m;
    }

    PRIMITIVE_TYPE* cdata = 0;

    // col major to row major with separate channels
    for (int c = 0; c < channelsToCopy; ++c)
    {
        float* Hc = H + widthBin * heightBin * c;
        cdata = reinterpret_cast<PRIMITIVE_TYPE*>(cvFeatures->channels[c].data);

        for (int row = 0; row < heightBin; ++row)
            for (int col = 0; col < widthBin; ++col)
                cdata[row * widthBin + col] = Hc[row + heightBin * col];
    }

    wrFree(M);
    wrFree(O);
    wrFree(I);
    wrFree(H);
}
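Most of cvFhog is layout conversion: OpenCV hands over a row-major, interleaved BGR float image, while Piotr's gradMag/fhog expect column-major, planar channels, hence colMajorPos = col * height + row. A tiny self-contained illustration of just that index mapping (the 2x3 image and its values are made up):

// Illustration only: convert a 2x3 row-major interleaved BGR image into three
// column-major planes, using the same index arithmetic as cvFhog.
#include <cstdio>

int main()
{
    const int height = 2, width = 3, channels = 3;
    // row-major, interleaved: B,G,R per pixel, pixels left-to-right, top-to-bottom
    float imageData[height * width * channels];
    for (int i = 0; i < height * width * channels; ++i)
        imageData[i] = static_cast<float>(i);

    float planes[3][height * width];   // column-major planes: [0]=R, [1]=G, [2]=B, as in cvFhog
    for (int row = 0; row < height; ++row)
    {
        for (int col = 0; col < width; ++col)
        {
            int colMajorPos = col * height + row;                       // planar index
            int rowMajorPos = row * channels * width + col * channels;  // interleaved index
            planes[2][colMajorPos] = imageData[rowMajorPos];            // blue (first interleaved value)
            planes[1][colMajorPos] = imageData[rowMajorPos + 1];        // green
            planes[0][colMajorPos] = imageData[rowMajorPos + 2];        // red
        }
    }
    // pixel (row=1, col=2): rowMajorPos = 1*3*3 + 2*3 = 15, colMajorPos = 2*2 + 1 = 5
    std::printf("blue plane at (row=1, col=2): %.0f (expected 15)\n", planes[2][5]);
    return 0;
}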
The per-frame tracker update is invoked from tracker_run.cpp, which also times the call:

else
{
    tStart = getTickCount();
    _targetOnFrame = _tracker->update(_image, _boundingBox);
    tDuration = getTickCount() - tStart;
}
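tDuration is a raw tick count; it only becomes a time after dividing by the tick frequency. A minimal sketch of turning it into milliseconds and FPS, assuming the getTickCount here is OpenCV's cv::getTickCount:

// Sketch (assumption): converting the measured tick count into milliseconds / FPS
// with OpenCV's tick API.
#include <opencv2/core.hpp>
#include <cstdio>

int main()
{
    int64 tStart = cv::getTickCount();
    // ... run one tracker update here ...
    int64 tDuration = cv::getTickCount() - tStart;

    double ms  = tDuration * 1000.0 / cv::getTickFrequency();  // elapsed milliseconds
    double fps = (ms > 0.0) ? 1000.0 / ms : 0.0;               // frames per second
    std::printf("update took %.2f ms (%.1f FPS)\n", ms, fps);
    return 0;
}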
fhog call flow:

Translation (position) features:
    getTranslationFeatures -> cvFhog -> fhog

Multi-scale estimation:
    getScaleFeatures -> fhogToCvCol (resolving to fhogToCol here) -> fhog
    (this chain runs inside the for loop in getScaleFeatures, once per candidate scale, so fhogToCvCol appears repeatedly in a call trace)
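Because getScaleFeatures extracts features once per candidate scale, fhog runs many times per frame for the scale filter. Below is a minimal sketch of that per-scale loop; scaleFeatures, computeHogColumn, the 32x32 scale-model size and the 33-scale / 1.02-step values are illustrative stand-ins, not the actual scale_estimator.hpp code:

// Sketch (assumption): the scale estimator crops the target at several candidate
// scales and extracts one feature column per scale; in the real code each
// iteration calls fhogToCvCol, which in turn calls fhog.
#include <opencv2/opencv.hpp>
#include <vector>
#include <cmath>

cv::Mat computeHogColumn(const cv::Mat& patch)
{
    // stand-in: the real code runs fhog on the patch and flattens it to a column
    cv::Mat col;
    patch.reshape(1, static_cast<int>(patch.total() * patch.channels())).convertTo(col, CV_32F);
    return col;
}

std::vector<cv::Mat> scaleFeatures(const cv::Mat& image, cv::Rect target,
                                   int numScales = 33, double scaleStep = 1.02)
{
    std::vector<cv::Mat> columns;
    cv::Point2d center(target.x + target.width / 2.0, target.y + target.height / 2.0);

    for (int i = 0; i < numScales; ++i)                    // one iteration per candidate scale
    {
        double s = std::pow(scaleStep, i - numScales / 2); // scales spread around 1.0
        cv::Rect roi(cvRound(center.x - target.width * s / 2.0),
                     cvRound(center.y - target.height * s / 2.0),
                     cvRound(target.width * s), cvRound(target.height * s));
        roi &= cv::Rect(0, 0, image.cols, image.rows);
        if (roi.area() == 0)
            continue;

        cv::Mat patch;
        cv::resize(image(roi), patch, cv::Size(32, 32));   // fixed scale-model size (illustrative)
        columns.push_back(computeHogColumn(patch));        // real code: fhogToCvCol -> fhog
    }
    return columns;
}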