Calling the OpenCV tracking algorithms from C++

Object Tracking Using OpenCV (C++/Python)

In this tutorial, we will learn about the OpenCV tracking API introduced in OpenCV 3.0. We will learn how and when to use the seven different trackers available in OpenCV 3.4.1: BOOSTING, MIL, KCF, TLD, MEDIANFLOW, GOTURN, and MOSSE. We will also cover the general theory behind modern tracking algorithms.

My friend Boris Babenko has this problem solved perfectly, as shown by the flawless real-time face tracker below! Jokes aside, the GIF below illustrates what we want from an ideal object tracker: speed, accuracy, and robustness to occlusion.

What is object tracking?

Simply put, locating an object in successive frames of a video is called tracking.

The definition sounds straightforward, but in computer vision and machine learning, tracking is a very broad term that covers conceptually similar but technically different ideas. For example, all of the following different but related approaches are studied under object tracking.

1. Dense optical flow: These algorithms help estimate the motion vector of every pixel in a video frame.

2. Sparse optical flow: These algorithms, such as the Kanade-Lucas-Tomasi (KLT) feature tracker, track the locations of a few feature points in an image (see the sketch after this list).

3. Kalman filtering: A very popular signal-processing algorithm used to predict the location of a moving object based on prior motion information. One of the early applications of this algorithm was missile guidance!

4. Meanshift and Camshift: These are algorithms for locating the maxima of a density function. They are also used for tracking.

5. Single object trackers: In this class of trackers, the first frame is marked with a rectangle to indicate the location of the object we want to track. The object is then tracked in subsequent frames using the tracking algorithm. In most real-world applications, these trackers are used together with an object detector.

6. Multiple object trackers: When we have a fast object detector, it makes sense to detect multiple objects in each frame and then run a track-finding algorithm to identify which rectangle in one frame corresponds to a rectangle in the next frame.
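
Approach 2 above (sparse optical flow) is easy to try directly in OpenCV. Below is a minimal, self-contained C++ sketch of KLT-style point tracking: it detects Shi-Tomasi corners and follows them from frame to frame with calcOpticalFlowPyrLK. It is illustrative only and separate from the tracking-API example further down; the video path is an assumption (the same chaplin.mp4 clip used later).

    #include <opencv2/opencv.hpp>
    #include <vector>
     
    using namespace cv;
    using namespace std;
     
    int main()
    {
        VideoCapture video("videos/chaplin.mp4"); // assumed sample clip
        Mat frame, gray, prevGray;
        vector<Point2f> prevPts, nextPts;
     
        while (video.read(frame))
        {
            cvtColor(frame, gray, COLOR_BGR2GRAY);
     
            if (prevPts.empty())
            {
                // Detect up to 200 Shi-Tomasi corners to track
                goodFeaturesToTrack(gray, prevPts, 200, 0.01, 10);
            }
            else
            {
                // Follow last frame's points into the current frame (KLT)
                vector<uchar> status;
                vector<float> err;
                calcOpticalFlowPyrLK(prevGray, gray, prevPts, nextPts, status, err);
     
                vector<Point2f> kept;
                for (size_t i = 0; i < nextPts.size(); i++)
                {
                    if (status[i])
                    {
                        circle(frame, nextPts[i], 3, Scalar(0, 255, 0), -1);
                        kept.push_back(nextPts[i]);
                    }
                }
                prevPts = kept; // drop points that were lost
            }
     
            gray.copyTo(prevGray);
            imshow("KLT", frame);
            if (waitKey(1) == 27) break; // ESC to quit
        }
        return 0;
    }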

C++ implementation

    #include <opencv2/opencv.hpp>
    #include <opencv2/tracking.hpp>
    #include <opencv2/core/ocl.hpp>
     
    using namespace cv;
    using namespace std;
     
    // Convert to string
    #define SSTR( x ) static_cast< std::ostringstream & >( \
    ( std::ostringstream() << std::dec << x ) ).str()
     
    int main(int argc, char **argv)
    {
        // List of tracker types in OpenCV 3.2
        // NOTE : GOTURN implementation is buggy and does not work.
        string trackerTypes[6] = {"BOOSTING", "MIL", "KCF", "TLD","MEDIANFLOW", "GOTURN"};
        // vector<string> trackerTypes(types, std::end(types));
     
        // Create a tracker
        string trackerType = trackerTypes[2];
     
        Ptr<Tracker> tracker;
     
        #if (CV_MINOR_VERSION < 3)
        {
            tracker = Tracker::create(trackerType);
        }
        #else
        {
            if (trackerType == "BOOSTING")
                tracker = TrackerBoosting::create();
            if (trackerType == "MIL")
                tracker = TrackerMIL::create();
            if (trackerType == "KCF")
                tracker = TrackerKCF::create();
            if (trackerType == "TLD")
                tracker = TrackerTLD::create();
            if (trackerType == "MEDIANFLOW")
                tracker = TrackerMedianFlow::create();
            if (trackerType == "GOTURN")
                tracker = TrackerGOTURN::create();
        }
        #endif
        // Read video
        VideoCapture video("videos/chaplin.mp4");
         
        // Exit if video is not opened
        if(!video.isOpened())
        {
            cout << "Could not read video file" << endl;
            return 1;
             
        }
         
        // Read first frame
        Mat frame;
        bool ok = video.read(frame);
         
        // Define initial bounding box
        Rect2d bbox(287, 23, 86, 320);
         
        // Comment out the line below to keep the hard-coded bounding box above
        bbox = selectROI(frame, false);
     
        // Display bounding box.
        rectangle(frame, bbox, Scalar( 255, 0, 0 ), 2, 1 );
        imshow("Tracking", frame);
         
        tracker->init(frame, bbox);
         
        while(video.read(frame))
        {     
            // Start timer
            double timer = (double)getTickCount();
             
            // Update the tracking result
            bool ok = tracker->update(frame, bbox);
             
            // Calculate Frames per second (FPS)
            float fps = getTickFrequency() / ((double)getTickCount() - timer);
             
            if (ok)
            {
                // Tracking success : Draw the tracked object
                rectangle(frame, bbox, Scalar( 255, 0, 0 ), 2, 1 );
            }
            else
            {
                // Tracking failure detected.
                putText(frame, "Tracking failure detected", Point(100,80), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(0,0,255),2);
            }
             
            // Display tracker type on frame
            putText(frame, trackerType + " Tracker", Point(100,20), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(50,170,50),2);
             
            // Display FPS on frame
            putText(frame, "FPS : " + SSTR(int(fps)), Point(100,50), FONT_HERSHEY_SIMPLEX, 0.75, Scalar(50,170,50), 2);
     
            // Display frame.
            imshow("Tracking", frame);
             
            // Exit if ESC pressed.
            int k = waitKey(1);
            if(k == 27)
            {
                break;
            }
     
        }
    }
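
A note on building this: the Tracker* classes and opencv2/tracking.hpp come from the tracking module in opencv_contrib, so OpenCV must have been built with that module and the program linked against it in addition to the usual core/imgproc/videoio/highgui libraries. On a typical OpenCV 3.x install the flags can be obtained with pkg-config (for example `pkg-config --cflags --libs opencv`), but the exact flags depend on how OpenCV was installed.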

Python implementation

# import the necessary packages
from imutils.video import VideoStream
from imutils.video import FPS
import argparse
import imutils
import time
import cv2

ap = argparse.ArgumentParser()
ap.add_argument("-v", "--video", type=str,
	help="path to input video file")
ap.add_argument("-t", "--tracker", type=str, default="kcf",
	help="OpenCV object tracker type")
args = vars(ap.parse_args())

# extract the OpenCV version info
(major, minor) = cv2.__version__.split(".")[:2]
 
# if we are using OpenCV 3.2 OR BEFORE, we can use a special factory
# function to create our object tracker
if int(major) == 3 and int(minor) < 3:
	tracker = cv2.Tracker_create(args["tracker"].upper())
 
# otherwise, for OpenCV 3.3 OR NEWER, we need to explicitly call the
# appropriate object tracker constructor:
else:
	# initialize a dictionary that maps strings to their corresponding
	# OpenCV object tracker implementations
	OPENCV_OBJECT_TRACKERS = {
		"csrt": cv2.TrackerCSRT_create,
		"kcf": cv2.TrackerKCF_create,
		"boosting": cv2.TrackerBoosting_create,
		"mil": cv2.TrackerMIL_create,
		"tld": cv2.TrackerTLD_create,
		"medianflow": cv2.TrackerMedianFlow_create,
		"mosse": cv2.TrackerMOSSE_create
	}
 
	# grab the appropriate object tracker using our dictionary of
	# OpenCV object tracker objects
	tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
 
# initialize the bounding box coordinates of the object we are going
# to track
initBB = None

# if a video path was not supplied, grab the reference to the web cam
if not args.get("video", False):
	print("[INFO] starting video stream...")
	vs = VideoStream(src=0).start()
	time.sleep(1.0)
 
# otherwise, grab a reference to the video file
else:
	vs = cv2.VideoCapture(args["video"])

print("Starting...")
# initialize the FPS throughput estimator
fps = None

# optionally, read from an RTSP camera instead of the source chosen above
# (note: a plain cv2.VideoCapture returns (grabbed, frame) tuples, like the --video branch)
# vs = cv2.VideoCapture("rtsp://admin:[email protected]//Streaming/Channels/1")

# loop over frames from the video stream
while True:
	# grab the current frame, then handle if we are using a
	# VideoStream or VideoCapture object
	frame = vs.read()
	frame = frame[1] if args.get("video", False) else frame
 
	# check to see if we have reached the end of the stream
	if frame is None:
		break
 
	# resize the frame (so we can process it faster) and grab the
	# frame dimensions
	frame = imutils.resize(frame, width=1000)
	(H, W) = frame.shape[:2]

	# check to see if we are currently tracking an object
	if initBB is not None:
		# grab the new bounding box coordinates of the object
		(success, box) = tracker.update(frame)
 
		# check to see if the tracking was a success
		if success:
			(x, y, w, h) = [int(v) for v in box]
			cv2.rectangle(frame, (x, y), (x + w, y + h),
				(0, 255, 0), 2)
 
		# update the FPS counter
		fps.update()
		fps.stop()
 
		# initialize the set of information we'll be displaying on
		# the frame
		info = [
			("Tracker", args["tracker"]),
			("Success", "Yes" if success else "No"),
			("FPS", "{:.2f}".format(fps.fps())),
		]
 
		# loop over the info tuples and draw them on our frame
		for (i, (k, v)) in enumerate(info):
			text = "{}: {}".format(k, v)
			cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
				cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
	# show the output frame
	cv2.imshow("Frame", frame)
	key = cv2.waitKey(1) & 0xFF
 
	# if the 's' key is selected, we are going to "select" a bounding
	# box to track
	if key == ord("s"):
		# select the bounding box of the object we want to track (make
		# sure you press ENTER or SPACE after selecting the ROI)
		initBB = cv2.selectROI("Frame", frame, fromCenter=False,
			showCrosshair=True)
 
		# start OpenCV object tracker using the supplied bounding box
		# coordinates, then start the FPS throughput estimator as well
		tracker.init(frame, initBB)
		fps = FPS().start()
	# if the `q` key was pressed, break from the loop
	elif key == ord("q"):
		break
 
# if we are using a webcam, release the pointer
if not args.get("video", False):
	vs.stop()
 
# otherwise, release the file pointer
else:
	vs.release()
 
# close all windows
cv2.destroyAllWindows()
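
A short usage note (the original does not give the script a filename; assume it is saved as something like opencv_object_tracking.py): run it with an optional --video path and a --tracker name from the OPENCV_OBJECT_TRACKERS dictionary; with no --video argument it falls back to the webcam. While it is running, press 's' to draw the initial bounding box on the current frame (confirm with ENTER or SPACE to start tracking) and 'q' to quit. Keep in mind that the csrt and mosse entries require a relatively recent OpenCV build (roughly 3.4 or newer), while the other trackers are available in earlier 3.x contrib builds.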
