在Xavier上调用摄像头实现yolo-v3检测

目前手上有两块板子:TX2 和 Xavier,代码都很容易移植。下面附上用摄像头实时检测的 yolov3 代码。

实际做检测时,主要工作是配置摄像头调用的一些参数;印象中 (160, 320) 分辨率的图像在 Xavier 上能跑到约 15 FPS。

import argparse
import os
import socket
import sys
import time

import cv2
import numpy as np
from PIL import Image

from yolo_xie import YOLO, detect_video

timenow = time.strftime('%Y%m%d_%H%M%S')
#os.environ["CUDA_VISIBLE_DEVICES"] = "2"

#  python yolo_video_xie.py --input path

def detect_img(yolo):
    """Grab frames from camera 0, run YOLO detection on each one,
    save the annotated image to disk and show it in a window.

    Args:
        yolo: a YOLO instance exposing
            ``detect_image(PIL.Image, filename) -> PIL.Image``.

    Loops until the camera stops delivering frames (or the process
    is killed).
    """
    # Destination directory for annotated frames.
    res_dir = '/data/drone_detect/keras-yolo3_original/7.20/res_img/'

    fourcc = cv2.VideoWriter_fourcc('M', 'J', 'P', 'G')
    cap = cv2.VideoCapture(0)
    # Use the named constant (== 6) rather than a magic number.
    cap.set(cv2.CAP_PROP_FOURCC, fourcc)

    frame_idx = 0
    try:
        while True:
            ret, frame = cap.read()
            if not ret:
                # Camera unplugged / stream ended; the original code would
                # have crashed in cvtColor on a None frame here.
                print("Camera returned no frame, stopping.")
                break
            frame_idx += 1

            # OpenCV delivers BGR; PIL (and the YOLO model) expect RGB.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image = Image.fromarray(frame)
            i_str = str(frame_idx).zfill(6) + '.jpg'
            r_image = yolo.detect_image(image, i_str)

            r_image.save(res_dir + i_str)

            # Convert the annotated PIL image back to BGR for display
            # instead of re-reading the file we just wrote (saves a disk
            # round-trip per frame).
            show_image = cv2.cvtColor(np.asarray(r_image), cv2.COLOR_RGB2BGR)
            cv2.imshow('1', show_image)
            cv2.waitKey(1)
    finally:
        # Release the camera and close the display window even on error.
        cap.release()
        cv2.destroyAllWindows()
 

FLAGS = None


if __name__ == '__main__':
    # class YOLO defines the default value, so suppress any default here
    parser = argparse.ArgumentParser(argument_default=argparse.SUPPRESS)

    # --- Command line options ---
    parser.add_argument(
        '--model', type=str,
        help='path to model weight file, default ' + YOLO.get_defaults("model_path"))
    parser.add_argument(
        '--anchors', type=str,
        help='path to anchor definitions, default ' + YOLO.get_defaults("anchors_path"))
    parser.add_argument(
        '--classes', type=str,
        help='path to class definitions, default ' + YOLO.get_defaults("classes_path"))
    parser.add_argument(
        '--gpu_num', type=int,
        help='Number of GPU to use, default ' + str(YOLO.get_defaults("gpu_num")))
    parser.add_argument(
        '--image', default=False, action="store_true",
        help='Image detection mode, will ignore all positional arguments')

    # --- Command line positional arguments -- for video detection mode ---
    parser.add_argument(
        "--input", nargs='?', type=str, required=False,
        default='./path2your_video', help="Video input path")
    parser.add_argument(
        "--output", nargs='?', type=str, default="",
        help="[Optional] Video output path")

    # client = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    # client.connect("/tmp/test.sock")

    FLAGS = parser.parse_args()

    if FLAGS.image:
        # Image detection mode, disregard any remaining command line arguments.
        print("Image detection mode")
        print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        # argparse.Namespace supports membership tests on attribute names.
        if "input" in FLAGS:
            print(" Ignoring remaining command line arguments: " + FLAGS.input + "," + FLAGS.output)
        detect_img(YOLO(**vars(FLAGS)))
    elif "input" in FLAGS:
        detect_video(YOLO(**vars(FLAGS)), FLAGS.input, FLAGS.output)
    else:
        print("Must specify at least video_input_path.  See usage with --help.")

 

你可能感兴趣的:(无人驾驶)