Testing multiple CSI cameras on Jetson (Python version)

First of all, CSI cameras and USB cameras are not opened the same way.

Errors I ran into

linano@jetson-nano$ python3 mpy-Copy1.py
[ WARN:0] global /home/nvidia/host/build_opencv/nv_opencv/modules/videoio/src/cap_gstreamer.cpp (1757) handleMessage OpenCV | GStreamer warning: Embedded video playback halted; module v4l2src0 reported: Internal data stream error
[ WARN:0] global /home/nvidia/host/build_opencv/nv_opencv/modules/videoio/src/cap_gstreamer.cpp (886) open OpenCV | GStreamer warning: unable to start pipeline
[ WARN:0] global /home/nvidia/host/build_opencv/nv_opencv/modules/videoio/src/cap_gstreamer.cpp (480) isPipelinePlaying OpenCV | GStreamer warning: GStreamer: pipeline have not been created

Cause of the problem:
I was just getting started with CSI cameras and kept getting the errors above from cv2.VideoCapture(0) / cv2.VideoCapture(1). Those index-based calls are the usual way to open USB cameras; a CSI camera has to be opened through a GStreamer pipeline instead, as the sketch below shows.
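
A minimal sketch of the difference (the CSI pipeline string here mirrors the nvarguscamerasrc pipeline used in Example 2 below; adjust width, height and framerate to your sensor):

import cv2

# USB camera: open by device index (e.g. /dev/video0)
usb_cam = cv2.VideoCapture(0)

# CSI camera: open through a GStreamer pipeline built around nvarguscamerasrc
pipeline = (
    "nvarguscamerasrc sensor-id=0 ! "
    "video/x-raw(memory:NVMM), width=1280, height=720, format=NV12, framerate=30/1 ! "
    "nvvidconv ! video/x-raw, format=BGRx ! "
    "videoconvert ! video/x-raw, format=BGR ! appsink"
)
csi_cam = cv2.VideoCapture(pipeline, cv2.CAP_GSTREAMER)
print("CSI camera opened:", csi_cam.isOpened())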

Using multiple CSI cameras on Jetson

I use the following approaches here:
1. JetCam: an easy-to-use Python camera interface for NVIDIA Jetson.
If you are interested in JetCam, see the link below (more examples, more details):
https://github.com/NVIDIA-AI-IOT/jetcam
2. Plain OpenCV (my preferred way)
3. A mix of the two
Reference:
https://github.com/JetsonHacksNano/CSI-Camera

The examples below all use Python to read the CSI cameras and display the result on the Ubuntu desktop
(not in the browser via JupyterLab; if you need JupyterLab display, see the two repositories linked above, and the short sketch right below).
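
For reference, a minimal sketch of the JupyterLab route, assuming JetCam's bgr8_to_jpeg helper plus the ipywidgets and traitlets packages (see the jetcam repository for the authoritative notebooks):

# Run inside a JupyterLab notebook cell
import ipywidgets
import traitlets
from IPython.display import display
from jetcam.csi_camera import CSICamera
from jetcam.utils import bgr8_to_jpeg

camera = CSICamera(capture_device=0, width=224, height=224)
camera.running = True  # start the background capture thread

image_widget = ipywidgets.Image(format='jpeg')
# keep the widget in sync with the latest camera frame
traitlets.dlink((camera, 'value'), (image_widget, 'value'), transform=bgr8_to_jpeg)
display(image_widget)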

Example 1: JetCam + OpenCV, displayed on the Ubuntu desktop

from jetcam.csi_camera import CSICamera
import cv2

camera0 = CSICamera(capture_device=0, width=224, height=224)
camera1 = CSICamera(capture_device=1, width=224, height=224)
# Grab one frame from each camera and check the shapes
image0 = camera0.read()
print(image0.shape)
image1 = camera1.read()
print(image1.shape)
# .value always holds the most recently captured frame
print(camera0.value.shape)
print(camera1.value.shape)

while True:
    image0 = camera0.read()
    image1 = camera1.read()
    cv2.imshow("CSI Camera0", image0)
    cv2.imshow("CSI Camera1", image1)
    kk = cv2.waitKey(1)
    if kk == ord('q'):  # press 'q' to quit
        break

cv2.destroyAllWindows()
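
A note on this design: camera.read() blocks on every call inside the loop above. JetCam can also capture in a background thread and push each new frame to a callback; below is a minimal sketch of that mode, based on JetCam's running/observe API (untested here):

from jetcam.csi_camera import CSICamera

camera0 = CSICamera(capture_device=0, width=224, height=224)

def on_new_frame(change):
    # change['new'] is the latest BGR frame as a numpy array
    print(change['new'].shape)

camera0.running = True                        # start the internal capture thread
camera0.observe(on_new_frame, names='value')  # called whenever camera0.value updates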

Example 2: OpenCV only, displayed on the Ubuntu desktop

# MIT License
# Copyright (c) 2019,2020 JetsonHacks
# See license
# A very simple code snippet
# Using two  CSI cameras (such as the Raspberry Pi Version 2) connected to a
# NVIDIA Jetson Nano Developer Kit (Rev B01) using OpenCV
# Drivers for the camera and OpenCV are included in the base image in JetPack 4.3+

# This script will open a window and place the camera stream from each camera in a window
# arranged horizontally.
# The camera streams are each read in their own thread, as when done sequentially there
# is a noticeable lag
# For better performance, the next step would be to experiment with having the window display
# in a separate thread

import cv2
import threading
import numpy as np

# gstreamer_pipeline returns a GStreamer pipeline for capturing from the CSI camera
# Flip the image by setting the flip_method (most common values: 0 and 2)
# display_width and display_height determine the size of each camera pane in the window on the screen

left_camera = None
right_camera = None


class CSI_Camera:

    def __init__(self):
        # Initialize instance variables
        # OpenCV video capture element
        self.video_capture = None
        # The last captured image from the camera
        self.frame = None
        self.grabbed = False
        # The thread where the video capture runs
        self.read_thread = None
        self.read_lock = threading.Lock()
        self.running = False


    def open(self, gstreamer_pipeline_string):
        try:
            self.video_capture = cv2.VideoCapture(
                gstreamer_pipeline_string, cv2.CAP_GSTREAMER
            )
            
        except RuntimeError:
            self.video_capture = None
            print("Unable to open camera")
            print("Pipeline: " + gstreamer_pipeline_string)
            return
        # Grab the first frame to start the video capturing
        self.grabbed, self.frame = self.video_capture.read()

    def start(self):
        if self.running:
            print('Video capturing is already running')
            return None
        # create a thread to read the camera image
        if self.video_capture is not None:
            self.running=True
            self.read_thread = threading.Thread(target=self.updateCamera)
            self.read_thread.start()
        return self

    def stop(self):
        self.running=False
        self.read_thread.join()

    def updateCamera(self):
        # This is the thread to read images from the camera
        while self.running:
            try:
                grabbed, frame = self.video_capture.read()
                with self.read_lock:
                    self.grabbed=grabbed
                    self.frame=frame
            except RuntimeError:
                print("Could not read image from camera")
        # FIX ME - stop and cleanup thread
        # Something bad happened
        

    def read(self):
        with self.read_lock:
            frame = self.frame.copy()
            grabbed=self.grabbed
        return grabbed, frame

    def release(self):
        if self.video_capture is not None:
            self.video_capture.release()
            self.video_capture = None
        # Now kill the thread
        if self.read_thread is not None:
            self.read_thread.join()


# Frame rate for a CSI camera on the Nano is set through the GStreamer pipeline.
# Here we directly select sensor_mode 3 (1280x720, 59.9999 fps)
def gstreamer_pipeline(
    sensor_id=0,
    sensor_mode=3,
    capture_width=1280,
    capture_height=720,
    display_width=1280,
    display_height=720,
    framerate=30,
    flip_method=0,
):
    return (
        "nvarguscamerasrc sensor-id=%d sensor-mode=%d ! "
        "video/x-raw(memory:NVMM), "
        "width=(int)%d, height=(int)%d, "
        "format=(string)NV12, framerate=(fraction)%d/1 ! "
        "nvvidconv flip-method=%d ! "
        "video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! "
        "videoconvert ! "
        "video/x-raw, format=(string)BGR ! appsink"
        % (
            sensor_id,
            sensor_mode,
            capture_width,
            capture_height,
            framerate,
            flip_method,
            display_width,
            display_height,
        )
    )


def start_cameras():
    left_camera = CSI_Camera()
    left_camera.open(
        gstreamer_pipeline(
            sensor_id=0,
            sensor_mode=3,
            flip_method=0,
            display_height=540,
            display_width=960,
        )
    )
    left_camera.start()

    right_camera = CSI_Camera()
    right_camera.open(
        gstreamer_pipeline(
            sensor_id=1,
            sensor_mode=3,
            flip_method=0,
            display_height=540,
            display_width=960,
        )
    )
    right_camera.start()

    cv2.namedWindow("CSI Cameras", cv2.WINDOW_AUTOSIZE)

    if (
        not left_camera.video_capture.isOpened()
        or not right_camera.video_capture.isOpened()
    ):
        # Cameras did not open, or no camera attached

        print("Unable to open any cameras")
        
        # TODO: Proper Cleanup
        raise SystemExit(0)

    while cv2.getWindowProperty("CSI Cameras", 0) >= 0:

        _, left_image = left_camera.read()
        _, right_image = right_camera.read()
        camera_images = np.hstack((left_image, right_image))
        cv2.imshow("CSI Cameras", camera_images)

        # This also acts as a ~30 ms per-frame delay for the display loop
        keyCode = cv2.waitKey(30) & 0xFF
        # Stop the program on the ESC key
        if keyCode == 27:
            break

    left_camera.stop()
    left_camera.release()
    right_camera.stop()
    right_camera.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    start_cameras()
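
If you hit the "unable to start pipeline" warning shown at the top of this post, it helps to test each sensor-id on its own before running the dual-camera script. Here is a small sketch that reuses the gstreamer_pipeline() function defined above (drop it into the same file and call it instead of start_cameras()); sensor-ids 0 and 1 are an assumption matching a two-camera Nano B01 carrier board:

import cv2

# Probe each CSI sensor individually and report whether its pipeline opens
for sensor_id in (0, 1):
    cap = cv2.VideoCapture(
        gstreamer_pipeline(sensor_id=sensor_id, sensor_mode=3), cv2.CAP_GSTREAMER
    )
    if cap.isOpened():
        ok, frame = cap.read()
        print("sensor-id %d: opened, first frame %s"
              % (sensor_id, frame.shape if ok else "not grabbed"))
    else:
        print("sensor-id %d: could not open pipeline" % sensor_id)
    cap.release()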
    
