学校终于解封了,看了个知乎上的人日合订本,咱们好好记录这个时代吧。
Yahboom 采用的是 jetbot 中 camera.py 脚本定义的各个接口,同样基于 OpenCV。不过该脚本原本是为了在 Jupyter Notebook 中显示画面而写的;这里参照他人的写法,另写了一段直接显示摄像头捕捉画面的代码,具体如下:
import cv2
def gstreamer_pipeline(
    capture_width=640,
    capture_height=640,
    display_width=224,
    display_height=224,
    framerate=24,
    flip_method=0,
):
    """Build the GStreamer pipeline string for a Jetson CSI camera.

    Captures at capture_width x capture_height @ framerate fps via
    nvarguscamerasrc, then nvvidconv flips (flip_method) and rescales the
    frames to display_width x display_height, which videoconvert turns
    into BGR for OpenCV's appsink.
    """
    return (
        f"nvarguscamerasrc ! "
        f"video/x-raw(memory:NVMM), "
        f"width=(int){capture_width}, height=(int){capture_height}, "
        f"format=(string)NV12, framerate=(fraction){framerate}/1 ! "
        f"nvvidconv flip-method={flip_method} ! "
        f"video/x-raw, width=(int){display_width}, "
        f"height=(int){display_height}, format=(string)BGRx ! "
        f"videoconvert ! "
        f"video/x-raw, format=(string)BGR ! appsink"
    )
def show_camera():
    """Open the CSI camera via GStreamer and preview frames until 'q' is pressed.

    Fixes over the original: a failed `cap.read()` no longer passes a None
    frame to `cv2.imshow` (which would raise), and the capture/window are
    always released even if the loop body raises.
    """
    cap = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)
    if not cap.isOpened():
        print("Unable to open CSI camera")
        return
    try:
        while True:
            flag, img = cap.read()
            if not flag:
                # Camera disconnected or pipeline ended; img would be None.
                break
            cv2.imshow("CSI Camera", img)
            kk = cv2.waitKey(1)
            # do other things
            if kk == ord('q'):
                break
    finally:
        cap.release()
        cv2.destroyAllWindows()
# NOTE: the triple-quoted string below is never executed; it preserves the
# original jetbot/Jupyter-notebook display snippet for reference only.
'''
from jetbot import Camera
camera = Camera.instance(width=224, height=224)
import ipywidgets.widgets as widgets
image = widgets.Image(format='jpeg', width=224, height=224)
display(image)
from jetbot import bgr8_to_jpeg
import traitlets
camera_link = traitlets.dlink((camera, 'value'), (image, 'value'), transform=bgr8_to_jpeg)
'''
# Script entry point: start the live camera preview.
if __name__ == "__main__":
    show_camera()
原始的camera.py代码内容:
import traitlets
from traitlets.config.configurable import SingletonConfigurable
import atexit
import cv2
import threading
import numpy as np
class Camera(SingletonConfigurable):
    """Singleton CSI-camera wrapper (jetbot camera.py).

    A background thread continuously reads frames from a GStreamer pipeline
    and publishes the newest BGR frame on the ``value`` trait, so observers
    (e.g. traitlets dlinks) always see the latest image.

    Fixes over the original: ``Thread.isAlive()`` (removed in Python 3.9)
    is replaced by ``is_alive()``, and the bare ``except:`` in ``__init__``
    is narrowed to ``Exception`` with the cause chained.
    """

    # Latest captured frame; seeded with an empty array in __init__.
    value = traitlets.Any()

    # config (overridable via traitlets configuration)
    width = traitlets.Integer(default_value=224).tag(config=True)
    height = traitlets.Integer(default_value=224).tag(config=True)
    fps = traitlets.Integer(default_value=21).tag(config=True)
    capture_width = traitlets.Integer(default_value=3280).tag(config=True)
    capture_height = traitlets.Integer(default_value=2464).tag(config=True)

    def __init__(self, *args, **kwargs):
        # Seed `value` so observers always receive an ndarray, even before
        # the first successful read.
        self.value = np.empty((self.height, self.width, 3), dtype=np.uint8)
        super(Camera, self).__init__(*args, **kwargs)
        try:
            self.cap = cv2.VideoCapture(self._gst_str(), cv2.CAP_GSTREAMER)
            re, image = self.cap.read()
            if not re:
                raise RuntimeError('Could not read image from camera.')
            self.value = image
            self.start()
        except Exception as exc:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; chain the cause for easier debugging.
            self.stop()
            raise RuntimeError(
                'Could not initialize camera. Please see error trace.') from exc
        atexit.register(self.stop)

    def _capture_frames(self):
        # Runs in the background thread: publish the newest frame until a
        # read fails (which also happens after `cap.release()` in stop()).
        while True:
            re, image = self.cap.read()
            if re:
                self.value = image
            else:
                break

    def _gst_str(self):
        """Return the GStreamer pipeline string for the configured geometry."""
        return 'nvarguscamerasrc ! video/x-raw(memory:NVMM), width=%d, height=%d, format=(string)NV12, framerate=(fraction)%d/1 ! nvvidconv ! video/x-raw, width=(int)%d, height=(int)%d, format=(string)BGRx ! videoconvert ! appsink' % (
                self.capture_width, self.capture_height, self.fps, self.width, self.height)

    def start(self):
        """(Re)open the capture and launch the frame-reader thread if needed."""
        if not self.cap.isOpened():
            self.cap.open(self._gst_str(), cv2.CAP_GSTREAMER)
        # `isAlive()` was removed in Python 3.9; `is_alive()` is the
        # long-standing spelling and works on all supported versions.
        if not hasattr(self, 'thread') or not self.thread.is_alive():
            self.thread = threading.Thread(target=self._capture_frames)
            self.thread.start()

    def stop(self):
        """Release the capture; the reader thread then exits and is joined."""
        if hasattr(self, 'cap'):
            self.cap.release()
        if hasattr(self, 'thread'):
            self.thread.join()

    def restart(self):
        """Stop and restart capture and reader thread."""
        self.stop()
        self.start()