场景:在检测识别到的图像内容时,保存一张图片。
首先看官方的获取图像的代码。
/* Copy one frame of a batched NVMM surface into host memory plane by plane
 * (the Jetson path: the surface is CPU-mapped instead of using cudaMemcpy).
 * Assumes `surface`, `GLOBAL_SURF`, `frame_meta` and the destination buffer
 * `src_data` are defined by the surrounding code — confirm against caller. */
NvBufSurfaceMap (surface, -1, -1, NVBUF_MAP_READ);
/* NOTE(review): on Jetson an NvBufSurfaceSyncForCpu() call is normally
 * required after mapping so the CPU sees the latest device writes —
 * confirm whether leaving it commented out here is intentional. */
// NvBufSurfaceSyncForCpu(surface, 0, 0);
/* Per-plane geometry (num_planes, width/height/pitch/bytesPerPix) for this
 * batch entry. */
NvBufSurfacePlaneParams *pParams = &GLOBAL_SURF->surfaceList[frame_meta->batch_id].planeParams;
/* Running byte offset of the current plane inside the packed destination. */
unsigned int offset = 0;
for(unsigned int num_planes=0; num_planes < pParams->num_planes; num_planes++){
/* Advance past the previous plane; the destination is tightly packed
 * (width * bytesPerPix per row, no pitch padding). */
if(num_planes>0)
offset += pParams->height[num_planes-1]*(pParams->bytesPerPix[num_planes-1]*pParams->width[num_planes-1]);
/* Copy row by row: source rows in the mapped address are `pitch` bytes
 * apart, but only width * bytesPerPix of each row is payload, so this
 * strips the pitch padding while copying. */
for (unsigned int h = 0; h < pParams->height[num_planes]; h++) {
memcpy((void *)(src_data+offset+h*pParams->bytesPerPix[num_planes]*pParams->width[num_planes]),
(void *)((char *)GLOBAL_SURF->surfaceList[frame_meta->batch_id].mappedAddr.addr[num_planes]+h*pParams->pitch[num_planes]),
pParams->bytesPerPix[num_planes]*pParams->width[num_planes]
);
}
}
/* Release the CPU mapping (-1, -1 = all batch entries, all planes). */
NvBufSurfaceUnMap (surface, -1, -1);
和
// cudaMemcpy((void *)src_data,
// (void *)surface->surfaceList[frame_meta->batch_id].dataPtr,
// surface->surfaceList[frame_meta->batch_id].dataSize,
// cudaMemcpyDeviceToHost);
注意事项:
1、如果你是DGPU,用cudaMemcpy
2、在jetson里面,没有独立的显存不能使用cudaMemcpy,建议使用第一种方式
3、注意通道,在deepstream5.0里面都是yuv的格式,在6.0里面都是RGBA,需要注意通道
/* Wrap the host copy of the RGBA frame without copying; `frame_step` is the
 * surface pitch, which may be larger than frame_width * 4. */
cv::Mat frame = cv::Mat(frame_height , frame_width, CV_8UC4, src_data, frame_step);
cv::Mat out_mat = cv::Mat(cv::Size(frame_width, frame_height), CV_8UC3);
/* NOTE(review): CV_RGBA2BGR is the legacy constant; OpenCV 4 spells it
 * cv::COLOR_RGBA2BGR — confirm which OpenCV version this builds against. */
cv::cvtColor(frame, out_mat, CV_RGBA2BGR);
yuv格式的,其他博客很多人有就不加了
二、整段代码参考:
/* Pad probe on the tiler src pad: for every frame in the batch, hides the
 * bounding boxes of PGIE "person" detections, copies the frame's device
 * buffer to host memory (dGPU path, cudaMemcpy), and draws a demo text/rect
 * overlay via add_text()/add_rect().
 *
 * pad    : the probed pad (unused).
 * info   : probe info; info->data is the GstBuffer being pushed.
 * u_data : user data (unused).
 * Returns GST_PAD_PROBE_OK so the buffer always continues downstream.
 */
static GstPadProbeReturn
tiler_src_pad_buffer_probe (GstPad * pad, GstPadProbeInfo * info,
gpointer u_data)
{
  GstBuffer *buf = (GstBuffer *) info->data;
  GstMapInfo in_map_info;
  NvBufSurface *surface = NULL;
  NvDsObjectMeta *obj_meta = NULL;
  NvDsMetaList *l_frame = NULL;
  NvDsMetaList *l_obj = NULL;
  NvDsBatchMeta *batch_meta = gst_buffer_get_nvds_batch_meta (buf);

  /* Map the buffer ONCE for the whole batch. The original mapped it inside
   * the per-frame loop and never unmapped it on success, leaking the mapping
   * on every buffer. */
  if (!gst_buffer_map (buf, &in_map_info, GST_MAP_READ)) {
    g_print ("gst_buffer_map() error!");
    /* Bug fix: the original used a bare `return;`, which is invalid in a
     * function returning GstPadProbeReturn (and unmapped an unmapped buffer). */
    return GST_PAD_PROBE_OK;
  }
  surface = (NvBufSurface *) in_map_info.data;

  for (l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = (NvDsFrameMeta *) (l_frame->data);

    /* Suppress box border and label for detected persons. */
    for (l_obj = frame_meta->obj_meta_list; l_obj != NULL; l_obj = l_obj->next) {
      obj_meta = (NvDsObjectMeta *) (l_obj->data);
      if (obj_meta->class_id == PGIE_CLASS_ID_PERSON) {
        obj_meta->rect_params.border_color.alpha = 0;
        obj_meta->text_params.display_text = NULL;
      }
    }

    /* dGPU path: copy this frame's pixels from device to host. On Jetson
     * (no discrete VRAM) use NvBufSurfaceMap + per-plane memcpy instead. */
    void *src_data = malloc (surface->surfaceList[frame_meta->batch_id].dataSize);
    if (src_data == NULL) {
      g_print ("Error: failed to malloc src_data \n");
      /* Bug fix: the original only printed and then kept using the NULL
       * pointer; skip this frame instead. */
      continue;
    }
    cudaError_t err = cudaMemcpy ((void *) src_data,
        (void *) surface->surfaceList[frame_meta->batch_id].dataPtr,
        surface->surfaceList[frame_meta->batch_id].dataSize,
        cudaMemcpyDeviceToHost);
    if (err != cudaSuccess)
      g_print ("cudaMemcpy failed: %s\n", cudaGetErrorString (err));
    printf ("%d", surface->memType);
    printf ("%p\n", src_data);

    char *text = "test hhhhh";
    add_text (60, 180, text, batch_meta, frame_meta);
    add_rect (60, 180, 200, 200, batch_meta, frame_meta);

    /* Bug fix: the original leaked src_data on every frame. */
    free (src_data);
  }

  /* Bug fix: release the mapping acquired above (never done in the original
   * success path). */
  gst_buffer_unmap (buf, &in_map_info);
  return GST_PAD_PROBE_OK;
}
保存图片:
/* Save-image fragment: wraps the host RGBA copy in a cv::Mat, converts to
 * BGR, downscales by 2x, and (commented out) dumps/encodes the result.
 * Assumes `surface`, `frame_meta` and `src_data` come from the probe above
 * — confirm against caller. */
gint frame_width = (gint)surface->surfaceList[frame_meta->batch_id].width;
gint frame_height = (gint)surface->surfaceList[frame_meta->batch_id].height;
/* Real row stride of the copied buffer; may exceed frame_width * 4. */
size_t frame_step = surface->surfaceList[frame_meta->batch_id].pitch;
gint img_width = frame_width / 2;
gint img_height = frame_height / 2;
/* No-copy wrapper over src_data using the surface pitch as the Mat step. */
cv::Mat frame = cv::Mat(frame_height , frame_width, CV_8UC4, src_data, frame_step);
cv::Mat out_mat = cv::Mat(cv::Size(frame_width, frame_height), CV_8UC3);
/* NOTE(review): DeepStream 6.x surfaces are RGBA, 5.x are YUV — this
 * conversion only matches the RGBA case. CV_RGBA2BGR is the legacy name
 * for cv::COLOR_RGBA2BGR. */
cv::cvtColor(frame, out_mat, CV_RGBA2BGR);
//cv::Size dsize = cv::Size(round(0.5 * frame_width), round(0.5 * frame_height));
cv::Size dsize = cv::Size(img_width, img_height);
cv::Mat new_mat;
/* INTER_AREA is the usual choice for shrinking. */
cv::resize(out_mat, new_mat, dsize, 0, 0, cv::INTER_AREA);
// char *img_data = (char *)malloc(img_width * img_height * 3);
// memcpy(img_data, new_mat.data, img_width * img_height * 3);
// FILE *fp = fopen("1.txt","w");
// fwrite(img_data,1,640*360*3,fp);
// fclose(fp);
// cv::imwrite("1.png", new_mat);
参考的nvidia社区的内容如下
Access frame pointer in deepstream-app - DeepStream SDK - NVIDIA Developer Forums