NVIDIA forum link: DeepStream v5 bug | NvDufSurface memory leakage - DeepStream SDK - NVIDIA Developer Forums
Using this method inside the bbox callback works fine with a single thread, but with multi-threaded access it is not thread-safe and needs a lock. Even with the lock there is still a memory leak, so manage the destination memory yourself (see the locking sketch after this snippet).
NvBufSurfaceMap (GLOBAL_SURF, -1, -1, NVBUF_MAP_READ);
// NvBufSurfaceSyncForCpu(surface, 0, 0);
NvBufSurfacePlaneParams *pParams = &GLOBAL_SURF->surfaceList[frame_meta->batch_id].planeParams;
unsigned int offset = 0;
for (unsigned int plane = 0; plane < pParams->num_planes; plane++) {
  if (plane > 0)
    offset += pParams->height[plane - 1] * pParams->bytesPerPix[plane - 1] * pParams->width[plane - 1];
  /* Copy each plane row by row: the destination is tightly packed,
   * the source is pitch-aligned, so only width * bytesPerPix bytes per row are copied. */
  for (unsigned int h = 0; h < pParams->height[plane]; h++) {
    memcpy ((char *) src_data + offset + h * pParams->bytesPerPix[plane] * pParams->width[plane],
        (char *) GLOBAL_SURF->surfaceList[frame_meta->batch_id].mappedAddr.addr[plane] + h * pParams->pitch[plane],
        pParams->bytesPerPix[plane] * pParams->width[plane]);
  }
}
NvBufSurfaceUnMap (GLOBAL_SURF, -1, -1);
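If several threads hit this copy path at the same time, the map/copy/unmap sequence on the shared GLOBAL_SURF has to be serialized. Below is a minimal sketch of one way to do that, assuming a process-wide GLOBAL_SURF shared by several probe threads; surf_copy_lock and copy_frame_locked are names introduced here for illustration, not from the original code.

#include <string.h>
#include <glib.h>
#include "nvbufsurface.h"

extern NvBufSurface *GLOBAL_SURF;   /* the shared surface from the post */
static GMutex surf_copy_lock;       /* hypothetical lock guarding GLOBAL_SURF */

/* Copies one batch entry of GLOBAL_SURF into a tightly packed buffer `dst`,
 * holding surf_copy_lock around the whole map/copy/unmap sequence. */
static void
copy_frame_locked (unsigned int batch_id, char *dst)
{
  g_mutex_lock (&surf_copy_lock);
  if (NvBufSurfaceMap (GLOBAL_SURF, -1, -1, NVBUF_MAP_READ) == 0) {
    NvBufSurfacePlaneParams *p = &GLOBAL_SURF->surfaceList[batch_id].planeParams;
    unsigned int offset = 0;
    for (unsigned int plane = 0; plane < p->num_planes; plane++) {
      if (plane > 0)
        offset += p->height[plane - 1] * p->bytesPerPix[plane - 1] * p->width[plane - 1];
      for (unsigned int h = 0; h < p->height[plane]; h++)
        memcpy (dst + offset + h * p->bytesPerPix[plane] * p->width[plane],
            (char *) GLOBAL_SURF->surfaceList[batch_id].mappedAddr.addr[plane] + h * p->pitch[plane],
            p->bytesPerPix[plane] * p->width[plane]);
    }
    NvBufSurfaceUnMap (GLOBAL_SURF, -1, -1);
  }
  g_mutex_unlock (&surf_copy_lock);
}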
To reduce the leak further, allocate one destination buffer up front and keep copying into that same block with the copy method, instead of allocating a new buffer for every frame.
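A minimal sketch of that "allocate once, reuse" idea; g_frame_copy and MAX_FRAME_BYTES are names introduced here for illustration and are sized for the 1280x720 RGBA surface created in the code below.

#include <stdlib.h>

#define MAX_FRAME_BYTES (1280 * 720 * 4)   /* 1280x720 RGBA, matching the surface below */

static char *g_frame_copy = NULL;          /* single long-lived destination buffer */

/* Returns the shared destination buffer, allocating it on first use only.
 * Every frame is copied into this same block instead of a fresh malloc(). */
static char *
get_frame_copy_buffer (void)
{
  if (g_frame_copy == NULL)
    g_frame_copy = (char *) malloc (MAX_FRAME_BYTES);
  return g_frame_copy;
}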
Below is the first half of the full code; feel free to pick out whichever parts you need.
static void
all_bbox_generated (AppCtx * appCtx, GstBuffer * buf,
NvDsBatchMeta * batch_meta, guint index)
{
  g_mutex_lock (&G_redis_lock);
  struct timeval tv;
  gettimeofday (&tv, NULL);
  gint64 curentTimeMs = tv.tv_sec * 1000 + tv.tv_usec / 1000;
  // if (curentTimeMs - appCtx->kafka_last_send_time < 10) {
  //   return;
  // }
  char *rtmp_server = appCtx->config.sink_bin_sub_bin_config[0].encoder_config.rtmp_server;
  appCtx->kafka_last_send_time = curentTimeMs;
  guint num_male = 0;
  guint num_female = 0;
  guint num_objects[128];
  memset (num_objects, 0, sizeof (num_objects));
  GstMapInfo in_map_info;
  NvBufSurface *surface = NULL;
  if (gst_buffer_map (buf, &in_map_info, GST_MAP_READ) == FALSE) {
    printf ("gst_buffer_map failed!\n");
    /* Nothing was mapped, so do not unmap; just release the lock and bail out. */
    g_mutex_unlock (&G_redis_lock);
    return;
  }
  surface = (NvBufSurface *) in_map_info.data;
  if (GLOBAL_SURF == NULL) {
    NvBufSurfaceCreateParams create_params;
    /* An intermediate buffer for NV12/RGBA to BGR conversion will be
     * required. Can be skipped if the custom algorithm can work directly on NV12/RGBA. */
    memset (&create_params, 0, sizeof (create_params));
    create_params.gpuId = 0;
    create_params.width = 1280;
    create_params.height = 720;
    //create_params.size = surface->surfaceList[0].dataSize;
    create_params.colorFormat = NVBUF_COLOR_FORMAT_RGBA;
    create_params.layout = NVBUF_LAYOUT_PITCH;
    create_params.memType = NVBUF_MEM_DEFAULT;
    if (NvBufSurfaceCreate (&GLOBAL_SURF, 1, &create_params) != 0) {
      printf ("NvBufSurfaceCreate error\n");
      gst_buffer_unmap (buf, &in_map_info);
      g_mutex_unlock (&G_redis_lock);
      return;
    }
    GLOBAL_SURF->numFilled = 1;
    printf ("NvBufSurfaceCreate called\n");
  }
  if (NvBufSurfaceCopy (surface, GLOBAL_SURF) != 0) {
    printf ("NvBufSurfaceCopy error\n");
    gst_buffer_unmap (buf, &in_map_info);
    g_mutex_unlock (&G_redis_lock);
    return;
  }
  for (NvDsMetaList * l_frame = batch_meta->frame_meta_list; l_frame != NULL;
      l_frame = l_frame->next) {
    NvDsFrameMeta *frame_meta = l_frame->data;
    /* Per-frame destination buffer: it must be freed once the copy has been
     * consumed, or replaced with the single preallocated buffer suggested above. */
    char *src_data = (char *) malloc (surface->surfaceList[frame_meta->batch_id].dataSize);
    if (src_data == NULL) {
      g_print ("Error: failed to malloc src_data\n");
      continue;
    }
    NvBufSurfaceMap (GLOBAL_SURF, -1, -1, NVBUF_MAP_READ);
    // NvBufSurfaceSyncForCpu(surface, 0, 0);
    NvBufSurfacePlaneParams *pParams = &GLOBAL_SURF->surfaceList[frame_meta->batch_id].planeParams;
    unsigned int offset = 0;
    for (unsigned int plane = 0; plane < pParams->num_planes; plane++) {
      if (plane > 0)
        offset += pParams->height[plane - 1] * pParams->bytesPerPix[plane - 1] * pParams->width[plane - 1];
      /* Copy each plane row by row: the destination is tightly packed,
       * the source is pitch-aligned. */
      for (unsigned int h = 0; h < pParams->height[plane]; h++) {
        memcpy (src_data + offset + h * pParams->bytesPerPix[plane] * pParams->width[plane],
            (char *) GLOBAL_SURF->surfaceList[frame_meta->batch_id].mappedAddr.addr[plane] + h * pParams->pitch[plane],
            pParams->bytesPerPix[plane] * pParams->width[plane]);
      }
    }
    NvBufSurfaceUnMap (GLOBAL_SURF, -1, -1);
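    /* The excerpt stops here (only the first half of the function is shown).
     * The remainder is assumed to free src_data once the copy is consumed and,
     * on every exit path, call gst_buffer_unmap (buf, &in_map_info) and
     * g_mutex_unlock (&G_redis_lock); otherwise the leak and lock persist. */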