A question about a bug in my TensorRT inference code


void infer(xxxxxxxxxxxxxxxxxxx)
{
    // static: the allocation runs on the first call only; the same
    // pointer is reused on every subsequent call.
    static int* num_dets = new int[out_size1];

    // Copy the input blob host -> device.
    cudaError_t state =
        cudaMemcpyAsync(buffs[0], &blob[0], in_size * sizeof(float), cudaMemcpyHostToDevice, stream);
    if (state != cudaSuccess) {
        std::cout << "transmit to device failed\n";
        std::abort();
    }

    // Enqueue inference on the stream.
    context->enqueueV2(&buffs[0], stream, nullptr);

    // Copy the detection count device -> host.
    state =
        cudaMemcpyAsync(num_dets, buffs[1], out_size1 * sizeof(int), cudaMemcpyDeviceToHost, stream);
    if (state != cudaSuccess) {
        std::cout << "transmit to host failed\n";
        std::abort();
    }

    // Block until all work queued on the stream has finished.
    cudaStreamSynchronize(stream);

    delete blob;
    delete num_dets;
}

As soon as I add the line delete num_dets; the program crashes.

Why is that? By rights the stream synchronization already guarantees that everything queued before it has finished running, doesn't it?
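The synchronization ordering is indeed fine within a single call; the more likely culprit is the static storage class. The new int[out_size1] initializer runs only on the first call, while delete num_dets; runs on every call, so from the second call onward cudaMemcpyAsync writes into freed memory and the pointer is then freed a second time (and in any case a new[] allocation should be paired with delete[], not delete). A minimal sketch, with no CUDA or TensorRT involved and a hypothetical infer_like() stand-in, that reproduces the same pattern:

// Hypothetical reduction of the bug: same static-pointer pattern as infer().
void infer_like()
{
    static int* num_dets = new int[10];  // initializer runs on the FIRST call only
    num_dets[0] = 0;                     // 2nd call onward: write through a dangling pointer
    delete[] num_dets;                   // runs on EVERY call: double free on the 2nd call
}

int main()
{
    infer_like();  // fine
    infer_like();  // undefined behavior; typically aborts or corrupts the heap
}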

 

Separately, why is it that, over the whole inference run, this num_dets variable was never manually destroyed and yet no memory leak showed up either?
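Presumably because the static pointer is allocated exactly once for the lifetime of the process, the allocation never grows with the number of calls; leak checkers typically report it as "still reachable" rather than leaked, and the OS reclaims it at exit anyway. One way to remove the manual ownership entirely is sketched below, assuming out_size1 is a fixed size (the constant and the stripped-down signature are hypothetical): let a function-local static std::vector own the host buffer, so it is allocated once, reused across calls, and destroyed automatically at program exit with nothing to delete by hand.

#include <vector>

constexpr int out_size1 = 100;  // hypothetical size, for illustration only

// Sketch of infer() with the ownership problem removed.
void infer()
{
    static std::vector<int> num_dets(out_size1);

    // ... enqueueV2 + cudaMemcpyAsync(num_dets.data(), buffs[1],
    //     out_size1 * sizeof(int), cudaMemcpyDeviceToHost, stream) ...
    // ... cudaStreamSynchronize(stream) ...

    num_dets[0] = 0;  // safe on every call
}

int main()
{
    infer();
    infer();  // no dangling pointer, no double free
}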
