WebRTC VideoEngine Application Examples (2): Integrating the OpenH264 Codec

This series currently consists of three articles, with more to come:

WebRTC VideoEngine Application Examples (1): The Basic Video Call Flow

WebRTC VideoEngine Application Examples (2): Integrating the OpenH264 Codec

WebRTC VideoEngine Application Examples (3): Integrating x264 Encoding and FFmpeg Decoding

The accompanying source code (a project that can be built and run on its own, independent of the full WebRTC tree) is available through the author's WeChat official account: reply "webrtc视频通话" for the address.


Overview

WebRTC natively supports VP8 and VP9, but an H.264 codec can also be integrated by hand; the common choices are OpenH264 and x264 (x264 itself only encodes, so decoding is usually added by pairing it with FFmpeg). Overall, integrating an H.264 codec looks much like using these libraries directly, except that the encoding and decoding functionality must first be re-wrapped to match WebRTC's codec interfaces, and the wrappers then registered as external codecs so the main flow can use them.

Let's first look at how WebRTC wraps a codec. The interfaces are defined in webrtc\modules\video_coding\codecs\interface\video_codec_interface.h, as shown below.

VideoEncoder

 

class EncodedImageCallback
{
public:
    virtual ~EncodedImageCallback() {};

    // Callback function which is called when an image has been encoded.
    //
    // Input:
    //          - encodedImage         : The encoded image
    //
    // Return value                    : > 0,   signals to the caller that one or more future frames
    //                                          should be dropped to keep bit rate or frame rate.
    //                                   = 0,   if OK.
    //                                   < 0,   on error.
    virtual int32_t
    Encoded(EncodedImage& encodedImage,
            const CodecSpecificInfo* codecSpecificInfo = NULL,
            const RTPFragmentationHeader* fragmentation = NULL) = 0;
};

class VideoEncoder
{
public:
    virtual ~VideoEncoder() {};

    // Initialize the encoder with the information from the VideoCodec.
    //
    // Input:
    //          - codecSettings     : Codec settings
    //          - numberOfCores     : Number of cores available for the encoder
    //          - maxPayloadSize    : The maximum size each payload is allowed
    //                                to have. Usually MTU - overhead.
    //
    // Return value                 : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    virtual int32_t InitEncode(const VideoCodec* codecSettings, int32_t numberOfCores, uint32_t maxPayloadSize) = 0;

    // Encode an I420 image (as a part of a video stream). The encoded image
    // will be returned to the user through the encode complete callback.
    //
    // Input:
    //          - inputImage        : Image to be encoded
    //          - codecSpecificInfo : Pointer to codec specific data
    //          - frame_types        : The frame type to encode
    //
    // Return value                 : WEBRTC_VIDEO_CODEC_OK if OK, < 0
    //                                otherwise.
    virtual int32_t Encode(
        const I420VideoFrame& inputImage,
        const CodecSpecificInfo* codecSpecificInfo,
        const std::vector<VideoFrameType>* frame_types) = 0;

    // Register an encode complete callback object.
    //
    // Input:
    //          - callback         : Callback object which handles encoded images.
    //
    // Return value                : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    virtual int32_t RegisterEncodeCompleteCallback(EncodedImageCallback* callback) = 0;

    // Free encoder memory.
    //
    // Return value                : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    virtual int32_t Release() = 0;

    // Inform the encoder about the packet loss and round trip time on the
    // network used to decide the best pattern and signaling.
    //
    //          - packetLoss       : Fraction lost (loss rate in percent =
    //                               100 * packetLoss / 255)
    //          - rtt              : Round-trip time in milliseconds
    //
    // Return value                : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    virtual int32_t SetChannelParameters(uint32_t packetLoss, int rtt) = 0;

    // Inform the encoder about the new target bit rate.
    //
    //          - newBitRate       : New target bit rate
    //          - frameRate        : The target frame rate
    //
    // Return value                : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    virtual int32_t SetRates(uint32_t newBitRate, uint32_t frameRate) = 0;

    // Use this function to enable or disable periodic key frames. Can be useful for codecs
    // which have other ways of stopping error propagation.
    //
    //          - enable           : Enable or disable periodic key frames
    //
    // Return value                : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    virtual int32_t SetPeriodicKeyFrames(bool enable) { return WEBRTC_VIDEO_CODEC_ERROR; }

    // Codec configuration data to send out-of-band, i.e. in SIP call setup
    //
    //          - buffer           : Buffer pointer to where the configuration data
    //                               should be stored
    //          - size             : The size of the buffer in bytes
    //
    // Return value                : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    virtual int32_t CodecConfigParameters(uint8_t* /*buffer*/, int32_t /*size*/) { return WEBRTC_VIDEO_CODEC_ERROR; }
};

VideoDecoder


class DecodedImageCallback
{
public:
    virtual ~DecodedImageCallback() {};

    // Callback function which is called when an image has been decoded.
    //
    // Input:
    //          - decodedImage         : The decoded image.
    //
    // Return value                    : 0 if OK, < 0 otherwise.
    virtual int32_t Decoded(I420VideoFrame& decodedImage) = 0;

    virtual int32_t ReceivedDecodedReferenceFrame(const uint64_t pictureId) {return -1;}

    virtual int32_t ReceivedDecodedFrame(const uint64_t pictureId) {return -1;}
};

class VideoDecoder
{
public:
    virtual ~VideoDecoder() {};

    // Initialize the decoder with the information from the VideoCodec.
    //
    // Input:
    //          - inst              : Codec settings
    //          - numberOfCores     : Number of cores available for the decoder
    //
    // Return value                 : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    virtual int32_t InitDecode(const VideoCodec* codecSettings, int32_t numberOfCores) = 0;

    // Decode encoded image (as a part of a video stream). The decoded image
    // will be returned to the user through the decode complete callback.
    //
    // Input:
    //          - inputImage        : Encoded image to be decoded
    //          - missingFrames     : True if one or more frames have been lost
    //                                since the previous decode call.
    //          - fragmentation     : Specifies where the encoded frame can be
    //                                split into separate fragments. The meaning
    //                                of fragment is codec specific, but often
    //                                means that each fragment is decodable by
    //                                itself.
    //          - codecSpecificInfo : Pointer to codec specific data
    //          - renderTimeMs      : System time to render in milliseconds. Only
    //                                used by decoders with internal rendering.
    //
    // Return value                 : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    virtual int32_t
    Decode(const EncodedImage& inputImage,
           bool missingFrames,
           const RTPFragmentationHeader* fragmentation,
           const CodecSpecificInfo* codecSpecificInfo = NULL,
           int64_t renderTimeMs = -1) = 0;

    // Register an decode complete callback object.
    //
    // Input:
    //          - callback         : Callback object which handles decoded images.
    //
    // Return value                : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    virtual int32_t RegisterDecodeCompleteCallback(DecodedImageCallback* callback) = 0;

    // Free decoder memory.
    //
    // Return value                : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    virtual int32_t Release() = 0;

    // Reset decoder state and prepare for a new call.
    //
    // Return value                : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    virtual int32_t Reset() = 0;

    // Codec configuration data sent out-of-band, i.e. in SIP call setup
    //
    // Input/Output:
    //          - buffer           : Buffer pointer to the configuration data
    //          - size             : The size of the configuration data in
    //                               bytes
    //
    // Return value                : WEBRTC_VIDEO_CODEC_OK if OK, < 0 otherwise.
    virtual int32_t SetCodecConfigParameters(const uint8_t* /*buffer*/, int32_t /*size*/) { return WEBRTC_VIDEO_CODEC_ERROR; }

    // Create a copy of the codec and its internal state.
    //
    // Return value                : A copy of the instance if OK, NULL otherwise.
    virtual VideoDecoder* Copy() { return NULL; }
};


For WebRTC's natively supported VP8 codec, see webrtc\modules\video_coding\codecs\vp8\vp8_impl.h and vp8_impl.cc: the VP8EncoderImpl and VP8DecoderImpl classes defined there derive from VideoEncoder and VideoDecoder respectively, adding a number of private members and helper functions. It follows that, to integrate an H.264 codec, we should provide analogous H264EncoderImpl and H264DecoderImpl classes.

This article walks through OpenH264 as the worked example.
First, build the welsenc.lib and welsdec.lib libraries from the OpenH264 project, then add the four headers codec_api.h, codec_app_def.h, codec_def.h, and codec_ver.h, plus the three DLLs welsdec.dll, welsenc.dll, and welsvp.dll, to the project directory, and configure the project properties accordingly.
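
As a concrete illustration, here is one way to wire those files into a Visual Studio project (the include paths are my assumptions for a typical layout, not something OpenH264 mandates):

// Hypothetical project wiring for the OpenH264 binaries described above;
// adjust the paths to wherever you placed the headers and libraries.
#include "openh264/codec_api.h"      // ISVCEncoder / ISVCDecoder
#include "openh264/codec_app_def.h"  // SEncParamBase, SFrameBSInfo, ...

// Link the import libraries built from the OpenH264 project (equivalently,
// add them under Linker > Input > Additional Dependencies).
#pragma comment(lib, "welsenc.lib")
#pragma comment(lib, "welsdec.lib")

// welsenc.dll, welsdec.dll and welsvp.dll must sit next to the executable
// (or on PATH) at run time.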

Re-wrapping the encoding functionality

Starting with the encoder, define the H264EncoderImpl class as follows. The key addition is the OpenH264 encoder core, ISVCEncoder* encoder_;

 

class H264EncoderImpl : public VideoEncoder{
 public:
  H264EncoderImpl();

  ~H264EncoderImpl();

  int Release();

  int InitEncode(const VideoCodec* codec_settings,
                         int number_of_cores,
                         size_t max_payload_size);

  int Encode(const I420VideoFrame& input_image,
                     const CodecSpecificInfo* codec_specific_info,
                     const std::vector<VideoFrameType>* frame_types);

  int RegisterEncodeCompleteCallback(EncodedImageCallback* callback);

  int SetChannelParameters(uint32_t packet_loss, int rtt);

  int SetRates(uint32_t new_bitrate_kbit, uint32_t frame_rate);

 private:
  // Update frame size for codec.
  int UpdateCodecFrameSize(const I420VideoFrame& input_image);

  EncodedImage encoded_image_;
  EncodedImageCallback* encoded_complete_callback_;
  VideoCodec codec_;
  bool inited_;

  // OpenH264 encoder core
  ISVCEncoder* encoder_;

};  // end of H264Encoder class

Now for the implementation of each method. They largely follow the basic OpenH264 encoding flow shown below (taken from the OpenH264 usage example):

 

//setup encoder
int rv = WelsCreateSVCEncoder (&encoder_);
//initialize with basic parameters
SEncParamBase param;
memset (&param, 0, sizeof (SEncParamBase));
param.iUsageType = usageType;
param.fMaxFrameRate = frameRate;
param.iPicWidth = width;
param.iPicHeight = height;
param.iTargetBitrate = 5000000;
encoder_->Initialize (&param);
//set option, set option during encoding process
encoder_->SetOption (ENCODER_OPTION_TRACE_LEVEL, &g_LevelSetting);
int videoFormat = videoFormatI420;
encoder_->SetOption (ENCODER_OPTION_DATAFORMAT, &videoFormat);
//encode and store output bitstream
int frameSize = width * height * 3 / 2;
BufferedData buf;
buf.SetLength (frameSize);
ASSERT_TRUE (buf.Length() == (size_t)frameSize);
SFrameBSInfo info;
memset (&info, 0, sizeof (SFrameBSInfo));
SSourcePicture pic;
memset (&pic, 0, sizeof (SSourcePicture));
pic.iPicWidth = width;
pic.iPicHeight = height;
pic.iColorFormat = videoFormatI420;
pic.iStride[0] = pic.iPicWidth;
pic.iStride[1] = pic.iStride[2] = pic.iPicWidth >> 1;
pic.pData[0] = buf.data();
pic.pData[1] = pic.pData[0] + width * height;
pic.pData[2] = pic.pData[1] + (width * height >> 2);
for (int num = 0; num < total_num; num++) {
   //prepare input data, then encode one frame
   rv = encoder_->EncodeFrame (&pic, &info);
   ASSERT_TRUE (rv == cmResultSuccess);
   if (info.eFrameType != videoFrameTypeSkip && cbk != NULL) {
    //output bitstream
   }
}
//teardown encoder
if (encoder_) {
    encoder_->Uninitialize();
    WelsDestroySVCEncoder (encoder_);
}


The H264EncoderImpl() constructor is implemented as follows:

 

H264EncoderImpl::H264EncoderImpl()
    : encoded_image_(),
      encoded_complete_callback_(NULL),
      inited_(false),
      encoder_(NULL)
{
  memset(&codec_, 0, sizeof(codec_));
}
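
The matching destructor isn't shown in the post; a minimal sketch (my assumption, not the author's code) would simply free everything through Release():

H264EncoderImpl::~H264EncoderImpl() {
  // Releases the encoded-image buffer and destroys the OpenH264 encoder.
  Release();
}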


Release() is implemented as follows; it calls OpenH264's Uninitialize and WelsDestroySVCEncoder:


int H264EncoderImpl::Release() {
  if (encoded_image_._buffer != NULL) {
    delete [] encoded_image_._buffer;
    encoded_image_._buffer = NULL;
  }
  if (encoder_ != NULL) {
    encoder_->Uninitialize();
    WelsDestroySVCEncoder(encoder_);
    encoder_ = NULL;
  }
  inited_ = false;
  return WEBRTC_VIDEO_CODEC_OK;
}


InitEncode() is implemented as follows. It is essentially the OpenH264 flow of creating the encoder (WelsCreateSVCEncoder), setting up the parameters (SEncParamBase), and initializing (encoder_->Initialize):


int H264EncoderImpl::InitEncode(const VideoCodec* inst,
                               int number_of_cores,
                               size_t max_payload_size) {
  if (inst == NULL) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (inst->maxFramerate < 1) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  // allow zero to represent an unspecified maxBitRate
  if (inst->maxBitrate > 0 && inst->startBitrate > inst->maxBitrate) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (inst->width < 1 || inst->height < 1) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (number_of_cores < 1) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }

  int ret_val= Release();
  if (ret_val < 0) {
    return ret_val;
  }

  if (encoder_ == NULL) {
	ret_val = WelsCreateSVCEncoder(&encoder_);

    if (ret_val != 0) {
     WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
  	              "H264EncoderImpl::InitEncode() fails to create encoder ret_val %d",
    	           ret_val);
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
  }
  SEncParamBase param;
  memset(&param, 0, sizeof(SEncParamBase));
  param.iUsageType = CAMERA_VIDEO_REAL_TIME;
  param.iRCMode = RC_QUALITY_MODE;
  param.fMaxFrameRate = inst->maxFramerate;
  param.iPicWidth = inst->width;
  param.iPicHeight = inst->height;
  // VideoCodec bitrates are in kbit/s while iTargetBitrate is in bit/s.
  param.iTargetBitrate = inst->maxBitrate * 1000;

  ret_val = encoder_->Initialize(&param);
  int videoFormat = videoFormatI420;
  encoder_->SetOption(ENCODER_OPTION_DATAFORMAT, &videoFormat);

  if (ret_val != 0) {
	  WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
		  "H264EncoderImpl::InitEncode() fails to initialize encoder ret_val %d",
		  ret_val);
	  WelsDestroySVCEncoder(encoder_);
	  encoder_ = NULL;
	  return WEBRTC_VIDEO_CODEC_ERROR;
  }

  if (&codec_ != inst) {
    codec_ = *inst;
  }

  if (encoded_image_._buffer != NULL) {
    delete [] encoded_image_._buffer;
  }
  encoded_image_._size = CalcBufferSize(kI420, codec_.width, codec_.height);
  encoded_image_._buffer = new uint8_t[encoded_image_._size];
  encoded_image_._completeFrame = true;
  
  inited_ = true;
  WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCoding, -1,
               "H264EncoderImpl::InitEncode(width:%d, height:%d, framerate:%d, start_bitrate:%d, max_bitrate:%d)",
               inst->width, inst->height, inst->maxFramerate, inst->startBitrate, inst->maxBitrate);

  return WEBRTC_VIDEO_CODEC_OK;
}

 

Encode() does two things. First, it encodes the video frame, again following the basic OpenH264 encoding flow.

Second, the encoded data is packetized into RTP packets and sent out. That part is handled by the Encoded() method of WebRTC's VCMEncodedFrameCallback class (see webrtc\modules\video_coding\main\source\generic_encoder.cc); all we do is store the callback pointer in our H264EncoderImpl's RegisterEncodeCompleteCallback method:

 

int H264EncoderImpl::RegisterEncodeCompleteCallback(
    EncodedImageCallback* callback) {
  encoded_complete_callback_ = callback;
  return WEBRTC_VIDEO_CODEC_OK;
}

What we have to do ourselves is collect the size, type, and other information of the NAL unit backing each RTP fragment and write them into an RTPFragmentationHeader. Note that OpenH264 emits an Annex-B bitstream; the code below assumes each NAL unit carries a 4-byte start code (00 00 00 01), which is why 4 is added to the buffer offset and subtracted from the length.

 

The Encode() method looks like this:

 

int H264EncoderImpl::Encode(const I420VideoFrame& input_image,
                           const CodecSpecificInfo* codec_specific_info,
                           const std::vector<VideoFrameType>* frame_types) {
  if (!inited_) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  if (input_image.IsZeroSize()) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  if (encoded_complete_callback_ == NULL) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }

  VideoFrameType frame_type = kDeltaFrame;
  // We only support one stream at the moment.
  if (frame_types && frame_types->size() > 0) {
    frame_type = (*frame_types)[0];
  }

  bool send_keyframe = (frame_type == kKeyFrame);
  if (send_keyframe) {
    encoder_->ForceIntraFrame(true);
    WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCoding, -1,
                 "H264EncoderImpl::EncodeKeyFrame(width:%d, height:%d)",
                 input_image.width(), input_image.height());
  }

  // Check for change in frame size.
  if (input_image.width() != codec_.width ||
      input_image.height() != codec_.height) {
    int ret = UpdateCodecFrameSize(input_image);
    if (ret < 0) {
      return ret;
    }
  }
 // The encoding step. Note that Encode() is called repeatedly, once per frame.
  SFrameBSInfo info;
  memset(&info, 0, sizeof(SFrameBSInfo));

  SSourcePicture pic;
  memset(&pic,0,sizeof(SSourcePicture));
  pic.iPicWidth = input_image.width();
  pic.iPicHeight = input_image.height();
  pic.iColorFormat = videoFormatI420;

  pic.iStride[0] = input_image.stride(kYPlane);
  pic.iStride[1] = input_image.stride(kUPlane);
  pic.iStride[2] = input_image.stride(kVPlane);

  pic.pData[0]   = const_cast<uint8_t*>(input_image.buffer(kYPlane));
  pic.pData[1]   = const_cast<uint8_t*>(input_image.buffer(kUPlane));
  pic.pData[2]   = const_cast<uint8_t*>(input_image.buffer(kVPlane));

  int retVal = encoder_->EncodeFrame(&pic, &info);
  if (retVal != cmResultSuccess) {
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  // EncodeFrame() returns cmResultSuccess even when the rate controller
  // skips the frame; the skip shows up in info.eFrameType.
  if (info.eFrameType == videoFrameTypeSkip) {
    return WEBRTC_VIDEO_CODEC_OK;
  }
// Collect the size, type, and other info of the NAL units that map to RTP fragments
  int layer = 0;

  uint32_t totalNaluCount = 0;
  while (layer < info.iLayerNum) {
	  const SLayerBSInfo* layer_bs_info = &info.sLayerInfo[layer];
	  if (layer_bs_info != NULL) {
		  totalNaluCount += layer_bs_info->iNalCount;
	  }
	  layer++;
  }
  if (totalNaluCount == 0) {
	  return WEBRTC_VIDEO_CODEC_OK;
  }
// We assume each fragment holds exactly one NAL unit; the actual RTP packetization
// is done by WebRTC's VCMPacketizationCallback, so we don't deal with it here.
  RTPFragmentationHeader frag_info;
  frag_info.VerifyAndAllocateFragmentationHeader(totalNaluCount);
  
  encoded_image_._length = 0;
  layer = 0;
  uint32_t totalNaluIndex = 0;

  while (layer < info.iLayerNum) {
	  const SLayerBSInfo* layer_bs_info = &info.sLayerInfo[layer];
	  if (layer_bs_info != NULL) {
		  int layer_size = 0;
		  int nal_begin = 4;
		  uint8_t* nal_buffer = NULL;
		  char nal_type = 0;
		  for (int nal_index = 0; nal_index < layer_bs_info->iNalCount; nal_index++) {
			  nal_buffer = layer_bs_info->pBsBuf + nal_begin;
			  nal_type = (nal_buffer[0] & 0x1F);
			  layer_size += layer_bs_info->pNalLengthInByte[nal_index];
			  // Advance by this NAL's length (which includes its 4-byte
			  // start code), not by the accumulated layer size.
			  nal_begin += layer_bs_info->pNalLengthInByte[nal_index];
			  if (nal_type == 14) {  // skip prefix NAL units
				  continue;
			  }
			  uint32_t currentNaluSize = layer_bs_info->pNalLengthInByte[nal_index] - 4;
			  memcpy(encoded_image_._buffer + encoded_image_._length, nal_buffer, currentNaluSize);
			  encoded_image_._length += currentNaluSize;

			  WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCoding, -1,
			                "H264EncoderImpl::Encode() nal_type %d, length:%d",
			                 nal_type, encoded_image_._length);
			      
			  // Offset of pointer to data for each fragm.
			  frag_info.fragmentationOffset[totalNaluIndex] = encoded_image_._length - currentNaluSize;
			  // Data size for each fragmentation
			  frag_info.fragmentationLength[totalNaluIndex] = currentNaluSize;
			  // Payload type of each fragmentation
			  frag_info.fragmentationPlType[totalNaluIndex] = nal_type;
			  // Timestamp difference relative "now" for
			  // each fragmentation
			  frag_info.fragmentationTimeDiff[totalNaluIndex] = 0;
			  totalNaluIndex++;
		  } // for
	  }
	  layer++;
  }


  if (encoded_image_._length > 0) {
	  encoded_image_._timeStamp = input_image.timestamp();
	  encoded_image_.capture_time_ms_ = input_image.render_time_ms();
	  encoded_image_._encodedHeight = codec_.height;
	  encoded_image_._encodedWidth = codec_.width;
	  encoded_image_._frameType = frame_type;
	  // call back: hand the encoded frame and its fragmentation info to WebRTC
	  encoded_complete_callback_->Encoded(encoded_image_, NULL, &frag_info);
  }
return WEBRTC_VIDEO_CODEC_OK;
}
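
Encode() also calls UpdateCodecFrameSize() when the input resolution changes, but its body is not shown above. A minimal sketch (my assumption, not the author's code) that records the new size and rebuilds the encoder:

int H264EncoderImpl::UpdateCodecFrameSize(const I420VideoFrame& input_image) {
  // Remember the new dimensions...
  codec_.width = input_image.width();
  codec_.height = input_image.height();
  // ...and reinitialize with them. InitEncode() calls Release() first,
  // so the old OpenH264 instance and buffers are freed before rebuilding.
  return InitEncode(&codec_, 1, 0);
}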


Finally, SetRates() and SetChannelParameters() exist so the encoder can adapt bitrate and frame rate to network conditions. For now we ignore them and run at a fixed bitrate and frame rate; their implementations are:


int H264EncoderImpl::SetRates(uint32_t new_bitrate_kbit,
                             uint32_t new_framerate) {
  WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCoding, -1,
               "H264EncoderImpl::SetRates(%d, %d)", new_bitrate_kbit, new_framerate);
  if (!inited_) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  if (new_framerate < 1) {
    return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
  }
  // update bit rate
  if (codec_.maxBitrate > 0 && new_bitrate_kbit > codec_.maxBitrate) {
    new_bitrate_kbit = codec_.maxBitrate;
  }

  return WEBRTC_VIDEO_CODEC_OK;
}
int H264EncoderImpl::SetChannelParameters(uint32_t packet_loss, int rtt) {
	return WEBRTC_VIDEO_CODEC_OK;
}
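
If you later do want rate adaptation, here is a sketch of how SetRates() could forward the new targets to OpenH264 (my assumption; the post deliberately keeps the rates fixed):

  // Inside SetRates(), after the parameter checks above:
  SBitrateInfo target;
  memset(&target, 0, sizeof(SBitrateInfo));
  target.iLayer = SPATIAL_LAYER_ALL;
  target.iBitrate = new_bitrate_kbit * 1000;  // OpenH264 expects bit/s
  encoder_->SetOption(ENCODER_OPTION_BITRATE, &target);
  float max_framerate = static_cast<float>(new_framerate);
  encoder_->SetOption(ENCODER_OPTION_FRAME_RATE, &max_framerate);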


This completes the re-wrapping of the encoding functionality.


Re-wrapping the decoding functionality

Define the H264DecoderImpl class as follows; the key addition is the OpenH264 decoder core, ISVCDecoder* decoder_;

class H264DecoderImpl : public VideoDecoder{
 public:
  enum {
	  MAX_ENCODED_IMAGE_SIZE = 32768
  };

  H264DecoderImpl();

  ~H264DecoderImpl();

  int InitDecode(const VideoCodec* inst, int number_of_cores);

  int Decode(const EncodedImage& input_image,
                     bool missing_frames,
                     const RTPFragmentationHeader* fragmentation,
                     const CodecSpecificInfo* codec_specific_info,
                     int64_t /*render_time_ms*/);

  int RegisterDecodeCompleteCallback(DecodedImageCallback* callback);

  int Release();

  int Reset();

  VideoDecoder* Copy();

 private:
  I420VideoFrame decoded_image_;
  DecodedImageCallback* decode_complete_callback_;
  bool inited_;
  VideoCodec codec_;
  bool key_frame_required_;

  ISVCDecoder* decoder_;
  unsigned char* buffer_with_start_code_;

};  

As before, let's first look at OpenH264's own basic decoding flow:

 

//decoder declaration
ISVCDecoder *pSvcDecoder;
//input: encoded bitstream start position; should include start code prefix
unsigned char *pBuf =...;
//input: encoded bit stream length; should include the size of start code prefix
int iSize =...;
//output: [0~2] for Y,U,V buffer for Decoding only
unsigned char *pData[3] =...;
//in-out: for Decoding only: declare and initialize the output buffer info
SBufferInfo sDstBufInfo;
memset(&sDstBufInfo, 0, sizeof(SBufferInfo));

//decoder creation
CreateDecoder(pSvcDecoder);

//declare required parameter
SDecodingParam sDecParam = {0};
sDecParam.sVideoProperty.eVideoBsType = VIDEO_BITSTREAM_AVC;
//initialize the parameter and decoder context, allocate memory
Initialize(&sDecParam);

//do actual decoding process in slice level; this can be done in a loop until data ends
//for Decoding only
iRet = DecodeFrameNoDelay(pBuf, iSize, pData, &sDstBufInfo);
//or
iRet = DecodeFrame2(pBuf, iSize, pData, &sDstBufInfo);

//for Decoding only, pData can be used for render.
if (sDstBufInfo.iBufferStatus==1){
    output pData[0], pData[1], pData[2];
}
//uninitialize the decoder and memory free
Uninitialize();
//destroy the decoder
DestroyDecoder();


Following the same template, we can write out each method of the H264DecoderImpl class.

 

The H264DecoderImpl() constructor is implemented as follows:

 

H264DecoderImpl::H264DecoderImpl()
    : decode_complete_callback_(NULL),
      inited_(false),
      key_frame_required_(true),
      buffer_with_start_code_(NULL),
      decoder_(NULL) {
  memset(&codec_, 0, sizeof(codec_));
  buffer_with_start_code_ = new unsigned char[MAX_ENCODED_IMAGE_SIZE];
}
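
The destructor isn't shown either; a minimal sketch (my assumption) that frees what the constructor and InitDecode() allocate:

H264DecoderImpl::~H264DecoderImpl() {
  // Destroys the OpenH264 decoder, then frees the bitstream staging
  // buffer allocated in the constructor.
  Release();
  delete [] buffer_with_start_code_;
  buffer_with_start_code_ = NULL;
}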

Release() is implemented as follows, calling OpenH264's Uninitialize and WelsDestroyDecoder methods:


int H264DecoderImpl::Release() {
		if (decoder_ != NULL) {
			decoder_->Uninitialize();
			WelsDestroyDecoder(decoder_);
			decoder_ = NULL;
		}
		inited_ = false;
		return WEBRTC_VIDEO_CODEC_OK;
	}


InitDecode() is implemented as follows, setting up and initializing the OpenH264 decoding parameters:


int H264DecoderImpl::InitDecode(const VideoCodec* inst, int number_of_cores) {
		if (inst == NULL) {
			return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
		}
		int ret_val = Release();
		if (ret_val < 0) {
			return ret_val;
		}

		if (&codec_ != inst) {
			// Save VideoCodec instance for later; mainly for duplicating the decoder.
			codec_ = *inst;
		}
		if (decoder_ == NULL) {
			ret_val = WelsCreateDecoder(&decoder_);
			if (ret_val != 0) {
				decoder_ = NULL;
				return WEBRTC_VIDEO_CODEC_ERROR;
			}
		}
		SDecodingParam dec_param;
		memset(&dec_param, 0, sizeof(SDecodingParam));
		dec_param.eOutputColorFormat = videoFormatI420;
		dec_param.uiTargetDqLayer = UCHAR_MAX;
		dec_param.eEcActiveIdc = ERROR_CON_FRAME_COPY_CROSS_IDR;
		dec_param.sVideoProperty.eVideoBsType = VIDEO_BITSTREAM_DEFAULT;
		ret_val = decoder_->Initialize(&dec_param);
		if (ret_val != 0) {
			decoder_->Uninitialize();
			WelsDestroyDecoder(decoder_);
			decoder_ = NULL;
			return WEBRTC_VIDEO_CODEC_ERROR;
		}
		inited_ = true;

		// Always start with a complete key frame.
		key_frame_required_ = true;
		WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCoding, -1,
			"H264DecoderImpl::InitDecode(width:%d, height:%d, framerate:%d, start_bitrate:%d, max_bitrate:%d)",
			inst->width, inst->height, inst->maxFramerate, inst->startBitrate, inst->maxBitrate);
		return WEBRTC_VIDEO_CODEC_OK;
	}

Decode() does two things. First, it decodes the received frame, following the OpenH264 decoding flow above.

 

Second, it hands the decoded YUV420P data over for rendering. The actual rendering is driven by the Decoded() method of WebRTC's VCMDecodedFrameCallback class (see webrtc\modules\video_coding\main\source\generic_decoder.cc); all we do is store the callback pointer in our H264DecoderImpl's RegisterDecodeCompleteCallback method:

 

int H264DecoderImpl::RegisterDecodeCompleteCallback(
		DecodedImageCallback* callback) {
		decode_complete_callback_ = callback;
		return WEBRTC_VIDEO_CODEC_OK;
	}


What we need to do ourselves is wrap the decoder's output planes into YUV frame data using WebRTC's CreateFrame method. The whole Decode() method is implemented as follows:


int H264DecoderImpl::Decode(const EncodedImage& input_image,
		bool missing_frames,
		const RTPFragmentationHeader* fragmentation,
		const CodecSpecificInfo* codec_specific_info,
		int64_t /*render_time_ms*/) {
		if (!inited_) {
			WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
				"H264DecoderImpl::Decode, decoder is not initialized");
			return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
		}

		if (decode_complete_callback_ == NULL) {
			WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
				"H264DecoderImpl::Decode, decode complete call back is not set");
			return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
		}

		if (input_image._buffer == NULL) {
			WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
				"H264DecoderImpl::Decode, null buffer");
			return WEBRTC_VIDEO_CODEC_ERR_PARAMETER;
		}
		if (!codec_specific_info) {
			WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
				"H264DecoderImpl::Decode, no codec info");
			return WEBRTC_VIDEO_CODEC_ERROR;
		}
		if (codec_specific_info->codecType != kVideoCodecH264) {
			WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
				"H264DecoderImpl::Decode, non h264 codec %d", codec_specific_info->codecType);
			return WEBRTC_VIDEO_CODEC_ERROR;
		}

		WEBRTC_TRACE(webrtc::kTraceApiCall, webrtc::kTraceVideoCoding, -1,
			"H264DecoderImpl::Decode(frame_type:%d, length:%d",
			input_image._frameType, input_image._length);

		void* data[3];
		SBufferInfo buffer_info;
		memset(data, 0, sizeof(data));
		memset(&buffer_info, 0, sizeof(SBufferInfo));

		memset(buffer_with_start_code_, 0, MAX_ENCODED_IMAGE_SIZE);
		// Guard against frames larger than the fixed 32768-byte buffer
		// allocated in the constructor.
		if (input_image._length > MAX_ENCODED_IMAGE_SIZE) {
			return WEBRTC_VIDEO_CODEC_ERROR;
		}
		memcpy(buffer_with_start_code_, input_image._buffer, input_image._length);
		int encoded_image_size = input_image._length;

		DECODING_STATE rv = decoder_->DecodeFrame2(buffer_with_start_code_, encoded_image_size, (unsigned char**)data, &buffer_info);

		if (rv != dsErrorFree) {
			WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
				"H264DecoderImpl::Decode, openH264 decoding fails with error %d", rv);
			return WEBRTC_VIDEO_CODEC_ERROR;
		}

		if (buffer_info.iBufferStatus == 1) {
			int size_y = buffer_info.UsrData.sSystemBuffer.iStride[0] * buffer_info.UsrData.sSystemBuffer.iHeight;
			int size_u = buffer_info.UsrData.sSystemBuffer.iStride[1] * (buffer_info.UsrData.sSystemBuffer.iHeight / 2);
			int size_v = buffer_info.UsrData.sSystemBuffer.iStride[1] * (buffer_info.UsrData.sSystemBuffer.iHeight / 2);

			decoded_image_.CreateFrame(size_y, static_cast<uint8_t*>(data[0]),
				size_u, static_cast<uint8_t*>(data[1]),
				size_v, static_cast<uint8_t*>(data[2]),
				buffer_info.UsrData.sSystemBuffer.iWidth,
				buffer_info.UsrData.sSystemBuffer.iHeight,
				buffer_info.UsrData.sSystemBuffer.iStride[0],
				buffer_info.UsrData.sSystemBuffer.iStride[1],
				buffer_info.UsrData.sSystemBuffer.iStride[1]);

			decoded_image_.set_timestamp(input_image._timeStamp);
			decode_complete_callback_->Decoded(decoded_image_);
			return WEBRTC_VIDEO_CODEC_OK;
		}else {
			WEBRTC_TRACE(webrtc::kTraceError, webrtc::kTraceVideoCoding, -1,
				"H264DecoderImpl::Decode, buffer status:%d", buffer_info.iBufferStatus);
			return WEBRTC_VIDEO_CODEC_OK;
		}
	}

Finally, Reset() and Copy() reset and clone the decoder respectively. We don't need them for now; their implementations are:


VideoDecoder* H264DecoderImpl::Copy() {
		// Sanity checks.
		if (!inited_) {
			// Not initialized.
			assert(false);
			return NULL;
		}
		if (decoded_image_.IsZeroSize()) {
			// Nothing has been decoded before; cannot clone.
			return NULL;
		}
		// Create a new VideoDecoder object
		H264DecoderImpl *copy = new H264DecoderImpl;

		// Initialize the new decoder
		if (copy->InitDecode(&codec_, 1) != WEBRTC_VIDEO_CODEC_OK) {
			delete copy;
			return NULL;
		}

		return static_cast<VideoDecoder*>(copy);
	}

 

int H264DecoderImpl::Reset() {
		if (!inited_) {
			return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
		}
		InitDecode(&codec_, 1);
		return WEBRTC_VIDEO_CODEC_OK;
	}


With that, the re-wrapping of the OpenH264 decoding functionality is complete as well.


Registration and use

Finally, in the main flow (see the previous article) we register OpenH264 as an external codec, using the RegisterExternalSendCodec and RegisterExternalReceiveCodec methods:


webrtc::H264Encoder* h264encoder = webrtc::H264Encoder::Create();
webrtc::H264Decoder* h264decoder = webrtc::H264Decoder::Create();

webrtc::ViEExternalCodec* external_codec =
    webrtc::ViEExternalCodec::GetInterface(ptrViE);
external_codec->RegisterExternalSendCodec(videoChannel, 88,
                                          h264encoder, false);
external_codec->RegisterExternalReceiveCodec(videoChannel, 88,
                                             h264decoder, false);

The payload type 88 here can be changed to another value, but it must match the videoCodec.plType value set later.
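
For reference, here is a sketch of the matching codec setup in the main flow (variable names follow the previous article; the width/height/bitrate values are illustrative assumptions):

webrtc::VideoCodec videoCodec;
memset(&videoCodec, 0, sizeof(videoCodec));
videoCodec.codecType = webrtc::kVideoCodecH264;
strncpy(videoCodec.plName, "H264", webrtc::kPayloadNameSize - 1);
videoCodec.plType = 88;  // must match the RegisterExternal*Codec calls
videoCodec.width = 640;
videoCodec.height = 480;
videoCodec.startBitrate = 500;  // kbit/s
videoCodec.maxBitrate = 1000;   // kbit/s
videoCodec.maxFramerate = 30;
ptrViECodec->SetSendCodec(videoChannel, videoCodec);
ptrViECodec->SetReceiveCodec(videoChannel, videoCodec);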

 

And with that, OpenH264 has been successfully integrated into WebRTC's VideoEngine.
