WebRTC Source Code Analysis: Video Encoding, Part 2

Part 1 of this series covered how video encoding is initialized; this article walks through the encoding flow itself. The pipeline begins when VideoBroadcaster invokes VideoStreamEncoder's OnFrame callback.
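
For orientation, here is the call chain this article walks through, reconstructed from the code excerpts below:

VideoBroadcaster (video source)
  -> VideoStreamEncoder::OnFrame             // timestamping, posts an EncodeTask
  -> EncodeTask::Run                         // on encoder_queue_, drops stale frames
  -> VideoStreamEncoder::EncodeVideoFrame    // reconfiguration, crop/scale
  -> VideoSender::AddVideoFrame              // drop decisions, buffer conversion
  -> VCMGenericEncoder::Encode
  -> MediaCodecVideoEncoder::Encode          // Android hardware encoder, via JNI
  -> MediaCodecVideoEncoder::DeliverPendingOutputs
  -> VCMEncodedFrameCallback::OnEncodedImage
  -> VideoSendStreamImpl::OnEncodedImage
  -> PayloadRouter                           // RTP packetization and sending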

VideoStreamEncoder::OnFrame is defined as follows:

void VideoStreamEncoder::OnFrame(const VideoFrame& video_frame) {
  RTC_DCHECK_RUNS_SERIALIZED(&incoming_frame_race_checker_);
  VideoFrame incoming_frame = video_frame;

  // Local time in webrtc time base.
  int64_t current_time_us = clock_->TimeInMicroseconds();
  int64_t current_time_ms = current_time_us / rtc::kNumMicrosecsPerMillisec;
  // In some cases, e.g., when the frame from decoder is fed to encoder,
  // the timestamp may be set to the future. As the encoding pipeline assumes
  // capture time to be less than present time, we should reset the capture
  // timestamps here. Otherwise there may be issues with RTP send stream.
  if (incoming_frame.timestamp_us() > current_time_us)
    incoming_frame.set_timestamp_us(current_time_us);

  // Capture time may come from clock with an offset and drift from clock_.
  int64_t capture_ntp_time_ms;
  if (video_frame.ntp_time_ms() > 0) {
    capture_ntp_time_ms = video_frame.ntp_time_ms();
  } else if (video_frame.render_time_ms() != 0) {
    capture_ntp_time_ms = video_frame.render_time_ms() + delta_ntp_internal_ms_;
  } else {
    capture_ntp_time_ms = current_time_ms + delta_ntp_internal_ms_;
  }
  incoming_frame.set_ntp_time_ms(capture_ntp_time_ms);

  // Convert NTP time, in ms, to RTP timestamp.
  const int kMsToRtpTimestamp = 90;
  incoming_frame.set_timestamp(
      kMsToRtpTimestamp * static_cast<uint32_t>(incoming_frame.ntp_time_ms()));

  if (incoming_frame.ntp_time_ms() <= last_captured_timestamp_) {
    // We don't allow the same capture time for two frames, drop this one.
    RTC_LOG(LS_WARNING) << "Same/old NTP timestamp ("
                        << incoming_frame.ntp_time_ms()
                        << " <= " << last_captured_timestamp_
                        << ") for incoming frame. Dropping.";
    return;
  }

  bool log_stats = false;
  if (current_time_ms - last_frame_log_ms_ > kFrameLogIntervalMs) {
    last_frame_log_ms_ = current_time_ms;
    log_stats = true;
  }

  last_captured_timestamp_ = incoming_frame.ntp_time_ms();
  encoder_queue_.PostTask(std::unique_ptr<rtc::QueuedTask>(new EncodeTask(
      incoming_frame, this, rtc::TimeMicros(), log_stats)));
}
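
Note the RTP timestamp computation near the top: the video RTP clock runs at 90 kHz, so the NTP capture time in milliseconds is simply multiplied by 90. A minimal, self-contained sketch of the mapping (the truncation is intentional, since RTP timestamps are 32-bit and wrap):

#include <cstdint>

// 90 kHz RTP video clock: 90 ticks per millisecond.
uint32_t NtpMsToRtpTimestamp(int64_t ntp_time_ms) {
  const uint32_t kMsToRtpTimestamp = 90;
  // Deliberately wraps modulo 2^32, matching the 32-bit RTP timestamp field.
  return kMsToRtpTimestamp * static_cast<uint32_t>(ntp_time_ms);
}

// Two frames captured 33 ms apart end up 33 * 90 = 2970 ticks apart.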

EncodeTask::Run is defined as follows:

  bool Run() override {
    RTC_DCHECK_RUN_ON(&video_stream_encoder_->encoder_queue_);
    video_stream_encoder_->stats_proxy_->OnIncomingFrame(frame_.width(),
                                                         frame_.height());
    ++video_stream_encoder_->captured_frame_count_;
    const int posted_frames_waiting_for_encode =
        video_stream_encoder_->posted_frames_waiting_for_encode_.fetch_sub(1);
    RTC_DCHECK_GT(posted_frames_waiting_for_encode, 0);
    if (posted_frames_waiting_for_encode == 1) {
      video_stream_encoder_->EncodeVideoFrame(frame_, time_when_posted_us_);
    } else {
      // There is a newer frame in flight. Do not encode this frame.
      RTC_LOG(LS_VERBOSE)
          << "Incoming frame dropped due to that the encoder is blocked.";
      ++video_stream_encoder_->dropped_frame_count_;
      video_stream_encoder_->stats_proxy_->OnFrameDroppedInEncoderQueue();
    }
    if (log_stats_) {
      RTC_LOG(LS_INFO) << "Number of frames: captured "
                       << video_stream_encoder_->captured_frame_count_
                       << ", dropped (due to encoder blocked) "
                       << video_stream_encoder_->dropped_frame_count_
                       << ", interval_ms " << kFrameLogIntervalMs;
      video_stream_encoder_->captured_frame_count_ = 0;
      video_stream_encoder_->dropped_frame_count_ = 0;
    }
    return true;
  }
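
The key mechanism here is the posted_frames_waiting_for_encode_ counter: the EncodeTask constructor increments it when the task is posted (not shown above), and Run decrements it. If fetch_sub returns 1, the counter just hit zero, meaning this is the newest posted frame, so it is encoded; any other value means a newer frame is already queued behind it, so this one is dropped. A minimal sketch of the pattern, with hypothetical names:

#include <atomic>

// Keep only the newest frame when the consumer can't keep up.
std::atomic<int> pending{0};

void OnNewFrame(/* frame */) {
  pending.fetch_add(1);
  // PostTask(ProcessFrame) to the encoder queue here.
}

void ProcessFrame(/* frame */) {
  if (pending.fetch_sub(1) == 1) {
    // Counter hit zero: nothing newer was posted after this frame. Encode it.
  } else {
    // A newer frame is in flight; drop this one instead of falling behind.
  }
}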

The member video_stream_encoder_ is a VideoStreamEncoder; its EncodeVideoFrame function is defined as follows:

void VideoStreamEncoder::EncodeVideoFrame(const VideoFrame& video_frame,
                                          int64_t time_when_posted_us) {
  RTC_DCHECK_RUN_ON(&encoder_queue_);

  if (pre_encode_callback_)
    pre_encode_callback_->OnFrame(video_frame);

  if (!last_frame_info_ || video_frame.width() != last_frame_info_->width ||
      video_frame.height() != last_frame_info_->height ||
      video_frame.is_texture() != last_frame_info_->is_texture) {
    pending_encoder_reconfiguration_ = true;
    last_frame_info_ = rtc::Optional<VideoFrameInfo>(VideoFrameInfo(
        video_frame.width(), video_frame.height(), video_frame.is_texture()));
    RTC_LOG(LS_INFO) << "Video frame parameters changed: dimensions="
                     << last_frame_info_->width << "x"
                     << last_frame_info_->height
                     << ", texture=" << last_frame_info_->is_texture << ".";
  }

  if (initial_rampup_ < kMaxInitialFramedrop &&
      video_frame.size() >
          MaximumFrameSizeForBitrate(encoder_start_bitrate_bps_ / 1000)) {
    RTC_LOG(LS_INFO) << "Dropping frame. Too large for target bitrate.";
    AdaptDown(kQuality);
    ++initial_rampup_;
    return;
  }
  initial_rampup_ = kMaxInitialFramedrop;

  int64_t now_ms = clock_->TimeInMilliseconds();
  if (pending_encoder_reconfiguration_) {
    ReconfigureEncoder();
    last_parameters_update_ms_.emplace(now_ms);
  } else if (!last_parameters_update_ms_ ||
             now_ms - *last_parameters_update_ms_ >=
                 vcm::VCMProcessTimer::kDefaultProcessIntervalMs) {
    video_sender_.UpdateChannelParemeters(rate_allocator_.get(),
                                          bitrate_observer_);
    last_parameters_update_ms_.emplace(now_ms);
  }

  if (EncoderPaused()) {
    TraceFrameDropStart();
    return;
  }
  TraceFrameDropEnd();

  VideoFrame out_frame(video_frame);
  // Crop frame if needed.
  if (crop_width_ > 0 || crop_height_ > 0) {
    int cropped_width = video_frame.width() - crop_width_;
    int cropped_height = video_frame.height() - crop_height_;
    rtc::scoped_refptr<I420Buffer> cropped_buffer =
        I420Buffer::Create(cropped_width, cropped_height);
    // TODO(ilnik): Remove scaling if cropping is too big, as it should never
    // happen after SinkWants signaled correctly from ReconfigureEncoder.
    if (crop_width_ < 4 && crop_height_ < 4) {
      cropped_buffer->CropAndScaleFrom(
          *video_frame.video_frame_buffer()->ToI420(), crop_width_ / 2,
          crop_height_ / 2, cropped_width, cropped_height);
    } else {
      cropped_buffer->ScaleFrom(
          *video_frame.video_frame_buffer()->ToI420().get());
    }
    out_frame =
        VideoFrame(cropped_buffer, video_frame.timestamp(),
                   video_frame.render_time_ms(), video_frame.rotation());
    out_frame.set_ntp_time_ms(video_frame.ntp_time_ms());
  }

  TRACE_EVENT_ASYNC_STEP0("webrtc", "Video", video_frame.render_time_ms(),
                          "Encode");

  overuse_detector_->FrameCaptured(out_frame, time_when_posted_us);

  video_sender_.AddVideoFrame(out_frame, nullptr);
}
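
A note on the cropping branch: crop_width_ and crop_height_ hold the total number of columns and rows to remove (computed in ReconfigureEncoder, typically to meet encoder alignment constraints). Small mismatches (fewer than 4 pixels per dimension) are cropped symmetrically, trimming half the excess from each side; anything larger falls back to scaling. A worked sketch of the offset math, with hypothetical numbers:

// Hypothetical example: a 1282x720 frame with crop_width_ = 2, crop_height_ = 0.
int frame_width = 1282, crop_width = 2;
int cropped_width = frame_width - crop_width;  // 1280
int offset_x = crop_width / 2;                 // 1: one column trimmed per side
// CropAndScaleFrom(src, offset_x, offset_y, cropped_width, cropped_height)
// then copies the centered 1280-wide window; sizes match, so no scaling occurs.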

The frame is cropped and scaled if necessary, after which VideoSender::AddVideoFrame is called; it is defined as follows:

// Add one raw video frame to the encoder, blocking.
int32_t VideoSender::AddVideoFrame(const VideoFrame& videoFrame,
                                   const CodecSpecificInfo* codecSpecificInfo) {
  EncoderParameters encoder_params;
  std::vector<FrameType> next_frame_types;
  bool encoder_has_internal_source = false;
  {
    rtc::CritScope lock(&params_crit_);
    encoder_params = encoder_params_;
    next_frame_types = next_frame_types_;
    encoder_has_internal_source = encoder_has_internal_source_;
  }
  rtc::CritScope lock(&encoder_crit_);
  if (_encoder == nullptr)
    return VCM_UNINITIALIZED;
  SetEncoderParameters(encoder_params, encoder_has_internal_source);
  if (_mediaOpt.DropFrame()) {
    RTC_LOG(LS_VERBOSE) << "Drop Frame "
                        << "target bitrate "
                        << encoder_params.target_bitrate.get_sum_bps()
                        << " loss rate " << encoder_params.loss_rate << " rtt "
                        << encoder_params.rtt << " input frame rate "
                        << encoder_params.input_frame_rate;
    post_encode_callback_->OnDroppedFrame(
        EncodedImageCallback::DropReason::kDroppedByMediaOptimizations);
    return VCM_OK;
  }
  // TODO(pbos): Make sure setting send codec is synchronized with video
  // processing so frame size always matches.
  if (!_codecDataBase.MatchesCurrentResolution(videoFrame.width(),
                                               videoFrame.height())) {
    RTC_LOG(LS_ERROR)
        << "Incoming frame doesn't match set resolution. Dropping.";
    return VCM_PARAMETER_ERROR;
  }
  VideoFrame converted_frame = videoFrame;
  const VideoFrameBuffer::Type buffer_type =
      converted_frame.video_frame_buffer()->type();
  const bool is_buffer_type_supported =
      buffer_type == VideoFrameBuffer::Type::kI420 ||
      (buffer_type == VideoFrameBuffer::Type::kNative &&
       _encoder->SupportsNativeHandle());
  if (!is_buffer_type_supported) {
    // This module only supports software encoding.
    // TODO(pbos): Offload conversion from the encoder thread.
    rtc::scoped_refptr<I420BufferInterface> converted_buffer(
        converted_frame.video_frame_buffer()->ToI420());

    if (!converted_buffer) {
      RTC_LOG(LS_ERROR) << "Frame conversion failed, dropping frame.";
      return VCM_PARAMETER_ERROR;
    }
    converted_frame = VideoFrame(converted_buffer,
                                 converted_frame.timestamp(),
                                 converted_frame.render_time_ms(),
                                 converted_frame.rotation());
  }
  int32_t ret =
      _encoder->Encode(converted_frame, codecSpecificInfo, next_frame_types);
  if (ret < 0) {
    RTC_LOG(LS_ERROR) << "Failed to encode frame. Error code: " << ret;
    return ret;
  }

  {
    rtc::CritScope lock(&params_crit_);
    // Change all keyframe requests to encode delta frames the next time.
    for (size_t i = 0; i < next_frame_types_.size(); ++i) {
      // Check for equality (same requested as before encoding) to not
      // accidentally drop a keyframe request while encoding.
      if (next_frame_types[i] == next_frame_types_[i])
        next_frame_types_[i] = kVideoFrameDelta;
    }
  }
  return VCM_OK;
}
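
The loop at the end resolves a race on key-frame requests: between the snapshot of next_frame_types_ taken at the top of the function and the Encode call, another thread may request a fresh key frame. Resetting an entry to kVideoFrameDelta only when it still equals the snapshotted value guarantees such a late request survives until the next AddVideoFrame. The request side is VideoSender::IntraFrameRequest; a simplified sketch of it, reconstructed from the same file (details such as internal-source encoders omitted):

int32_t VideoSender::IntraFrameRequest(size_t stream_index) {
  rtc::CritScope lock(&params_crit_);
  if (stream_index >= next_frame_types_.size())
    return -1;
  // Latched here; consumed (and reset) by the next AddVideoFrame call.
  next_frame_types_[stream_index] = kVideoFrameKey;
  return VCM_OK;
}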

The member _encoder is a VCMGenericEncoder; its Encode function is defined as follows:

int32_t VCMGenericEncoder::Encode(const VideoFrame& frame,
                                  const CodecSpecificInfo* codec_specific,
                                  const std::vector<FrameType>& frame_types) {
  RTC_DCHECK_RUNS_SERIALIZED(&race_checker_);
  TRACE_EVENT1("webrtc", "VCMGenericEncoder::Encode", "timestamp",
               frame.timestamp());

  for (FrameType frame_type : frame_types)
    RTC_DCHECK(frame_type == kVideoFrameKey || frame_type == kVideoFrameDelta);

  for (size_t i = 0; i < streams_or_svc_num_; ++i)
    vcm_encoded_frame_callback_->OnEncodeStarted(frame.timestamp(),
                                                 frame.render_time_ms(), i);

  return encoder_->Encode(frame, codec_specific, &frame_types);
}

Consider the case where encoder_ is a MediaCodecVideoEncoder (the JNI wrapper around Android's hardware codec); its Encode function is defined as follows:

int32_t MediaCodecVideoEncoder::Encode(
    const VideoFrame& frame,
    const CodecSpecificInfo* /* codec_specific_info */,
    const std::vector<FrameType>* frame_types) {
  RTC_DCHECK_CALLED_SEQUENTIALLY(&encoder_queue_checker_);
  if (sw_fallback_required_)
    return WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE;
  JNIEnv* jni = AttachCurrentThreadIfNeeded();
  ScopedLocalRefFrame local_ref_frame(jni);
  const int64_t frame_input_time_ms = rtc::TimeMillis();

  if (!inited_) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }

  bool send_key_frame = false;
  if (codec_mode_ == kRealtimeVideo) {
    ++frames_received_since_last_key_;
    int64_t now_ms = rtc::TimeMillis();
    if (last_frame_received_ms_ != -1 &&
        (now_ms - last_frame_received_ms_) > kFrameDiffThresholdMs) {
      // Add limit to prevent triggering a key for every frame for very low
      // framerates (e.g. if frame diff > kFrameDiffThresholdMs).
      if (frames_received_since_last_key_ > kMinKeyFrameInterval) {
        ALOGD << "Send key, frame diff: " << (now_ms - last_frame_received_ms_);
        send_key_frame = true;
      }
      frames_received_since_last_key_ = 0;
    }
    last_frame_received_ms_ = now_ms;
  }

  frames_received_++;
  if (!DeliverPendingOutputs(jni)) {
    if (!ProcessHWError(true /* reset_if_fallback_unavailable */)) {
      return sw_fallback_required_ ? WEBRTC_VIDEO_CODEC_FALLBACK_SOFTWARE
                                   : WEBRTC_VIDEO_CODEC_ERROR;
    }
  }
  if (frames_encoded_ < kMaxEncodedLogFrames) {
    ALOGD << "Encoder frame in # " << (frames_received_ - 1)
          << ". TS: " << static_cast(current_timestamp_us_ / 1000)
          << ". Q: " << input_frame_infos_.size() << ". Fps: " << last_set_fps_
          << ". Kbps: " << last_set_bitrate_kbps_;
  }

  if (drop_next_input_frame_) {
    ALOGW << "Encoder drop frame - failed callback.";
    drop_next_input_frame_ = false;
    current_timestamp_us_ += rtc::kNumMicrosecsPerSec / last_set_fps_;
    frames_dropped_media_encoder_++;
    return WEBRTC_VIDEO_CODEC_OK;
  }

  RTC_CHECK(frame_types->size() == 1) << "Unexpected stream count";

  // Check if we accumulated too many frames in encoder input buffers and drop
  // frame if so.
  if (input_frame_infos_.size() > MAX_ENCODER_Q_SIZE) {
    ALOGD << "Already " << input_frame_infos_.size()
          << " frames in the queue, dropping"
          << ". TS: " << static_cast(current_timestamp_us_ / 1000)
          << ". Fps: " << last_set_fps_
          << ". Consecutive drops: " << consecutive_full_queue_frame_drops_;
    current_timestamp_us_ += rtc::kNumMicrosecsPerSec / last_set_fps_;
    consecutive_full_queue_frame_drops_++;
    if (consecutive_full_queue_frame_drops_ >=
        ENCODER_STALL_FRAMEDROP_THRESHOLD) {
      ALOGE << "Encoder got stuck.";
      return ProcessHWErrorOnEncode();
    }
    frames_dropped_media_encoder_++;
    return WEBRTC_VIDEO_CODEC_OK;
  }
  consecutive_full_queue_frame_drops_ = 0;

  rtc::scoped_refptr<VideoFrameBuffer> input_buffer(frame.video_frame_buffer());

  VideoFrame input_frame(input_buffer, frame.timestamp(),
                         frame.render_time_ms(), frame.rotation());

  if (!MaybeReconfigureEncoder(jni, input_frame)) {
    ALOGE << "Failed to reconfigure encoder.";
    return WEBRTC_VIDEO_CODEC_ERROR;
  }

  const bool key_frame =
      frame_types->front() != kVideoFrameDelta || send_key_frame;
  bool encode_status = true;

  int j_input_buffer_index = -1;
  if (!use_surface_) {
    j_input_buffer_index = Java_MediaCodecVideoEncoder_dequeueInputBuffer(
        jni, j_media_codec_video_encoder_);
    if (CheckException(jni)) {
      ALOGE << "Exception in dequeu input buffer.";
      return ProcessHWErrorOnEncode();
    }
    if (j_input_buffer_index == -1) {
      // Video codec falls behind - no input buffer available.
      ALOGW << "Encoder drop frame - no input buffers available";
      if (frames_received_ > 1) {
        current_timestamp_us_ += rtc::kNumMicrosecsPerSec / last_set_fps_;
        frames_dropped_media_encoder_++;
      } else {
        // Input buffers are not ready after codec initialization, HW is still
        // allocating thme - this is expected and should not result in drop
        // frame report.
        frames_received_ = 0;
      }
      return WEBRTC_VIDEO_CODEC_OK;  // TODO(fischman): see webrtc bug 2887.
    } else if (j_input_buffer_index == -2) {
      return ProcessHWErrorOnEncode();
    }
  }

  if (input_frame.video_frame_buffer()->type() !=
      VideoFrameBuffer::Type::kNative) {
    encode_status =
        EncodeByteBuffer(jni, key_frame, input_frame, j_input_buffer_index);
  } else {
    AndroidVideoFrameBuffer* android_buffer =
        static_cast<AndroidVideoFrameBuffer*>(
            input_frame.video_frame_buffer().get());
    switch (android_buffer->android_type()) {
      case AndroidVideoFrameBuffer::AndroidType::kTextureBuffer:
        encode_status = EncodeTexture(jni, key_frame, input_frame);
        break;
      case AndroidVideoFrameBuffer::AndroidType::kJavaBuffer:
        encode_status =
            EncodeJavaFrame(jni, key_frame, NativeToJavaFrame(jni, input_frame),
                            j_input_buffer_index);
        break;
      default:
        RTC_NOTREACHED();
        return WEBRTC_VIDEO_CODEC_ERROR;
    }
  }

  if (!encode_status) {
    ALOGE << "Failed encode frame with timestamp: " << input_frame.timestamp();
    return ProcessHWErrorOnEncode();
  }

  // Save input image timestamps for later output.
  input_frame_infos_.emplace_back(frame_input_time_ms, input_frame.timestamp(),
                                  input_frame.render_time_ms(),
                                  input_frame.rotation());

  last_input_timestamp_ms_ =
      current_timestamp_us_ / rtc::kNumMicrosecsPerMillisec;

  current_timestamp_us_ += rtc::kNumMicrosecsPerSec / last_set_fps_;

  // Start the polling loop if it is not started.
  if (encode_task_) {
    rtc::TaskQueue::Current()->PostDelayedTask(std::move(encode_task_),
                                               kMediaCodecPollMs);
  }

  if (!DeliverPendingOutputs(jni)) {
    return ProcessHWErrorOnEncode();
  }
  return WEBRTC_VIDEO_CODEC_OK;
}
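
Two details are worth calling out. First, current_timestamp_us_ is not the real capture time but a synthetic clock advanced by exactly one frame interval per input frame, dropped or not, so MediaCodec always sees evenly spaced, monotonic presentation timestamps:

// Sketch of the pacing arithmetic (hypothetical fps value).
const int64_t kNumMicrosecsPerSec = 1000000;
int last_set_fps = 30;
int64_t frame_interval_us = kNumMicrosecsPerSec / last_set_fps;  // 33333 us
// current_timestamp_us_ += frame_interval_us on every input frame.

Second, encode_task_ re-arms a delayed task every kMediaCodecPollMs, so DeliverPendingOutputs keeps draining the codec even when no new input frames arrive.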

Now consider the case where use_surface_ is true: the Java-layer MediaCodecVideoEncoder's encodeTexture function is called, defined as follows:

  @CalledByNativeUnchecked
  boolean encodeTexture(boolean isKeyframe, int oesTextureId, float[] transformationMatrix,
      long presentationTimestampUs) {
    checkOnMediaCodecThread();
    try {
      checkKeyFrameRequired(isKeyframe, presentationTimestampUs);
      eglBase.makeCurrent();
      // TODO(perkj): glClear() shouldn't be necessary since every pixel is covered anyway,
      // but it's a workaround for bug webrtc:5147.
      GLES20.glClear(GLES20.GL_COLOR_BUFFER_BIT);
      drawer.drawOes(oesTextureId, transformationMatrix, width, height, 0, 0, width, height);
      eglBase.swapBuffers(TimeUnit.MICROSECONDS.toNanos(presentationTimestampUs));
      return true;
    } catch (RuntimeException e) {
      Logging.e(TAG, "encodeTexture failed", e);
      return false;
    }
  }

The frame is rendered into MediaCodec's input Surface with OpenGL, which hands the image data to the OMX component for encoding.
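
The presentation timestamp passed to swapBuffers matters: it becomes the frame's timestamp inside MediaCodec. In WebRTC's Java EglBase this is attached via EGLExt.eglPresentationTimeANDROID before the swap; here is a hedged native sketch of the equivalent EGL calls (the EGL_ANDROID_presentation_time extension, whose availability should be checked at runtime):

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <cstdint>

// Tag the next swapped frame with a presentation timestamp, then swap.
// MediaCodec reads the timestamp back as the encoded frame's PTS.
void SwapWithTimestamp(EGLDisplay display, EGLSurface surface,
                       int64_t presentation_time_ns) {
  auto set_time = reinterpret_cast<PFNEGLPRESENTATIONTIMEANDROIDPROC>(
      eglGetProcAddress("eglPresentationTimeANDROID"));
  if (set_time)
    set_time(display, surface, presentation_time_ns);
  eglSwapBuffers(display, surface);  // submits the frame to the encoder's input Surface
}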

DeliverPendingOutputs is then called to drain the encoder; it is defined as follows:

bool MediaCodecVideoEncoder::DeliverPendingOutputs(JNIEnv* jni) {
  RTC_DCHECK_CALLED_SEQUENTIALLY(&encoder_queue_checker_);

  while (true) {
    ScopedJavaLocalRef<jobject> j_output_buffer_info =
        Java_MediaCodecVideoEncoder_dequeueOutputBuffer(
            jni, j_media_codec_video_encoder_);
    if (CheckException(jni)) {
      ALOGE << "Exception in set dequeue output buffer.";
      ProcessHWError(true /* reset_if_fallback_unavailable */);
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
    if (IsNull(jni, j_output_buffer_info)) {
      break;
    }

    int output_buffer_index =
        Java_OutputBufferInfo_getIndex(jni, j_output_buffer_info);
    if (output_buffer_index == -1) {
      ProcessHWError(true /* reset_if_fallback_unavailable */);
      return false;
    }

    // Get key and config frame flags.
    ScopedJavaLocalRef<jobject> j_output_buffer =
        Java_OutputBufferInfo_getBuffer(jni, j_output_buffer_info);
    bool key_frame =
        Java_OutputBufferInfo_isKeyFrame(jni, j_output_buffer_info);

    // Get frame timestamps from a queue - for non config frames only.
    int64_t encoding_start_time_ms = 0;
    int64_t frame_encoding_time_ms = 0;
    last_output_timestamp_ms_ =
        Java_OutputBufferInfo_getPresentationTimestampUs(jni,
                                                         j_output_buffer_info) /
        rtc::kNumMicrosecsPerMillisec;
    if (!input_frame_infos_.empty()) {
      const InputFrameInfo& frame_info = input_frame_infos_.front();
      output_timestamp_ = frame_info.frame_timestamp;
      output_render_time_ms_ = frame_info.frame_render_time_ms;
      output_rotation_ = frame_info.rotation;
      encoding_start_time_ms = frame_info.encode_start_time;
      input_frame_infos_.pop_front();
    }

    // Extract payload.
    size_t payload_size = jni->GetDirectBufferCapacity(j_output_buffer.obj());
    uint8_t* payload = reinterpret_cast<uint8_t*>(
        jni->GetDirectBufferAddress(j_output_buffer.obj()));
    if (CheckException(jni)) {
      ALOGE << "Exception in get direct buffer address.";
      ProcessHWError(true /* reset_if_fallback_unavailable */);
      return WEBRTC_VIDEO_CODEC_ERROR;
    }

    // Callback - return encoded frame.
    const VideoCodecType codec_type = GetCodecType();
    EncodedImageCallback::Result callback_result(
        EncodedImageCallback::Result::OK);
    if (callback_) {
      std::unique_ptr<EncodedImage> image(
          new EncodedImage(payload, payload_size, payload_size));
      image->_encodedWidth = width_;
      image->_encodedHeight = height_;
      image->_timeStamp = output_timestamp_;
      image->capture_time_ms_ = output_render_time_ms_;
      image->rotation_ = output_rotation_;
      image->content_type_ = (codec_mode_ == VideoCodecMode::kScreensharing)
                                 ? VideoContentType::SCREENSHARE
                                 : VideoContentType::UNSPECIFIED;
      image->timing_.flags = TimingFrameFlags::kInvalid;
      image->_frameType = (key_frame ? kVideoFrameKey : kVideoFrameDelta);
      image->_completeFrame = true;
      CodecSpecificInfo info;
      memset(&info, 0, sizeof(info));
      info.codecType = codec_type;
      if (codec_type == kVideoCodecVP8) {
        info.codecSpecific.VP8.pictureId = picture_id_;
        info.codecSpecific.VP8.nonReference = false;
        info.codecSpecific.VP8.simulcastIdx = 0;
        info.codecSpecific.VP8.temporalIdx = kNoTemporalIdx;
        info.codecSpecific.VP8.layerSync = false;
        info.codecSpecific.VP8.tl0PicIdx = kNoTl0PicIdx;
        info.codecSpecific.VP8.keyIdx = kNoKeyIdx;
      } else if (codec_type == kVideoCodecVP9) {
        if (key_frame) {
          gof_idx_ = 0;
        }
        info.codecSpecific.VP9.picture_id = picture_id_;
        info.codecSpecific.VP9.inter_pic_predicted = key_frame ? false : true;
        info.codecSpecific.VP9.flexible_mode = false;
        info.codecSpecific.VP9.ss_data_available = key_frame ? true : false;
        info.codecSpecific.VP9.tl0_pic_idx = tl0_pic_idx_++;
        info.codecSpecific.VP9.temporal_idx = kNoTemporalIdx;
        info.codecSpecific.VP9.spatial_idx = kNoSpatialIdx;
        info.codecSpecific.VP9.temporal_up_switch = true;
        info.codecSpecific.VP9.inter_layer_predicted = false;
        info.codecSpecific.VP9.gof_idx =
            static_cast<size_t>(gof_idx_++ % gof_.num_frames_in_gof);
        info.codecSpecific.VP9.num_spatial_layers = 1;
        info.codecSpecific.VP9.spatial_layer_resolution_present = false;
        if (info.codecSpecific.VP9.ss_data_available) {
          info.codecSpecific.VP9.spatial_layer_resolution_present = true;
          info.codecSpecific.VP9.width[0] = width_;
          info.codecSpecific.VP9.height[0] = height_;
          info.codecSpecific.VP9.gof.CopyGofInfoVP9(gof_);
        }
      }
      picture_id_ = (picture_id_ + 1) & 0x7FFF;

      // Generate a header describing a single fragment.
      RTPFragmentationHeader header;
      memset(&header, 0, sizeof(header));
      if (codec_type == kVideoCodecVP8 || codec_type == kVideoCodecVP9) {
        header.VerifyAndAllocateFragmentationHeader(1);
        header.fragmentationOffset[0] = 0;
        header.fragmentationLength[0] = image->_length;
        header.fragmentationPlType[0] = 0;
        header.fragmentationTimeDiff[0] = 0;
        if (codec_type == kVideoCodecVP8) {
          int qp;
          if (vp8::GetQp(payload, payload_size, &qp)) {
            current_acc_qp_ += qp;
            image->qp_ = qp;
          }
        } else if (codec_type == kVideoCodecVP9) {
          int qp;
          if (vp9::GetQp(payload, payload_size, &qp)) {
            current_acc_qp_ += qp;
            image->qp_ = qp;
          }
        }
      } else if (codec_type == kVideoCodecH264) {
        h264_bitstream_parser_.ParseBitstream(payload, payload_size);
        int qp;
        if (h264_bitstream_parser_.GetLastSliceQp(&qp)) {
          current_acc_qp_ += qp;
          image->qp_ = qp;
        }
        // For H.264 search for start codes.
        const std::vector<H264::NaluIndex> nalu_idxs =
            H264::FindNaluIndices(payload, payload_size);
        if (nalu_idxs.empty()) {
          ALOGE << "Start code is not found!";
          ALOGE << "Data:" <<  image->_buffer[0] << " " << image->_buffer[1]
              << " " << image->_buffer[2] << " " << image->_buffer[3]
              << " " << image->_buffer[4] << " " << image->_buffer[5];
          ProcessHWError(true /* reset_if_fallback_unavailable */);
          return false;
        }
        header.VerifyAndAllocateFragmentationHeader(nalu_idxs.size());
        for (size_t i = 0; i < nalu_idxs.size(); i++) {
          header.fragmentationOffset[i] = nalu_idxs[i].payload_start_offset;
          header.fragmentationLength[i] = nalu_idxs[i].payload_size;
          header.fragmentationPlType[i] = 0;
          header.fragmentationTimeDiff[i] = 0;
        }
      }

      callback_result = callback_->OnEncodedImage(*image, &info, &header);
    }

    // Return output buffer back to the encoder.
    bool success = Java_MediaCodecVideoEncoder_releaseOutputBuffer(
        jni, j_media_codec_video_encoder_, output_buffer_index);
    if (CheckException(jni) || !success) {
      ProcessHWError(true /* reset_if_fallback_unavailable */);
      return false;
    }

    // Print per frame statistics.
    if (encoding_start_time_ms > 0) {
      frame_encoding_time_ms = rtc::TimeMillis() - encoding_start_time_ms;
    }
    if (frames_encoded_ < kMaxEncodedLogFrames) {
      int current_latency = static_cast(last_input_timestamp_ms_ -
                                             last_output_timestamp_ms_);
      ALOGD << "Encoder frame out # " << frames_encoded_
            << ". Key: " << key_frame << ". Size: " << payload_size
            << ". TS: " << static_cast(last_output_timestamp_ms_)
            << ". Latency: " << current_latency
            << ". EncTime: " << frame_encoding_time_ms;
    }

    // Calculate and print encoding statistics - every 3 seconds.
    frames_encoded_++;
    current_frames_++;
    current_bytes_ += payload_size;
    current_encoding_time_ms_ += frame_encoding_time_ms;
    LogStatistics(false);

    // Errors in callback_result are currently ignored.
    if (callback_result.drop_next_frame)
      drop_next_input_frame_ = true;
  }
  return true;
}
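
For H.264 the payload is an Annex-B byte stream, so fragment boundaries are found by scanning for 00 00 01 / 00 00 00 01 start codes. A simplified, self-contained sketch of the technique behind H264::FindNaluIndices (the real helper also records start-code lengths; names here are illustrative):

#include <cstddef>
#include <cstdint>
#include <vector>

struct NaluIndex {
  size_t payload_start_offset;  // first byte after the start code
  size_t payload_size;          // set once the next start code is found
};

std::vector<NaluIndex> FindNaluStartCodes(const uint8_t* data, size_t size) {
  std::vector<NaluIndex> indices;
  for (size_t i = 0; i + 2 < size; ++i) {
    if (data[i] == 0 && data[i + 1] == 0 && data[i + 2] == 1) {
      if (!indices.empty()) {
        // A 00 before "00 00 01" belongs to a 4-byte start code, not payload.
        size_t end = (i > 0 && data[i - 1] == 0) ? i - 1 : i;
        indices.back().payload_size = end - indices.back().payload_start_offset;
      }
      indices.push_back({i + 3, 0});
      i += 2;  // skip past the matched start code
    }
  }
  if (!indices.empty())
    indices.back().payload_size = size - indices.back().payload_start_offset;
  return indices;
}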

The main flow of DeliverPendingOutputs:

  • Call the Java-layer MediaCodecVideoEncoder's dequeueOutputBuffer to pull encoded data out of the codec, wrapped as an OutputBufferInfo.
  • Convert the OutputBufferInfo into an EncodedImage (filling in dimensions, timestamps, frame type, codec-specific info, and the fragmentation header).
  • Invoke callback_->OnEncodedImage to dispatch the EncodedImage. The callback_ member is a VCMEncodedFrameCallback, whose OnEncodedImage ultimately hands the EncodedImage to VideoSendStreamImpl. VideoSendStreamImpl::OnEncodedImage is defined as follows:

EncodedImageCallback::Result VideoSendStreamImpl::OnEncodedImage(
    const EncodedImage& encoded_image,
    const CodecSpecificInfo* codec_specific_info,
    const RTPFragmentationHeader* fragmentation) {
  // Encoded is called on whatever thread the real encoder implementation run
  // on. In the case of hardware encoders, there might be several encoders
  // running in parallel on different threads.
  size_t simulcast_idx = 0;
  if (codec_specific_info->codecType == kVideoCodecVP8) {
    simulcast_idx = codec_specific_info->codecSpecific.VP8.simulcastIdx;
  }
  if (config_->post_encode_callback) {
    config_->post_encode_callback->EncodedFrameCallback(EncodedFrame(
        encoded_image._buffer, encoded_image._length, encoded_image._frameType,
        simulcast_idx, encoded_image._timeStamp));
  }
  {
    rtc::CritScope lock(&encoder_activity_crit_sect_);
    if (check_encoder_activity_task_)
      check_encoder_activity_task_->UpdateEncoderActivity();
  }

  protection_bitrate_calculator_.UpdateWithEncodedData(encoded_image);
  EncodedImageCallback::Result result = payload_router_.OnEncodedImage(
      encoded_image, codec_specific_info, fragmentation);

  RTC_DCHECK(codec_specific_info);

  int layer = codec_specific_info->codecType == kVideoCodecVP8
                  ? codec_specific_info->codecSpecific.VP8.simulcastIdx
                  : 0;
  {
    rtc::CritScope lock(&ivf_writers_crit_);
    if (file_writers_[layer].get()) {
      bool ok = file_writers_[layer]->WriteFrame(
          encoded_image, codec_specific_info->codecType);
      RTC_DCHECK(ok);
    }
  }

  return result;
}

payload_router_ is a PayloadRouter object, which takes care of the subsequent RTP packetization and transmission.
