ZLMediaKit 服务器源码解读---RTSP推流拉流

RTSP推流

1、数据解析过程

数据都会通过回调的方式到达RtspSession类(会话)的onRecv函数
先给出类的继承关系:

class RtspSession: public TcpSession, public RtspSplitter, public RtpReceiver , public MediaSourceEvent
{}
class RtspSplitter : public HttpRequestSplitter
{}

数据回调后进入分包处理,然后去根据协议查找头部数据和内容

// Entry point for all bytes arriving on the RTSP TCP connection; the
// session layer delivers every received buffer here.
void RtspSession::onRecv(const Buffer::Ptr &buf) {
	...
    if (_on_recv) {
        //forward the http poster's request data to the http getter for handling
        _on_recv(buf);
    } else {
        //normal RTSP/RTP path: feed the splitter (HttpRequestSplitter::input)
        input(buf->data(), buf->size());
    }
}

// Splits the raw byte stream into header and content sections, then
// dispatches each part to the corresponding virtual callback.
void HttpRequestSplitter::input(const char *data,size_t len) {
	...
    // header first, then the body whose length the header announced
   	onRecvHeader(header_ptr, header_size);
   	onRecvContent(ptr,_content_len);
	...
}

通过头部区分是rtp包还是rtsp命令包,rtp包的处理函数是onRtpPacket,rtsp包的处理函数是onWholeRtspPacket

// Distinguishes interleaved RTP frames from plain-text RTSP commands and
// routes them accordingly.
// Returns the remaining content length (0 means the message is complete).
ssize_t RtspSplitter::onRecvHeader(const char *data, size_t len) {
    if(_isRtpPacket){
        // '$'-framed interleaved data -> RTP/RTCP over TCP
        onRtpPacket(data,len);
        return 0;
    }
    // plain-text RTSP request: parse the header lines
    _parser.Parse(data);
    auto ret = getContentLength(_parser);
    if(ret == 0){
        // no body expected -> the RTSP message is already complete
        onWholeRtspPacket(_parser);
        _parser.Clear();
    }
    return ret;
}

2、处理rtsp包

rtsp处理过程,解析出来的各种交互命令进入不同的处理函数

// Dispatches one complete RTSP request to the handler registered for its
// method; unknown methods get a 403 response and tear down the session.
void RtspSession::onWholeRtspPacket(Parser &parser) {
    string method = parser.Method(); //extract the request method
    _cseq = atoi(parser["CSeq"].data());
    if (_content_base.empty() && method != "GET") {
        _content_base = parser.Url();
        _media_info.parse(parser.FullUrl());
        _media_info._schema = RTSP_SCHEMA;
    }

    using rtsp_request_handler = void (RtspSession::*)(const Parser &parser);
    static unordered_map<string, rtsp_request_handler> s_cmd_functions;
    // the method->handler table is built exactly once (thread-safe via onceToken)
    static onceToken token([]() {
        s_cmd_functions.emplace("OPTIONS", &RtspSession::handleReq_Options);
        s_cmd_functions.emplace("DESCRIBE", &RtspSession::handleReq_Describe);
        s_cmd_functions.emplace("ANNOUNCE", &RtspSession::handleReq_ANNOUNCE);
        s_cmd_functions.emplace("RECORD", &RtspSession::handleReq_RECORD);
        s_cmd_functions.emplace("SETUP", &RtspSession::handleReq_Setup);
        s_cmd_functions.emplace("PLAY", &RtspSession::handleReq_Play);
        s_cmd_functions.emplace("PAUSE", &RtspSession::handleReq_Pause);
        s_cmd_functions.emplace("TEARDOWN", &RtspSession::handleReq_Teardown);
        s_cmd_functions.emplace("GET", &RtspSession::handleReq_Get);
        s_cmd_functions.emplace("POST", &RtspSession::handleReq_Post);
        s_cmd_functions.emplace("SET_PARAMETER", &RtspSession::handleReq_SET_PARAMETER);
        // NOTE: GET_PARAMETER deliberately shares the SET_PARAMETER handler
        // (commonly used only as a keep-alive) — this mapping matches upstream.
        s_cmd_functions.emplace("GET_PARAMETER", &RtspSession::handleReq_SET_PARAMETER);
    });

    auto it = s_cmd_functions.find(method);
    if (it == s_cmd_functions.end()) {
        // unsupported method: reply 403 and shut the session down
        sendRtspResponse("403 Forbidden");
        throw SockException(Err_shutdown, StrPrinter << "403 Forbidden:" << method);
    }

    // invoke the member-function handler for this method
    (this->*(it->second))(parser);
    parser.Clear();
}

3、处理rtp包(rtcp包处理此处忽略),rtp解码

// Handles one interleaved frame received over TCP. Per the RTSP interleaving
// convention used here, an even channel id carries RTP and the following odd
// id carries the matching track's RTCP.
void RtspSession::onRtpPacket(const char *data, size_t len) {
    uint8_t interleaved = data[1];
    if (interleaved % 2 == 0) {
        // RTP: only meaningful while a publish source exists
        if (!_push_src) {
            return;
        }
        auto track_idx = getTrackIndexByInterleaved(interleaved);
        handleOneRtp(track_idx, _sdp_track[track_idx]->_type, _sdp_track[track_idx]->_samplerate, (uint8_t *) data + RtpPacket::kRtpTcpHeaderSize, len - RtpPacket::kRtpTcpHeaderSize);
    } else {
        // RTCP: its channel id is the RTP channel id + 1
        auto track_idx = getTrackIndexByInterleaved(interleaved - 1);
        onRtcpPacket(track_idx, _sdp_track[track_idx], data + RtpPacket::kRtpTcpHeaderSize, len - RtpPacket::kRtpTcpHeaderSize);
    }
}

using RtpReceiver = RtpMultiReceiver<2>;
// Forwards one raw RTP payload to the per-track RtpTrack, which parses it
// into an RtpPacket and feeds the sequence-number sorter.
 bool RtpMultiReceiver::handleOneRtp(int index, TrackType type, int sample_rate, uint8_t *ptr, size_t len) {
        assert(index < kCount && index >= 0);	
		//member declared elsewhere as: RtpTrackImp _track[kCount];
        return _track[index].inputRtp(type, sample_rate, ptr, len).operator bool();
}

在这里将数据封装成一个rtp包,然后调用onBeforeRtpSorted(作用暂时没搞清楚),
再调用sortPacket排序

class RtpTrackImp : public RtpTrack
//parses one complete rtp packet
RtpPacket::Ptr RtpTrack::inputRtp(TrackType type, int sample_rate, uint8_t *ptr, size_t len)
{
	...
    auto rtp = RtpPacket::create();
    //reserve 4 extra bytes for the rtp-over-tcp header
    rtp->setCapacity(RtpPacket::kRtpTcpHeaderSize + len);
    rtp->setSize(RtpPacket::kRtpTcpHeaderSize + len);
    rtp->sample_rate = sample_rate;
    rtp->type = type;

    //fill in the 4-byte rtp-over-tcp header: '$', channel id, 16-bit length
    uint8_t *data = (uint8_t *) rtp->data();
    data[0] = '$';
    data[1] = 2 * type;
    data[2] = (len >> 8) & 0xFF;
    data[3] = len & 0xFF;
	...
    onBeforeRtpSorted(rtp);
    // hand the packet to the sequence-number sorter
    sortPacket(rtp->getSeq(), rtp);
    return rtp;
}

排好序的包放入 _pkt_sort_cache_map,然后将第一个数据抛出来,过程如下

class RtpTrack : private PacketSortor<RtpPacket::Ptr>;
//packet sort cache, keyed (and therefore ordered) by rtp sequence number
PacketSortor::map<SEQ, T> _pkt_sort_cache_map;
  /**
     * input a packet and sort it into the cache
     * @param seq sequence number
     * @param packet packet payload
     */
void sortPacket(SEQ seq, T packet) {
      	......
        tryPopPacket();
    }

// Pops as many in-order packets from the cache as currently possible.
void tryPopPacket() {
      	......
        popPacket();
  		......
      }
// Pops the front packet from the sort cache.
 void popPacket() {
       	......
        //filter out packets whose sequence number jumped backwards
        popIterator(it);
      	......
    }

// Delivers the packet at `it` to the registered callback (_cb).
void popIterator(typename map<SEQ, T>::iterator it) {
		......
        _cb(seq, data);
  	}

_cb 就是onRtpSorted函数,所以抛出来的数据最终都会回调到 RtspSession的onRtpSorted函数,然后进入RtspSession的成员RtspMediaSourceImp::Ptr _push_src

// Receives rtp packets already ordered by sequence number and hands them
// to the publish media source.
// NOTE(review): _push_src is dereferenced without a null check here —
// presumably guaranteed non-null by the caller (onRtpPacket checks it);
// confirm against upstream.
void RtspSession::onRtpSorted(RtpPacket::Ptr rtp, int track_idx) {
    _push_src->onWrite(std::move(rtp), false);
}

进入_push_src 的函数如下:

  /**
     * Input an rtp packet (already sorted by sequence number) and
     * optionally demultiplex it into frames.
     */
    void onWrite(RtpPacket::Ptr rtp, bool key_pos) override {
      
        if (_all_track_ready && !_muxer->isEnabled()) {
            //all tracks known and protocol conversion disabled: no need to demux rtp
            //with rtp demuxing off we cannot detect key frames, which would break
            //instant-open playback or cause artifacts at stream start
            key_pos = rtp->type == TrackVideo;
        } else {
            //rtp must be demultiplexed into frames
            key_pos = _demuxer->inputRtp(rtp);
        }
        GET_CONFIG(bool, directProxy, Rtsp::kDirectProxy);
  
        if (directProxy) {
            //only direct-proxy mode forwards the original rtp untouched
            RtspMediaSource::onWrite(std::move(rtp), key_pos);
            //DebugL << "directProxy";
        }
    }

然后进入RtspMediaSourceImp的解复用器 RtspDemuxer,调用代码如下:

// Routes a sorted rtp packet to the codec-specific rtp decoder of its track.
// Returns true only when the video decoder reports a key-frame position;
// audio packets always yield false.
bool RtspDemuxer::inputRtp(const RtpPacket::Ptr &rtp) {
    switch (rtp->type) {
        case TrackVideo: {
            if (_video_rtp_decoder) {
                // the video decoder's return value marks key-frame positions
                return _video_rtp_decoder->inputRtp(rtp, true);
            }
            return false;
        }
        case TrackAudio: {
            if (_audio_rtp_decoder) {
                // audio has no key frames: decode but always report false
                _audio_rtp_decoder->inputRtp(rtp, false);
                return false;
            }
            return false;
        }
        default: return false;
    }
}

以H264为例,解复用的调用过程如下:

_video_rtp_decoder->inputRtp(rtp, true);

仔细分析一下这个过程 ,其中_video_rtp_decoder 定义如下

RtpCodec::Ptr _video_rtp_decoder;

赋值如下:

···
_video_rtp_decoder = Factory::getRtpDecoderByTrack(_video_track);
···
//
// Creates the codec-specific rtp decoder for the given track, or nullptr for
// unsupported codecs.
// FIX: the blog's markdown rendering stripped the angle-bracketed template
// arguments from every std::make_shared call (compare the intact
// getRtpEncoderBySdp excerpt below); restored to match the upstream source.
RtpCodec::Ptr Factory::getRtpDecoderByTrack(const Track::Ptr &track) {
    switch (track->getCodecId()){
        case CodecH264 : return std::make_shared<H264RtpDecoder>();
        case CodecH265 : return std::make_shared<H265RtpDecoder>();
        case CodecAAC : return std::make_shared<AACRtpDecoder>(track->clone());
        case CodecL16 :
        case CodecOpus :
        case CodecG711A :
        case CodecG711U : return std::make_shared<CommonRtpDecoder>(track->getCodecId());
        default : WarnL << "暂不支持该CodecId:" << track->getCodecName(); return nullptr;
    }
}

所以_video_rtp_decoder 本质是RtpCodec的继承类H264RtpDecoder,所以rtp包最终进入H264RtpDecoder类的inputRtp的函数,函数如下:

// Decodes one rtp packet into h264 data and tracks sequence continuity:
// on a gap (lost packet) the rest of the GOP is marked as dropped.
bool H264RtpDecoder::inputRtp(const RtpPacket::Ptr &rtp, bool key_pos) {
    auto seq = rtp->getSeq();
    auto ret = decodeRtp(rtp);
    // a non-consecutive seq after the first packet means rtp loss
    if (!_gop_dropped && seq != (uint16_t) (_last_seq + 1) && _last_seq) {
        _gop_dropped = true;
        WarnL << "start drop h264 gop, last seq:" << _last_seq << ", rtp:\r\n" << rtp->dumpString();
    }
    _last_seq = seq;
    return ret;
}

最终在H264RtpDecoder类中的decodeRtp函数将rtp包变成了h264帧数据,并调用RtpCodec::inputFrame将其分发出去,也就是如下函数

   /**
     * Write a frame and dispatch it to every registered delegate.
     * Returns true if at least one delegate accepted the frame.
     */
    bool inputFrame(const Frame::Ptr &frame) override{
        if(_need_update){
            //the delegate list changed; synchronize the read copy once
            lock_guard<mutex> lck(_mtx);
            _delegates_read = _delegates_write;
            _need_update = false;
        }

        //_delegates_read is guaranteed to be accessed from a single thread
        bool ret = false;
        for (auto &pr : _delegates_read) {
            if (pr.second->inputFrame(frame)) {
                ret = true;
            }
        }
        return ret;
    }

_delegates_read 定义如下,显然是一个map对象,map的value是一个FrameWriterInterface ,用来将帧数据回调出去,通过代码可知最终是代理类调用了inputFrame。

	/**
 * Abstract interface for anything that can consume frames.
 */
class FrameWriterInterface {
public:
    typedef std::shared_ptr<FrameWriterInterface> Ptr;
    FrameWriterInterface(){}
    virtual ~FrameWriterInterface(){}

    /**
     * Write one frame; returns whether the frame was accepted.
     */
    virtual bool inputFrame(const Frame::Ptr &frame) = 0;
};

map<void *,FrameWriterInterface::Ptr>  _delegates_read;

注意上面函数中_delegates_read = _delegates_write;
而_delegates_write 只在如下函数中才会有更新,也就是说数据分发的代理类都在下面函数添加

   /**
     * Register a delegate to receive dispatched frames.
     */
    void addDelegate(const FrameWriterInterface::Ptr &delegate){
        //_delegates_write may be modified from multiple threads concurrently
        lock_guard<mutex> lck(_mtx);
        _delegates_write.emplace(delegate.get(),delegate);
        _need_update = true;
    }

那么addDelegate是何时调用的呢?回到RtspDemuxer类,显然在创建VideoTrack时给解码类添加了代理

// Builds the video Track and its rtp decoder from the SDP video description,
// wiring the decoder's output into the Track via the delegate mechanism.
void RtspDemuxer::makeVideoTrack(const SdpTrack::Ptr &video) {
    if (_video_rtp_decoder) {
        return;
    }
    //create the Track object; for h264 this returns an H264Track
    _video_track = dynamic_pointer_cast<VideoTrack>(Factory::getTrackBySdp(video));
    if (!_video_track) {
        return;
    }
    setBitRate(video, _video_track);
    //create the RtpCodec object used to decode rtp
    _video_rtp_decoder = Factory::getRtpDecoderByTrack(_video_track);
    if (!_video_rtp_decoder) {
        //no matching rtp decoder found; the track is unusable
        _video_track.reset();
        return;
    }
    //set the rtp decoder's delegate so generated frames are written into the Track
    _video_rtp_decoder->addDelegate(_video_track);
    
    addTrack(_video_track);
   
}

知道了代理是怎么设置的,那么就能确认是谁处理了H264帧。现在看下面代码,getTrackBySdp返回的是一个H264Track类,最终是调用了_video_track(实际类型是H264Track)的inputFrame;

_video_track = dynamic_pointer_cast<VideoTrack>(Factory::getTrackBySdp(video));

// Builds a Track from an SDP media description; for h264 it tries to recover
// sps/pps from the fmtp line so the track is ready immediately.
Track::Ptr Factory::getTrackBySdp(const SdpTrack::Ptr &track) {
    auto codec = getCodecId(track->_codec);
    if (codec == CodecInvalid) {
        //fall back to the classic payload type to derive codec and sample rate
        codec = RtpPayload::getCodecId(track->_pt);
    }
    switch (codec) {
  ......
        case CodecH264 : {
            //example fmtp line being parsed:
            //a=fmtp:96 packetization-mode=1;profile-level-id=42C01F;sprop-parameter-sets=Z0LAH9oBQBboQAAAAwBAAAAPI8YMqA==,aM48gA==
            auto map = Parser::parseArgs(track->_fmtp, ";", "=");
            auto sps_pps = map["sprop-parameter-sets"];
            string base64_SPS = FindField(sps_pps.data(), NULL, ",");
            string base64_PPS = FindField(sps_pps.data(), ",", NULL);
            auto sps = decodeBase64(base64_SPS);
            auto pps = decodeBase64(base64_PPS);
            if (sps.empty() || pps.empty()) {
                //sdp carries no sps/pps; they may be recovered later from rtp
                return std::make_shared<H264Track>();
            }
            return std::make_shared<H264Track>(sps, pps, 0, 0);
        }
......
}

然后数据进入H264Track::inputFrame,很显然在这里对H264帧数据进行了处理,分包,

// Accepts an h264 frame: I/B/P frames pass straight through; anything else
// is split on NAL boundaries first in case several NAL units are glued together.
bool H264Track::inputFrame(const Frame::Ptr &frame) {
 
    using H264FrameInternal = FrameInternal<H264FrameNoCacheAble>;
    int type = H264_TYPE(frame->data()[frame->prefixSize()]);
    if (type == H264Frame::NAL_B_P || type == H264Frame::NAL_IDR) {
        return inputFrame_l(frame);
    }

    //not an I/B/P frame: split it to avoid multiple NAL units stuck together
    bool ret = false;
    splitH264(frame->data(), frame->size(), frame->prefixSize(), [&](const char *ptr, size_t len, size_t prefix) {
        H264FrameInternal::Ptr sub_frame = std::make_shared<H264FrameInternal>(frame, (char *) ptr, len, prefix);
        if (inputFrame_l(sub_frame)) {
            ret = true;
        }
    });
  
    return ret;
}

再来观察下面函数,在这里会添加pps,sps信息(添加完pps,sps信息的h264数据就能直接用ffmpeg播放了),然后将数据发出去给各个代理,

// Processes one NAL unit: caches SPS/PPS, drops AUD, and dispatches everything
// else to the delegates — inserting SPS/PPS before each key frame so the raw
// stream is directly playable (e.g. by ffmpeg).
bool H264Track::inputFrame_l(const Frame::Ptr &frame){
    int type = H264_TYPE( frame->data()[frame->prefixSize()]);
    bool ret = true;
    switch (type) {
        case H264Frame::NAL_SPS: {
            //remember the latest sps
            _sps = string(frame->data() + frame->prefixSize(), frame->size() - frame->prefixSize());
            break;
        }
        case H264Frame::NAL_PPS: {
            //remember the latest pps
            _pps = string(frame->data() + frame->prefixSize(), frame->size() - frame->prefixSize());
            break;
        }
        case H264Frame::NAL_AUD: {
            //ignore access-unit delimiter frames
            ret = false;
            break;
        }
        default:
            //any other NAL is real h264 payload; dumped to a file it would play
            if (frame->keyFrame()) {//insert the cached sps/pps before each key frame
                insertConfigFrame(frame);
            }
            ret = VideoTrack::inputFrame(frame);
            break;
    }

    _is_idr = type == H264Frame::NAL_IDR;
    if (_width == 0 && ready()) {
        onReady();
    }
    return ret;
}

4、处理H264包,编码

那么数据是怎么从解码类(RtspDemuxer)到编码类(MultiMediaSourceMuxer)的呢?注意在RtspMediaSourceImp类中有以下2个成员,分别是解码类和编码类

    RtspDemuxer::Ptr _demuxer;
    MultiMediaSourceMuxer::Ptr _muxer;

在以下函数中建立数据联系,_demuxer中的H264Track类将_muxer(MultiMediaSourceMuxer)添加进了他的代理

    /**
     * Track-added event triggered by _demuxer: registers the muxer as a
     * delegate of the new track so decoded frames flow into it.
     */
    bool addTrack(const Track::Ptr &track) override {
        if (_muxer) {
            if (_muxer->addTrack(track)) {
                //frames produced by this track will now reach the muxer
                track->addDelegate(_muxer);
                return true;
            }
        }
        return false;
    }

所以H264Track处理后的h264帧在数据分发时会进入MultiMediaSourceMuxer类的接口inputFrame

 // Receives decoded frames from the tracks and feeds them into the rtsp muxer,
 // honoring on-demand mode (kRtspDemand).
 bool inputFrame(const Frame::Ptr &frame) override {
        
        GET_CONFIG(bool, rtsp_demand, General::kRtspDemand);
        if (_clear_cache && rtsp_demand) {
            _clear_cache = false;
            _media_src->clearCache();
        }
        if (_enabled || !rtsp_demand) {
            _media_src->intputFrame(frame);//line added by the blog author — NOTE(review): the name "intputFrame" looks like a typo for "inputFrame"; this call is not in upstream ZLMediaKit
            return RtspMuxer::inputFrame(frame);
        }
        return false;
    }

然后数据进入RtspMuxer的inputFrame

//RtspMuxer member: one rtp encoder slot per track type
RtpCodec::Ptr _encoder[TrackMax];
 
// Routes a frame to the rtp encoder matching its track type (video/audio).
bool RtspMuxer::inputFrame(const Frame::Ptr &frame) {
  
    auto &encoder = _encoder[frame->getTrackType()];
    bool ret = encoder ? encoder->inputFrame(frame) : false;;
    
    return ret;
}

下面说明_encoder的申明、类的继承关系、及赋值过程;

//RtspMuxer member: one rtp encoder slot per track type
RtpCodec::Ptr _encoder[TrackMax];
class RtpCodec : public RtpRing, public FrameDispatcher , public CodecInfo{}

//where _encoder gets assigned
// Creates the rtp encoder for a track from its sdp and hooks the encoder's
// output into the shared rtp ring buffer.
bool RtspMuxer::addTrack(const Track::Ptr &track) {
    //generate the sdp from the track
    Sdp::Ptr sdp = track->getSdp();
    if (!sdp) {
        return false;
    }

    auto &encoder = _encoder[track->getTrackType()];
   
    encoder = Factory::getRtpEncoderBySdp(sdp);
    if (!encoder) {
        return false;
    }

    //set the rtp output ring buffer
    encoder->setRtpRing(_rtpInterceptor);

    //append this track's sdp to the session sdp
    _sdp.append(sdp->getSdp());
    trySyncTrack();
    return true;
}

在getRtpEncoderBySdp中赋值,函数如下,显然根据编码类型,返回编码类,这之后依然以h264为例说明,那么返回的就是H264RtpEncoder


// Creates the codec-specific rtp encoder described by the sdp, or nullptr for
// unsupported codecs.
RtpCodec::Ptr Factory::getRtpEncoderBySdp(const Sdp::Ptr &sdp) {
   	......
    switch (codec_id){
        case CodecH264 : return std::make_shared<H264RtpEncoder>(ssrc, mtu, sample_rate, pt, interleaved);
        case CodecH265 : return std::make_shared<H265RtpEncoder>(ssrc, mtu, sample_rate, pt, interleaved);
        case CodecAAC : return std::make_shared<AACRtpEncoder>(ssrc, mtu, sample_rate, pt, interleaved);
        case CodecL16 :
        case CodecOpus :
        case CodecG711A :
        case CodecG711U : return std::make_shared<CommonRtpEncoder>(codec_id, ssrc, mtu, sample_rate, pt, interleaved);
        default : WarnL << "暂不支持该CodecId:" << codec_id; return nullptr;
    }
}

那么此时数据也就会进入H264RtpEncoder的inputFrame函数


// Accepts h264 frames for rtp packing: caches SPS/PPS, and delays each frame
// by one so the mark bit can be derived from the NEXT frame's timestamp.
bool H264RtpEncoder::inputFrame(const Frame::Ptr &frame) {
    auto ptr = frame->data() + frame->prefixSize();
    switch (H264_TYPE(ptr[0])) {
        case H264Frame::NAL_SPS: {
            //cache sps; it is re-inserted before key frames in inputFrame_l
            _sps = Frame::getCacheAbleFrame(frame);
            return true;
        }
        case H264Frame::NAL_PPS: {
            //cache pps likewise
            _pps = Frame::getCacheAbleFrame(frame);
            return true;
        }
        default: break;
    }

    if (_last_frame) {
        //the mark bit is set only when the timestamp changed between frames
        inputFrame_l(_last_frame, _last_frame->pts() != frame->pts());
    }
    _last_frame = Frame::getCacheAbleFrame(frame);
    return true;
}

// Packs one h264 frame into rtp, inserting the cached SPS/PPS before every
// key frame.
bool H264RtpEncoder::inputFrame_l(const Frame::Ptr &frame, bool is_mark){
    
    if (frame->keyFrame()) {
        //guarantee SPS and PPS precede every key frame
        insertConfigFrame(frame->pts());
    }
  
    packRtp(frame->data() + frame->prefixSize(), frame->size() - frame->prefixSize(), frame->pts(), is_mark, false);
    
    return true;
}

然后重新将数据打包,最终都会调用RtpCodec::inputRtp(rtp, gop_pos)函数,rtp包进入环形数组,准备分发出去

// Writes an encoded rtp packet into the ring buffer, from where it is
// distributed to attached readers (players).
virtual bool inputRtp(const RtpPacket::Ptr &rtp, bool key_pos){
        //debug aid (disabled): dump the packet bytes / sequence number
        if(_rtpRing){
            _rtpRing->write(rtp,key_pos);
        }
        return key_pos;
    }

总结:通过rtsp端口的数据最终都会在RtspSession类中处理,推流时首先实例化_push_src(RtspMediaSourceImp),同时将_push_src放入一个全局的map变量s_media_source_map中去管理(用来后面取出数据),接下来开始处理数据包,过程如下:
1、处理rtp包:将接收到的原始rtp,经过分包排序,最终回调到RtspSession自身的成员函数中,在这里将数据发给_push_src(RtspMediaSourceImp)去处理
2、解码:在RtspMediaSourceImp类中又会将数据发给H264RtpDecoder去处理,最后将rtp包解码成h264,然后分发出来
3、编码:解码时数据最终又会回调到RtspMediaSourceImp类的成员变量_muxer(MultiMediaSourceMuxer)中去处理,然后到达H264RtpEncoder类中去编码,最后进入环形队列

RTSP 拉流

在鉴权成功时,找到推流媒体源,同时将sdp信息绑定到当前sdp


// After successful authentication for a play request: asynchronously look up
// the published media source, adopt its sdp tracks, and bind it as _play_src.
void RtspSession::onAuthSuccess() {
    TraceP(this);
    weak_ptr<RtspSession> weakSelf = dynamic_pointer_cast<RtspSession>(shared_from_this());
    MediaSource::findAsync(_media_info, weakSelf.lock(), [weakSelf](const MediaSource::Ptr &src){
        auto strongSelf = weakSelf.lock();
        if(!strongSelf){
            return;
        }
        auto rtsp_src = dynamic_pointer_cast<RtspMediaSource>(src);
        if (!rtsp_src) {
            //no matching MediaSource found
            string err = StrPrinter << "no such stream:" << strongSelf->_media_info._vhost << " " << strongSelf->_media_info._app << " " << strongSelf->_media_info._streamid;
            strongSelf->send_StreamNotFound();
            strongSelf->shutdown(SockException(Err_shutdown,err));
            return;
        }
        //matching rtsp stream found: adopt its sdp tracks
        strongSelf->_sdp_track = SdpParser(rtsp_src->getSdp()).getAvailableTrack();
        if (strongSelf->_sdp_track.empty()) {
            //stream is unusable without any valid track
            WarnL << "sdp中无有效track,该流无效:" << rtsp_src->getSdp();
            strongSelf->send_StreamNotFound();
            strongSelf->shutdown(SockException(Err_shutdown,"can not find any available track in sdp"));
            return;
        }
        strongSelf->_rtcp_context.clear();
        //one rtcp send context per sdp track
        for (auto &track : strongSelf->_sdp_track) {
            strongSelf->_rtcp_context.emplace_back(std::make_shared<RtcpContextForSend>());
        }
        strongSelf->_sessionid = makeRandStr(12);
        strongSelf->_play_src = rtsp_src;
 
        ......
    });
}

在handleReq_Play函数中利用_play_reader 去取出环形缓存区的rtp包,同时发给客户端

// PLAY handler: attaches a reader to the media source's ring buffer so every
// rtp packet popped from the ring is sent to this client.
void RtspSession::handleReq_Play(const Parser &parser) {
  	......
    auto play_src = _play_src.lock();
    ......
    if (!_play_reader && _rtp_type != Rtsp::RTP_MULTICAST) {
        weak_ptr<RtspSession> weakSelf = dynamic_pointer_cast<RtspSession>(shared_from_this());
        _play_reader = play_src->getRing()->attach(getPoller(), useGOP);
        //tear the session down if the source detaches (e.g. publisher stops)
        _play_reader->setDetachCB([weakSelf]() {
            auto strongSelf = weakSelf.lock();
            if (!strongSelf) {
                return;
            }
            strongSelf->shutdown(SockException(Err_shutdown, "rtsp ring buffer detached"));
        });
        //forward each batch of rtp packets from the ring to the client
        _play_reader->setReadCB([weakSelf](const RtspMediaSource::RingDataType &pack) {
            auto strongSelf = weakSelf.lock();
            if (!strongSelf) {
                return;
            }
            strongSelf->sendRtpPacket(pack);

        });
    }
    ......
}

你可能感兴趣的:(ZLMediaKit源码理解,音视频)