最近在使用C++编写真正的OO,解析RTMP协议,并将RTMP流保存为flv文件。所谓真正的OO,是用标准OO的思维和编程方式写代码,用类和对象解决问题;而不是将函数组织成类的本质上还是c的思维方式。
sample1_1500kbps.f4v和sample1_1000kbps.f4v都是fms自带的测试文件。
rtmpdump-2.2e是c写的,和rtmpdump做了比较,性能和内存占用都比rtmpdump要好(在没有优化的前提下比rtmpdump都要好),可读性和可维护性一个天上一个地下,读rtmpdump的代码就像进十八层地狱。
rtmpdump-2.2e代码量是14009,自己写的是12883(只实现了client端必须的rtmp包协议)。若按同等功能折算,代码量确实会比rtmpdump多,其中包括很多rtmp协议文档的注释,所以也相差不大(重用性估计会高),不会是数量级的差别。
我一直很看好OO的可读性和可维护性,比较担心性能,现在有点安慰了。我知道不是所有的c都性能低下,只是一般而言,对于普通人而言,经过同等的训练,用c++写出来的性能更高(规模增长会让OO的思维方式有利)。
性能比较:
可读性,上层调用的代码比较:
rtmp的download主处理函数是这么写的,我不愿看这个难懂的代码:
/*
 * Pull the media stream from an already-connected RTMP session and append it
 * to `file`, printing progress as it goes.
 *
 * rtmp           connected RTMP object
 * file           destination file, already positioned at its current end
 * dSeek          start timestamp (ms); also the resume point
 * dStopOffset    stop timestamp (ms), 0 = play to the end
 * duration       known duration (sec), <= 0 if unknown (read from onMetaData)
 * bResume        resuming a partial download
 * percent        percentage downloaded [out]
 *
 * Returns RD_SUCCESS, RD_FAILED or RD_INCOMPLETE.
 */
int Download(RTMP * rtmp, FILE * file, uint32_t dSeek, uint32_t dStopOffset,
             double duration, bool bResume, char *metaHeader,
             uint32_t nMetaHeaderSize, char *initialFrame, int initialFrameType,
             uint32_t nInitialFrameSize, int nSkipKeyFrames, bool bStdoutMode,
             bool bLiveStream, bool bHashes, bool bOverrideBufferTime,
             uint32_t bufferTime, double *percent)
{
  int32_t now, lastUpdate;
  int bufferSize = 64 * 1024;
  char *buffer;
  int nRead = 0;
  off_t size = ftello(file);
  unsigned long lastPercent = 0;

  rtmp->m_read.timestamp = dSeek;
  *percent = 0.0;

  if (rtmp->m_read.timestamp)
    {
      RTMP_Log(RTMP_LOGDEBUG, "Continuing at TS: %d ms\n", rtmp->m_read.timestamp);
    }

  if (bLiveStream)
    {
      RTMP_LogPrintf("Starting Live Stream\n");
    }
  else
    {
      /* print initial status
       * Workaround to exit with 0 if the file is fully (> 99.9%) downloaded */
      if (duration > 0)
        {
          if ((double) rtmp->m_read.timestamp >= (double) duration * 999.0)
            {
              RTMP_LogPrintf("Already Completed at: %.3f sec Duration=%.3f sec\n",
                             (double) rtmp->m_read.timestamp / 1000.0,
                             (double) duration / 1000.0);
              return RD_SUCCESS;
            }
          else
            {
              *percent = ((double) rtmp->m_read.timestamp) / (duration * 1000.0) * 100.0;
              *percent = ((double) (int) (*percent * 10.0)) / 10.0;
              RTMP_LogPrintf("%s download at: %.3f kB / %.3f sec (%.1f%%)\n",
                             bResume ? "Resuming" : "Starting",
                             (double) size / 1024.0,
                             (double) rtmp->m_read.timestamp / 1000.0, *percent);
            }
        }
      else
        {
          RTMP_LogPrintf("%s download at: %.3f kB\n",
                         bResume ? "Resuming" : "Starting", (double) size / 1024.0);
        }
    }

  if (dStopOffset > 0)
    RTMP_LogPrintf("For duration: %.3f sec\n", (double) (dStopOffset - dSeek) / 1000.0);

  if (bResume && nInitialFrameSize > 0)
    rtmp->m_read.flags |= RTMP_READ_RESUME;
  rtmp->m_read.initialFrameType = initialFrameType;
  rtmp->m_read.nResumeTS = dSeek;
  rtmp->m_read.metaHeader = metaHeader;
  rtmp->m_read.initialFrame = initialFrame;
  rtmp->m_read.nMetaHeaderSize = nMetaHeaderSize;
  rtmp->m_read.nInitialFrameSize = nInitialFrameSize;

  /* BUGFIX: allocate the read buffer only after the "Already Completed" early
   * return above (the original malloc'ed at the top and leaked the buffer on
   * that path), and fail cleanly if the allocation itself fails. */
  buffer = (char *) malloc(bufferSize);
  if (buffer == NULL)
    {
      RTMP_Log(RTMP_LOGERROR, "%s: Failed to allocate read buffer, exiting!", __FUNCTION__);
      return RD_FAILED;
    }

  now = RTMP_GetTime();
  lastUpdate = now - 1000;
  do
    {
      nRead = RTMP_Read(rtmp, buffer, bufferSize);
      //RTMP_LogPrintf("nRead: %d\n", nRead);
      if (nRead > 0)
        {
          if (fwrite(buffer, sizeof(unsigned char), nRead, file) != (size_t) nRead)
            {
              RTMP_Log(RTMP_LOGERROR, "%s: Failed writing, exiting!", __FUNCTION__);
              free(buffer);
              return RD_FAILED;
            }
          size += nRead;

          //RTMP_LogPrintf("write %dbytes (%.1f kB)\n", nRead, nRead/1024.0);
          if (duration <= 0)  /* if duration unknown try to get it from the stream (onMetaData) */
            duration = RTMP_GetDuration(rtmp);

          if (duration > 0)
            {
              /* make sure we claim to have enough buffer time! */
              if (!bOverrideBufferTime && bufferTime < (duration * 1000.0))
                {
                  bufferTime = (uint32_t) (duration * 1000.0) + 5000;  /* extra 5sec to make sure we've got enough */
                  RTMP_Log(RTMP_LOGDEBUG,
                           "Detected that buffer time is less than duration, resetting to: %dms",
                           bufferTime);
                  RTMP_SetBufferMS(rtmp, bufferTime);
                  RTMP_UpdateBufferMS(rtmp);
                }
              *percent = ((double) rtmp->m_read.timestamp) / (duration * 1000.0) * 100.0;
              *percent = ((double) (int) (*percent * 10.0)) / 10.0;
              if (bHashes)
                {
                  if (lastPercent + 1 <= *percent)
                    {
                      RTMP_LogStatus("#");
                      lastPercent = (unsigned long) *percent;
                    }
                }
              else
                {
                  now = RTMP_GetTime();
                  if (abs(now - lastUpdate) > 200)
                    {
                      RTMP_LogStatus("\r%.3f kB / %.2f sec (%.1f%%)",
                                     (double) size / 1024.0,
                                     (double) (rtmp->m_read.timestamp) / 1000.0, *percent);
                      lastUpdate = now;
                    }
                }
            }
          else
            {
              now = RTMP_GetTime();
              if (abs(now - lastUpdate) > 200)
                {
                  if (bHashes)
                    RTMP_LogStatus("#");
                  else
                    RTMP_LogStatus("\r%.3f kB / %.2f sec", (double) size / 1024.0,
                                   (double) (rtmp->m_read.timestamp) / 1000.0);
                  lastUpdate = now;
                }
            }
        }
#ifdef _DEBUG
      else
        {
          RTMP_Log(RTMP_LOGDEBUG, "zero read!");
        }
#endif
    }
  while (!RTMP_ctrlC && nRead > -1 && RTMP_IsConnected(rtmp));
  free(buffer);

  if (nRead < 0)
    nRead = rtmp->m_read.status;

  /* Final status update */
  if (!bHashes)
    {
      if (duration > 0)
        {
          *percent = ((double) rtmp->m_read.timestamp) / (duration * 1000.0) * 100.0;
          *percent = ((double) (int) (*percent * 10.0)) / 10.0;
          RTMP_LogStatus("\r%.3f kB / %.2f sec (%.1f%%)",
                         (double) size / 1024.0,
                         (double) (rtmp->m_read.timestamp) / 1000.0, *percent);
        }
      else
        {
          RTMP_LogStatus("\r%.3f kB / %.2f sec", (double) size / 1024.0,
                         (double) (rtmp->m_read.timestamp) / 1000.0);
        }
    }

  RTMP_Log(RTMP_LOGDEBUG, "RTMP_Read returned: %d", nRead);
  if (bResume && nRead == -2)
    {
      RTMP_LogPrintf("Couldn't resume FLV file, try --skip %d\n\n", nSkipKeyFrames + 1);
      return RD_FAILED;
    }
  if (nRead == -3)
    return RD_SUCCESS;

  if ((duration > 0 && *percent < 99.9) || RTMP_ctrlC || nRead < 0
      || RTMP_IsTimedout(rtmp))
    {
      return RD_INCOMPLETE;
    }

  return RD_SUCCESS;
}
OO的这么写:
void Rtmp2FLV::Dump(RtmpClient* rtmp_client, string host, int port, string app, string stream){ u_int32_t ret; RtmpClient& rtmp = *rtmp_client; if((ret = rtmp.HandShake()) != ErrorCode::Success){ exit(-1); } if((ret = rtmp.ConnectApp(host.c_str(), port, app.c_str())) != ErrorCode::Success){ exit(-1); } if((ret = rtmp.SetWindowAcknowledgementSize(2500000)) != ErrorCode::Success){ exit(-1); } if((ret = Helper::ConsumeAllMessage(&rtmp, 300, MessageCallback, this)) != ErrorCode::Success){ exit(-1); } if((ret = rtmp.Play(stream.c_str(), 0, -1, true)) != ErrorCode::Success){ exit(-1); } if((ret = rtmp.SetBufferLength(36000000)) != ErrorCode::Success){ exit(-1); } if((ret = Helper::ConsumeAllMessage(&rtmp, 10000, MessageCallback, this)) != ErrorCode::Success){ exit(-1); } } void Rtmp2FLV::MessageCallback(MessagePacket* packet, void* pRtmp2FLV){ RtmpAssert(pRtmp2FLV != NULL); Rtmp2FLV* p = (Rtmp2FLV*)pRtmp2FLV; p->OnMessage(packet); } void Rtmp2FLV::OnMessage(MessagePacket* packet){ switch(packet->GetPayload()->GetType()){ case RtmpClassType::MessageVideo:{ cout << "[debug] handler mux video message packet" << endl; MessageVideo* msg = dynamic_cast<MessageVideo*>(packet->GetPayload()); RtmpAssert(msg != NULL); muxer.WriteVideo(fs, msg, packet->GetHeader()); break; } case RtmpClassType::MessageAudio:{ cout << "[debug] handler mux audio message packet" << endl; MessageAudio* msg = dynamic_cast<MessageAudio*>(packet->GetPayload()); RtmpAssert(msg != NULL); muxer.WriteAudio(fs, msg, packet->GetHeader()); break; } case RtmpClassType::MessageAggregate:{ MessageAggregate* msg = dynamic_cast<MessageAggregate*>(packet->GetPayload()); RtmpAssert(msg != NULL); u_int32_t count = msg->GetMessageCount(); cout << "[debug] handler get " << count << " aggregate messages packet" << endl; for(u_int32_t i = 0; i < count; i++){ MessageAggregateSub* sub = msg->GetMessageSubAt(i); OnMessage(sub->GetMessage()); } break; } case RtmpClassType::MessageAMF0CallRequest:{ cout << "[debug] handler amf0 
call packet" << endl; MessageAMF0CallRequest* msg = dynamic_cast<MessageAMF0CallRequest*>(packet->GetPayload()); RtmpAssert(msg != NULL); if(msg->GetCommandName()->Equals("onStatus")){ AMF0Object* args = dynamic_cast<AMF0Object*>(msg->GetCommandArguments()->GetValue()); // if the args is a object, discovery the stop event if(args != NULL){ u_int32_t count = args->GetPropertiesCount(); for(u_int32_t i = 0; i < count; i++){ AMF0ObjectProperty* prop = args->GetPropertyAt(i); if(prop->GetName()->Equals("code")){ AMF0String* code = dynamic_cast<AMF0String*>(prop->GetValue()); if(code->Equals("NetStream.Play.Stop")){ // TODO: exit in more gentel way. cout << "[debug] get a stream stop event, exit normally." << endl; exit(0); } } } } } break; } case RtmpClassType::MessageAMF0Data:{ cout << "[debug] handler mux video message packet" << endl; MessageAMF0Data* msg = dynamic_cast<MessageAMF0Data*>(packet->GetPayload()); RtmpAssert(msg != NULL); if(msg->GetDataName()->Equals("onMetaData") && msg->GetParameterCount() > 0){ muxer.WriteMetaData(fs, msg->GetParameterAt(0)); } break; } default:{ cout << "[debug] handler ignore packet: " << packet->GetPayload()->GetDescriptionTitle() << endl; } } }
主调用类的头文件:
/** * the flv header at the beginning at flv file. */ class FLVHeader { public: /** * Signature: 'F','L','V' */ u_int8_t signature[3]; // 3bytes /** * File version (for example, 0x01 for FLV version 1) */ u_int8_t version; // 1byte u_int8_t type_flags_reserved0; // 5bits u_int8_t type_flags_audio; // 1bits u_int8_t type_flags_reserved1; // 1bits u_int8_t type_flags_video; // 1bits /** * The length of this header in bytes * The DataOffset field usually has a value of 9 for FLV version 1. * This field is present to accommodate larger headers in future versions. */ u_int32_t data_offset; //4bytes private: /** * disable the copy constructor and operator=, donot allow directly copy. */ FLVHeader(const FLVHeader&); FLVHeader& operator=(const FLVHeader&); public: FLVHeader(); virtual ~FLVHeader(); public: u_int32_t GetSize(); public: void Write(ostream& os); }; /** * the flv tag header at the beginning at flv tag. fixed 11(0xb) bytes. */ class FLVTagHeader { public: /** * Reserved for FMS, should be 0 */ u_int8_t reserved; // 2bits /** * Indicates if packets are filtered. * 0 = No pre-processing required. * 1 = Pre-processing (such as decryption) of the packet is required before it can be rendered. * Shall be 0 in unencrypted files, and 1 for encrypted tags. * See Annex F. FLV Encryption for the use of filters. */ u_int8_t filter; // 1bit /** * Type of contents in this tag. The following types are defined: * 8 = audio 9 = video 18 = script data */ u_int8_t tag_type; // 5bits. /** * Length of the message. Number of bytes after StreamID to end of tag (Equal to length of the tag – 11) */ u_int32_t data_size; //3bytes /** * Time in milliseconds at which the data in this tag applies. * This value is relative to the first tag in the FLV file, which always has a timestamp of 0. */ u_int32_t timestamp; //3bytes /** * Extension of the Timestamp field to form a SI32 value. 
This field represents the upper 8 bits, while the previous * Timestamp field represents the lower 24 bits of the time in milliseconds. */ u_int8_t timestamp_extended; //1bytes /** * Always 0. */ u_int32_t stream_id; //3bytes private: /** * disable the copy constructor and operator=, donot allow directly copy. */ FLVTagHeader(const FLVTagHeader&); FLVTagHeader& operator=(const FLVTagHeader&); public: FLVTagHeader(); virtual ~FLVTagHeader(); public: u_int32_t GetSize(); public: void Write(ostream& os); }; /** * the flv tag after flv header, consists of pre_tag_size|tag_header|data. */ class FLVTag { public: /** * Size of previous tag, including its header, in bytes. For FLV version * 1, this value is 11 plus the DataSize of the previous tag. */ u_int32_t previous_tag_size; /** * the common flv tag header, fixed 11 bytes size. */ FLVTagHeader tag_header; /** * the tag header: * AudioTagHeader // IF TagType == 8 * VideoTagHeader // IF TagType == 9 * EncryptionHeader // IF Filter == 1 * FilterParams // IF Filter == 1 * * Data specific for each media type. * Data // IF TagType == 8 AUDIODATA * // IF TagType == 9 VIDEODATA * // IF TagType == 18 SCRIPTDATA */ int8_t* data; // data from the rtmp. private: /** * disable the copy constructor and operator=, donot allow directly copy. */ FLVTag(const FLVTag&); FLVTag& operator=(const FLVTag&); public: FLVTag(); virtual ~FLVTag(); public: /** * total tag size=4+header+data */ u_int32_t GetSize(); /** * total tag size=header+data */ u_int32_t GetTagSize(); public: void Write(ostream& os); public: void SetData(int8_t* v, u_int32_t size); }; /** * used to mux packets in flv stream. */ class FLVMuxer { private: FLVHeader header; FLVTag* previous_tag; private: /** * disable the copy constructor and operator=, donot allow directly copy. 
*/ FLVMuxer(const FLVMuxer&); FLVMuxer& operator=(const FLVMuxer&); public: FLVMuxer(); virtual ~FLVMuxer(); public: void Open(std::ostream& os); void WriteVideo(std::ostream& os, MessageVideo* packet, MessageHeader* pheader); void WriteAudio(std::ostream& os, MessageAudio* packet, MessageHeader* pheader); void WriteMetaData(std::ostream& os, AMF0Any* metadata); /** * close muxer, writer file header. */ void Close(std::ostream& os); private: /** * disconvery metadata from source to pool */ void DiscoveryMetaData(AMF0Any* source, IBytesPool* pool); }; /** * mux the rtmp video/audio/data to flv file. */ class Rtmp2FLV { private: FLVMuxer muxer; std::fstream fs; private: /** * disable the copy constructor and operator=, donot allow directly copy. */ Rtmp2FLV(const Rtmp2FLV&); Rtmp2FLV& operator=(const Rtmp2FLV&); public: Rtmp2FLV(); virtual ~Rtmp2FLV(); public: void Initialize(const char* flv_file_name); void Dump(RtmpClient* rtmp_client, std::string host, int port, std::string app, std::string stream); private: static void MessageCallback(MessagePacket* packet, void* pRtmp2FLV); void OnMessage(MessagePacket* packet);主调用类的实现:
#define FixedTagHeaderSize 11 FLVHeader::FLVHeader(){ signature[0] = 'F'; signature[1] = 'L'; signature[2] = 'V'; version = 1; type_flags_reserved0 = 0; type_flags_audio = 0; type_flags_reserved1 = 0; type_flags_video = 0; data_offset = 0x09; } FLVHeader::~FLVHeader(){ } u_int32_t FLVHeader::GetSize(){ return data_offset; } void FLVHeader::Write(ostream& os){ SimpleBytesPool pool(GetSize()); pool.WriteUInt8(signature[0]); pool.WriteUInt8(signature[1]); pool.WriteUInt8(signature[2]); pool.WriteUInt8(version); u_int8_t flag = type_flags_video; flag |= (type_flags_reserved1 << 1); flag |= (type_flags_audio << 2); flag |= (type_flags_reserved0 << 7); pool.WriteUInt8(flag); pool.WriteUInt32(data_offset); pool.SetPosition(0); os.write((const char*)pool.GetBytes(), (streamsize)pool.GetLength()); //os.flush(); } FLVTagHeader::FLVTagHeader(){ reserved = 0; filter = 0; tag_type = 0; data_size = 0; timestamp = 0; timestamp_extended = 0; stream_id = 0; } FLVTagHeader::~FLVTagHeader(){ } u_int32_t FLVTagHeader::GetSize(){ return FixedTagHeaderSize; } void FLVTagHeader::Write(ostream& os){ SimpleBytesPool pool(GetSize()); u_int8_t flag = tag_type; flag |= (filter << 5); flag |= (reserved << 7); pool.WriteUInt8(flag); pool.WriteUInt24(data_size); pool.WriteUInt24(timestamp); pool.WriteUInt8(timestamp_extended); pool.WriteUInt24(stream_id); pool.SetPosition(0); os.write((const char*)pool.GetBytes(), (streamsize)pool.GetLength()); //os.flush(); } FLVTag::FLVTag(){ data = NULL; previous_tag_size = 0x00; } FLVTag::~FLVTag(){ } u_int32_t FLVTag::GetSize(){ return sizeof(u_int32_t) + tag_header.GetSize() + tag_header.data_size; } u_int32_t FLVTag::GetTagSize(){ return tag_header.GetSize() + tag_header.data_size; } void FLVTag::Write(ostream& os){ SimpleBytesPool pool(sizeof(u_int32_t)); pool.WriteUInt32(previous_tag_size); pool.SetPosition(0); os.write((const char*)pool.GetBytes(), (streamsize)pool.GetLength()); tag_header.Write(os); if(data != NULL && tag_header.data_size > 0){ 
os.write((const char*)data, (streamsize)tag_header.data_size); } //os.flush(); } void FLVTag::SetData(int8_t* v, u_int32_t size){ RtmpAssert(size > 0); RtmpAssert(v != NULL); data = v; tag_header.data_size = size; } FLVMuxer::FLVMuxer(){ previous_tag = NULL; } FLVMuxer::~FLVMuxer(){ RtmpFree(FLVTag, previous_tag, false); } void FLVMuxer::Open(std::ostream& os){ os.seekp(0); header.Write(os); } void FLVMuxer::WriteVideo(std::ostream& os, MessageVideo* packet, MessageHeader* pheader){ // ignore the empty audio packet. if(packet->GetVideoLength() == 0){ cout << "[debug] ignore the empty video packet" << endl; return; } header.type_flags_video = 1; FLVTag* tag = new FLVTag(); tag->SetData(packet->GetVideoData(), packet->GetVideoLength()); if(previous_tag != NULL){ tag->previous_tag_size = previous_tag->GetTagSize(); } tag->tag_header.tag_type = 0x09; // video. tag->tag_header.timestamp = pheader->GetTimestampLow24bits(); tag->tag_header.timestamp_extended = pheader->GetTimestampHigh8bits(); tag->Write(os); RtmpFree(FLVTag, previous_tag, false); previous_tag = tag; } void FLVMuxer::WriteAudio(std::ostream& os, MessageAudio* packet, MessageHeader* pheader){ // ignore the empty audio packet. if(packet->GetAudioLength() == 0){ cout << "[debug] ignore the empty audio packet" << endl; return; } header.type_flags_audio = 1; FLVTag* tag = new FLVTag(); tag->SetData(packet->GetAudioData(), packet->GetAudioLength()); if(previous_tag != NULL){ tag->previous_tag_size = previous_tag->GetTagSize(); } tag->tag_header.tag_type = 0x08; // audio. 
tag->tag_header.timestamp = pheader->GetTimestampLow24bits(); tag->tag_header.timestamp_extended = pheader->GetTimestampHigh8bits(); tag->Write(os); RtmpFree(FLVTag, previous_tag, false); previous_tag = tag; } void FLVMuxer::WriteMetaData(std::ostream& os, AMF0Any* metadata){ FLVTag* tag = new FLVTag(); SimpleBytesPool pool(0); DiscoveryMetaData(metadata, &pool); pool.SetPosition(0); tag->SetData(pool.GetBytes(), pool.GetLength()); if(previous_tag != NULL){ tag->previous_tag_size = previous_tag->GetTagSize(); } tag->tag_header.tag_type = 0x12; // 18: script data. tag->Write(os); RtmpFree(FLVTag, previous_tag, false); previous_tag = tag; } void FLVMuxer::DiscoveryMetaData(AMF0Any* source, IBytesPool* pool){ if(true){ AMF0String command; command.SetString("onMetaData"); SimpleBytesPool p(command.GetSize()); command.WritePacket(&p); p.SetPosition(0); pool->AppendBytes(p.GetBytes(), p.GetLength()); } // the onMetaData command object must be a ECMA array. AMF0ECMAArray* source_arr = dynamic_cast<AMF0ECMAArray*>(source->GetValue()); RtmpAssert(source_arr != NULL); u_int32_t count = source_arr->GetPropertiesCount(); AMF0ECMAArray arr; for(u_int32_t i = 0; i < count; i++){ AMF0ObjectProperty* prop = source_arr->GetPropertyAt(i); AMF0UTF8* name = prop->GetName(); if(name->Equals("duration") || name->Equals("width") || name->Equals("height") || name->Equals("videodatarate") || name->Equals("framerate") || name->Equals("videoframerate") ){ // we only copy the number or string property. 
AMF0Base* dest_value = NULL; AMF0Number* source_value = dynamic_cast<AMF0Number*>(prop->GetValue()); if(source_value != NULL){ dest_value = new AMF0Number(source_value->GetValue()); } else{ AMF0String* source_str = dynamic_cast<AMF0String*>(prop->GetValue()); if(source_str == NULL){ cout << "[debug] ignore the metadata if not number or string" << endl; continue; } dest_value = new AMF0String(source_str->GetString()); arr.AddProperty(Factory::Instance()->CreateAMF0ObjectProperty(name->GetString(), dest_value)); } arr.AddProperty(Factory::Instance()->CreateAMF0ObjectProperty(name->GetString(), dest_value)); } } if(true){ SimpleBytesPool p(arr.GetSize()); arr.WritePacket(&p); p.SetPosition(0); pool->AppendBytes(p.GetBytes(), p.GetLength()); } } void FLVMuxer::Close(std::ostream& os){ os.seekp(0); header.Write(os); } Rtmp2FLV::Rtmp2FLV(){ } Rtmp2FLV::~Rtmp2FLV(){ muxer.Close(fs); fs.close(); } void Rtmp2FLV::Initialize(const char* flv_file_name){ fs.open(flv_file_name, ios::trunc|ios::out|ios::binary); RtmpAssert(fs.is_open()); muxer.Open(fs); } void Rtmp2FLV::Dump(RtmpClient* rtmp_client, string host, int port, string app, string stream){ u_int32_t ret; RtmpClient& rtmp = *rtmp_client; if((ret = rtmp.HandShake()) != ErrorCode::Success){ exit(-1); } if((ret = rtmp.ConnectApp(host.c_str(), port, app.c_str())) != ErrorCode::Success){ exit(-1); } if((ret = rtmp.SetWindowAcknowledgementSize(2500000)) != ErrorCode::Success){ exit(-1); } if((ret = Helper::ConsumeAllMessage(&rtmp, 300, MessageCallback, this)) != ErrorCode::Success){ exit(-1); } if((ret = rtmp.Play(stream.c_str(), 0, -1, true)) != ErrorCode::Success){ exit(-1); } if((ret = rtmp.SetBufferLength(36000000)) != ErrorCode::Success){ exit(-1); } if((ret = Helper::ConsumeAllMessage(&rtmp, 10000, MessageCallback, this)) != ErrorCode::Success){ exit(-1); } } void Rtmp2FLV::MessageCallback(MessagePacket* packet, void* pRtmp2FLV){ RtmpAssert(pRtmp2FLV != NULL); Rtmp2FLV* p = (Rtmp2FLV*)pRtmp2FLV; p->OnMessage(packet); } 
/**
 * Dispatch one parsed RTMP message: mux audio/video/metadata into the flv
 * stream, unpack aggregate messages recursively, and watch the AMF0
 * onStatus command for the NetStream.Play.Stop event.
 */
void Rtmp2FLV::OnMessage(MessagePacket* packet){
    switch(packet->GetPayload()->GetType()){
        case RtmpClassType::MessageVideo:{
            cout << "[debug] handler mux video message packet" << endl;
            MessageVideo* msg = dynamic_cast<MessageVideo*>(packet->GetPayload());
            RtmpAssert(msg != NULL);
            muxer.WriteVideo(fs, msg, packet->GetHeader());
            break;
        }
        case RtmpClassType::MessageAudio:{
            cout << "[debug] handler mux audio message packet" << endl;
            MessageAudio* msg = dynamic_cast<MessageAudio*>(packet->GetPayload());
            RtmpAssert(msg != NULL);
            muxer.WriteAudio(fs, msg, packet->GetHeader());
            break;
        }
        case RtmpClassType::MessageAggregate:{
            MessageAggregate* msg = dynamic_cast<MessageAggregate*>(packet->GetPayload());
            RtmpAssert(msg != NULL);
            u_int32_t count = msg->GetMessageCount();
            cout << "[debug] handler get " << count << " aggregate messages packet" << endl;
            // an aggregate is a container: dispatch each sub-message in order.
            for(u_int32_t i = 0; i < count; i++){
                MessageAggregateSub* sub = msg->GetMessageSubAt(i);
                OnMessage(sub->GetMessage());
            }
            break;
        }
        case RtmpClassType::MessageAMF0CallRequest:{
            cout << "[debug] handler amf0 call packet" << endl;
            MessageAMF0CallRequest* msg = dynamic_cast<MessageAMF0CallRequest*>(packet->GetPayload());
            RtmpAssert(msg != NULL);
            if(msg->GetCommandName()->Equals("onStatus")){
                AMF0Object* args = dynamic_cast<AMF0Object*>(msg->GetCommandArguments()->GetValue());
                // if the args is an object, discover the stop event
                if(args != NULL){
                    u_int32_t count = args->GetPropertiesCount();
                    for(u_int32_t i = 0; i < count; i++){
                        AMF0ObjectProperty* prop = args->GetPropertyAt(i);
                        if(prop->GetName()->Equals("code")){
                            AMF0String* code = dynamic_cast<AMF0String*>(prop->GetValue());
                            // BUGFIX: the "code" property may not be an AMF0 string;
                            // guard the dynamic_cast result before dereferencing.
                            if(code != NULL && code->Equals("NetStream.Play.Stop")){
                                // TODO: exit in a more gentle way.
                                cout << "[debug] get a stream stop event, exit normally." << endl;
                                exit(0);
                            }
                        }
                    }
                }
            }
            break;
        }
        case RtmpClassType::MessageAMF0Data:{
            // BUGFIX: the log message said "mux video"; this is the data path.
            cout << "[debug] handler mux data message packet" << endl;
            MessageAMF0Data* msg = dynamic_cast<MessageAMF0Data*>(packet->GetPayload());
            RtmpAssert(msg != NULL);
            if(msg->GetDataName()->Equals("onMetaData") && msg->GetParameterCount() > 0){
                muxer.WriteMetaData(fs, msg->GetParameterAt(0));
            }
            break;
        }
        default:{
            cout << "[debug] handler ignore packet: " << packet->GetPayload()->GetDescriptionTitle() << endl;
        }
    }
}
另外,rtmpdump使用的librtmp使用同步socket收发数据:
[winlin@dev6 rtmpdump-2.2e]$ find . -name "*.c"|xargs grep --color -in "recv(" ./librtmp/rtmp.c:3417: nBytes = recv(sb->sb_socket, sb->sb_start + sb->sb_size, nBytes, 0); ./rtmpgw.c:370: nRead = recv(sockfd, header, 2047, 0); [winlin@dev6 rtmpdump-2.2e]$ find . -name "*.c"|xargs grep --color -in " send(" ./librtmp/rtmp.c:3460: rc = send(sb->sb_socket, buf, len, 0); ./rtmpgw.c:381: send(sockfd, buf, len, 0); ./rtmpgw.c:530: send(sockfd, buf, len, 0); ./rtmpgw.c:584: if ((nWritten = send(sockfd, buffer, nRead, 0)) < 0) ./rtmpgw.c:646: send(sockfd, buf, len, 0);