In x265, main() calls encoder_encode(), and encoder_encode() in turn calls encode(). The main job of encode() is to feed one input frame into the encoder and get one frame of output back.
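Before looking at encode() itself, it helps to see how an application reaches it through the public API. The sketch below is a minimal, illustrative caller (error handling, YUV reading, and NAL writing are omitted, and the param passed in is assumed to already carry resolution and frame rate, e.g. from x265_param_default()); the real x265 CLI flow in x265.cpp is more involved, but every call used here (x265_encoder_open, x265_picture_init, x265_encoder_encode, x265_encoder_close) is part of the public x265 API that eventually lands in Encoder::encode().

#include <stdint.h>
#include "x265.h"

/* Minimal driver loop (illustrative only): push frames in, pull access units out.
 * param must already be configured (sourceWidth/sourceHeight/fpsNum/fpsDenom). */
int encode_stream(x265_param* param, int numFrames)
{
    x265_encoder* enc = x265_encoder_open(param);   /* wraps an Encoder instance */
    if (!enc)
        return -1;

    x265_picture pic_in, pic_out;
    x265_picture_init(param, &pic_in);

    x265_nal* nal = NULL;
    uint32_t  nalCount = 0;

    for (int i = 0; i < numFrames; i++)
    {
        /* fill pic_in.planes[]/stride[]/pts with the next source frame here */

        /* returns 1 when an access unit is ready, 0 while the encoder is still
         * buffering (lookahead / B-frame delay), negative on error */
        int ret = x265_encoder_encode(enc, &nal, &nalCount, &pic_in, &pic_out);
        if (ret < 0)
            break;
        /* if (ret > 0) write nal[0..nalCount-1] to the output stream */
    }

    /* flush: a NULL pic_in drains the frames still queued inside the encoder */
    while (x265_encoder_encode(enc, &nal, &nalCount, NULL, &pic_out) > 0)
        /* write the remaining access units */;

    x265_encoder_close(enc);
    return 0;
}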
encode() consists of roughly three parts:
(1) Check whether execution must be aborted because of an internal error, via flags such as g_checkFailures and m_aborted.
(2) Check whether an input frame was supplied. If so, verify that its bit depth and color space are supported, and check whether the free frame list is empty, creating a new Frame if it is. Another important variable here is ret, initialized to 0, which reports the outcome of encode(): 0 means no reconstructed frame is currently available for output, so control simply returns to encoder_encode(), while 1 means a frame was output; this is confirmed by the last statement of the function, return ret.
(3) Use a do/while() loop to check whether an encoded frame is available; if so, ret is set to 1. The loop also calls startCompressFrame(), whose main purpose is to trigger the frame encoder thread and set up the next round of encoding. A condensed skeleton of this control flow is sketched right after this list.
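Condensed to its control flow, the function looks roughly like the skeleton below. The member names are the real ones from the full listing that follows, but everything else is elided; this is a paraphrase for orientation, not the actual implementation.

int Encoder::encode(const x265_picture* pic_in, x265_picture* pic_out)
{
    /* (1) abort on internal errors (g_checkFailures / m_aborted) */

    if (pic_in)
    {
        /* (2) validate bit depth and color space, take a Frame from the free
         *     list (or allocate one), copy the picture, hand it to the lookahead */
    }
    else
        m_lookahead->flush();             /* NULL input implies a flush condition */

    int ret = 0;                          /* 0: nothing to output yet, 1: frame output */
    int pass = 0;
    do
    {
        /* (3a) collect a finished frame from the current FrameEncoder;
         *      if one is available, fill pic_out and set ret = 1 */

        /* (3b) pop the next decided frame from the lookahead and call
         *      curEncoder->startCompressFrame() to wake its worker thread */
    }
    while (m_bZeroLatency && ++pass < 2); /* second pass only in zero-latency mode */

    return ret;
}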
The corresponding annotated code is as follows:
/*=============================================================*/
/* ====== Analysed by: RuiDong Fang
   ====== Csdn Blog: http://blog.csdn.net/frd2009041510
   ====== Date: 2016.04.14
   ====== Function: encode(), Feed one new input frame into the encoder, get one frame out.
*/
/*=============================================================*/
/**
 * Feed one new input frame into the encoder, get one frame out. If pic_in is
 * NULL, a flush condition is implied and pic_in must be NULL for all subsequent
 * calls for this encoder instance.
 *
 * pic_in   input original YUV picture or NULL
 * pic_out  pointer to reconstructed picture struct
 *
 * returns 0 if no frames are currently available for output
 *         1 if frame was output, m_nalList contains access unit
 *         negative on malloc error or abort */
int Encoder::encode(const x265_picture* pic_in, x265_picture* pic_out)
{
    //////////////////////////////////////////////////////////////////////// 1
#if CHECKED_BUILD || _DEBUG
    if (g_checkFailures)
    {
        x265_log(m_param, X265_LOG_ERROR, "encoder aborting because of internal error\n");
        return -1;
    }
#endif
    if (m_aborted)
        return -1;

    if (m_exportedPic)
    {
        ATOMIC_DEC(&m_exportedPic->m_countRefEncoders);
        m_exportedPic = NULL;
        m_dpb->recycleUnreferenced();
    }

    //////////////////////////////////////////////////////////////////////// 2
    // if there is an input picture
    if (pic_in)
    {
        // check whether the input color space is supported; log an error if not
        if (pic_in->colorSpace != m_param->internalCsp)
        {
            x265_log(m_param, X265_LOG_ERROR, "Unsupported color space (%d) on input\n",
                     pic_in->colorSpace);
            return -1;
        }

        // the bit depth of each input frame must be between 8 and 16
        if (pic_in->bitDepth < 8 || pic_in->bitDepth > 16)
        {
            x265_log(m_param, X265_LOG_ERROR, "Input bit depth (%d) must be between 8 and 16\n",
                     pic_in->bitDepth);
            return -1;
        }

        Frame *inFrame;
        if (m_dpb->m_freeList.empty())  // if the free list is empty, create a new Frame
        {
            inFrame = new Frame;
            x265_param* p = m_reconfigured ? m_latestParam : m_param;
            if (inFrame->create(p))
            {
                /* the first PicYuv created is asked to generate the CU and block unit offset
                 * arrays which are then shared with all subsequent PicYuv (orig and recon)
                 * allocated by this top level encoder */
                if (m_cuOffsetY)
                {
                    inFrame->m_fencPic->m_cuOffsetC = m_cuOffsetC;
                    inFrame->m_fencPic->m_cuOffsetY = m_cuOffsetY;
                    inFrame->m_fencPic->m_buOffsetC = m_buOffsetC;
                    inFrame->m_fencPic->m_buOffsetY = m_buOffsetY;
                }
                else
                {
                    if (!inFrame->m_fencPic->createOffsets(m_sps))
                    {
                        m_aborted = true;
                        x265_log(m_param, X265_LOG_ERROR, "memory allocation failure, aborting encode\n");
                        inFrame->destroy();
                        delete inFrame;
                        return -1;
                    }
                    else
                    {
                        m_cuOffsetC = inFrame->m_fencPic->m_cuOffsetC;
                        m_cuOffsetY = inFrame->m_fencPic->m_cuOffsetY;
                        m_buOffsetC = inFrame->m_fencPic->m_buOffsetC;
                        m_buOffsetY = inFrame->m_fencPic->m_buOffsetY;
                    }
                }
            }
            else
            {
                m_aborted = true;
                x265_log(m_param, X265_LOG_ERROR, "memory allocation failure, aborting encode\n");
                inFrame->destroy();
                delete inFrame;
                return -1;
            }
        }
        else  // if the free list is not empty, pop a Frame from the back
        {
            inFrame = m_dpb->m_freeList.popBack();
            inFrame->m_lowresInit = false;
        }

        /* Copy input picture into a Frame and PicYuv, send to lookahead */
        inFrame->m_fencPic->copyFromPicture(*pic_in, m_sps.conformanceWindow.rightOffset,
                                            m_sps.conformanceWindow.bottomOffset);

        inFrame->m_poc      = ++m_pocLast;
        inFrame->m_userData = pic_in->userData;
        inFrame->m_pts      = pic_in->pts;
        inFrame->m_forceqp  = pic_in->forceqp;
        inFrame->m_param    = m_reconfigured ? m_latestParam : m_param;

        if (m_pocLast == 0)  // if POC == 0 (first frame)
            m_firstPts = inFrame->m_pts;
        if (m_bframeDelay && m_pocLast == m_bframeDelay)
            m_bframeDelayTime = inFrame->m_pts - m_firstPts;

        /* Encoder holds a reference count until stats collection is finished */
        ATOMIC_INC(&inFrame->m_countRefEncoders);

        if ((m_param->rc.aqMode || m_param->bEnableWeightedPred || m_param->bEnableWeightedBiPred) &&
            (m_param->rc.cuTree && m_param->rc.bStatRead))
        {
            if (!m_rateControl->cuTreeReadFor2Pass(inFrame))
            {
                m_aborted = 1;
                return -1;
            }
        }

        /* Use the frame types from the first pass, if available */
        int sliceType = (m_param->rc.bStatRead) ? m_rateControl->rateControlSliceType(inFrame->m_poc)
                                                : pic_in->sliceType;

        /* In analysisSave mode, x265_analysis_data is allocated in pic_in and inFrame points to this */
        /* Load analysis data before lookahead->addPicture, since sliceType has been decided */
        if (m_param->analysisMode == X265_ANALYSIS_LOAD)
        {
            x265_picture* inputPic = const_cast<x265_picture*>(pic_in);
            /* readAnalysisFile reads analysis data for the frame and allocates memory based on slicetype */
            readAnalysisFile(&inputPic->analysisData, inFrame->m_poc);
            inFrame->m_analysisData.poc = inFrame->m_poc;
            inFrame->m_analysisData.sliceType = inputPic->analysisData.sliceType;
            inFrame->m_analysisData.numCUsInFrame = inputPic->analysisData.numCUsInFrame;
            inFrame->m_analysisData.numPartitions = inputPic->analysisData.numPartitions;
            inFrame->m_analysisData.interData = inputPic->analysisData.interData;
            inFrame->m_analysisData.intraData = inputPic->analysisData.intraData;
            sliceType = inputPic->analysisData.sliceType;
        }

        m_lookahead->addPicture(*inFrame, sliceType);
        m_numDelayedPic++;
    }
    else  // no input picture: flush the lookahead
        m_lookahead->flush();

    FrameEncoder *curEncoder = m_frameEncoder[m_curEncoder];
    m_curEncoder = (m_curEncoder + 1) % m_param->frameNumThreads;  // select the frame encoder for the current frame

    /* ret reflects the state of this call:
     * 0 if no frames are currently available for output,
     * 1 if a frame was output (m_nalList contains the access unit),
     * negative on malloc error or abort. */
    int ret = 0;

    /* Normal operation is to wait for the current frame encoder to complete its current frame
     * and then to give it a new frame to work on. In zero-latency mode, we must encode this
     * input picture before returning so the order must be reversed. This do/while() loop allows
     * us to alternate the order of the calls without ugly code replication */
    Frame* outFrame = NULL;
    Frame* frameEnc = NULL;
    int pass = 0;

    //////////////////////////////////////////////////////////////////////// 3
    // do/while() loop
    do
    {
        /* getEncodedPicture() should block until the FrameEncoder has completed
         * encoding the frame. This is how back-pressure through the API is
         * accomplished when the encoder is full */
        if (!m_bZeroLatency || pass)
            outFrame = curEncoder->getEncodedPicture(m_nalList);

        if (outFrame)
        {
            Slice *slice = outFrame->m_encData->m_slice;
            x265_frame_stats* frameData = NULL;

            /* Free up pic_in->analysisData since it has already been used */
            if (m_param->analysisMode == X265_ANALYSIS_LOAD)
                freeAnalysis(&outFrame->m_analysisData);

            if (pic_out)  // if the caller provided an output picture struct
            {
                PicYuv *recpic = outFrame->m_reconPic;
                pic_out->poc = slice->m_poc;
                pic_out->bitDepth = X265_DEPTH;
                pic_out->userData = outFrame->m_userData;
                pic_out->colorSpace = m_param->internalCsp;
                frameData = &(pic_out->frameData);

                pic_out->pts = outFrame->m_pts;
                pic_out->dts = outFrame->m_dts;

                // determine the slice type of this frame: I/P/B
                switch (slice->m_sliceType)
                {
                case I_SLICE:
                    pic_out->sliceType = outFrame->m_lowres.bKeyframe ? X265_TYPE_IDR : X265_TYPE_I;
                    break;
                case P_SLICE:
                    pic_out->sliceType = X265_TYPE_P;
                    break;
                case B_SLICE:
                    pic_out->sliceType = X265_TYPE_B;
                    break;
                }

                pic_out->planes[0] = recpic->m_picOrg[0];
                pic_out->stride[0] = (int)(recpic->m_stride * sizeof(pixel));
                pic_out->planes[1] = recpic->m_picOrg[1];
                pic_out->stride[1] = (int)(recpic->m_strideC * sizeof(pixel));
                pic_out->planes[2] = recpic->m_picOrg[2];
                pic_out->stride[2] = (int)(recpic->m_strideC * sizeof(pixel));

                /* Dump analysis data from pic_out to file in save mode and free */
                if (m_param->analysisMode == X265_ANALYSIS_SAVE)
                {
                    pic_out->analysisData.poc = pic_out->poc;
                    pic_out->analysisData.sliceType = pic_out->sliceType;
                    pic_out->analysisData.numCUsInFrame = outFrame->m_analysisData.numCUsInFrame;
                    pic_out->analysisData.numPartitions = outFrame->m_analysisData.numPartitions;
                    pic_out->analysisData.interData = outFrame->m_analysisData.interData;
                    pic_out->analysisData.intraData = outFrame->m_analysisData.intraData;
                    writeAnalysisFile(&pic_out->analysisData);
                    freeAnalysis(&pic_out->analysisData);
                }
            }

            if (slice->m_sliceType == P_SLICE)
            {
                if (slice->m_weightPredTable[0][0][0].bPresentFlag)
                    m_numLumaWPFrames++;
                if (slice->m_weightPredTable[0][0][1].bPresentFlag ||
                    slice->m_weightPredTable[0][0][2].bPresentFlag)
                    m_numChromaWPFrames++;
            }
            else if (slice->m_sliceType == B_SLICE)
            {
                bool bLuma = false, bChroma = false;
                for (int l = 0; l < 2; l++)
                {
                    if (slice->m_weightPredTable[l][0][0].bPresentFlag)
                        bLuma = true;
                    if (slice->m_weightPredTable[l][0][1].bPresentFlag ||
                        slice->m_weightPredTable[l][0][2].bPresentFlag)
                        bChroma = true;
                }

                if (bLuma)
                    m_numLumaWPBiFrames++;
                if (bChroma)
                    m_numChromaWPBiFrames++;
            }

            if (m_aborted)
                return -1;

            finishFrameStats(outFrame, curEncoder, curEncoder->m_accessUnitBits, frameData);

            /* Write RateControl Frame level stats in multipass encodes */
            if (m_param->rc.bStatWrite)
                if (m_rateControl->writeRateControlFrameStats(outFrame, &curEncoder->m_rce))
                    m_aborted = true;

            /* Allow this frame to be recycled if no frame encoders are using it for reference */
            if (!pic_out)
            {
                ATOMIC_DEC(&outFrame->m_countRefEncoders);
                m_dpb->recycleUnreferenced();
            }
            else
                m_exportedPic = outFrame;

            m_numDelayedPic--;

            ret = 1;  // a frame was output, so ret becomes 1
        }

        /* pop a single frame from decided list, then provide to frame encoder
         * curEncoder is guaranteed to be idle at this point */
        if (!pass)
            frameEnc = m_lookahead->getDecidedPicture();
        if (frameEnc && !pass)
        {
            /* give this frame a FrameData instance before encoding */
            if (m_dpb->m_picSymFreeList)
            {
                frameEnc->m_encData = m_dpb->m_picSymFreeList;
                m_dpb->m_picSymFreeList = m_dpb->m_picSymFreeList->m_freeListNext;
                frameEnc->reinit(m_sps);
            }
            else
            {
                frameEnc->allocEncodeData(m_param, m_sps);
                Slice* slice = frameEnc->m_encData->m_slice;
                slice->m_sps = &m_sps;
                slice->m_pps = &m_pps;
                slice->m_maxNumMergeCand = m_param->maxNumMergeCand;
                slice->m_endCUAddr = slice->realEndAddress(m_sps.numCUsInFrame * NUM_4x4_PARTITIONS);
                frameEnc->m_reconPic->m_cuOffsetC = m_cuOffsetC;
                frameEnc->m_reconPic->m_cuOffsetY = m_cuOffsetY;
                frameEnc->m_reconPic->m_buOffsetC = m_buOffsetC;
                frameEnc->m_reconPic->m_buOffsetY = m_buOffsetY;
            }

            curEncoder->m_rce.encodeOrder = m_encodedFrameNum++;
            if (m_bframeDelay)
            {
                int64_t *prevReorderedPts = m_prevReorderedPts;
                frameEnc->m_dts = m_encodedFrameNum > m_bframeDelay
                    ? prevReorderedPts[(m_encodedFrameNum - m_bframeDelay) % m_bframeDelay]
                    : frameEnc->m_reorderedPts - m_bframeDelayTime;
                prevReorderedPts[m_encodedFrameNum % m_bframeDelay] = frameEnc->m_reorderedPts;
            }
            else
                frameEnc->m_dts = frameEnc->m_reorderedPts;

            /* Allocate analysis data before encode in save mode. This is allocated in frameEnc */
            if (m_param->analysisMode == X265_ANALYSIS_SAVE)
            {
                x265_analysis_data* analysis = &frameEnc->m_analysisData;
                analysis->poc = frameEnc->m_poc;
                analysis->sliceType = frameEnc->m_lowres.sliceType;
                uint32_t widthInCU     = (m_param->sourceWidth  + g_maxCUSize - 1) >> g_maxLog2CUSize;
                uint32_t heightInCU    = (m_param->sourceHeight + g_maxCUSize - 1) >> g_maxLog2CUSize;
                uint32_t numCUsInFrame = widthInCU * heightInCU;
                analysis->numCUsInFrame = numCUsInFrame;
                analysis->numPartitions = NUM_4x4_PARTITIONS;
                allocAnalysis(analysis);
            }

            /* determine references, setup RPS, etc */
            m_dpb->prepareEncode(frameEnc);  //======================== preparation before encoding, e.g. deciding reference frames and setting up the RPS

            if (m_param->rc.rateControlMode != X265_RC_CQP)
                m_lookahead->getEstimatedPictureCost(frameEnc);

            /* Allow FrameEncoder::compressFrame() to start in the frame encoder thread */
            if (!curEncoder->startCompressFrame(frameEnc))  //======================== start of the encoding thread: startCompressFrame() calls m_enable.trigger() to wake the frame encoder thread
                m_aborted = true;
        }
        else if (m_encodedFrameNum)
            m_rateControl->setFinalFrameCount(m_encodedFrameNum);
    }
    while (m_bZeroLatency && ++pass < 2);

    return ret;
}
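The last annotation above points out that startCompressFrame() merely hands the frame to a persistent frame encoder thread by triggering an event (m_enable.trigger()), while getEncodedPicture() at the top of the do/while() loop blocks until that thread has finished a frame. The self-contained sketch below mirrors this handshake with standard C++ primitives; it is an illustration of the pattern only, not x265's actual FrameEncoder code (x265 uses its own Event/Thread wrappers, and the names FrameJob, frameEncoderThread, etc. are invented here).

#include <condition_variable>
#include <mutex>
#include <thread>
#include <cstdio>

/* Shared state between the API thread (Encoder::encode) and one worker thread. */
struct FrameJob
{
    std::mutex              lock;
    std::condition_variable enable;        // ~ m_enable: "a new frame is ready"
    std::condition_variable done;          // signals completion back to the API thread
    int                     frame    = -1; // stand-in for the Frame* being encoded
    bool                    finished = false;
};

void frameEncoderThread(FrameJob* fj)      // ~ the frame encoder's thread main loop
{
    std::unique_lock<std::mutex> g(fj->lock);
    fj->enable.wait(g, [fj] { return fj->frame >= 0; });
    std::printf("compressing frame %d\n", fj->frame);  // ~ compressFrame()
    fj->finished = true;
    fj->done.notify_one();
}

void startCompressFrame(FrameJob* fj, int frame)   // called from encode()
{
    std::lock_guard<std::mutex> g(fj->lock);
    fj->frame = frame;
    fj->enable.notify_one();               // ~ m_enable.trigger()
}

void getEncodedPicture(FrameJob* fj)       // called from encode(); blocks (back-pressure)
{
    std::unique_lock<std::mutex> g(fj->lock);
    fj->done.wait(g, [fj] { return fj->finished; });
}

int main()
{
    FrameJob fj;
    std::thread worker(frameEncoderThread, &fj);
    startCompressFrame(&fj, 0);            // hand the frame to the worker thread
    getEncodedPicture(&fj);                // wait until the worker has encoded it
    worker.join();
    return 0;
}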