Android MediaExtractor in Detail

Preface:

Video playback can be divided into three stages:

1. MediaExtractor: container parsing (demuxing). Its main job is to separate the audio and video, parse the header information, and produce the individual audio and video streams.

2. MediaCodec: decoding the audio and video streams into PCM and YUV data. See: https://blog.csdn.net/cheriyou_/article/details/92787998

3. Render: rendering the audio and video. This involves other modules; the key thing to understand here is how the audio and video timestamps are aligned (A/V sync) during playback. See: https://blog.csdn.net/cheriyou_/article/details/101207443

This article focuses on how MediaExtractor is used inside NuPlayer.
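Before looking at NuPlayer's internals, it helps to see what the demuxing step looks like from an application's point of view. Below is a minimal sketch based on the public NDK AMediaExtractor API (the path /sdcard/test.mp4 and the 1 MB sample buffer are placeholders): it enumerates the tracks, selects the video track by its MIME prefix, and reads compressed samples, which is conceptually the same job GenericSource does with the native extractor.

// Minimal demux sketch with the NDK AMediaExtractor API (placeholder path, trimmed error handling).
#include <cstdio>
#include <cstring>
#include <vector>
#include <media/NdkMediaExtractor.h>
#include <media/NdkMediaFormat.h>

int main() {
    AMediaExtractor *ex = AMediaExtractor_new();
    if (AMediaExtractor_setDataSource(ex, "/sdcard/test.mp4") != AMEDIA_OK) {
        AMediaExtractor_delete(ex);
        return -1;
    }

    size_t numTracks = AMediaExtractor_getTrackCount(ex);           // ~ countTracks()
    for (size_t i = 0; i < numTracks; ++i) {
        AMediaFormat *fmt = AMediaExtractor_getTrackFormat(ex, i);  // ~ getTrackMetaData()
        const char *mime = nullptr;
        if (AMediaFormat_getString(fmt, AMEDIAFORMAT_KEY_MIME, &mime) &&
                !strncmp(mime, "video/", 6)) {
            AMediaExtractor_selectTrack(ex, i);                     // like picking mVideoTrack
        }
        AMediaFormat_delete(fmt);
    }

    std::vector<uint8_t> buf(1 << 20);
    ssize_t size;
    // Each readSampleData() call returns one compressed access unit of the demuxed
    // stream, which a real player would feed to MediaCodec for decoding.
    while ((size = AMediaExtractor_readSampleData(ex, buf.data(), buf.size())) >= 0) {
        int64_t ptsUs = AMediaExtractor_getSampleTime(ex);
        printf("sample: %zd bytes, pts=%lld us\n", size, (long long)ptsUs);
        AMediaExtractor_advance(ex);
    }

    AMediaExtractor_delete(ex);
    return 0;
}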

 

Key pieces of MediaExtractor:

1. mediaextractorservice

2. libmediaextractor

3. libstagefright/: MediaExtractorFactory.cpp, NuMediaExtractor.cpp

4. frameworks/av/media/extractors

 

How the extractor is used in NuPlayer:

// In NuPlayer::GenericSource::onPrepareAsync(), a dataSource is first created from a
// file, a URL, etc.; initFromDataSource() is then called to demux it into audio and video.

NuPlayer::GenericSource::initFromDataSource() {
    sp<IMediaExtractor> extractor;
    extractor = MediaExtractorFactory::Create(dataSource, NULL);
    // Create() checks the property "media.stagefright.extractremote": if it is true,
    // the "media.extractor" service is obtained over binder and the extractor is
    // created in that remote process; otherwise a local extractor is used.

    sp<MetaData> fileMeta = extractor->getMetaData(); // file-level metadata
    size_t numtracks = extractor->countTracks();      // number of tracks
    mFileMeta = fileMeta;
    for (size_t i = 0; i < numtracks; ++i) { // loop over every track
        sp<IMediaSource> track = extractor->getTrack(i);
        sp<MetaData> meta = extractor->getTrackMetaData(i); // metadata of the current track

        const char *mime;
        CHECK(meta->findCString(kKeyMIMEType, &mime));

        if (!strncasecmp(mime, "video/", 6)) { // the current track is a video track
            if (mVideoTrack.mSource == NULL) {
                mVideoTrack.mIndex = i;
                mVideoTrack.mSource = track;
                mVideoTrack.mPackets =
                        new AnotherPacketSource(mVideoTrack.mSource->getFormat());

                // video always at the beginning
                mMimes.insertAt(String8(mime), 0);
            }
        } else if (!strncasecmp(mime, "audio/", 6)) {
            // audio tracks get the same treatment with mAudioTrack
        }

        mSources.push(track); // store every parsed track in mSources; whenever a stream
                              // is operated on later, it is fetched back from mSources.
    }
}
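The tracks collected in mSources are not decoded here. Later, GenericSource pulls compressed access units from each track and queues them into the AnotherPacketSource created above; NuPlayer's decoders then dequeue from those queues. The following is a simplified, conceptual sketch of that flow, not a verbatim copy of GenericSource::readBuffer(); mediaBufferToABuffer() stands in for the real MediaBuffer-to-ABuffer conversion code.

// Conceptual sketch: keep the video packet queue filled from the extractor track.
void fillVideoQueue() {
    MediaBufferBase *mbuf = nullptr;
    MediaSource::ReadOptions options;              // may carry a pending seek request

    // Pull one compressed access unit from the extractor-backed track.
    status_t err = mVideoTrack.mSource->read(&mbuf, &options);
    if (err != OK) {
        mVideoTrack.mPackets->signalEOS(err);      // propagate EOS / errors downstream
        return;
    }

    // Wrap the MediaBuffer in an ABuffer (timestamps and flags go into its meta()).
    sp<ABuffer> buffer = mediaBufferToABuffer(mbuf);

    // Queue it; the decoder side later calls dequeueAccessUnit() on this queue.
    mVideoTrack.mPackets->queueAccessUnit(buffer);
}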

sp<IMediaExtractor> MediaExtractorFactory::Create(
        const sp<DataSource> &source, const char *mime) {

    if (!property_get_bool("media.stagefright.extractremote", true)) {
        // local extractor
        return CreateFromService(source, mime);
    } else {
        // Obtain the "media.extractor" service via binder, then create the
        // extractor in that remote process.
    }
    return NULL;
}


sp<IMediaExtractor> MediaExtractorFactory::CreateFromService(
        const sp<DataSource> &source, const char *mime) {

    ......
    void *meta = nullptr;
    MediaExtractor::CreatorFunc creator = NULL;
    MediaExtractor::FreeMetaFunc freeMeta = nullptr;
    float confidence;
    sp<ExtractorPlugin> plugin;
    creator = sniff(source.get(), &confidence, &meta, &freeMeta, plugin);

    MediaExtractor *ret = creator(source.get(), meta);

    return CreateIMediaExtractorFromMediaExtractor(ret, source, plugin);
}

MediaExtractor::CreatorFunc MediaExtractorFactory::sniff(
        DataSourceBase *source, float *confidence, void **meta,
        MediaExtractor::FreeMetaFunc *freeMeta, sp<ExtractorPlugin> &plugin) {
    // This function's job is to pick the most suitable extractor out of all
    // registered extractor plugins.
    *confidence = 0.0f;
    *meta = nullptr;

    std::shared_ptr<std::list<sp<ExtractorPlugin>>> plugins;
    {
        Mutex::Autolock autoLock(gPluginMutex);
        if (!gPluginsRegistered) {
            return NULL;
        }
        plugins = gPlugins;
    }

    MediaExtractor::CreatorFunc curCreator = NULL;
    MediaExtractor::CreatorFunc bestCreator = NULL;
    for (auto it = plugins->begin(); it != plugins->end(); ++it) {
        float newConfidence;
        void *newMeta = nullptr;
        MediaExtractor::FreeMetaFunc newFreeMeta = nullptr;
        if ((curCreator = (*it)->def.sniff(source, &newConfidence, &newMeta, &newFreeMeta))) {
            if (newConfidence > *confidence) {
                *confidence = newConfidence;
                if (*meta != nullptr && *freeMeta != nullptr) {
                    (*freeMeta)(*meta);
                }
                *meta = newMeta;
                *freeMeta = newFreeMeta;
                plugin = *it;
                bestCreator = curCreator;
            } else {
                if (newMeta != nullptr && newFreeMeta != nullptr) {
                    newFreeMeta(newMeta);
                }
            }
        }
    }

    return bestCreator;
}
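To make the confidence mechanism concrete, here is a deliberately simplified, hypothetical sniff function in the spirit of what an extractor plugin registers (it is not the real MP4 plugin code): it peeks at the start of the source and reports a confidence score, and sniff() above simply keeps the creator with the highest score.

// Hypothetical plugin sniff function, for illustration only. A real plugin registers
// something with this shape; MediaExtractorFactory::sniff() keeps the highest scorer.
static MediaExtractor *CreateMyMp4Extractor(DataSourceBase *source, void *meta);

static MediaExtractor::CreatorFunc SniffMyMp4(
        DataSourceBase *source, float *confidence, void **meta,
        MediaExtractor::FreeMetaFunc *freeMeta) {
    uint8_t header[8];
    if (source->readAt(0 /* offset */, header, sizeof(header)) < (ssize_t)sizeof(header)) {
        return nullptr;                    // cannot even read a box header
    }
    if (memcmp(&header[4], "ftyp", 4)) {   // an MP4 file starts with an 'ftyp' box
        return nullptr;                    // not our format: no vote
    }
    *confidence = 0.4f;                    // fairly sure, but another plugin may score higher
    *meta = nullptr;                       // no per-file metadata to hand to the creator
    *freeMeta = nullptr;
    return CreateMyMp4Extractor;           // the factory will call this creator
}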

// For an MP4 file, for example, an MPEG4Extractor is created at this point.

status_t MPEG4Extractor::getMetaData(MetaDataBase &meta) {
    status_t err;
    if ((err = readMetaData()) != OK) {
        return UNKNOWN_ERROR;
    }
    meta = mFileMetaData;
    return OK;
}
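On the caller side, this file-level metadata is mainly used to learn the container type. A minimal sketch of such a query (assuming extractor points at the MPEG4Extractor instance; variable names are illustrative) might look like:

// Query the container MIME from the file-level metadata returned above.
MetaDataBase fileMeta;
if (extractor->getMetaData(fileMeta) == OK) {
    const char *containerMime = nullptr;
    if (fileMeta.findCString(kKeyMIMEType, &containerMime)) {
        ALOGV("container mime: %s", containerMime);   // e.g. "video/mp4"
    }
}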

status_t MPEG4Extractor::readMetaData() {

    off64_t offset = 0;
    status_t err;
    bool sawMoovOrSidx = false;

    while (!((mHasMoovBox && sawMoovOrSidx && (mMdatFound || mMoofFound)) ||
             (mIsHeif && (mPreferHeif || !mHasMoovBox) &&
                     (mItemTable != NULL) && mItemTable->isValid()))) {
        off64_t orig_offset = offset;
        err = parseChunk(&offset, 0); // The while loop walks through the file,
                                      // parsing each box; parseChunk() is recursive.

        if (err != OK && err != UNKNOWN_ERROR) {
            break;
        } else if (offset <= orig_offset) {
            err = ERROR_MALFORMED;
            break;
        } else if (err == UNKNOWN_ERROR) {
            sawMoovOrSidx = true;
        }
    }

    if (mIsHeif && (mItemTable != NULL) && (mItemTable->countImages() > 0)) {
        // HEIF files are handled here, omitted.
    }

    if (mInitCheck == OK) {
        if (findTrackByMimePrefix("video/") != NULL) {
            mFileMetaData.setCString(  // set the container type on mFileMetaData
                    kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_MPEG4);
        } else if (findTrackByMimePrefix("audio/") != NULL) {
            mFileMetaData.setCString(kKeyMIMEType, "audio/mp4");
        } else if (findTrackByMimePrefix(
                MEDIA_MIMETYPE_IMAGE_ANDROID_HEIC) != NULL) {
            mFileMetaData.setCString(
                    kKeyMIMEType, MEDIA_MIMETYPE_CONTAINER_HEIF);
        } else {
            mFileMetaData.setCString(kKeyMIMEType, "application/octet-stream");
        }
    } else {
        mInitCheck = err;
    }

    CHECK_NE(err, (status_t)NO_INIT);

    // copy pssh data into file metadata
    uint64_t psshsize = 0;
    for (size_t i = 0; i < mPssh.size(); i++) { // compute the total size of all pssh entries
        psshsize += 20 + mPssh[i].datalen;
    }
    if (psshsize > 0 && psshsize <= UINT32_MAX) { // copy the pssh data into mFileMetaData;
                                                  // mPssh was parsed out of the file in parseChunk().
        char *buf = (char*)malloc(psshsize);
        if (!buf) {
            ALOGE("b/28471206");
            return NO_MEMORY;
        }
        char *ptr = buf;
        for (size_t i = 0; i < mPssh.size(); i++) {
            memcpy(ptr, mPssh[i].uuid, 20); // uuid + length
            memcpy(ptr + 20, mPssh[i].data, mPssh[i].datalen);
            ptr += (20 + mPssh[i].datalen);
        }
        mFileMetaData.setData(kKeyPssh, 'pssh', buf, psshsize); // store the packed pssh blob in mFileMetaData
        free(buf);
    }

    return mInitCheck;
}
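The blob stored under kKeyPssh is a flat concatenation of entries, each consisting of a 16-byte DRM system UUID, a 4-byte length (copied as-is from the PsshInfo struct, hence native byte order) and the payload; that is why 20 bytes are copied starting at uuid. A hypothetical consumer could walk the blob like this (sketch only; in practice it is consumed by the DRM plumbing):

// Hypothetical walker over the packed kKeyPssh blob built above.
// Layout per entry: [16-byte system UUID][4-byte length][length bytes of data].
void dumpPsshBlob(const uint8_t *blob, size_t size) {
    size_t off = 0;
    while (off + 20 <= size) {
        const uint8_t *uuid = blob + off;            // 16-byte DRM system id
        uint32_t datalen;
        memcpy(&datalen, blob + off + 16, sizeof(datalen));
        if (datalen > size - off - 20) {
            break;                                   // malformed / truncated entry
        }
        ALOGV("pssh entry: uuid=%02x%02x..., %u bytes of data", uuid[0], uuid[1], datalen);
        off += 20 + datalen;                         // advance to the next entry
    }
}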

size_t MPEG4Extractor::countTracks() { 
    status_t err;
    if ((err = readMetaData()) != OK) {
        ALOGV("MPEG4Extractor::countTracks: no tracks");
        return 0;
    }

    size_t n = 0;
    Track *track = mFirstTrack; 
    while (track) {
        ++n;
        track = track->next;
    }

    ALOGV("MPEG4Extractor::countTracks: %zu tracks", n);
    return n;
}

MediaTrack *MPEG4Extractor::getTrack(size_t index) {
    status_t err;
    if ((err = readMetaData()) != OK) {
        return NULL;
    }

    Track *track = mFirstTrack; // start from the first track and walk the list to the requested index
    while (index > 0) {
        if (track == NULL) {
            return NULL;
        }

        track = track->next;
        --index;
    }

    // ... a series of error checks omitted ...

    MPEG4Source *source =  new MPEG4Source(
            track->meta, mDataSource, track->timescale, track->sampleTable,
            mSidxEntries, trex, mMoofOffset, itemTable);
    if (source->init() != OK) {
        delete source;
        return NULL;
    }
    return source;
}
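The MPEG4Source returned here implements the MediaTrack read interface. Whoever holds the track (GenericSource internally, or NuMediaExtractor on behalf of the Java/NDK MediaExtractor) drives it with roughly the following pattern; this is a simplified sketch with error handling trimmed, not code copied from AOSP.

// Simplified consumption pattern for the MediaTrack returned by getTrack().
MediaTrack *source = extractor->getTrack(index);
if (source != nullptr && source->start() == OK) {
    MediaBufferBase *buffer = nullptr;
    while (source->read(&buffer) == OK) {            // one compressed access unit per read
        int64_t timeUs = 0;
        buffer->meta_data().findInt64(kKeyTime, &timeUs);
        ALOGV("sample: %zu bytes, pts=%lld us",
              buffer->range_length(), (long long)timeUs);
        buffer->release();                           // hand the buffer back to the source
        buffer = nullptr;
    }
    source->stop();
}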

status_t MPEG4Extractor::getTrackMetaData(
        MetaDataBase &meta,
        size_t index, uint32_t flags) {

    Track *track = mFirstTrack;
    while (index > 0) { // find the track that corresponds to index
        if (track == NULL) {
            return UNKNOWN_ERROR;
        }

        track = track->next;
        --index;
    }

    [=] {
        int64_t duration;
        int32_t samplerate;
        if (track->has_elst && mHeaderTimescale != 0 &&
                track->meta.findInt64(kKeyDuration, &duration) &&
                track->meta.findInt32(kKeySampleRate, &samplerate)) {

            track->has_elst = false;

            if (track->elst_segment_duration > INT64_MAX) {
                return;
            }
            int64_t segment_duration = track->elst_segment_duration;
            int64_t media_time = track->elst_media_time;
            int64_t halfscale = mHeaderTimescale / 2;

            int64_t delay;
            // delay = ((media_time * samplerate) + halfscale) / mHeaderTimescale;
            if (__builtin_mul_overflow(media_time, samplerate, &delay) ||
                    __builtin_add_overflow(delay, halfscale, &delay) ||
                    (delay /= mHeaderTimescale, false) ||
                    delay > INT32_MAX ||
                    delay < INT32_MIN) {
                return;
            }
            ALOGV("delay = %" PRId64, delay);
            track->meta.setInt32(kKeyEncoderDelay, delay);
            // This delay is used in NuPlayer's sendMetaDataToHal(): when NuPlayer
            // sees a non-zero delay it sets the audio codec delay in samples.
            int64_t scaled_duration;
            // scaled_duration = duration * mHeaderTimescale;
            if (__builtin_mul_overflow(duration, mHeaderTimescale, &scaled_duration)) {
                return;
            }
            ALOGV("scaled_duration = %" PRId64, scaled_duration);

            int64_t segment_end;
            int64_t padding;
            // padding = scaled_duration - ((segment_duration + media_time) * 1000000);
            if (__builtin_add_overflow(segment_duration, media_time, &segment_end) ||
                    __builtin_mul_overflow(segment_end, 1000000, &segment_end) ||
                    __builtin_sub_overflow(scaled_duration, segment_end, &padding)) {
                return;
            }
            ALOGV("segment_end = %" PRId64 ", padding = %" PRId64, segment_end, padding);

            if (padding < 0) {
                // track duration from media header (which is what kKeyDuration is) might
                // be slightly shorter than the segment duration, which would make the
                // padding negative. Clamp to zero.
                padding = 0;
            }

            int64_t paddingsamples;
            int64_t halfscale_e6;
            int64_t timescale_e6;
            // paddingsamples = ((padding * samplerate) + (halfscale * 1000000))
            //                / (mHeaderTimescale * 1000000);
            if (__builtin_mul_overflow(padding, samplerate, &paddingsamples) ||
                    __builtin_mul_overflow(halfscale, 1000000, &halfscale_e6) ||
                    __builtin_mul_overflow(mHeaderTimescale, 1000000, &timescale_e6) ||
                    __builtin_add_overflow(paddingsamples, halfscale_e6, &paddingsamples) ||
                    (paddingsamples /= timescale_e6, false) ||
                    paddingsamples > INT32_MAX) {
                return;
            }
            ALOGV("paddingsamples = %" PRId64, paddingsamples);
            track->meta.setInt32(kKeyEncoderPadding, paddingsamples);
        }
    }();

    if ((flags & kIncludeExtensiveMetaData)
            && !track->includes_expensive_metadata) {
        track->includes_expensive_metadata = true;

        const char *mime;
        CHECK(track->meta.findCString(kKeyMIMEType, &mime));
        if (!strncasecmp("video/", mime, 6)) {
            // MPEG2 tracks do not provide CSD, so read the stream header
            if (!strcmp(mime, MEDIA_MIMETYPE_VIDEO_MPEG2)) {
                off64_t offset;
                size_t size;
                if (track->sampleTable->getMetaDataForSample(
                            0 /* sampleIndex */, &offset, &size, NULL /* sampleTime */) == OK) {
                    if (size > kMaxTrackHeaderSize) {
                        size = kMaxTrackHeaderSize;
                    }
                    uint8_t header[kMaxTrackHeaderSize];
                    if (mDataSource->readAt(offset, &header, size) == (ssize_t)size) {
                        track->meta.setData(kKeyStreamHeader, 'mdat', header, size);
                        // set kKeyStreamHeader
                    }
                }
            }

            // Set kKeyThumbnailTime.
            if (mMoofOffset > 0) {
                int64_t duration;
                if (track->meta.findInt64(kKeyDuration, &duration)) {
                    // nothing fancy, just pick a frame near 1/4th of the duration
                    track->meta.setInt64(
                            kKeyThumbnailTime, duration / 4);
                    // the thumbnail time is simply set to 1/4 of the duration
                }
            } else {
                uint32_t sampleIndex;
                uint32_t sampleTime;
                if (track->timescale != 0 &&
                        track->sampleTable->findThumbnailSample(&sampleIndex) == OK
                        && track->sampleTable->getMetaDataForSample(
                            sampleIndex, NULL /* offset */, NULL /* size */,
                            &sampleTime) == OK) {
                    track->meta.setInt64(
                            kKeyThumbnailTime,
                            ((int64_t)sampleTime * 1000000) / track->timescale);
                }
            }
        }
    }

    meta = track->meta;
    return OK;
}
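To make the edit-list (elst) math above concrete, here is a worked example with made-up but typical numbers: samplerate = 44100, mHeaderTimescale = 44100, media_time = 2112 (the classic AAC encoder delay) and an elst segment slightly shorter than the duration taken from the media header.

// Worked example of the delay/padding formulas above, with hypothetical inputs.
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t samplerate       = 44100;      // kKeySampleRate
    const int64_t headerTimescale  = 44100;      // mHeaderTimescale
    const int64_t media_time       = 2112;       // elst media_time, in timescale units
    const int64_t segment_duration = 441000;     // elst segment: 10 s in timescale units
    const int64_t duration_us      = 10050000;   // kKeyDuration: 10.05 s in microseconds
    const int64_t halfscale        = headerTimescale / 2;

    // delay = ((media_time * samplerate) + halfscale) / mHeaderTimescale
    int64_t delay = (media_time * samplerate + halfscale) / headerTimescale;  // 2112 samples

    // padding = duration * timescale - (segment_duration + media_time) * 1e6
    int64_t scaled_duration = duration_us * headerTimescale;
    int64_t segment_end     = (segment_duration + media_time) * 1000000;
    int64_t padding         = scaled_duration - segment_end;

    // paddingsamples = ((padding * samplerate) + halfscale * 1e6) / (timescale * 1e6)
    int64_t paddingsamples  = (padding * samplerate + halfscale * 1000000)
                              / (headerTimescale * 1000000);                  // 93 samples

    printf("encoder delay   = %lld samples\n", (long long)delay);
    printf("encoder padding = %lld samples\n", (long long)paddingsamples);
    return 0;
}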

 
