Android音频开发之尝试音频混合

音频混合:实时录制audio时录制麦克风数据 和 写入背景音乐

测试代码:https://github.com/CL-window/audio_mix

本次案例实现了

*MediaPlayer 播放音频
*AudioTrack 播放音频 mp3 --> pcm data ( libs/jl1.0.1.jar )
*AudioRecord 录制音频 pcm file
*AudioTrack 播放音频 pcm data
*AudioRecord 录制音频 use MediaCodec & MediaMuxer write data

*MediaExtractor 和 MediaCodec 手动解码出 pcm 数据
*混合音频

音频混合核心算法来自前辈,表示感谢

/**
     * Mixes several equal-length PCM 16-bit little-endian tracks into one
     * using a simple arithmetic-average algorithm (average audio mixing algorithm).
     * code from :    http://www.codexiu.cn/android/blog/3618/
     * Testing shows this algorithm lowers the recorded volume — expected,
     * since averaging N tracks scales each track's level by 1/N.
     *
     * @param bMulRoadAudioes one byte[] per track; all tracks must have the
     *                        same byte length (little-endian 16-bit samples)
     * @return the mixed audio, written in place over the first track's array;
     *         null if the input is empty or track lengths differ
     */
    private byte[] averageMix(byte[][] bMulRoadAudioes) {

        if (bMulRoadAudioes == null || bMulRoadAudioes.length == 0)
            return null;
        byte[] realMixAudio = bMulRoadAudioes[0];

        // A single track needs no mixing.
        if (bMulRoadAudioes.length == 1)
            return realMixAudio;

        // All tracks must match track 0's length; start at 1, track 0 is the
        // reference (the original compared index 0 against itself).
        for (int rw = 1; rw < bMulRoadAudioes.length; ++rw) {
            if (bMulRoadAudioes[rw].length != realMixAudio.length) {
                Log.e("app", "column of the road of audio + " + rw + " is different.");
                return null;
            }
        }

        int row = bMulRoadAudioes.length;
        int column = realMixAudio.length / 2;
        // Decode the little-endian byte pairs into signed 16-bit samples.
        short[][] sMulRoadAudioes = new short[row][column];
        for (int r = 0; r < row; ++r) {
            for (int c = 0; c < column; ++c) {
                sMulRoadAudioes[r][c] = (short) ((bMulRoadAudioes[r][c * 2] & 0xff) | (bMulRoadAudioes[r][c * 2 + 1] & 0xff) << 8);
            }
        }

        // Average the tracks sample by sample. The sum of 'row' shorts fits in
        // an int, and the average is always within short range, so no clipping
        // is required.
        short[] sMixAudio = new short[column];
        for (int sc = 0; sc < column; ++sc) {
            int mixVal = 0;
            for (int sr = 0; sr < row; ++sr) {
                mixVal += sMulRoadAudioes[sr][sc];
            }
            sMixAudio[sc] = (short) (mixVal / row);
        }

        // Re-encode the averaged samples as little-endian bytes, reusing the
        // first track's array as the output buffer.
        for (int sc = 0; sc < column; ++sc) {
            realMixAudio[sc * 2] = (byte) (sMixAudio[sc] & 0x00FF);
            realMixAudio[sc * 2 + 1] = (byte) ((sMixAudio[sc] & 0xFF00) >> 8);
        }
        return realMixAudio;
    }
1.MediaPlayer 播放音乐,这个简单

/**
     * Builds a fresh MediaPlayer for the given file and starts playback
     * asynchronously as soon as the player reports it is prepared.
     *
     * @param filePath path of the audio file to play
     */
    private void initMediaPlayer(String filePath) {
        // Drop any previous player before creating a new one.
        releaseMediaPlayer();

        final MediaPlayer player = new MediaPlayer();
        mMediaPlayer = player;
        player.setAudioStreamType(AudioManager.STREAM_MUSIC);
        player.setOnPreparedListener(new MediaPlayer.OnPreparedListener() {
            @Override
            public void onPrepared(MediaPlayer mp) {
                Log.i("slack", "onPrepared...");
                mMediaPlayer.start();
            }
        });

        try {
            player.setDataSource(filePath);
            // Prepare off the UI thread; onPrepared starts playback.
            player.prepareAsync();
        } catch (IOException e) {
            e.printStackTrace();
            Log.i("slack", e.getMessage());
        }
    }
2.AudioTrack 播放音频 mp3 --> pcm data 使用了( libs/jl1.0.1.jar )

/**
     * Decodes an mp3 file frame by frame with the JLayer Decoder
     * (libs/jl1.0.1.jar) and plays the resulting PCM through a
     * streaming-mode AudioTrack until the file ends or mIsPlaying is cleared.
     */
    class PlayTask extends AsyncTask {
        @Override
        protected Void doInBackground(Void... arg0) {
            mIsPlaying = true;
            Decoder mDecoder = new Decoder();
            try {
                int bufferSize = AudioTrack.getMinBufferSize(mFrequence,
                        mPlayChannelConfig, mAudioEncoding);
                short[] buffer = new short[bufferSize];
                // Open the mp3; JLayer's Bitstream splits the stream into frames.
                FileInputStream fin = new FileInputStream(mp3FilePath);
                Bitstream bitstream = new Bitstream(fin);
                // Streaming AudioTrack: PCM is written to it while it plays.
                AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC,
                        mFrequence,
                        mPlayChannelConfig, mAudioEncoding, bufferSize,
                        AudioTrack.MODE_STREAM);
                // Start playback before feeding data.
                track.play();
                // AudioTrack consumes a stream, so decode one frame and write it,
                // repeating until EOF or playback is stopped.
                Header header;
                while (mIsPlaying && (header = bitstream.readFrame()) != null) {
                    SampleBuffer sampleBuffer = (SampleBuffer) mDecoder.decodeFrame(header, bitstream);
                    buffer = sampleBuffer.getBuffer();
                    track.write(buffer, 0, buffer.length);
                    bitstream.closeFrame();
                }

                // Playback finished: stop/release the track and close the file.
                // NOTE(review): these are skipped if an exception is thrown
                // above — consider moving them into a finally block.
                track.stop();
                track.release();
                fin.close();
            } catch (Exception e) {
                // TODO: handle exception
                Log.e("slack", "error:" + e.getMessage());
            }
            return null;
        }


        protected void onPostExecute(Void result) {

        }


        protected void onPreExecute() {

        }
    }
3.AudioRecord 录制音频 pcm file
/**
     * Records raw PCM from the microphone with AudioRecord and streams each
     * sample to {@code mAudioFile} until {@code mIsRecording} is cleared.
     */
    class RecordTask extends AsyncTask {
        @Override
        protected Void doInBackground(Void... arg0) {
            mIsRecording = true;
            DataOutputStream dos = null;
            AudioRecord record = null;
            try {
                // Output stream to the destination PCM file.
                dos = new DataOutputStream(
                        new BufferedOutputStream(
                                new FileOutputStream(mAudioFile)));
                // Query a suitable buffer size for the configured format.
                // NOTE(review): size is computed with mChannelStereo but the
                // recorder below uses mChannelConfig — confirm they agree.
                int bufferSize = AudioRecord.getMinBufferSize(mFrequence,
                        mChannelStereo, mAudioEncoding);
                // Create the recorder.
//                AudioRecord record = findAudioRecord();
                record = new AudioRecord(
                        MediaRecorder.AudioSource.MIC, mFrequence,
                        mChannelConfig, mAudioEncoding, bufferSize);
                // Capture buffer.
                short[] buffer = new short[bufferSize];

                // Start capturing.
                record.startRecording();

                int r = 0; // progress counter
                // Keep recording until mIsRecording is cleared externally.
                while (mIsRecording) {
                    // Blocking read; returns the number of shorts captured.
                    int bufferReadResult = record
                            .read(buffer, 0, buffer.length);
                    // writeShort is big-endian; PlayPCMTask reads the file back
                    // with readShort, so the two formats match each other.
                    for (int i = 0; i < bufferReadResult; i++) {
                        dos.writeShort(buffer[i]);
                    }
                    publishProgress(new Integer(r)); // report progress to the UI thread
                    r++;
                }
                // Recording finished.
                record.stop();
                Log.i("slack", "::" + mAudioFile.length());
            } catch (Exception e) {
                Log.e("slack", "::" + e.getMessage());
            } finally {
                // Free the native recorder and close the file even on error
                // (the original leaked both when an exception was thrown and
                // never called release()).
                if (record != null) {
                    record.release();
                }
                if (dos != null) {
                    try {
                        dos.close();
                    } catch (IOException ignored) {
                        // best-effort close; nothing further to do
                    }
                }
            }
            return null;
        }


        // Runs on the UI thread whenever publishProgress is called above.
        protected void onProgressUpdate(Integer... progress) {
            //
        }


        protected void onPostExecute(Void result) {

        }

    }
4.AudioTrack 播放音频 pcm data
/**
     * Plays back the raw PCM file produced by RecordTask through a
     * streaming-mode AudioTrack until the file ends or mIsPlaying is cleared.
     */
    class PlayPCMTask extends AsyncTask {
        @Override
        protected Void doInBackground(Void... arg0) {
            mIsPlaying = true;
            int bufferSize = AudioTrack.getMinBufferSize(mFrequence,
                    mPlayChannelConfig, mAudioEncoding);
            short[] buffer = new short[bufferSize];
            DataInputStream dis = null;
            AudioTrack track = null;
            try {
                // Input stream over the recorded file; samples were written with
                // writeShort (big-endian) and are read back with readShort.
                dis = new DataInputStream(
                        new BufferedInputStream(new FileInputStream(mAudioFile)));
                // NOTE(review): CHANNEL_IN_STEREO is an *input* channel mask; an
                // AudioTrack normally takes CHANNEL_OUT_*. Kept as in the original
                // ("here may some problem") — verify on target devices.
                track = new AudioTrack(AudioManager.STREAM_MUSIC,
                        mFrequence,
                        AudioFormat.CHANNEL_IN_STEREO, mAudioEncoding, bufferSize,
                        AudioTrack.MODE_STREAM);
                // Start playback before feeding data.
                track.play();
                // AudioTrack consumes a stream, so read and write chunk by chunk.
                while (mIsPlaying && dis.available() > 0) {
                    int i = 0;
                    while (dis.available() > 0 && i < buffer.length) {
                        buffer[i] = dis.readShort();
                        i++;
                    }
                    // Write only the samples actually read this round; the
                    // original wrote the whole buffer, replaying stale samples
                    // from the previous chunk when the file's tail was short.
                    track.write(buffer, 0, i);
                }

                // Playback finished.
                track.stop();
            } catch (Exception e) {
                Log.e("slack", "error:" + e.getMessage());
            } finally {
                // Free the native track and close the file even on error.
                if (track != null) {
                    track.release();
                }
                if (dis != null) {
                    try {
                        dis.close();
                    } catch (IOException ignored) {
                        // best-effort close; nothing further to do
                    }
                }
            }
            return null;
        }


        protected void onPostExecute(Void result) {

        }


        protected void onPreExecute() {

        }
    }
5.AudioRecord 录制音频 use MediaCodec & MediaMuxer write data

这个有两种,一种是byte[],一种是ByteBuffer

/**
     * Records microphone PCM with AudioRecord and hands each captured
     * buffer (as a byte[]) to {@code mAudioEncoder} for AAC encoding and
     * MP4 muxing. Runs until {@code mIsRecording} is cleared.
     */
    class RecordMediaCodecTask extends AsyncTask {
        @Override
        protected Void doInBackground(Void... arg0) {
            mIsRecording = true;
            int samples_per_frame = 2048;
            int bufferReadResult = 0;
            long audioPresentationTimeNs; // audio presentation timestamp (pts)
            try {
                // Query a suitable buffer size for the configured format.
                int bufferSize = AudioRecord.getMinBufferSize(mFrequence,
                        mChannelConfig, mAudioEncoding);
                // Create the recorder.
                AudioRecord record = new AudioRecord(
                        MediaRecorder.AudioSource.MIC, mFrequence,
                        mChannelConfig, mAudioEncoding, bufferSize);
//                record.setRecordPositionUpdateListener(new AudioRecord.OnRecordPositionUpdateListener() {
//                    @Override
//                    public void onMarkerReached(AudioRecord recorder) {
//
//                    }
//
//                    @Override
//                    public void onPeriodicNotification(AudioRecord recorder) {
//
//                    }
//                });
                // Capture buffer.
                byte[] buffer = new byte[samples_per_frame];// byte size need less than MediaFormat.KEY_MAX_INPUT_SIZE

                // Start capturing.
                record.startRecording();

                while (mIsRecording) {
                    // Timestamp taken before the blocking read; used as this frame's pts.
                    audioPresentationTimeNs = System.nanoTime();
                    // Read captured samples into the byte buffer.
                    bufferReadResult = record.read(buffer, 0, samples_per_frame);
                    // Check whether the read succeeded.
                    if (bufferReadResult == AudioRecord.ERROR_BAD_VALUE || bufferReadResult == AudioRecord.ERROR_INVALID_OPERATION)
                        Log.e("slack", "Read error");
                    if (mAudioEncoder != null) {
                        // Hand the PCM to AudioEncoder for AAC encoding.
                        mAudioEncoder.offerAudioEncoder(buffer, audioPresentationTimeNs);
                    }

                }
                // Recording finished: tear down the recorder.
                if (record != null) {
                    record.setRecordPositionUpdateListener(null);
                    record.stop();
                    record.release();
                    record = null;
                }

            } catch (Exception e) {
                // TODO: handle exception
                Log.e("slack", "::" + e.getMessage());
            }
            return null;
        }


        // Runs on the UI thread whenever publishProgress is called above.
        protected void onProgressUpdate(Integer... progress) {
            //
        }


        protected void onPostExecute(Void result) {

        }

    }

/**
     * Same as RecordMediaCodecTask, but reads the microphone PCM into a
     * direct ByteBuffer and passes the buffer (plus its byte count) to
     * {@code mAudioEncoder} for AAC encoding and MP4 muxing.
     */
    class RecordMediaCodecByteBufferTask extends AsyncTask {
        @Override
        protected Void doInBackground(Void... arg0) {
            mIsRecording = true;
            int samples_per_frame = 2048;// SAMPLES_PER_FRAME
            int bufferReadResult = 0;
            long audioPresentationTimeNs; // audio presentation timestamp (pts)
            try {
                // Query a suitable buffer size for the configured format.
                int bufferSize = AudioRecord.getMinBufferSize(mFrequence,
                        mChannelConfig, mAudioEncoding);
                // Create the recorder.
                AudioRecord record = new AudioRecord(
                        MediaRecorder.AudioSource.MIC, mFrequence,
                        mChannelConfig, mAudioEncoding, bufferSize);
//                record.setRecordPositionUpdateListener(new AudioRecord.OnRecordPositionUpdateListener() {
//                    @Override
//                    public void onMarkerReached(AudioRecord recorder) {
//
//                    }
//
//                    @Override
//                    public void onPeriodicNotification(AudioRecord recorder) {
//
//                    }
//                });
                // Capture buffer (direct, as MediaCodec-friendly storage).
                int readBytes;
                ByteBuffer buf = ByteBuffer.allocateDirect(samples_per_frame);

                // Start capturing.
                record.startRecording();

                while (mIsRecording) {
                    // Timestamp taken before the blocking read; used as this frame's pts.
                    audioPresentationTimeNs = System.nanoTime();
                    // read audio data from internal mic
                    buf.clear();
                    bufferReadResult = record.read(buf, samples_per_frame);
                    // Check whether the read succeeded.
                    if (bufferReadResult == AudioRecord.ERROR || bufferReadResult == AudioRecord.ERROR_BAD_VALUE ||
                            bufferReadResult == AudioRecord.ERROR_INVALID_OPERATION)
                        Log.e("slack", "Read error");
                    if (mAudioEncoder != null) {
                        // Set position to the bytes read, then flip so that
                        // position == 0 and limit == bufferReadResult before
                        // queueing. NOTE(review): this assumes read(ByteBuffer,
                        // int) leaves the buffer position unchanged — confirm
                        // against the AudioRecord documentation.
                        buf.position(bufferReadResult).flip();
                        mAudioEncoder.offerAudioEncoder(buf, audioPresentationTimeNs, bufferReadResult);
                    }

                }
                // Recording finished: tear down the recorder.
                if (record != null) {
                    record.setRecordPositionUpdateListener(null);
                    record.stop();
                    record.release();
                    record = null;
                }

            } catch (Exception e) {
                // TODO: handle exception
                Log.e("slack", "::" + e.getMessage());
            }
            return null;
        }


        // Runs on the UI thread whenever publishProgress is called above.
        protected void onProgressUpdate(Integer... progress) {
            //
        }


        protected void onPostExecute(Void result) {

        }

    }

AudioEncoder:

import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaFormat;
import android.media.MediaMuxer;
import android.util.Log;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

/**
 * Created by slack
 * on 17/2/6 下午12:26.
 * 对音频数据进行编码
 * MediaCodec & MediaMuxer write data
 */
/**
 * Created by slack on 17/2/6, 12:26 PM.
 * Encodes raw PCM audio to AAC with MediaCodec and writes the encoded
 * frames into an MP4 container via MediaMuxer.
 * All encode/stop requests are serialized onto a single-thread executor,
 * so the codec and muxer are only driven from one thread.
 */
public class AudioEncoder {
    private static final String TAG = "AudioEncoder";
    // --- encoding state ---
    private MediaCodec mAudioCodec;     // the AAC audio encoder
    private MediaFormat mAudioFormat;
    private static final String AUDIO_MIME_TYPE = "audio/mp4a-latm"; // audio MIME type (AAC)
    private static final int SAMPLE_RATE = 44100; // sample rate (CD quality)
    private TrackIndex mAudioTrackIndex = new TrackIndex();
    private MediaMuxer mMediaMuxer;     // muxer writing the MP4 file
    private boolean mMuxerStart = false; // whether the muxer has been started
    private MediaCodec.BufferInfo mAudioBufferInfo;
    // Bytes of audio received so far; used to latch the recording start time.
    // NOTE(review): this field is static but reset in prepareEncoder(), so two
    // live AudioEncoder instances would interfere — confirm only one is active.
    private static long audioBytesReceived = 0;
    private long audioStartTime;
    private String recordFile;
    private boolean eosReceived = false;  // set once the stop request arrives (end of stream)
    private ExecutorService encodingService = Executors.newSingleThreadExecutor(); // serializes encode tasks

    private long mLastAudioPresentationTimeUs = 0;

    // Task kind: one value marks an encode request, the other the finalize request.
    enum EncoderTaskType {
        ENCODE_FRAME, FINALIZE_ENCODER
    }

    /**
     * @param filePath destination path of the MP4 file to write
     */
    public AudioEncoder(String filePath) {
        recordFile = filePath;
//        prepareEncoder();
    }

    // Mutable holder for the muxer track index (assigned on format change).
    class TrackIndex {
        int index = 0;
    }

    /**
     * Configures and starts an AAC (LC) encoder with the built-in defaults
     * (44.1 kHz, 2 channels, 128 kbit/s) and creates the MP4 muxer.
     */
    public void prepareEncoder() {
        eosReceived = false;
        audioBytesReceived = 0;
        mAudioBufferInfo = new MediaCodec.BufferInfo();
        mAudioFormat = new MediaFormat();
        mAudioFormat.setString(MediaFormat.KEY_MIME, AUDIO_MIME_TYPE);
        mAudioFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
        mAudioFormat.setInteger(MediaFormat.KEY_SAMPLE_RATE, SAMPLE_RATE);
        mAudioFormat.setInteger(MediaFormat.KEY_BIT_RATE, 128000);
        mAudioFormat.setInteger(MediaFormat.KEY_CHANNEL_COUNT, 2);
        mAudioFormat.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, 10 * 1024);
        try {
            mAudioCodec = MediaCodec.createEncoderByType(AUDIO_MIME_TYPE);
            mAudioCodec.configure(mAudioFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
            mAudioCodec.start();
            mMediaMuxer = new MediaMuxer(recordFile, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
            Log.d(TAG, "prepareEncoder...");
        } catch (IOException e) {
            e.printStackTrace();
        }

    }

    /**
     * Like {@link #prepareEncoder()}, but copies sample rate, bit rate and
     * channel count from the supplied format (e.g. taken from a decoded
     * source track). Falls back to the defaults when format is null.
     */
    public void prepareEncoder(MediaFormat format) {
        if(format == null){
            this.prepareEncoder();
            return;
        }
        eosReceived = false;
        audioBytesReceived = 0;
        mAudioBufferInfo = new MediaCodec.BufferInfo();
        mAudioFormat = new MediaFormat();
        mAudioFormat.setInteger(MediaFormat.KEY_SAMPLE_RATE, format.getInteger(MediaFormat.KEY_SAMPLE_RATE));
        mAudioFormat.setInteger(MediaFormat.KEY_BIT_RATE, format.getInteger(MediaFormat.KEY_BIT_RATE));
        mAudioFormat.setInteger(MediaFormat.KEY_CHANNEL_COUNT, format.getInteger(MediaFormat.KEY_CHANNEL_COUNT));
        mAudioFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
        mAudioFormat.setString(MediaFormat.KEY_MIME, AUDIO_MIME_TYPE);
        mAudioFormat.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, 10 * 1024);
        try {
            mAudioCodec = MediaCodec.createEncoderByType(AUDIO_MIME_TYPE);
            mAudioCodec.configure(mAudioFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
            mAudioCodec.start();
            mMediaMuxer = new MediaMuxer(recordFile, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
            Log.d(TAG, "prepareEncoder...");
        } catch (IOException e) {
            e.printStackTrace();
        }

    }

    // Called by the AudioRecord task: queues a byte[] encode task.
    public void offerAudioEncoder(byte[] input, long presentationTimeStampNs) {
        if (!encodingService.isShutdown()) {
//            Log.d(TAG, "encodingServiceEncoding--submit: " + input.length + "  " + presentationTimeStampNs) ;
            encodingService.submit(new AudioEncodeTask(this, input, presentationTimeStampNs));
        }

    }

    // Called by the AudioRecord task: queues a ByteBuffer encode task.
    public void offerAudioEncoder(ByteBuffer buffer, long presentationTimeStampNs, int length) {
        if (!encodingService.isShutdown()) {
            encodingService.submit(new AudioEncodeTask(this, buffer, length, presentationTimeStampNs));
        }

    }

    /**
     * Feeds one byte[] of PCM into the encoder (runs on the encoding
     * executor). Drains pending output first, then queues the input buffer;
     * when eosReceived is set, queues it with the EOS flag and shuts
     * everything down. Note: the pts parameter is unused here — the
     * timestamp is derived from elapsed time since the first buffer.
     */
    public void _offerAudioEncoder(byte[] input, long pts) {
        // First buffer: latch the recording start time.
        if (audioBytesReceived == 0) {
            audioStartTime = System.nanoTime();
        }
        audioBytesReceived += input.length;
        drainEncoder(mAudioCodec, mAudioBufferInfo, mAudioTrackIndex, false);
        try {
            ByteBuffer[] inputBuffers = mAudioCodec.getInputBuffers();
            int inputBufferIndex = mAudioCodec.dequeueInputBuffer(-1);
//            Log.d(TAG, "inputBufferIndex--" + inputBufferIndex);
            if (inputBufferIndex >= 0) {
                ByteBuffer inputBuffer = inputBuffers[inputBufferIndex];
                inputBuffer.clear();
                inputBuffer.put(input);

                // Presentation time = elapsed recording time in microseconds.
                long presentationTimeUs = (System.nanoTime() - audioStartTime) / 1000L;
//                Log.d(TAG, "presentationTimeUs--" + presentationTimeUs);
                if (eosReceived) {
                    mAudioCodec.queueInputBuffer(inputBufferIndex, 0, input.length, presentationTimeUs, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
                    closeEncoder(mAudioCodec, mAudioBufferInfo, mAudioTrackIndex);
                    closeMuxer();
                    encodingService.shutdown();

                } else {
                    mAudioCodec.queueInputBuffer(inputBufferIndex, 0, input.length, presentationTimeUs, 0);
                }
            }

        } catch (Throwable t) {
            Log.e(TAG, "_offerAudioEncoder exception " + t.getMessage());
        }

    }

    /**
     * ByteBuffer variant of {@link #_offerAudioEncoder(byte[], long)}.
     * Here the caller-supplied pts IS used: the first pts becomes the start
     * time and each frame's timestamp is the delta in microseconds.
     */
    public void _offerAudioEncoder(ByteBuffer buffer, int length, long pts) {
        // First buffer: latch the recording start time from the caller's pts.
        if (audioBytesReceived == 0) {
            audioStartTime = pts;
        }
        audioBytesReceived += length;
        drainEncoder(mAudioCodec, mAudioBufferInfo, mAudioTrackIndex, false);
        try {
            ByteBuffer[] inputBuffers = mAudioCodec.getInputBuffers();
            int inputBufferIndex = mAudioCodec.dequeueInputBuffer(-1);
//            Log.d(TAG, "inputBufferIndex--" + inputBufferIndex);
            if (inputBufferIndex >= 0) {
                ByteBuffer inputBuffer = inputBuffers[inputBufferIndex];
                inputBuffer.clear();
                if (buffer != null) {
                    inputBuffer.put(buffer);
                }

                // Presentation time = elapsed recording time in microseconds.
                long presentationTimeUs = (pts - audioStartTime) / 1000;
//                Log.d(TAG, "presentationTimeUs--" + presentationTimeUs);
                if (eosReceived) {
                    mAudioCodec.queueInputBuffer(inputBufferIndex, 0, length, presentationTimeUs, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
                    closeEncoder(mAudioCodec, mAudioBufferInfo, mAudioTrackIndex);
                    closeMuxer();
                    encodingService.shutdown();

                } else {
                    mAudioCodec.queueInputBuffer(inputBufferIndex, 0, length, presentationTimeUs, 0);
                }
            }

        } catch (Throwable t) {
            Log.e(TAG, "_offerAudioEncoder exception " + t.getMessage());
        }

    }

    /**
     * Drains all currently-available encoder output into the muxer.
     * try to fix: E/MPEG4Writer: timestampUs 6220411 &lt; lastTimestampUs 6220442 for Audio track
     * add check : mLastAudioPresentationTimeUs &lt; bufferInfo.presentationTimeUs
     * (samples with non-increasing timestamps are silently dropped).
     */
    public void drainEncoder(MediaCodec encoder, MediaCodec.BufferInfo bufferInfo, TrackIndex trackIndex, boolean endOfStream) {
        final int TIMEOUT_USEC = 100;
        ByteBuffer[] encoderOutputBuffers = encoder.getOutputBuffers();
        try {
            while (true) {
                int encoderIndex = encoder.dequeueOutputBuffer(bufferInfo, TIMEOUT_USEC);
//                Log.d(TAG, "encoderIndex---" + encoderIndex);
                if (encoderIndex == MediaCodec.INFO_TRY_AGAIN_LATER) {
                    // No output ready. If we are not at end of stream, stop
                    // draining; otherwise keep spinning until EOS arrives.
//                    Log.d(TAG, "info_try_again_later");
                    if (!endOfStream)
                        break;
                    else
                        Log.d(TAG, "no output available, spinning to await EOS");
                } else if (encoderIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
                    // Happens exactly once, before the first encoded data:
                    // register the track and start the muxer.
                    if (mMuxerStart)
                        throw new RuntimeException("format 在muxer启动后发生了改变");
                    MediaFormat newFormat = encoder.getOutputFormat();
                    trackIndex.index = mMediaMuxer.addTrack(newFormat);
                    if (!mMuxerStart) {
                        mMediaMuxer.start();
                    }
                    mMuxerStart = true;
                } else if (encoderIndex < 0) {
                    Log.w(TAG, "encoderIndex 非法" + encoderIndex);
                } else {
                    // A real output buffer. Stop at the EOS flag.
                    if ((bufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
                        break;
                    }

                    ByteBuffer encodeData = encoderOutputBuffers[encoderIndex];
                    if (encodeData == null) {
                        throw new RuntimeException("编码数据为空");
                    }else
                    // Only write non-empty samples with strictly increasing
                    // timestamps (see the method comment above).
                    if (bufferInfo.size != 0 && mLastAudioPresentationTimeUs < bufferInfo.presentationTimeUs) {
                        if (!mMuxerStart) {
                            throw new RuntimeException("混合器未开启");
                        }
                        Log.d(TAG, "write_info_data......");
                        encodeData.position(bufferInfo.offset);
                        encodeData.limit(bufferInfo.offset + bufferInfo.size);
//                        Log.d(TAG, "presentationTimeUs--bufferInfo : " + bufferInfo.presentationTimeUs);
                        mMediaMuxer.writeSampleData(trackIndex.index, encodeData, bufferInfo);

                        mLastAudioPresentationTimeUs = bufferInfo.presentationTimeUs;
                    }

                    encoder.releaseOutputBuffer(encoderIndex, false);

                }
            }
        } catch (Exception e) {
            e.printStackTrace();
            Log.e("slack", "error :: " + e.getMessage());
        }

    }

    /**
     * Drains remaining output (with endOfStream set), then stops and
     * releases the encoder.
     *
     * @param encoder    the codec to close
     * @param bufferInfo scratch buffer-info used while draining
     * @param trackIndex muxer track the remaining samples belong to
     */
    public void closeEncoder(MediaCodec encoder, MediaCodec.BufferInfo bufferInfo, TrackIndex trackIndex) {
        drainEncoder(encoder, bufferInfo, trackIndex, true);
        encoder.stop();
        encoder.release();
        encoder = null;

    }

    /**
     * Stops and releases the muxer (only if it was started).
     */
    public void closeMuxer() {
        if (mMuxerStart && mMediaMuxer != null) {
            mMediaMuxer.stop();
            mMediaMuxer.release();
            mMediaMuxer = null;
            mMuxerStart = false;
        }
    }

    // Queues the finalize request; actual shutdown happens on the executor
    // when the next input buffer is queued with the EOS flag.
    public void stop() {
        if (!encodingService.isShutdown()) {
            encodingService.submit(new AudioEncodeTask(this, EncoderTaskType.FINALIZE_ENCODER));
        }
    }

    // Marks end-of-stream; picked up by _offerAudioEncoder on the next buffer.
    private void _stop() {
        eosReceived = true;
        Log.d(TAG, "停止编码");
    }


    /**
     * One queued unit of work for the encoding executor: either encode one
     * PCM frame (byte[] or ByteBuffer) or finalize the encoder.
     */
    class AudioEncodeTask implements Runnable {
        private static final String TAG = "AudioEncoderTask";
        private boolean is_initialized = false;
        private AudioEncoder encoder;
        private byte[] audio_data;
        private ByteBuffer byteBuffer;
        private int length;
        long pts;
        private EncoderTaskType type;

        // Constructor for a byte[] encode task.
        public AudioEncodeTask(AudioEncoder encoder, byte[] audio_data, long pts) {
            this.encoder = encoder;
            this.audio_data = audio_data;
            this.pts = pts;
            is_initialized = true;
            this.type = EncoderTaskType.ENCODE_FRAME;
            // the data is available at this point
//            Log.d(TAG,"AudioData--"+audio_data + " pts--"+pts);
        }

        // Constructor for a ByteBuffer encode task.
        public AudioEncodeTask(AudioEncoder encoder, ByteBuffer buffer, int length, long pts) {
            this.encoder = encoder;
            this.byteBuffer = buffer;
            this.length = length;
            this.pts = pts;
            is_initialized = true;
            this.type = EncoderTaskType.ENCODE_FRAME;
            // the data is available at this point
        }

        // Constructor for the stop-encoding (finalize) task.
        public AudioEncodeTask(AudioEncoder encoder, EncoderTaskType type) {
            this.type = type;

            if (type == EncoderTaskType.FINALIZE_ENCODER) {
                this.encoder = encoder;
                is_initialized = true;
            }
//            Log.d(TAG, "完成...");
        }

        // Forwards the frame to whichever _offerAudioEncoder overload matches.
        private void encodeFrame() {
//            Log.d(TAG, "audio_data---encoder--" + audio_data);
            if (audio_data != null && encoder != null) {
                encoder._offerAudioEncoder(audio_data, pts);
                audio_data = null;
            } else if (byteBuffer != null && encoder != null) {
                encoder._offerAudioEncoder(byteBuffer, length, pts);
                audio_data = null;
            }

        }

        // Signals end-of-stream to the encoder.
        private void finalizeEncoder() {
            encoder._stop();
        }

        @Override
        public void run() {
            Log.d(TAG, "is_initialized--" + is_initialized + " " + type);
            if (is_initialized) {
                switch (type) {
                    case ENCODE_FRAME:
                        // encode one frame
                        encodeFrame();
                        break;
                    case FINALIZE_ENCODER:
                        // finish encoding
                        finalizeEncoder();
                        break;
                }
                is_initialized = false;
            } else {
                // log the error
                Log.e(TAG, "AudioEncoderTask is not initiallized");
            }
        }
    }


}
6. MediaExtractor 和 MediaCodec 手动解码出 pcm 数据,播放就使用 4 就好

import android.media.MediaCodec;
import android.media.MediaExtractor;
import android.media.MediaFormat;
import android.util.Log;

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.Objects;

/**
 * Created by slack
 * on 17/2/7 上午11:11.
 * mp3 --> pcm data
 */

public class PCMData {
    
    /**
     * 初始化解码器
     */
    private static final Object lockPCM = new Object();
    private static final int BUFFER_SIZE = 2048;

    private ArrayList chunkPCMDataContainer = new ArrayList<>();//PCM数据块容器
    private MediaExtractor mediaExtractor;
    private MediaCodec mediaDecode;
    private ByteBuffer[] decodeInputBuffers;
    private ByteBuffer[] decodeOutputBuffers;
    private MediaCodec.BufferInfo decodeBufferInfo;
    boolean sawInputEOS = false;
    boolean sawOutputEOS = false;

    private String mp3FilePath;

    private MediaFormat mMediaFormat;

    /**
     * @param path path of the mp3 file whose PCM data will be extracted
     */
    public PCMData(String path) {
        mp3FilePath = path;
    }

    /**
     * Prepares the decoder, then kicks off PCM extraction on a background
     * thread. Returns this instance for call chaining.
     */
    public PCMData startPcmExtractor(){
        initMediaDecode();
        Thread worker = new Thread(new Runnable() {
            @Override
            public void run() {
                // Decode the whole file; chunks become available via getPCMData().
                srcAudioFormatToPCM();
            }
        });
        worker.start();
        return this;
    }

    /**
     * Drops all buffered PCM chunks; returns this for chaining.
     * NOTE(review): does not stop/release mediaDecode or mediaExtractor —
     * confirm whether the codec is meant to be torn down elsewhere.
     */
    public PCMData release(){
        chunkPCMDataContainer.clear();
        return this;
    }

    /**
     * Pops and returns the oldest decoded PCM chunk, or null when nothing is
     * buffered yet. Guarded by lockPCM because the decoder thread appends
     * concurrently.
     */
    public byte[] getPCMData() {
        synchronized (lockPCM) {// lock: the decoder thread appends concurrently
            if (chunkPCMDataContainer.isEmpty()) {
                return null;
            }

            byte[] pcmChunk = chunkPCMDataContainer.get(0).bufferBytes;// always take index 0 (FIFO order)
            chunkPCMDataContainer.remove(0);// remove after taking: preserves order and frees memory promptly
            return pcmChunk;
        }
    }

    /**
     * Returns the buffer size of the next queued chunk, or BUFFER_SIZE when
     * the queue is empty. (While testing, the decoder's
     * MediaCodec.BufferInfo.size was observed to vary between chunks, so the
     * player queries the size per chunk.)
     */
    public int getBufferSize() {
        synchronized (lockPCM) {// same lock as getPCMData — the queue is shared
            if (chunkPCMDataContainer.isEmpty()) {
                return BUFFER_SIZE;
            }
            return chunkPCMDataContainer.get(0).bufferSize;
        }
    }

    /**
     * Format of the selected audio track (set by initMediaDecode);
     * null until a track has been selected.
     */
    public MediaFormat getMediaFormat() {
        return mMediaFormat;
    }

    /**
     * Opens the mp3 with MediaExtractor, selects the first audio track, and
     * creates, configures and starts a MediaCodec decoder for it. On failure
     * the error is logged and mediaDecode stays null.
     */
    private void initMediaDecode() {
        try {
            mediaExtractor = new MediaExtractor();// separates a media file's audio and video tracks
            mediaExtractor.setDataSource(mp3FilePath);// location of the media file
            for (int i = 0; i < mediaExtractor.getTrackCount(); i++) {// iterate tracks; a pure audio file has just one
                mMediaFormat = mediaExtractor.getTrackFormat(i);
                String mime = mMediaFormat.getString(MediaFormat.KEY_MIME);
                if (mime.startsWith("audio/")) {// found the audio track
//                    format.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, 200 * 1024);
                    mediaExtractor.selectTrack(i);// select this audio track
                    mediaDecode = MediaCodec.createDecoderByType(mime);// create a decoder for its MIME type
                    mediaDecode.configure(mMediaFormat, null, null, 0);
                    break;
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
            Log.e("slack","error :: " + e.getMessage());
        }

        if (mediaDecode == null) {
            Log.e("slack", "create mediaDecode failed");
            return;
        }
        mediaDecode.start();// start MediaCodec; it now waits for input
        decodeInputBuffers = mediaDecode.getInputBuffers();// input ByteBuffers fed to MediaCodec
        decodeOutputBuffers = mediaDecode.getOutputBuffers();// output ByteBuffers holding the decoded PCM
        decodeBufferInfo = new MediaCodec.BufferInfo();// describes each decoded chunk
    }

    /**
     * Appends one decoded chunk for the consumer thread; lockPCM serializes
     * access with getPCMData()/getBufferSize().
     */
    private void putPCMData(byte[] pcmChunk, int bufferSize) {
        PCM chunk = new PCM(pcmChunk, bufferSize); // build outside the lock
        synchronized (lockPCM) {
            chunkPCMDataContainer.add(chunk);
        }
    }


    /**
     * Decodes the selected audio track into PCM chunks, feeding each one to
     * putPCMData() until the decoder signals end-of-stream. Runs on the
     * worker thread started by startPcmExtractor(); returns nothing —
     * completion is signalled via sawOutputEOS.
     */
    private void srcAudioFormatToPCM() {

        sawOutputEOS = false;
        sawInputEOS = false;
        try {
            while (!sawOutputEOS) {
                if (!sawInputEOS) {
                    int inputIndex = mediaDecode.dequeueInputBuffer(-1);// -1 = block until a buffer is free, so no sample is dropped
                    if (inputIndex >= 0) {
                        ByteBuffer inputBuffer = decodeInputBuffers[inputIndex];
                        inputBuffer.clear();// discard whatever this buffer held last round
                        int sampleSize = mediaExtractor.readSampleData(inputBuffer, 0);// copy the next encoded sample in
                        if (sampleSize < 0) {// negative size: the extractor is exhausted
                            sawInputEOS = true;
                            mediaDecode.queueInputBuffer(inputIndex, 0, 0, 0, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
                        } else {
                            long presentationTimeUs = mediaExtractor.getSampleTime();
                            mediaDecode.queueInputBuffer(inputIndex, 0, sampleSize, presentationTimeUs, 0);// hand the sample to the decoder
                            mediaExtractor.advance();// move to the next sample
                        }
                    }
                }

                // 10000 us timeout (NOT -1): the decoder sometimes has no output
                // ready yet, and an infinite wait here would hang the loop.
                int outputIndex = mediaDecode.dequeueOutputBuffer(decodeBufferInfo, 10000);
                if (outputIndex >= 0) {
                    int outputBufIndex = outputIndex;
                    // Simply ignore codec config buffers.
                    if ((decodeBufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
                        mediaDecode.releaseOutputBuffer(outputBufIndex, false);
                        continue;
                    }

                    if (decodeBufferInfo.size != 0) {

                        ByteBuffer outBuf = decodeOutputBuffers[outputBufIndex];// buffer now holding decoded PCM

                        // restrict the view to the valid region described by BufferInfo
                        outBuf.position(decodeBufferInfo.offset);
                        outBuf.limit(decodeBufferInfo.offset + decodeBufferInfo.size);
                        byte[] data = new byte[decodeBufferInfo.size];
                        outBuf.get(data);// copy the PCM bytes out of the codec buffer
                        putPCMData(data,decodeBufferInfo.size);// queue for the consumer thread
                    }

                    // Must release, or the codec runs out of buffers and stalls.
                    mediaDecode.releaseOutputBuffer(outputBufIndex, false);

                    if ((decodeBufferInfo.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
                        sawOutputEOS = true;
                    }

                } else if (outputIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
                    // codec swapped its output buffers — refresh our cached array
                    decodeOutputBuffers = mediaDecode.getOutputBuffers();
                } else if (outputIndex == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
                    // output format change is irrelevant here; PCM layout is read per-chunk
                }
            }
        } finally {
            // always free codec and extractor, even if decoding threw
            if(mediaDecode != null) {
                mediaDecode.release();
            }
            if(mediaExtractor != null){
                mediaExtractor.release();
            }
        }
    }


    /**
     * One decoded PCM chunk plus the size reported by the codec's BufferInfo.
     * Fields are final: they are set once in the constructor and only read
     * afterwards (getPCMData()/getBufferSize()).
     */
    class PCM {
        public PCM(byte[] bufferBytes, int bufferSize) {
            this.bufferBytes = bufferBytes;
            this.bufferSize = bufferSize;
        }

        final byte[] bufferBytes; // raw PCM bytes from the decoder
        final int bufferSize;     // number of valid bytes in bufferBytes
    }
}
7. 混合音频:需要配合第 6 步播放的背景音乐数据一起使用

播放获取 pcm

class PlayNeedMixAudioTask extends Thread {

        private BackGroundFrameListener listener; // receives each frame right after it is played
        private long audioPresentationTimeNs; // pts of the frame currently being played

        public PlayNeedMixAudioTask(BackGroundFrameListener l) {
            listener = l;
        }

        /**
         * Streams decoded background-music PCM to the speaker until mIsPlaying
         * is cleared, handing every played frame to the listener so the
         * recorder can mix it with the microphone.
         */
        @Override
        public void run() {
            Log.i("thread", "PlayNeedMixAudioTask: " + Thread.currentThread().getId());
            mIsPlaying = true;
            AudioTrack track = null;
            try {
                int bufferSize = AudioTrack.getMinBufferSize(mFrequence,
                        mPlayChannelConfig, mAudioEncoding);
                track = new AudioTrack(AudioManager.STREAM_MUSIC,
                        mFrequence,
                        mPlayChannelConfig, mAudioEncoding, bufferSize,
                        AudioTrack.MODE_STREAM);
                track.play();

                while (mIsPlaying) {
                    audioPresentationTimeNs = System.nanoTime();
                    byte[] temp = mPCMData.getPCMData();
                    if (temp == null) {
                        // Decoder has no chunk ready yet: yield briefly instead
                        // of busy-spinning this core at 100% CPU.
                        Thread.sleep(5);
                        continue;
                    }
                    track.write(temp, 0, temp.length);
                    if (listener != null) {
                        listener.onFrameArrive(temp);
                    }
                }

                mHasFrameBytes = false;
            } catch (Exception e) {
                Log.e("slack", "error:" + e.getMessage());
            } finally {
                // Release the hardware track even if play()/write() threw.
                if (track != null) {
                    try {
                        track.stop();
                    } catch (IllegalStateException ignored) {
                        // track never reached the playing state
                    }
                    track.release();
                }
            }
        }
    }
混合麦克风数据和背景音乐

class RecordMixTask extends AsyncTask<Void, Integer, Void> {
        // Typed as <Void, Integer, Void>: with a raw AsyncTask the
        // doInBackground(Void...)/onProgressUpdate(Integer...) signatures below
        // would not override the (Object...) base methods and would not compile.

        /**
         * Captures microphone PCM, mixes each buffer with the background
         * music frame and hands the result to the encoder, until mIsRecording
         * is cleared.
         */
        @Override
        protected Void doInBackground(Void... arg0) {
            Log.i("thread", "RecordMixTask: " + Thread.currentThread().getId());
            mIsRecording = true;
            int bufferReadResult = 0;
            long audioPresentationTimeNs; // pts for each captured frame
            AudioRecord record = null;
            try {
                int bufferSize = AudioRecord.getMinBufferSize(mFrequence,
                        mChannelConfig, mAudioEncoding);
                // 4x the minimum so a slow consumer does not overrun the mic buffer
                record = new AudioRecord(
                        MediaRecorder.AudioSource.MIC, mFrequence,
                        mChannelConfig, mAudioEncoding, bufferSize * 4);

                record.startRecording();

                while (mIsRecording) {

                    audioPresentationTimeNs = System.nanoTime();

                    // must match the background-music chunk length, otherwise
                    // averageMix() rejects the pair (see mixBuffer)
                    int samples_per_frame = mPCMData.getBufferSize();
                    byte[] buffer = new byte[samples_per_frame];
                    bufferReadResult = record.read(buffer, 0, buffer.length);
                    if (bufferReadResult == AudioRecord.ERROR_BAD_VALUE || bufferReadResult == AudioRecord.ERROR_INVALID_OPERATION)
                        Log.e("slack", "Read error");
                    if (mAudioEncoder != null) {
                        buffer = mixBuffer(buffer);
                        // hand the (possibly mixed) frame to the encoder
                        mAudioEncoder.offerAudioEncoder(buffer, audioPresentationTimeNs);
                    }

                }
            } catch (Exception e) {
                Log.e("slack", "::" + e.getMessage());
            } finally {
                // Give the microphone back even if startRecording()/read() threw.
                if (record != null) {
                    record.setRecordPositionUpdateListener(null);
                    record.stop();
                    record.release();
                    record = null;
                }
            }
            return null;
        }

        // Runs on the UI thread when publishProgress is called (unused here).
        @Override
        protected void onProgressUpdate(Integer... progress) {
            //
        }

        @Override
        protected void onPostExecute(Void result) {

        }

    }
/**
     * Mixes one microphone buffer with the current background-music frame.
     * Falls back to the raw mic buffer when nothing is playing, or when
     * averageMix() rejects the pair (it returns null if the two buffers have
     * different lengths) — previously that null was passed straight to the
     * encoder.
     */
    private byte[] mixBuffer(byte[] buffer) {
        if (mIsPlaying && mHasFrameBytes) {
            byte[] mixed = averageMix(new byte[][]{buffer, getBackGroundBytes()});
            if (mixed != null) {
                return mixed;
            }
        }
        return buffer;
    }
混合算法网上有很多,没有尝试其他的,平均算法足够模拟实现了。


你可能感兴趣的:(android,studio)