Android/Java: Converting Between PCM and G.711 Encoding

Background

I recently worked on a UDP LAN communication project in which an Android phone talks to an indoor hardware peripheral. The peripheral transmits audio as G.711 A-law, and Android's AudioRecord cannot capture G.711 A-law directly, so the plan was to capture PCM and convert it to G.711 for transmission.

After several rounds of searching, everything I found used Android JNI to call C++ implementations. I tried to get a few of them working without success, so I wrote a pure-Java version modeled on them. It works in my tests, so I am sharing it here.

Format Parameters

PCM:
Sample rate: 8000 Hz
Sample size: 16-bit
Channels: mono (the capture class in Appendix 1 records with CHANNEL_IN_MONO)

G.711 (A-law):
Sample rate: 8000 Hz
Sample size: 8-bit (each 16-bit PCM sample compresses to a single byte)
Channels: mono
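
At these settings raw PCM costs 8000 samples/s × 16 bits = 128 kbit/s, while A-law stores each sample in a single byte, so the encoded stream is 8000 × 8 = 64 kbit/s, half the bandwidth.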

Conversion utility class: G711Code

    /**
     * Core G.711 A-law conversion routines.
     * Created by onlygx
     */

    public class G711Code {

        private final static int SIGN_BIT = 0x80;   /* Sign bit for an A-law byte. */
        private final static int QUANT_MASK = 0xf;  /* Quantization field mask. */
        private final static int SEG_SHIFT = 4;     /* Left shift for segment number. */
        private final static int SEG_MASK = 0x70;   /* Segment field mask. */

        /* End points of the eight A-law segments. */
        static short[] seg_end = {0xFF, 0x1FF, 0x3FF, 0x7FF, 0xFFF, 0x1FFF, 0x3FFF, 0x7FFF};

        /* Return the index of the first segment whose end point is >= val. */
        static short search(short val, short[] table, short size){

            for (short i = 0 ; i < size; i++) {
                if(val <= table[i]){
                    return i;
                }
            }
            return size;
        }

        /* Convert a 16-bit linear PCM sample to a G.711 A-law byte. */
        static byte linear2alaw(short pcm_val){
            short mask;
            short seg;
            char aval;
            if(pcm_val >= 0){
                mask = 0xD5;    /* sign (7th) bit = 1 */
            }else{
                mask = 0x55;    /* sign bit = 0 */
                pcm_val = (short) (-pcm_val - 1);
                if(pcm_val < 0){
                    pcm_val = 32767;
                }
            }

            /* Convert the scaled magnitude to segment number. */
            seg = search(pcm_val, seg_end, (short) 8);

            /* Combine the sign, segment, and quantization bits. */

            if (seg >= 8)       /* out of range, return maximum value. */
                return (byte) (0x7F ^ mask);
            else {
                aval = (char) (seg << SEG_SHIFT);
                if (seg < 2)
                    aval |= (pcm_val >> 4) & QUANT_MASK;
                else
                    aval |= (pcm_val >> (seg + 3)) & QUANT_MASK;
                return (byte) (aval ^ mask);
            }
        }


        /* Convert a G.711 A-law byte back to a 16-bit linear PCM sample. */
        static short alaw2linear(byte a_val){
            short       t;
            short       seg;

            a_val ^= 0x55;

            t = (short) ((a_val & QUANT_MASK) << 4);
            seg = (short) ((a_val & SEG_MASK) >> SEG_SHIFT);
            switch (seg) {
                case 0:
                    t += 8;
                    break;
                case 1:
                    t += 0x108;
                    break;
                default:
                    t += 0x108;
                    t <<= seg - 1;
            }
            return (a_val & SIGN_BIT) != 0 ? t : (short) -t;
        }

        /**
         * Encode 16-bit PCM samples to G.711 A-law.
         * @param pcm  input PCM samples
         * @param code output buffer for A-law bytes (at least size bytes)
         * @param size number of samples to encode
         */
        public static void G711aEncoder(short[] pcm, byte[] code, int size){
            for (int i = 0; i < size; i++) {
                code[i] = linear2alaw(pcm[i]);
            }
        }

        /**
         * Decode G.711 A-law bytes back to 16-bit PCM samples.
         * @param pcm  output buffer for PCM samples (at least size shorts)
         * @param code input A-law bytes
         * @param size number of bytes to decode
         */
        public static void G711aDecoder(short[] pcm, byte[] code, int size){
            for (int i = 0; i < size; i++) {
                pcm[i] = alaw2linear(code[i]);
            }
        }
    }
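
For completeness, a minimal round-trip sketch (the 320-sample frame size matches the capture class in Appendix 1; the all-zero input is just illustrative):

    // Round-trip example: PCM -> G.711 A-law -> PCM.
    public class G711RoundTripDemo {
        public static void main(String[] args) {
            short[] pcm = new short[320];     // one 40 ms frame at 8000 Hz (silence here)
            byte[] alaw = new byte[320];      // one A-law byte per sample
            G711Code.G711aEncoder(pcm, alaw, pcm.length);
            short[] decoded = new short[320];
            G711Code.G711aDecoder(decoded, alaw, alaw.length);
        }
    }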

Appendix 1: Audio Capture Utility Class

The utility class below is not my own. I found it long ago through Baidu and no longer remember where, so my thanks go to that author. (Several articles seem to carry the same code, so I am not sure who wrote the original.)

A quick note for beginners: initialize the class first, then set a callback to handle the captured data. Implement the OnAudioFrameCapturedListener interface and override onAudioFrameCaptured; this callback is where every captured frame arrives. Stop the capture when you are done.
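
Wired together, it looks roughly like this (a sketch with error handling omitted; the UDP send is a placeholder for your own transport code):

    // Capture PCM frames, encode each one to A-law, and hand the bytes to your sender.
    AudioCapturer capturer = new AudioCapturer();
    capturer.setOnAudioFrameCapturedListener(new AudioCapturer.OnAudioFrameCapturedListener() {
        @Override
        public void onAudioFrameCaptured(short[] audioData) {
            byte[] alaw = new byte[audioData.length];
            G711Code.G711aEncoder(audioData, alaw, audioData.length);
            // send 'alaw' over UDP here
        }
    });
    capturer.startCapture();
    // ... when finished:
    capturer.stopCapture();

The full source: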

    import android.media.AudioFormat;
    import android.media.AudioRecord;
    import android.media.MediaRecorder;
    import android.os.SystemClock;
    import android.util.Log;

    public class AudioCapturer {
        private static final String TAG = "AudioCapturer";

        private static final int DEFAULT_SOURCE = MediaRecorder.AudioSource.MIC;
        private static final int DEFAULT_SAMPLE_RATE = 8000;
        private static final int DEFAULT_CHANNEL_CONFIG = AudioFormat.CHANNEL_IN_MONO;
        private static final int DEFAULT_AUDIO_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
        private AudioRecord mAudioRecord;
        private int mMinBufferSize = 0;

        private Thread mCaptureThread;
        private boolean mIsCaptureStarted = false;
        private volatile boolean mIsLoopExit = false;
        private OnAudioFrameCapturedListener mAudioFrameCapturedListener;


        public interface OnAudioFrameCapturedListener {
            public void onAudioFrameCaptured(short[] audioData);
        }
        public boolean isCaptureStarted() {
            return mIsCaptureStarted;
        }
        public void setOnAudioFrameCapturedListener(OnAudioFrameCapturedListener listener) {
            mAudioFrameCapturedListener = listener;
        }
        public boolean startCapture() {
            return startCapture(DEFAULT_SOURCE, DEFAULT_SAMPLE_RATE, DEFAULT_CHANNEL_CONFIG,
                    DEFAULT_AUDIO_FORMAT);
        }
        public boolean startCapture(int audioSource, int sampleRateInHz, int channelConfig, int audioFormat) {
            if (mIsCaptureStarted) {
                Log.e(TAG, "Capture already started !");
                return false;
            }

            mMinBufferSize = AudioRecord.getMinBufferSize(sampleRateInHz,channelConfig,audioFormat);
            if (mMinBufferSize == AudioRecord.ERROR_BAD_VALUE) {
                Log.e(TAG, "Invalid parameter !");
                return false;
            }
            Log.d(TAG , "getMinBufferSize = "+mMinBufferSize+" bytes !");

            mAudioRecord = new AudioRecord(audioSource,sampleRateInHz
                ,channelConfig,audioFormat,mMinBufferSize);

            if (mAudioRecord.getState() == AudioRecord.STATE_UNINITIALIZED) {
                Log.e(TAG, "AudioRecord initialize fail !");
                return false;
            }
            mAudioRecord.startRecording();
            mIsLoopExit = false;
            mCaptureThread = new Thread(new AudioCaptureRunnable());
            mCaptureThread.start();
            mIsCaptureStarted = true;
            Log.d(TAG, "Start audio capture success !");
            return true;
        }
        public void stopCapture() {
            if (!mIsCaptureStarted) {
                return;
            }
            mIsLoopExit = true;
            try {
                mCaptureThread.interrupt();
                mCaptureThread.join(1000);
            }
            catch (InterruptedException e) {
                e.printStackTrace();
            }
            if (mAudioRecord.getRecordingState() == AudioRecord.RECORDSTATE_RECORDING) {
                mAudioRecord.stop();
            }
            mAudioRecord.release();

            mIsCaptureStarted = false;
            mAudioFrameCapturedListener = null;
            Log.d(TAG, "Stop audio capture success !");
        }
        private class AudioCaptureRunnable implements Runnable {

            @Override
            public void run() {
                while (!mIsLoopExit) {
                    short[] buffer = new short[320];
                    int ret = mAudioRecord.read(buffer, 0, 320);
                    if (ret == AudioRecord.ERROR_INVALID_OPERATION) {
                        Log.e(TAG , "Error ERROR_INVALID_OPERATION");
                    } 
                    else if (ret == AudioRecord.ERROR_BAD_VALUE) {
                        Log.e(TAG , "Error ERROR_BAD_VALUE");
                    }
                    else {
                       if (mAudioFrameCapturedListener != null) {
                            mAudioFrameCapturedListener.onAudioFrameCaptured(buffer);
                        }

                        Log.d(TAG , "OK, Captured "+ret+" bytes !");
                    }

                    SystemClock.sleep(10);
                }
            }
        }
    }
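
Note that the capture thread reads 320 shorts per call; at 8000 Hz mono that is 40 ms of audio per frame, which encodes to a convenient 320-byte A-law payload per UDP packet.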

Appendix 2: Audio Playback Utility Class

    import android.media.AudioFormat;
    import android.media.AudioManager;
    import android.media.AudioTrack;
    import android.util.Log;

    public class AudioReader {
        private int mFrequency; // sample rate
        private int mChannel;   // channel configuration
        private int mSampBit;   // sample format (precision)
        private AudioTrack mAudioTrack;

        public AudioReader(){
            mFrequency = 8000;
            mChannel = AudioFormat.CHANNEL_OUT_MONO;
            mSampBit = AudioFormat.ENCODING_PCM_16BIT;
        }

        public void init(){
            if (mAudioTrack != null){
                release();
            }

            // Minimum buffer size needed to construct the AudioTrack
            int minBufSize = AudioTrack.getMinBufferSize(mFrequency,mChannel, mSampBit);

            //STREAM_ALARM: alarm sounds
            //STREAM_MUSIC: music playback
            //STREAM_RING: ringtones
            //STREAM_SYSTEM: system sounds
            //STREAM_VOICE_CALL: in-call audio
            mAudioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
                    mFrequency,mChannel,mSampBit,minBufSize, AudioTrack.MODE_STREAM);

            //AudioTrack has two modes: MODE_STATIC and MODE_STREAM.
            //In STREAM mode the application pushes data to the AudioTrack with repeated
            //write() calls, much like sending data over a socket: the app obtains PCM
            //data from somewhere (for example a decoder) and writes it to the AudioTrack.
            //The drawback is the constant crossing between the Java layer and the native
            //layer, which costs some efficiency.
            //In STATIC mode all audio data is placed in a fixed buffer up front and handed
            //to the AudioTrack once; no further writes are needed, and the AudioTrack
            //plays the buffer by itself.
            //STATIC suits short, latency-sensitive sounds such as ringtones.
            mAudioTrack.play();
        }

        public void release(){
            if (mAudioTrack != null){
                mAudioTrack.stop();
                mAudioTrack.release();
                mAudioTrack = null;  // avoid reusing a released track
            }
        }

        public void playAudioTrack(byte []data, int offset, int length){
            if (data == null || data.length == 0){return ;}
            try {
                mAudioTrack.write(data, offset, length);
            } catch (Exception e) {
                Log.i("MyAudioTrack", "catch exception...");
            }
        }

        public void playAudioTrack(short []data, int offset, int length){
            if (data == null || data.length == 0){return ;}
            try {
                mAudioTrack.write(data, offset, length);
            } catch (Exception e) {
                Log.i("MyAudioTrack", "catch exception...");
            }
        }

        public int getPrimePlaySize(){
            int minBufSize = AudioTrack.getMinBufferSize(mFrequency,
                    mChannel, mSampBit);
            return minBufSize * 2;
        }
    }
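
On the receiving side the flow mirrors the capture path. A sketch, where receiveUdpPacket() is a hypothetical stand-in for your own receive code that returns one A-law frame per call:

    // Decode incoming A-law bytes and play them.
    AudioReader reader = new AudioReader();
    reader.init();

    byte[] alaw = receiveUdpPacket();              // hypothetical receive helper
    short[] pcm = new short[alaw.length];
    G711Code.G711aDecoder(pcm, alaw, alaw.length); // A-law -> PCM
    reader.playAudioTrack(pcm, 0, pcm.length);

    // When the stream ends:
    reader.release();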
