iOS AudioQueue 音量大小

1、在 AudioQueue 的输入回调中获取音频数据;

2、将 data 数据传入计算函数,得到音量大小(分贝)。


//  .h文件

//  AudioQueueTool.h

//  语音

//  楚高尚

//  Created by sylincom on 2019/11/29.

//  Copyright © 2019 sylincom. All rights reserved.

//

#import <Foundation/Foundation.h>

// Number of reusable AudioQueue capture buffers (Apple's recommended minimum is 3).
#define kNumberAudioQueueBuffers 3

NS_ASSUME_NONNULL_BEGIN

@class AudioQueueTool;

/// Delegate callbacks for recorded PCM data, recording errors, and volume level.
@protocol AudioQueueToolDelegate <NSObject>

@optional

/// Delivers raw captured PCM bytes (16-bit signed mono at 16 kHz, per the
/// recorder's configuration). `buffer` is freed after this call returns —
/// copy the bytes if you need to keep them.
/// @param recorder The recorder that produced the data.
/// @param buffer   Pointer to the PCM bytes.
/// @param length   Number of bytes in `buffer`.
- (void)AQRecorder:(AudioQueueTool *)recorder didRecivedBuffer:(Byte *)buffer length:(NSInteger)length;

/// Called when recording finishes or fails to start.
/// @param error nil on a normal stop; a descriptive error otherwise.
- (void)AQRecorder:(AudioQueueTool *)recorder finishWithError:(nullable NSError *)error;

/// Reports the estimated volume in decibels (only fired when >= 45 dB).
- (void)AQCallBackVoiceGrade:(int)voiceGrade;

@end

/// Singleton microphone recorder built on AudioQueue Services.
/// Streams raw PCM buffers and a rough dB volume level to its delegate.
@interface AudioQueueTool : NSObject

/// Receiver of audio/error/volume callbacks. Weak to avoid a retain cycle.
@property (nonatomic, weak, nullable) id<AudioQueueToolDelegate> delegate;

/// Shared singleton instance.
+ (instancetype)shared;

/// Configures the AVAudioSession and starts capturing audio.
- (void)start;

/// Stops capturing and notifies the delegate (with a nil error).
- (void)stop;

@end

NS_ASSUME_NONNULL_END

//.m文件

//

//  AudioQueueTool.m

//  语音

//  楚高尚

//  Created by sylincom on 2019/11/29.

//  Copyright © 2019 sylincom. All rights reserved.

//

#import "AudioQueueTool.h"

#import <AVFoundation/AVFoundation.h>
#import <AudioToolbox/AudioToolbox.h>

/// Plain-C state shared between the Objective-C recorder object and the
/// AudioQueue C callback (which cannot touch Objective-C properties cheaply).
typedef struct AQCallbackStruct {
    AudioStreamBasicDescription mDataFormat;                // PCM stream format description
    AudioQueueRef queue;                                    // the input (recording) queue
    AudioQueueBufferRef mBuffers[kNumberAudioQueueBuffers]; // reusable capture buffers
    AudioFileID outputFile;                                 // NOTE(review): never used in this file — confirm it is needed
    unsigned long frameSize;                                // NOTE(review): never used in this file
    long long recPtr;                                       // bytes recorded so far (reset on start)
    int run;                                                // 1 while recording, 0 once stopped
} AQCallbackStruct;

@interface AudioQueueTool ()

// C-struct state handed to the AudioQueue callback; accessed directly as the
// ivar `_aqc` from C code and from init/dealloc.
@property (nonatomic, assign) AQCallbackStruct aqc;

// NOTE(review): declared but never scheduled or invalidated anywhere in this
// file — confirm whether it is actually needed.
@property (nonatomic, strong) NSTimer *heartTime;

// Copies captured PCM out of `buffer` and forwards it to the delegate.
- (void)processAudioBuffer:(AudioQueueBufferRef)buffer withQueue:(AudioQueueRef)queue;

@end

/// AudioQueue input callback — runs on an internal AudioQueue thread, not the
/// main thread. Forwards the captured PCM to the Objective-C object, then
/// re-enqueues the buffer so capture can continue.
static void AQInputCallback(void *inUserData, AudioQueueRef inAudioQueue, AudioQueueBufferRef inBuffer,
                            const AudioTimeStamp *inStartTime, UInt32 nNumPackets,
                            const AudioStreamPacketDescription *inPacketDesc) {
    // `inUserData` is the AudioQueueTool passed to AudioQueueNewInput; plain
    // __bridge cast — no ownership transfer.
    AudioQueueTool *engine = (__bridge AudioQueueTool *)inUserData;

    if (inBuffer->mAudioDataByteSize > 0) {
        [engine processAudioBuffer:inBuffer withQueue:inAudioQueue];
    }

    // Only hand the buffer back while recording; after stop the queue is torn down.
    if (engine.aqc.run) {
        AudioQueueEnqueueBuffer(engine.aqc.queue, inBuffer, 0, NULL);
    }
}

@implementation AudioQueueTool

#pragma mark - Lifecycle

- (void)dealloc {
    // Non-memory cleanup only: stop and dispose of the C audio queue.
    // (As a dispatch_once singleton this normally never runs, but keep it safe.)
    if (_aqc.queue) {
        AudioQueueStop(_aqc.queue, true);
        _aqc.run = 0;
        AudioQueueDispose(_aqc.queue, true);
        _aqc.queue = NULL;
    }
}

+ (instancetype)shared {
    static AudioQueueTool *recorder = nil;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        recorder = [[AudioQueueTool alloc] init];
    });
    return recorder;
}

- (instancetype)init {
    self = [super init];
    if (self) {
        // Nothing to set up; `alloc` already zeroed the ivars (including _aqc).
    }
    return self;
}

#pragma mark - 开始录音 (start recording)

/// Configures the audio session, builds the 16 kHz mono 16-bit PCM input
/// queue, primes its buffers, and starts capturing. Errors are reported to
/// the delegate via AQRecorder:finishWithError:.
- (void)start {
    NSError *error = nil;

    // PlayAndRecord so the app can both capture and play back; use
    // AVAudioSessionCategoryRecord instead if playback is never needed.
    // (setCategory: returns BOOL — check the return value, not the error.)
    BOOL ok = [[AVAudioSession sharedInstance] setCategory:AVAudioSessionCategoryPlayAndRecord
                                                     error:&error];
    if (!ok) {
        [self notifyFinishWithError:error];
        return;
    }

    // Activate the audio session.
    ok = [[AVAudioSession sharedInstance] setActive:YES error:&error];
    if (!ok) {
        [self notifyFinishWithError:error];
        return;
    }

    // Stream format: 16 kHz, mono, 16-bit signed-integer linear PCM.
    _aqc.mDataFormat.mSampleRate = 16000;                   // sample rate (Hz)
    _aqc.mDataFormat.mFormatID = kAudioFormatLinearPCM;     // uncompressed PCM
    _aqc.mDataFormat.mFramesPerPacket = 1;                  // frames per packet
    _aqc.mDataFormat.mChannelsPerFrame = 1;                 // 1 = mono, 2 = stereo
    _aqc.mDataFormat.mBitsPerChannel = 16;                  // bits per sample per channel
    _aqc.mDataFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
    // Derived sizes: bytes/frame = (bits / 8) * channels; with one frame per
    // packet, bytes/packet equals bytes/frame.
    _aqc.mDataFormat.mBytesPerFrame = (_aqc.mDataFormat.mBitsPerChannel / 8) * _aqc.mDataFormat.mChannelsPerFrame;
    _aqc.mDataFormat.mBytesPerPacket = _aqc.mDataFormat.mBytesPerFrame;

    // Create the input queue; `self` rides along to the C callback as user data.
    OSStatus status = AudioQueueNewInput(&_aqc.mDataFormat, AQInputCallback,
                                         (__bridge void *)(self), NULL, NULL, 0, &_aqc.queue);
    if (status != noErr) {
        _aqc.queue = NULL; // fall through to the error path below
    }

    // 640 bytes = 20 ms of 16 kHz mono 16-bit audio per buffer.
    const UInt32 bufferByteSize = 640;

    // Allocate and prime the capture buffers.
    for (int i = 0; _aqc.queue && i < kNumberAudioQueueBuffers; i++) {
        OSStatus allocStatus = AudioQueueAllocateBuffer(_aqc.queue, bufferByteSize, &_aqc.mBuffers[i]);
        if (allocStatus != noErr) {
            AudioQueueDispose(_aqc.queue, true);
            _aqc.queue = NULL;
            break;
        }
        AudioQueueEnqueueBuffer(_aqc.queue, _aqc.mBuffers[i], 0, NULL);
    }

    _aqc.recPtr = 0;
    _aqc.run = 1;

    if (_aqc.queue) {
        AudioQueueStart(_aqc.queue, NULL);
    } else {
        _aqc.run = 0;
        [self notifyFinishWithError:[NSError errorWithDomain:@" Recorder"
                                                        code:-1
                                                    userInfo:@{
                                                        NSLocalizedDescriptionKey : @"录音初始化错误"
                                                    }]];
    }
}

#pragma mark - 停止录音 (stop recording)

- (void)stop {
    _aqc.run = 0;
    if (_aqc.queue) {
        AudioQueueStop(_aqc.queue, true);
    }
    // The original passed an *uninitialized* NSError pointer to the delegate
    // (undefined behavior). A normal stop has no error — report nil explicitly.
    [self notifyFinishWithError:nil];
}

#pragma mark - Private

/// Forwards a finish/error notification to the delegate, if it listens.
/// @param error nil on a normal stop, otherwise the failure description.
- (void)notifyFinishWithError:(NSError *)error {
    if ([self.delegate respondsToSelector:@selector(AQRecorder:finishWithError:)]) {
        [self.delegate AQRecorder:self finishWithError:error];
    }
}

#pragma mark - Private 实时获取音频数据 (per-buffer capture)

/// Copies captured PCM out of the queue buffer, hands it to the delegate,
/// and feeds it to the volume estimator. Runs on the AudioQueue callback thread.
- (void)processAudioBuffer:(AudioQueueBufferRef)inBuffer withQueue:(AudioQueueRef)queue {
    UInt32 byteSize = inBuffer->mAudioDataByteSize;
    Byte *data = (Byte *)malloc(byteSize);
    if (data == NULL) {
        // Check the allocation *before* writing into it (the original
        // null-checked only after memcpy had already run).
        return;
    }
    memcpy(data, inBuffer->mAudioData, byteSize);

    if ([self.delegate respondsToSelector:@selector(AQRecorder:didRecivedBuffer:length:)]) {
        [self.delegate AQRecorder:self didRecivedBuffer:data length:byteSize];
    }

    // Wrap in NSData (makes its own copy) and estimate the volume.
    NSData *dataBuffer = [[NSData alloc] initWithBytes:data length:byteSize];
    [self isQuite:dataBuffer];
    free(data);
}

#pragma mark - 调用方法获取音量 (volume estimation)

/// Estimates the volume of 16-bit signed mono PCM as 10*log10(mean square
/// sample value) and reports it to the delegate when it reaches 45 dB.
/// @param pcmData Raw 16-bit PCM bytes; may be nil.
/// @return NO when there are no usable samples, YES otherwise.
- (BOOL)isQuite:(NSData *)pcmData {
    NSUInteger sampleCount = pcmData.length / sizeof(short);
    if (pcmData == nil || sampleCount == 0) {
        return NO;
    }

    // Read the samples in place — no VLA copy needed (the original allocated
    // a variable-length array on the stack and memcpy'd into it).
    const short *samples = (const short *)pcmData.bytes;
    long long sumOfSquares = 0;
    for (NSUInteger i = 0; i < sampleCount; i++) {
        sumOfSquares += (long long)samples[i] * (long long)samples[i];
    }

    // Mean power per *sample*. The original divided by the byte count
    // (2x the sample count), understating the level by ~3 dB.
    double mean = sumOfSquares / (double)sampleCount;
    if (mean <= 0) {
        return YES; // pure silence: log10(0) would be -inf
    }
    double volume = 10 * log10(mean); // decibels

    // Useful range in practice: roughly 45–70 dB.
    if (volume >= 45) {
        if ([self.delegate respondsToSelector:@selector(AQCallBackVoiceGrade:)]) {
            [self.delegate AQCallBackVoiceGrade:(int)volume];
        }
    }
    return YES;
}

@end

你可能感兴趣的:(iOS AudioQueue 音量大小)