Purpose:
Use the microphone as an input source for application events.
Core idea:
Capture microphone input through AudioSession and AudioQueue.
Open the AudioSession (a sketch follows this list):
1. AudioSessionInitialize
2. AudioSessionSetProperty(kAudioSessionProperty_AudioCategory)
3. AudioSessionSetActive
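A minimal sketch of the three-call sequence, assuming a hypothetical helper openAudioSession and a hypothetical callback myInterruptionListener; the full listing below does the same work in -initAudioSession:
[code]
#include <AudioToolbox/AudioToolbox.h>

static void myInterruptionListener(void *inClientData, UInt32 inInterruptionState)
{
    // React to kAudioSessionBeginInterruption / kAudioSessionEndInterruption here.
}

static OSStatus openAudioSession(void)
{
    // 1. Initialize the session and register the interruption callback.
    OSStatus err = AudioSessionInitialize(NULL, NULL, myInterruptionListener, NULL);
    if (err) return err;
    // 2. Recording requires a record-capable category;
    //    kAudioSessionCategory_RecordAudio would also do if no playback is needed.
    UInt32 category = kAudioSessionCategory_PlayAndRecord;
    err = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory,
                                  sizeof(category), &category);
    if (err) return err;
    // 3. Activate the session so recording can begin.
    return AudioSessionSetActive(true);
}
[/code]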
Set up the audio format (see the sketch after this list):
1. The format is described by the AudioStreamBasicDescription structure
2. Use kAudioFormatLinearPCM as the format
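As a sketch of filling in the structure by hand: 16-bit signed, packed, mono PCM, with 16000.0 as an arbitrary example rate (the full listing below queries the hardware rate instead). The arithmetic matters: bytes per frame = channels × (bits per channel ÷ 8), and linear PCM always carries one frame per packet.
[code]
AudioStreamBasicDescription fmt = {0};
fmt.mSampleRate       = 16000.0;   // example only; the code below asks the hardware
fmt.mFormatID         = kAudioFormatLinearPCM;
fmt.mFormatFlags      = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
fmt.mChannelsPerFrame = 1;         // mono
fmt.mBitsPerChannel   = 16;
// One frame holds one sample per channel: 1 * (16 / 8) = 2 bytes.
fmt.mBytesPerFrame    = fmt.mChannelsPerFrame * (fmt.mBitsPerChannel / 8);
fmt.mFramesPerPacket  = 1;         // linear PCM: one frame per packet
fmt.mBytesPerPacket   = fmt.mBytesPerFrame * fmt.mFramesPerPacket;
[/code]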
Create the AudioQueue (see the buffer note after this list):
1. AudioQueueNewInput
2. AudioQueueStart
3. AudioQueueSetProperty(kAudioQueueProperty_EnableLevelMetering)
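One caveat worth a sketch: the listing below never allocates or enqueues buffers, which is enough when you only read the level meter (as this article does), but the input callback is only invoked for buffers you have enqueued. If MyInputBufferHandler should actually receive audio, something like this (buffer count and size are arbitrary choices) would follow AudioQueueNewInput:
[code]
// Hand the queue buffers to fill; without this the input callback never fires.
for (int i = 0; i < 3; i++) {                                      // 3 buffers: arbitrary
    AudioQueueBufferRef buffer;
    if (AudioQueueAllocateBuffer(mQueue, 8192, &buffer) == noErr)  // 8 KB: arbitrary
        AudioQueueEnqueueBuffer(mQueue, buffer, 0, NULL);
}
// Inside the callback, hand each buffer back after using it:
//     AudioQueueEnqueueBuffer(inAQ, inBuffer, 0, NULL);
[/code]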
Read the audio level data (a trigger sketch follows this list):
1. Level readings are stored in the AudioQueueLevelMeterState structure
2. AudioQueueGetProperty(kAudioQueueProperty_CurrentLevelMeterDB)
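Since the stated purpose is to use the microphone as an event input, here is a sketch of a threshold trigger on the metered values, assuming a running input queue named queue with metering already enabled; the -20.0f threshold is an arbitrary example to tune. kAudioQueueProperty_CurrentLevelMeterDB reports mAveragePower and mPeakPower in decibels, where 0 dB is full scale and silence is strongly negative.
[code]
AudioQueueLevelMeterState level;
UInt32 size = sizeof(level);      // one channel's worth of meter state
OSStatus err = AudioQueueGetProperty(queue, kAudioQueueProperty_CurrentLevelMeterDB,
                                     &level, &size);
if (err == noErr && level.mPeakPower > -20.0f) {
    // Loud enough: fire whatever event the microphone input should trigger.
}
[/code]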
Close the AudioQueue (see the note after this list):
1. AudioQueueStop
2. AudioQueueDispose
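Teardown is two calls, and the second parameter of each deserves a comment: true stops or disposes immediately, while false would let already-enqueued buffers be processed first. The listing below uses the immediate form in -stopMicrophone:
[code]
if (mQueue) {
    AudioQueueStop(mQueue, true);     // true = stop now, don't drain pending buffers
    AudioQueueDispose(mQueue, true);  // true = dispose synchronously
    mQueue = NULL;                    // drop the dangling reference
}
[/code]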
Code:
[code]
#import <UIKit/UIKit.h>
#include <AudioToolbox/AudioToolbox.h>
@interface MicrophoneTestViewController : UIViewController {
IBOutlet UILabel* _averagePower;
IBOutlet UILabel* _peakPower;
AudioQueueRef mQueue;
AudioStreamBasicDescription mFormat;
AudioQueueLevelMeterState *_chan_lvls;
NSArray *_channelNumbers;
NSTimer *_refreshTimer;
}
-(void)setChannelNumbers:(NSArray *)v;
-(void)initAudioSession;
- (IBAction)startstop: (id) sender;
@end
[/code]
[code]
#import "MicrophoneTestViewController.h"
static void MyInputBufferHandler(void * inUserData,
AudioQueueRef inAQ,
AudioQueueBufferRef inBuffer,
const AudioTimeStamp * inStartTime,
UInt32 inNumPackets,
const AudioStreamPacketDescription* inPacketDesc)
{
// To record the audio, handle the recording here (e.g. write inBuffer out with AudioFileWritePackets).
// To analyze the audio data, do the analysis here.
}
static void interruptionListener(void * inClientData,
UInt32 inInterruptionState)
{
// Audio interruption notification (kAudioSessionBeginInterruption / kAudioSessionEndInterruption)
}
@implementation MicrophoneTestViewController
// Implement viewDidLoad to do additional setup after loading the view, typically from a nib.
- (void)viewDidLoad {
[super viewDidLoad];
_averagePower.text = @"0";
_peakPower.text = @"0";
mQueue = NULL;
_channelNumbers = [[NSArray alloc] initWithObjects:[NSNumber numberWithInt:0], nil];
_chan_lvls = (AudioQueueLevelMeterState*)malloc(sizeof(AudioQueueLevelMeterState) * [_channelNumbers count]);
[self initAudioSession];
// Keep a reference so the 30 Hz refresh timer can be invalidated later.
_refreshTimer = [NSTimer
scheduledTimerWithTimeInterval:1.f/30.f
target:self
selector:@selector(_refresh)
userInfo:nil
repeats:YES
];
}
- (void)didReceiveMemoryWarning {
// Releases the view if it doesn't have a superview.
[super didReceiveMemoryWarning];
// Release any cached data, images, etc that aren't in use.
}
- (void)viewDidUnload {
[super viewDidUnload];
// Stop the refresh timer and release retained data;
// nil/NULL everything so a later dealloc cannot double-free.
[_refreshTimer invalidate];
_refreshTimer = nil;
[_channelNumbers release];
_channelNumbers = nil;
free(_chan_lvls);
_chan_lvls = NULL;
}
- (void)dealloc {
[self stopMicrophone];
[_channelNumbers release];
free(_chan_lvls);
[super dealloc];
}
-(void)initAudioSession
{
OSStatus error = AudioSessionInitialize(NULL, NULL, interruptionListener, self);
if (error) printf("ERROR INITIALIZING AUDIO SESSION! %d\n", (int)error);
else
{
UInt32 category = kAudioSessionCategory_PlayAndRecord;
error = AudioSessionSetProperty(kAudioSessionProperty_AudioCategory, sizeof(category), &category);
if (error) printf("couldn't set audio category!");
error = AudioSessionSetActive(true);
if (error) printf("AudioSessionSetActive (true) failed");
}
}
-(void)setupAudioFormat:(UInt32)inFormatID
{
memset(&mFormat, 0, sizeof(mFormat));
UInt32 size = sizeof(mFormat.mSampleRate);
OSStatus result = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareSampleRate,
&size,
&mFormat.mSampleRate);
size = sizeof(mFormat.mChannelsPerFrame);
result = AudioSessionGetProperty(kAudioSessionProperty_CurrentHardwareInputNumberChannels,
&size,
&mFormat.mChannelsPerFrame);
mFormat.mFormatID = inFormatID;
if (inFormatID == kAudioFormatLinearPCM)
{
// if we want pcm, default to signed 16-bit little-endian
mFormat.mFormatFlags = kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
mFormat.mBitsPerChannel = 16;
mFormat.mBytesPerPacket = mFormat.mBytesPerFrame = (mFormat.mBitsPerChannel / 8) * mFormat.mChannelsPerFrame;
mFormat.mFramesPerPacket = 1;
}
}
-(void)startMicrophone
{
[self setupAudioFormat:kAudioFormatLinearPCM];
OSStatus result = AudioQueueNewInput(&mFormat, MyInputBufferHandler, NULL, NULL, NULL, 0, &mQueue);
if (result == noErr) {
result = AudioQueueStart(mQueue, NULL);
if (result == noErr) {
UInt32 val = 1;
AudioQueueSetProperty(mQueue, kAudioQueueProperty_EnableLevelMetering, &val, sizeof(UInt32));
if (mFormat.mChannelsPerFrame != [_channelNumbers count])
{
NSArray *chan_array;
if (mFormat.mChannelsPerFrame < 2)
chan_array = [[NSArray alloc] initWithObjects:[NSNumber numberWithInt:0], nil];
else
chan_array = [[NSArray alloc] initWithObjects:[NSNumber numberWithInt:0], [NSNumber numberWithInt:1], nil];
[self setChannelNumbers:chan_array];
[chan_array release];
_chan_lvls = (AudioQueueLevelMeterState*)realloc(_chan_lvls, mFormat.mChannelsPerFrame * sizeof(AudioQueueLevelMeterState));
}
return;
}
}
// Failure: dispose of a queue that was created but could not be started.
if (mQueue) AudioQueueDispose(mQueue, true);
mQueue = NULL;
NSLog(@"startMicrophone: failed.");
return;
}
-(void)stopMicrophone
{
if (mQueue) {
AudioQueueStop(mQueue, true);
AudioQueueDispose(mQueue, true);
mQueue = NULL;
}
}
-(void)_refresh
{
if (mQueue) {
UInt32 data_sz = sizeof(AudioQueueLevelMeterState) * [_channelNumbers count];
OSStatus status = AudioQueueGetProperty(mQueue, kAudioQueueProperty_CurrentLevelMeterDB, _chan_lvls, &data_sz);
if (status == noErr)
{
// Multi-channel display is not handled here; the labels simply end up showing the last channel's values.
// These are the values to use for trigger logic; read the _chan_lvls array whenever they are needed.
for (int i=0; i<[_channelNumbers count]; i++)
{
NSInteger channelIdx = [(NSNumber *)[_channelNumbers objectAtIndex:i] intValue];
if (channelIdx < [_channelNumbers count] && channelIdx <= 127)
{
_averagePower.text = [NSString stringWithFormat:@"%f", _chan_lvls[channelIdx].mAveragePower];
_peakPower.text = [NSString stringWithFormat:@"%f", _chan_lvls[channelIdx].mPeakPower];
}
}
}
}
}
-(void)setChannelNumbers:(NSArray *)v
{
[v retain];
[_channelNumbers release];
_channelNumbers = v;
}
- (IBAction)startstop: (id) sender
{
if (mQueue) {
[self stopMicrophone];
} else {
[self startMicrophone];
}
}
@end
[/code]