Today let's look at the constructor of the AudioTrack class.
public AudioTrack(int streamType, int sampleRateInHz, int channelConfig, int audioFormat,
int bufferSizeInBytes, int mode, int sessionId)
throws IllegalArgumentException {
mState = STATE_UNINITIALIZED;
// remember which looper is associated with the AudioTrack instanciation
if ((mInitializationLooper = Looper.myLooper()) == null) {
mInitializationLooper = Looper.getMainLooper();
}
audioParamCheck(streamType, sampleRateInHz, channelConfig, audioFormat, mode);
audioBuffSizeCheck(bufferSizeInBytes);
if (sessionId < 0) {
throw (new IllegalArgumentException("Invalid audio session ID: "+sessionId));
}
int[] session = new int[1];
session[0] = sessionId;
// native initialization
int initResult = native_setup(new WeakReference(this),
mStreamType, mSampleRate, mChannels, mAudioFormat,
mNativeBufferSizeInBytes, mDataLoadMode, session);
if (initResult != SUCCESS) {
loge("Error code "+initResult+" when initializing AudioTrack.");
return; // with mState == STATE_UNINITIALIZED
}
mSessionId = session[0];
if (mDataLoadMode == MODE_STATIC) {
mState = STATE_NO_STATIC_DATA;
} else {
mState = STATE_INITIALIZED;
}
}
*****************************************Source*************************************************
***********************************************************************************************
Source path:
frameworks\base\media\java\android\media\AudioTrack.java
###########################################Explanation##############################################################
As usual, let's start with the built-in comment:
/**
* Class constructor with audio session. Use this constructor when the AudioTrack must be
* attached to a particular audio session. The primary use of the audio session ID is to
* associate audio effects to a particular instance of AudioTrack: if an audio session ID
* is provided when creating an AudioEffect, this effect will be applied only to audio tracks
* and media players in the same session and not to the output mix.
* When an AudioTrack is created without specifying a session, it will create its own session
* which can be retreived by calling the {@link #getAudioSessionId()} method.
* If a session ID is provided, this AudioTrack will share effects attached to this session
* with all other media players or audio tracks in the same session.
* @param streamType the type of the audio stream. See
* {@link AudioManager#STREAM_VOICE_CALL}, {@link AudioManager#STREAM_SYSTEM},
* {@link AudioManager#STREAM_RING}, {@link AudioManager#STREAM_MUSIC} and
* {@link AudioManager#STREAM_ALARM}
* @param sampleRateInHz the sample rate expressed in Hertz. Examples of rates are (but
* not limited to) 44100, 22050 and 11025.
* @param channelConfig describes the configuration of the audio channels.
* See {@link AudioFormat#CHANNEL_OUT_MONO} and
* {@link AudioFormat#CHANNEL_OUT_STEREO}
* @param audioFormat the format in which the audio data is represented.
* See {@link AudioFormat#ENCODING_PCM_16BIT} and
* {@link AudioFormat#ENCODING_PCM_8BIT}
* @param bufferSizeInBytes the total size (in bytes) of the buffer where audio data is read
* from for playback. If using the AudioTrack in streaming mode, you can write data into
* this buffer in smaller chunks than this size. If using the AudioTrack in static mode,
* this is the maximum size of the sound that will be played for this instance.
* See {@link #getMinBufferSize(int, int, int)} to determine the minimum required buffer size
* for the successful creation of an AudioTrack instance in streaming mode. Using values
* smaller than getMinBufferSize() will result in an initialization failure.
* @param mode streaming or static buffer. See {@link #MODE_STATIC} and{@link #MODE_STREAM}
* @param sessionId Id of audio session the AudioTrack must be attached to
* @throws java.lang.IllegalArgumentException
*/
If no session is specified, the AudioTrack creates a session of its own, which others can obtain by calling getAudioSessionId.
Anyone who obtains that session and uses it to create a media player or an audio track will then share AudioEffects with this track.
That seems to amount to the same thing as specifying the session up front, as described above.
Then come the parameter descriptions, which earlier articles have already covered.
The min buffer size is stressed once more: bufferSizeInBytes is the total size (in bytes) of the buffer from which audio data is read for playback.
In stream mode, data can be written into this buffer in chunks equal to or smaller than this size.
In static mode, the audio data written may be at most this size.
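To make the parameters concrete, here is a rough sketch (my own illustration, not code from this file) of how an application typically creates a streaming AudioTrack and attaches an effect to its session; the concrete values and the Equalizer effect are just example choices:

import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;
import android.media.audiofx.Equalizer;

class AudioTrackExample {
    // Hypothetical helper, for illustration only.
    static AudioTrack createMusicTrack() {
        int minSize = AudioTrack.getMinBufferSize(44100,
                AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
        // bufferSizeInBytes must be >= getMinBufferSize(), otherwise initialization fails.
        AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, 44100,
                AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT,
                minSize, AudioTrack.MODE_STREAM);
        if (track.getState() == AudioTrack.STATE_INITIALIZED) {
            // The track allocated its own session; an effect created with this ID applies
            // only to tracks/players in the same session, not to the output mix.
            Equalizer eq = new Equalizer(0 /* priority */, track.getAudioSessionId());
            eq.setEnabled(true);
        }
        return track;
    }
}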
1. First, set the state:
mState = STATE_UNINITIALIZED;
States were discussed in an earlier article.
2. Remember the looper associated with the instantiation:
// remember which looper is associated with the AudioTrack instanciation
if ((mInitializationLooper = Looper.myLooper()) == null) {
mInitializationLooper = Looper.getMainLooper();
}
The implementation of myLooper, with its comment:
/**
* Return the Looper object associated with the current thread. Returns
* null if the calling thread is not associated with a Looper.
*/
public static final Looper myLooper() {
return (Looper)sThreadLocal.get();
}
Looking a bit further (still in class Looper):
// sThreadLocal.get() will return null unless you've called prepare().
private static final ThreadLocal sThreadLocal = new ThreadLocal();
As the comment says, sThreadLocal.get() returns null unless prepare() has been called.
Now the prepare function:
/** Initialize the current thread as a looper.
* This gives you a chance to create handlers that then reference
* this looper, before actually starting the loop. Be sure to call
* {@link #loop()} after calling this method, and end it by calling
* {@link #quit()}.
*/
public static final void prepare() {
if (sThreadLocal.get() != null) {
throw new RuntimeException("Only one Looper may be created per thread");
}
sThreadLocal.set(new Looper());
}
The Looper constructor:
private Looper() {
mQueue = new MessageQueue();
mRun = true;
mThread = Thread.currentThread();
}
And the loop function:
/**
* Run the message queue in this thread. Be sure to call
* {@link #quit()} to end the loop.
*/
public static final void loop() {
Looper me = myLooper();
MessageQueue queue = me.mQueue;
while (true) {
Message msg = queue.next(); // might block
//if (!me.mRun) {
// break;
//}
if (msg != null) {
if (msg.target == null) {
// No target is a magic identifier for the quit message.
return;
}
if (me.mLogging!= null) me.mLogging.println(
">>>>> Dispatching to " + msg.target + " "
+ msg.callback + ": " + msg.what
);
msg.target.dispatchMessage(msg);
if (me.mLogging!= null) me.mLogging.println(
"<<<<< Finished to " + msg.target + " "
+ msg.callback);
msg.recycle();
}
}
}
By now the picture is fairly clear; it brings to mind the Run function of a Thread in WinCE.
A looper essentially adds message-handling capability to a thread.
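A minimal sketch of that idea, using only ordinary SDK classes (nothing AudioTrack-specific): a worker thread calls Looper.prepare(), builds a Handler bound to its looper, and then sits in Looper.loop() handling messages:

import android.os.Handler;
import android.os.Looper;
import android.os.Message;

class LooperThreadExample extends Thread {
    Handler handler;

    @Override
    public void run() {
        Looper.prepare();                 // associate a Looper with this thread
        handler = new Handler() {         // Handler bound to this thread's looper
            @Override
            public void handleMessage(Message msg) {
                // messages sent to 'handler' are dispatched here, on this thread
            }
        };
        Looper.loop();                    // run the message queue until quit()
    }
}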
Back to the original code:
if ((mInitializationLooper = Looper.myLooper()) == null) {
mInitializationLooper = Looper.getMainLooper();
}
The implementation of Looper.getMainLooper:
/** Returns the application's main looper, which lives in the main thread of the application.
*/
public synchronized static final Looper getMainLooper() {
return mMainLooper;
}
And setMainLooper:
private synchronized static void setMainLooper(Looper looper) {
mMainLooper = looper;
}
So where does setMainLooper get called? In prepareMainLooper:
/** Initialize the current thread as a looper, marking it as an application's main
* looper. The main looper for your application is created by the Android environment,
* so you should never need to call this function yourself.
* {@link #prepare()}
*/
public static final void prepareMainLooper() {
prepare();
setMainLooper(myLooper());
if (Process.supportsProcesses()) {
myLooper().mQueue.mQuitAllowed = false;
}
}
This is getting deep; time to climb back out.
3. Next comes parameter checking:
audioParamCheck(streamType, sampleRateInHz, channelConfig, audioFormat, mode);
The implementation of audioParamCheck:
// Convenience method for the constructor's parameter checks.
// This is where constructor IllegalArgumentException-s are thrown
// postconditions:
// mStreamType is valid
// mChannelCount is valid
// mChannels is valid
// mAudioFormat is valid
// mSampleRate is valid
// mDataLoadMode is valid
private void audioParamCheck(int streamType, int sampleRateInHz,
int channelConfig, int audioFormat, int mode) {
//--------------
// stream type
if( (streamType != AudioManager.STREAM_ALARM) && (streamType != AudioManager.STREAM_MUSIC)
&& (streamType != AudioManager.STREAM_RING) && (streamType != AudioManager.STREAM_SYSTEM)
&& (streamType != AudioManager.STREAM_VOICE_CALL)
&& (streamType != AudioManager.STREAM_NOTIFICATION)
&& (streamType != AudioManager.STREAM_BLUETOOTH_SCO)
&& (streamType != AudioManager.STREAM_DTMF)) {
throw (new IllegalArgumentException("Invalid stream type."));
} else {
mStreamType = streamType;
}
//--------------
// sample rate
if ( (sampleRateInHz < 4000) || (sampleRateInHz > 48000) ) {
throw (new IllegalArgumentException(sampleRateInHz
+ "Hz is not a supported sample rate."));
} else {
mSampleRate = sampleRateInHz;
}
//--------------
// channel config
mChannelConfiguration = channelConfig;
switch (channelConfig) {
case AudioFormat.CHANNEL_OUT_DEFAULT: //AudioFormat.CHANNEL_CONFIGURATION_DEFAULT
case AudioFormat.CHANNEL_OUT_MONO:
case AudioFormat.CHANNEL_CONFIGURATION_MONO:
mChannelCount = 1;
mChannels = AudioFormat.CHANNEL_OUT_MONO;
break;
case AudioFormat.CHANNEL_OUT_STEREO:
case AudioFormat.CHANNEL_CONFIGURATION_STEREO:
mChannelCount = 2;
mChannels = AudioFormat.CHANNEL_OUT_STEREO;
break;
default:
mChannelCount = 0;
mChannels = AudioFormat.CHANNEL_INVALID;
mChannelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_INVALID;
throw(new IllegalArgumentException("Unsupported channel configuration."));
}
//--------------
// audio format
switch (audioFormat) {
case AudioFormat.ENCODING_DEFAULT:
mAudioFormat = AudioFormat.ENCODING_PCM_16BIT;
break;
case AudioFormat.ENCODING_PCM_16BIT:
case AudioFormat.ENCODING_PCM_8BIT:
mAudioFormat = audioFormat;
break;
default:
mAudioFormat = AudioFormat.ENCODING_INVALID;
throw(new IllegalArgumentException("Unsupported sample encoding."
+ " Should be ENCODING_PCM_8BIT or ENCODING_PCM_16BIT."));
}
//--------------
// audio load mode
if ( (mode != MODE_STREAM) && (mode != MODE_STATIC) ) {
throw(new IllegalArgumentException("Invalid mode."));
} else {
mDataLoadMode = mode;
}
}
Similar code appeared earlier, in getMinBufferSize.
Why not improve audioParamCheck to assign default values to some of the parameters? That would let getMinBufferSize drop some duplicated code.
4. Next, the buffer size is checked:
audioBuffSizeCheck(bufferSizeInBytes);
5. Check that sessionId is valid:
if (sessionId < 0) {
throw (new IllegalArgumentException("Invalid audio session ID: "+sessionId));
}
6. Allocate a one-element array to hold the sessionId (the int[] acts as an out-parameter, so the native layer can write back the session it actually uses):
int[] session = new int[1];
session[0] = sessionId;
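Why an array rather than a plain int? Java passes int by value, so the one-element array gives the native layer somewhere to write the actually-used session ID back into. A tiny plain-Java illustration (fakeNativeSetup is a made-up stand-in, not AOSP code):

class OutParamSketch {
    // Stand-in for how native_setup treats the session array (hypothetical).
    static void fakeNativeSetup(int[] session) {
        if (session[0] == 0) {
            session[0] = 17;   // pretend the native layer allocated session 17
        }
    }

    public static void main(String[] args) {
        int[] session = new int[1];
        session[0] = 0;                  // 0: let the lower layer pick a session
        fakeNativeSetup(session);
        int mSessionId = session[0];     // the constructor does the same: mSessionId = session[0]
        System.out.println(mSessionId);  // 17
    }
}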
7. Call down into the native layer. There will be plenty to say about that, so let's first finish off the remaining tail of the Java constructor and then dive into native.
If the native call fails, the constructor simply returns:
if (initResult != SUCCESS) {
loge("Error code "+initResult+" when initializing AudioTrack.");
return; // with mState == STATE_UNINITIALIZED
}
8. Record the sessionId:
mSessionId = session[0];
9. Set the state according to the data-load mode:
if (mDataLoadMode == MODE_STATIC) {
mState = STATE_NO_STATIC_DATA;
} else {
mState = STATE_INITIALIZED;
}
10. Alright, now into the native code:
// native initialization
int initResult = native_setup(new WeakReference(this),
mStreamType, mSampleRate, mChannels, mAudioFormat,
mNativeBufferSizeInBytes, mDataLoadMode, session);
static int
android_media_AudioTrack_native_setup(JNIEnv *env, jobject thiz, jobject weak_this,
jint streamType, jint sampleRateInHertz, jint channels,
jint audioFormat, jint buffSizeInBytes, jint memoryMode, jintArray jSession)
{
LOGV("sampleRate=%d, audioFormat(from Java)=%d, channels=%x, buffSize=%d",
sampleRateInHertz, audioFormat, channels, buffSizeInBytes);
int afSampleRate;
int afFrameCount;
if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
LOGE("Error creating AudioTrack: Could not get AudioSystem frame count.");
return AUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;
}
if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
LOGE("Error creating AudioTrack: Could not get AudioSystem sampling rate.");
return AUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;
}
if (!AudioSystem::isOutputChannel(channels)) {
LOGE("Error creating AudioTrack: invalid channel mask.");
return AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
}
int nbChannels = AudioSystem::popCount(channels);
// check the stream type
AudioSystem::stream_type atStreamType;
if (streamType == javaAudioTrackFields.STREAM_VOICE_CALL) {
atStreamType = AudioSystem::VOICE_CALL;
} else if (streamType == javaAudioTrackFields.STREAM_SYSTEM) {
atStreamType = AudioSystem::SYSTEM;
} else if (streamType == javaAudioTrackFields.STREAM_RING) {
atStreamType = AudioSystem::RING;
} else if (streamType == javaAudioTrackFields.STREAM_MUSIC) {
atStreamType = AudioSystem::MUSIC;
} else if (streamType == javaAudioTrackFields.STREAM_ALARM) {
atStreamType = AudioSystem::ALARM;
} else if (streamType == javaAudioTrackFields.STREAM_NOTIFICATION) {
atStreamType = AudioSystem::NOTIFICATION;
} else if (streamType == javaAudioTrackFields.STREAM_BLUETOOTH_SCO) {
atStreamType = AudioSystem::BLUETOOTH_SCO;
} else if (streamType == javaAudioTrackFields.STREAM_DTMF) {
atStreamType = AudioSystem::DTMF;
} else {
LOGE("Error creating AudioTrack: unknown stream type.");
return AUDIOTRACK_ERROR_SETUP_INVALIDSTREAMTYPE;
}
// check the format.
// This function was called from Java, so we compare the format against the Java constants
if ((audioFormat != javaAudioTrackFields.PCM16) && (audioFormat != javaAudioTrackFields.PCM8)) {
LOGE("Error creating AudioTrack: unsupported audio format.");
return AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;
}
// for the moment 8bitPCM in MODE_STATIC is not supported natively in the AudioTrack C++ class
// so we declare everything as 16bitPCM, the 8->16bit conversion for MODE_STATIC will be handled
// in android_media_AudioTrack_native_write()
if ((audioFormat == javaAudioTrackFields.PCM8)
&& (memoryMode == javaAudioTrackFields.MODE_STATIC)) {
LOGV("android_media_AudioTrack_native_setup(): requesting MODE_STATIC for 8bit \
buff size of %dbytes, switching to 16bit, buff size of %dbytes",
buffSizeInBytes, 2*buffSizeInBytes);
audioFormat = javaAudioTrackFields.PCM16;
// we will need twice the memory to store the data
buffSizeInBytes *= 2;
}
// compute the frame count
int bytesPerSample = audioFormat == javaAudioTrackFields.PCM16 ? 2 : 1;
int format = audioFormat == javaAudioTrackFields.PCM16 ?
AudioSystem::PCM_16_BIT : AudioSystem::PCM_8_BIT;
int frameCount = buffSizeInBytes / (nbChannels * bytesPerSample);
AudioTrackJniStorage* lpJniStorage = new AudioTrackJniStorage();
// initialize the callback information:
// this data will be passed with every AudioTrack callback
jclass clazz = env->GetObjectClass(thiz);
if (clazz == NULL) {
LOGE("Can't find %s when setting up callback.", kClassPathName);
delete lpJniStorage;
return AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
}
lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
// we use a weak reference so the AudioTrack object can be garbage collected.
lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
lpJniStorage->mStreamType = atStreamType;
if (jSession == NULL) {
LOGE("Error creating AudioTrack: invalid session ID pointer");
delete lpJniStorage;
return AUDIOTRACK_ERROR;
}
jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
if (nSession == NULL) {
LOGE("Error creating AudioTrack: Error retrieving session id pointer");
delete lpJniStorage;
return AUDIOTRACK_ERROR;
}
int sessionId = nSession[0];
env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
nSession = NULL;
// create the native AudioTrack object
AudioTrack* lpTrack = new AudioTrack();
if (lpTrack == NULL) {
LOGE("Error creating uninitialized AudioTrack");
goto native_track_failure;
}
// initialize the native AudioTrack object
if (memoryMode == javaAudioTrackFields.MODE_STREAM) {
lpTrack->set(
atStreamType,// stream type
sampleRateInHertz,
format,// word length, PCM
channels,
frameCount,
0,// flags
audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
0,// shared mem
true,// thread can call Java
sessionId);// audio session ID
} else if (memoryMode == javaAudioTrackFields.MODE_STATIC) {
// AudioTrack is using shared memory
if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
LOGE("Error creating AudioTrack in static mode: error creating mem heap base");
goto native_init_failure;
}
lpTrack->set(
atStreamType,// stream type
sampleRateInHertz,
format,// word length, PCM
channels,
frameCount,
0,// flags
audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user));
0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
lpJniStorage->mMemBase,// shared mem
true,// thread can call Java
sessionId);// audio session ID
}
if (lpTrack->initCheck() != NO_ERROR) {
LOGE("Error initializing AudioTrack");
goto native_init_failure;
}
nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
if (nSession == NULL) {
LOGE("Error creating AudioTrack: Error retrieving session id pointer");
goto native_init_failure;
}
// read the audio session ID back from AudioTrack in case we create a new session
nSession[0] = lpTrack->getSessionId();
env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
nSession = NULL;
// save our newly created C++ AudioTrack in the "nativeTrackInJavaObj" field
// of the Java object (in mNativeTrackInJavaObj)
env->SetIntField(thiz, javaAudioTrackFields.nativeTrackInJavaObj, (int)lpTrack);
// save the JNI resources so we can free them later
//LOGV("storing lpJniStorage: %x\n", (int)lpJniStorage);
env->SetIntField(thiz, javaAudioTrackFields.jniData, (int)lpJniStorage);
return AUDIOTRACK_SUCCESS;
// failures:
native_init_failure:
delete lpTrack;
env->SetIntField(thiz, javaAudioTrackFields.nativeTrackInJavaObj, 0);
native_track_failure:
if (nSession != NULL) {
env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
}
env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_class);
env->DeleteGlobalRef(lpJniStorage->mCallbackData.audioTrack_ref);
delete lpJniStorage;
env->SetIntField(thiz, javaAudioTrackFields.jniData, 0);
return AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
}
The following code obtains the frame count and sampling rate; both were discussed in earlier articles:
if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
LOGE("Error creating AudioTrack: Could not get AudioSystem frame count.");
return AUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;
}
if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
LOGE("Error creating AudioTrack: Could not get AudioSystem sampling rate.");
return AUDIOTRACK_ERROR_SETUP_AUDIOSYSTEM;
}
Check whether the channel mask describes output channels. This is a track, used for playback, so the channels must of course be output channels:
if (!AudioSystem::isOutputChannel(channels)) {
LOGE("Error creating AudioTrack: invalid channel mask.");
return AUDIOTRACK_ERROR_SETUP_INVALIDCHANNELMASK;
}
Next, AudioSystem::popCount is called:
int nbChannels = AudioSystem::popCount(channels);
The implementation of AudioSystem::popCount:
// use emulated popcount optimization
// http://www.df.lth.se/~john_e/gems/gem002d.html
uint32_t AudioSystem::popCount(uint32_t u)
{
u = ((u&0x55555555) + ((u>>1)&0x55555555));
u = ((u&0x33333333) + ((u>>2)&0x33333333));
u = ((u&0x0f0f0f0f) + ((u>>4)&0x0f0f0f0f));
u = ((u&0x00ff00ff) + ((u>>8)&0x00ff00ff));
u = ( u&0x0000ffff) + (u>>16);
return u;
}
First think about it yourself: how would you count the bits?
The most direct way is to treat every bit as a number and add all 32 of them up; the sum is the number of 1s.
That is wasteful in time and space, though it does have one advantage: it requires no thought at all.
Now look at the site referenced in the comment.
The principle is similar, splitting and counting, but with more finesse: no extra variables and no loop.
The idea is to count group by group, from small groups up to larger ones, until all 32 bits have been covered.
Groups of one bit need no counting: a 1 contributes one, a 0 contributes nothing.
Counting in groups of two bits:
u = ((u&0x55555555) + ((u>>1)&0x55555555));
(u&0x55555555) keeps the 1 in the low bit of each pair.
((u>>1)&0x55555555) keeps the 1 in the high bit of each pair and shifts it down to the low position.
Then the two are added.
If both bits were 1, then 1+1=10, and the count 2 appears.
If only one of the two bits was 1, then 1+0=1; if both were 0, then 0+0=0.
A neat method.
Next, counting in groups of four:
u = ((u&0x33333333) + ((u>>2)&0x33333333));
The previous step already counted the 1s in each pair and left the result in those two bits.
Now every two adjacent pairs are combined into a group of four and counted again.
(u&0x33333333) keeps the result of the low pair.
((u>>2)&0x33333333) keeps the result of the high pair, shifted down so it can be treated as a count.
Adding the two results gives the number of 1s in each group of four bits.
By this point the idea and the method should be clear.
((u&0x0f0f0f0f) + ((u>>4)&0x0f0f0f0f)) counts the 1s in each group of 8 bits.
((u&0x00ff00ff) + ((u>>8)&0x00ff00ff)) counts the 1s in each group of 16 bits.
(u&0x0000ffff) + (u>>16) counts the 1s in the whole 32 bits, i.e. in the original value.
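The same algorithm, ported to Java purely for illustration, makes it easy to check against a concrete mask; CHANNEL_OUT_STEREO is CHANNEL_OUT_FRONT_LEFT | CHANNEL_OUT_FRONT_RIGHT, i.e. 0x4 | 0x8 = 0x0C, two bits set:

class PopCountDemo {
    // Same grouped-counting algorithm as AudioSystem::popCount, ported to Java for illustration.
    static int popCount(int u) {
        u = (u & 0x55555555) + ((u >>> 1) & 0x55555555);   // count 1s in each 2-bit group
        u = (u & 0x33333333) + ((u >>> 2) & 0x33333333);   // ... each 4-bit group
        u = (u & 0x0f0f0f0f) + ((u >>> 4) & 0x0f0f0f0f);   // ... each 8-bit group
        u = (u & 0x00ff00ff) + ((u >>> 8) & 0x00ff00ff);   // ... each 16-bit group
        return (u & 0x0000ffff) + (u >>> 16);               // ... all 32 bits
    }

    public static void main(String[] args) {
        System.out.println(popCount(0x0C));          // prints 2 for a stereo mask
        System.out.println(Integer.bitCount(0x0C));  // Java's built-in gives the same answer
    }
}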
Back in android_media_AudioTrack_native_setup.
Check the stream type:
// check the stream type
AudioSystem::stream_type atStreamType;
if (streamType == javaAudioTrackFields.STREAM_VOICE_CALL) {
atStreamType = AudioSystem::VOICE_CALL;
} else if (streamType == javaAudioTrackFields.STREAM_SYSTEM) {
atStreamType = AudioSystem::SYSTEM;
} else if (streamType == javaAudioTrackFields.STREAM_RING) {
atStreamType = AudioSystem::RING;
} else if (streamType == javaAudioTrackFields.STREAM_MUSIC) {
atStreamType = AudioSystem::MUSIC;
} else if (streamType == javaAudioTrackFields.STREAM_ALARM) {
atStreamType = AudioSystem::ALARM;
} else if (streamType == javaAudioTrackFields.STREAM_NOTIFICATION) {
atStreamType = AudioSystem::NOTIFICATION;
} else if (streamType == javaAudioTrackFields.STREAM_BLUETOOTH_SCO) {
atStreamType = AudioSystem::BLUETOOTH_SCO;
} else if (streamType == javaAudioTrackFields.STREAM_DTMF) {
atStreamType = AudioSystem::DTMF;
} else {
LOGE("Error creating AudioTrack: unknown stream type.");
return AUDIOTRACK_ERROR_SETUP_INVALIDSTREAMTYPE;
}
Check the audio format:
// check the format.
// This function was called from Java, so we compare the format against the Java constants
if ((audioFormat != javaAudioTrackFields.PCM16) && (audioFormat != javaAudioTrackFields.PCM8)) {
LOGE("Error creating AudioTrack: unsupported audio format.");
return AUDIOTRACK_ERROR_SETUP_INVALIDFORMAT;
}
Next comes an adaptation step. The native AudioTrack class does not support 8-bit PCM in static mode, so a conversion is needed:
// for the moment 8bitPCM in MODE_STATIC is not supported natively in the AudioTrack C++ class
// so we declare everything as 16bitPCM, the 8->16bit conversion for MODE_STATIC will be handled
// in android_media_AudioTrack_native_write()
if ((audioFormat == javaAudioTrackFields.PCM8)
&& (memoryMode == javaAudioTrackFields.MODE_STATIC)) {
LOGV("android_media_AudioTrack_native_setup(): requesting MODE_STATIC for 8bit \
buff size of %dbytes, switching to 16bit, buff size of %dbytes",
buffSizeInBytes, 2*buffSizeInBytes);
audioFormat = javaAudioTrackFields.PCM16; // switch the format to 16-bit PCM; as the code shows, the variable is only used for later comparisons
// we will need twice the memory to store the data
buffSizeInBytes *= 2; // each sample used to occupy one byte and will now occupy two, so the total buffer must double as well
}
The 8-to-16-bit conversion itself happens in android_media_AudioTrack_native_write(); the relevant part looks like this:
if (audioFormat == javaAudioTrackFields.PCM16) {
// writing to shared memory, check for capacity
if ((size_t)sizeInBytes > pTrack->sharedBuffer()->size()) {
sizeInBytes = pTrack->sharedBuffer()->size();
}
memcpy(pTrack->sharedBuffer()->pointer(), data + offsetInBytes, sizeInBytes);
written = sizeInBytes;
} else if (audioFormat == javaAudioTrackFields.PCM8) {
// data contains 8bit data we need to expand to 16bit before copying
// to the shared memory
// writing to shared memory, check for capacity,
// note that input data will occupy 2X the input space due to 8 to 16bit conversion
if (((size_t)sizeInBytes)*2 > pTrack->sharedBuffer()->size()) {
sizeInBytes = pTrack->sharedBuffer()->size() / 2;
}
int count = sizeInBytes;
int16_t *dst = (int16_t *)pTrack->sharedBuffer()->pointer();
const int8_t *src = (const int8_t *)(data + offsetInBytes);
while(count--) {
*dst++ = (int16_t)(*src++^0x80) << 8;
}
// even though we wrote 2*sizeInBytes, we only report sizeInBytes as written to hide
// the 8bit mixer restriction from the user of this function
written = sizeInBytes;
}
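The (*src ^ 0x80) << 8 step is easy to verify: 8-bit PCM is unsigned with 0x80 as the zero level, so XOR-ing with 0x80 re-centres the sample around zero and the shift scales it to the 16-bit range. A quick Java illustration (not the actual native code):

class Pcm8To16Demo {
    // Convert one unsigned 8-bit PCM sample to signed 16-bit, mirroring the JNI write path.
    static short pcm8to16(int u8) {
        return (short) (((u8 & 0xff) ^ 0x80) << 8);
    }

    public static void main(String[] args) {
        System.out.println(pcm8to16(0x80)); // 0x80 is silence in 8-bit PCM -> 0
        System.out.println(pcm8to16(0xFF)); // full positive -> 32512 (0x7F00)
        System.out.println(pcm8to16(0x00)); // full negative -> -32768 (0x8000)
    }
}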
Compute the frame count:
// compute the frame count
int bytesPerSample = audioFormat == javaAudioTrackFields.PCM16 ? 2 : 1;
int format = audioFormat == javaAudioTrackFields.PCM16 ?
AudioSystem::PCM_16_BIT : AudioSystem::PCM_8_BIT;
int frameCount = buffSizeInBytes / (nbChannels * bytesPerSample);
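A frame is one sample for every channel, so with some example numbers (16-bit stereo, an 8192-byte buffer, my own arbitrary choice) the arithmetic looks like this:

class FrameCountDemo {
    public static void main(String[] args) {
        int buffSizeInBytes = 8192;      // example buffer size
        int nbChannels = 2;              // stereo
        int bytesPerSample = 2;          // 16-bit PCM
        // one frame = one sample for every channel
        int frameCount = buffSizeInBytes / (nbChannels * bytesPerSample);
        System.out.println(frameCount);  // 2048
    }
}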
This brings back a formula involving frame counts from an earlier article that was left unexplained:
// Ensure that buffer depth covers at least audio hardware latency
uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
if (minBufCount < 2) minBufCount = 2;
The meaning is similar to the frameCount here: afFrameCount/afSampleRate is the duration of one hardware buffer in seconds, and multiplying by 1000 turns that into milliseconds, the same unit as afLatency.
Dividing the latency by that per-buffer duration then gives the minimum number of buffers needed so that, as the comment puts it, the buffer depth covers at least the audio hardware latency.
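Plugging in some plausible example numbers (not taken from any particular device) makes the formula easy to check:

class MinBufCountDemo {
    public static void main(String[] args) {
        int afSampleRate = 44100;   // example hardware output sample rate
        int afFrameCount = 1024;    // example frames per hardware buffer
        int afLatency    = 70;      // example hardware output latency, in ms

        // one hardware buffer lasts 1000 * 1024 / 44100 = 23 ms (integer division);
        // minBufCount = how many such buffers are needed to cover the latency
        int minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
        if (minBufCount < 2) minBufCount = 2;
        System.out.println(minBufCount); // 70 / 23 = 3
    }
}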
Enough of that digression; back to the setup code.
Next, an AudioTrackJniStorage object is created:
AudioTrackJniStorage* lpJniStorage = new AudioTrackJniStorage();
The AudioTrackJniStorage class is short:
class AudioTrackJniStorage {
public:
sp<MemoryHeapBase> mMemHeap;
sp<MemoryBase> mMemBase;
audiotrack_callback_cookie mCallbackData;
int mStreamType;
AudioTrackJniStorage() {
mCallbackData.audioTrack_class = 0;
mCallbackData.audioTrack_ref = 0;
mStreamType = AudioSystem::DEFAULT;
}
~AudioTrackJniStorage() {
mMemBase.clear();
mMemHeap.clear();
}
bool allocSharedMem(int sizeInBytes) {
mMemHeap = new MemoryHeapBase(sizeInBytes, 0, "AudioTrack Heap Base");
if (mMemHeap->getHeapID() < 0) {
return false;
}
mMemBase = new MemoryBase(mMemHeap, 0, sizeInBytes);
return true;
}
};
As the code shows, it records the stream type.
Shared memory can be allocated by calling its allocSharedMem function.
This touches on Android's mechanism for sharing memory across processes, which will be left for another time.
Next, a few JNIEnv calls fetch what is needed and store it into the AudioTrackJniStorage object.
// initialize the callback information:
// this data will be passed with every AudioTrack callback
jclass clazz = env->GetObjectClass(thiz);
if (clazz == NULL) {
LOGE("Can't find %s when setting up callback.", kClassPathName);
delete lpJniStorage;
return AUDIOTRACK_ERROR_SETUP_NATIVEINITFAILED;
}
lpJniStorage->mCallbackData.audioTrack_class = (jclass)env->NewGlobalRef(clazz);
// we use a weak reference so the AudioTrack object can be garbage collected.
lpJniStorage->mCallbackData.audioTrack_ref = env->NewGlobalRef(weak_this);
JNIEnv itself deserves a proper summary some other time.
Have a look at mCallbackData inside AudioTrackJniStorage; it is of type audiotrack_callback_cookie:
struct audiotrack_callback_cookie {
jclass audioTrack_class;
jobject audioTrack_ref;
};
Next, the stream type is stored in the AudioTrackJniStorage object:
lpJniStorage->mStreamType = atStreamType;
Check whether the session array passed in is null:
if (jSession == NULL) {
LOGE("Error creating AudioTrack: invalid session ID pointer");
delete lpJniStorage;
return AUDIOTRACK_ERROR;
}
A pair of JNIEnv calls pins the session array passed in, reads out the session ID, and then releases the array again; recall that on the Java side this variable was allocated as an int[1] precisely so the value could travel in both directions.
jint* nSession = (jint *) env->GetPrimitiveArrayCritical(jSession, NULL);
if (nSession == NULL) {
LOGE("Error creating AudioTrack: Error retrieving session id pointer");
delete lpJniStorage;
return AUDIOTRACK_ERROR;
}
int sessionId = nSession[0];
env->ReleasePrimitiveArrayCritical(jSession, nSession, 0);
nSession = NULL;
Create the native AudioTrack object; on failure, goto the error handling:
// create the native AudioTrack object
AudioTrack* lpTrack = new AudioTrack();
if (lpTrack == NULL) {
LOGE("Error creating uninitialized AudioTrack");
goto native_track_failure;
}
This parameterless AudioTrack constructor only sets the state to NO_INIT. Its comment reads:
/* Constructs an uninitialized AudioTrack. No connection with
* AudioFlinger takes place.
*/
So it merely creates an uninitialized AudioTrack object and establishes no connection with AudioFlinger yet.
Next, set() is called on the object just created. A check is made first, because stream mode and static mode pass slightly different parameters.
Comparing the two calls, the difference is the 10th parameter, the shared memory.
In stream mode 0 is passed, because data keeps being written in and no shared-memory buffer needs to be handed over here.
Static mode is different: its data is handed over in one go, so the buffer allocated by allocSharedMem is passed.
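Seen from the application side, the two data-load modes differ in exactly the same way. A rough Java sketch (my own illustration; values and the PCM source are arbitrary): in static mode the whole clip is written once before play(), while in stream mode data keeps being written while the track plays.

import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;

class DataLoadModeSketch {
    // MODE_STATIC: the whole clip is written once up front, then (re)played.
    static AudioTrack playClip(byte[] soundData) {
        AudioTrack clip = new AudioTrack(AudioManager.STREAM_MUSIC, 44100,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
                soundData.length, AudioTrack.MODE_STATIC);
        clip.write(soundData, 0, soundData.length); // fills the shared buffer handed down to native
        clip.play();
        return clip;
    }

    // MODE_STREAM: data keeps being pushed while the track plays.
    static void playStream(AudioTrack stream, java.io.InputStream pcm) throws java.io.IOException {
        stream.play();
        byte[] chunk = new byte[4096];
        int n;
        while ((n = pcm.read(chunk)) > 0) {
            stream.write(chunk, 0, n); // blocks until there is room in the track buffer
        }
    }
}

Back in the JNI code, the two set() calls are: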
// initialize the native AudioTrack object
if (memoryMode == javaAudioTrackFields.MODE_STREAM) {
lpTrack->set(
atStreamType,// stream type
sampleRateInHertz,
format,// word length, PCM
channels,
frameCount,
0,// flags
audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user)
0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
0,// shared mem
true,// thread can call Java
sessionId);// audio session ID
} else if (memoryMode == javaAudioTrackFields.MODE_STATIC) {
// AudioTrack is using shared memory
if (!lpJniStorage->allocSharedMem(buffSizeInBytes)) {
LOGE("Error creating AudioTrack in static mode: error creating mem heap base");
goto native_init_failure;
}
lpTrack->set(
atStreamType,// stream type
sampleRateInHertz,
format,// word length, PCM
channels,
frameCount,
0,// flags
audioCallback, &(lpJniStorage->mCallbackData),//callback, callback data (user));
0,// notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
lpJniStorage->mMemBase,// shared mem
true,// thread can call Java
sessionId);// audio session ID
}
Let's step into set():
status_t AudioTrack::set(
int streamType,
uint32_t sampleRate,
int format,
int channels,
int frameCount,
uint32_t flags,
callback_t cbf,
void* user,
int notificationFrames,
const sp<IMemory>& sharedBuffer,
bool threadCanCallJava,
int sessionId)
{
LOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), sharedBuffer->size());
// Check whether mAudioTrack is non-null, i.e. whether an IAudioTrack object has already been created.
// (From here on, explanations are inserted directly into the code as comments; hopefully that is easier to follow.)
if (mAudioTrack != 0) {
LOGE("Track already in use");
return INVALID_OPERATION;
}
int afSampleRate;
// The next two calls have already come up several times, so nothing more on them.
if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
return NO_INIT;
}
uint32_t afLatency;
if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
return NO_INIT;
}
// Check the parameters; default values are replaced with concrete ones.
// handle default values first.
if (streamType == AudioSystem::DEFAULT) {
streamType = AudioSystem::MUSIC;
}
if (sampleRate == 0) {
sampleRate = afSampleRate;
}
// these below should probably come from the audioFlinger too...
if (format == 0) {
format = AudioSystem::PCM_16_BIT;
}
if (channels == 0) {
channels = AudioSystem::CHANNEL_OUT_STEREO;
}
// Check that the format is valid. Format here means 8-bit or 16-bit PCM.
// validate parameters
if (!AudioSystem::isValidFormat(format)) {
LOGE("Invalid format");
return BAD_VALUE;
}
// If the format is not linear PCM, set the OUTPUT_FLAG_DIRECT flag.
// This flag is used in getOutput and when creating the IAudioTrack object.
// Note: a row of + signs marks the start of an inserted digression and a row of - signs marks its end; digressions can be nested.
// Personally I find this easier to read.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The definition of OUTPUT_FLAG_DIRECT:
// request to open a direct output with getOutput() (by opposition to sharing an output with other AudioTracks)
enum output_flags {
OUTPUT_FLAG_INDIRECT = 0x0,
OUTPUT_FLAG_DIRECT = 0x1
};
// So there are only two kinds: direct and indirect.
----------------------------------------------------------------
// force direct flag if format is not linear PCM
if (!AudioSystem::isLinearPCM(format)) {
flags |= AudioSystem::OUTPUT_FLAG_DIRECT;
}
// Check that these are output channels. We are creating an AudioTrack, so the channels must of course be output channels.
if (!AudioSystem::isOutputChannel(channels)) {
LOGE("Invalid channel mask");
return BAD_VALUE;
}
// Count the channels. popCount was described above.
// This works because each channel occupies exactly one bit of the mask.
uint32_t channelCount = AudioSystem::popCount(channels);
// Obtain the output.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The implementation of AudioSystem::getOutput:
// There is no comment on the function itself.
audio_io_handle_t AudioSystem::getOutput(stream_type stream,
uint32_t samplingRate,
uint32_t format,
uint32_t channels,
output_flags flags)
{
audio_io_handle_t output = 0;
// Do not use stream to output map cache if the direct output
// flag is set or if we are likely to use a direct output
// (e.g voice call stream @ 8kHz could use BT SCO device and be routed to
// a direct output on some platforms).
// TODO: the output cache and stream to output mapping implementation needs to
// be reworked for proper operation with direct outputs. This code is too specific
// to the first use case we want to cover (Voice Recognition and Voice Dialer over
// Bluetooth SCO
// As the comment explains, when the direct flag is set, or a direct output is likely to be used, the stream-to-output map cache must not be used.
if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) == 0 &&
((stream != AudioSystem::VOICE_CALL && stream != AudioSystem::BLUETOOTH_SCO) ||
channels != AudioSystem::CHANNEL_OUT_MONO ||
(samplingRate != 8000 && samplingRate != 16000))) {
Mutex::Autolock _l(gLock);
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The definition of gStreamOutputMap:
// mapping between stream types and outputs
static DefaultKeyedVector<int, audio_io_handle_t> gStreamOutputMap;
----------------------------------------------------------------
// So it is simply a map from stream types to outputs.
output = AudioSystem::gStreamOutputMap.valueFor(stream);
LOGV_IF((output != 0), "getOutput() read %d from cache for stream %d", output, stream);
}
// output can be 0 in two cases: the direct flag was set, or the stream was not found in the map.
if (output == 0) {
// Nothing from the cache, so ask the audio policy service instead.
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return 0;
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The implementation of AudioPolicyService::getOutput:
audio_io_handle_t AudioPolicyService::getOutput(AudioSystem::stream_type stream,
uint32_t samplingRate,
uint32_t format,
uint32_t channels,
AudioSystem::output_flags flags)
{
if (mpPolicyManager == NULL) {
return 0;
}
LOGV("getOutput() tid %d", gettid());
Mutex::Autolock _l(mLock);
// mpPolicyManager is assigned in the AudioPolicyService constructor
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AudioPolicyService::AudioPolicyService()
: BnAudioPolicyService() , mpPolicyManager(NULL)
{
char value[PROPERTY_VALUE_MAX];
// start tone playback thread
mTonePlaybackThread = new AudioCommandThread(String8(""));
// start audio commands thread
mAudioCommandThread = new AudioCommandThread(String8("ApmCommandThread"));
#if (defined GENERIC_AUDIO) || (defined AUDIO_POLICY_TEST)
mpPolicyManager = new AudioPolicyManagerBase(this);
LOGV("build for GENERIC_AUDIO - using generic audio policy");
#else
// if running in emulation - use the emulator driver
if (property_get("ro.kernel.qemu", value, 0)) {
LOGV("Running in emulation - using generic audio policy");
mpPolicyManager = new AudioPolicyManagerBase(this);
}
else {
// The branch we actually take is this one.
// createAudioPolicyManager is defined in quite a few places; the one we use should be the ALSA variant.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The implementation of createAudioPolicyManager:
extern "C" AudioPolicyInterface* createAudioPolicyManager(AudioPolicyClientInterface *clientInterface)
{
return new AudioPolicyManagerALSA(clientInterface);
}
// Class AudioPolicyManagerALSA derives from AudioPolicyManagerBase.
// It adds very little of its own, essentially only a stopInput function.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The comment on class AudioPolicyManagerBase:
// ----------------------------------------------------------------------------
// AudioPolicyManagerBase implements audio policy manager behavior common to all platforms.
// Each platform must implement an AudioPolicyManager class derived from AudioPolicyManagerBase
// and override methods for which the platform specific behavior differs from the implementation
// in AudioPolicyManagerBase. Even if no specific behavior is required, the AudioPolicyManager
// class must be implemented as well as the class factory function createAudioPolicyManager()
// and provided in a shared library libaudiopolicy.so.
// ----------------------------------------------------------------------------
----------------------------------------------------------------
----------------------------------------------------------------
LOGV("Using hardware specific audio policy");
mpPolicyManager = createAudioPolicyManager(this);
}
#endif
// load properties
property_get("ro.camera.sound.forced", value, "0");
mpPolicyManager->setSystemProperty("ro.camera.sound.forced", value);
}
----------------------------------------------------------------
// That is as far as we go for now. AudioPolicyManagerBase deserves a closer look some other time; here the interesting part is its getOutput function.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// It is too long to paste here; a detailed walkthrough will have to wait. Roughly, it:
// obtains the strategy for the stream,
// obtains the device for that strategy,
// and calls mpClientInterface->openOutput to open an output, which is finally returned.
----------------------------------------------------------------
return mpPolicyManager->getOutput(stream, samplingRate, format, channels, flags);
}
----------------------------------------------------------------
output = aps->getOutput(stream, samplingRate, format, channels, flags);
if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) == 0) {
Mutex::Autolock _l(gLock);
AudioSystem::gStreamOutputMap.add(stream, output);
}
}
return output;
}
----------------------------------------------------------------
audio_io_handle_t output = AudioSystem::getOutput((AudioSystem::stream_type)streamType,
sampleRate, format, channels, (AudioSystem::output_flags)flags);
if (output == 0) {
LOGE("Could not get audio output for stream type %d", streamType);
return BAD_VALUE;
}
// Assign the member variables.
// The others are familiar, but what is notificationFrames?
// It turns out to be the 9th parameter of set().
// Its comment there reads: notificationFrames == 0 since not using EVENT_MORE_DATA to feed the AudioTrack
// Both stream and static mode pass 0.
mVolume[LEFT] = 1.0f;
mVolume[RIGHT] = 1.0f;
mSendLevel = 0;
mFrameCount = frameCount;
mNotificationFramesReq = notificationFrames;
mSessionId = sessionId;
mAuxEffectId = 0;
// Next up is createTrack. It is long, but that cannot be helped; we are, after all, reading the AudioTrack constructor.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t AudioTrack::createTrack(
int streamType,
uint32_t sampleRate,
int format,
int channelCount,
int frameCount,
uint32_t flags,
const sp<IMemory>& sharedBuffer,
audio_io_handle_t output,
bool enforceFrameCount)
{
status_t status;
// Obtain the audio flinger object
const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
if (audioFlinger == 0) {
LOGE("Could not get audioflinger");
return NO_INIT;
}
// The next three calls have appeared many times already
int afSampleRate;
if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
return NO_INIT;
}
int afFrameCount;
if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
return NO_INIT;
}
uint32_t afLatency;
if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
return NO_INIT;
}
mNotificationFramesAct = mNotificationFramesReq;
// Assign frameCount
if (!AudioSystem::isLinearPCM(format)) {
if (sharedBuffer != 0) {
frameCount = sharedBuffer->size();
}
} else {
// This is the formula discussed earlier: the minimum number of hardware buffers needed to cover the hardware latency.
// Ensure that buffer depth covers at least audio hardware latency
uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
if (minBufCount < 2) minBufCount = 2;
int minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
if (sharedBuffer == 0) {
if (frameCount == 0) {
frameCount = minFrameCount;
}
if (mNotificationFramesAct == 0) {
mNotificationFramesAct = frameCount/2;
}
// Make sure that application is notified with sufficient margin
// before underrun
if (mNotificationFramesAct > (uint32_t)frameCount/2) {
mNotificationFramesAct = frameCount/2;
}
if (frameCount < minFrameCount) {
if (enforceFrameCount) {
LOGE("Invalid buffer size: minFrameCount %d, frameCount %d", minFrameCount, frameCount);
return BAD_VALUE;
} else {
frameCount = minFrameCount;
}
}
} else {
// Ensure that buffer alignment matches channelcount
if (((uint32_t)sharedBuffer->pointer() & (channelCount | 1)) != 0) {
LOGE("Invalid buffer alignement: address %p, channelCount %d", sharedBuffer->pointer(), channelCount);
return BAD_VALUE;
}
frameCount = sharedBuffer->size()/channelCount/sizeof(int16_t);
}
}
// Call the audio flinger interface to create the track.
// The comment on createTrack reads:
/* create an audio track and registers it with AudioFlinger.
* return null if the track cannot be created.
*/
// In other words, it creates an audio track and registers it with AudioFlinger.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The implementation of createTrack (this is the Binder proxy side, BpAudioFlinger):
virtual sp<IAudioTrack> createTrack(
pid_t pid,
int streamType,
uint32_t sampleRate,
int format,
int channelCount,
int frameCount,
uint32_t flags,
const sp<IMemory>& sharedBuffer,
int output,
int *sessionId,
status_t *status)
{
Parcel data, reply;
sp<IAudioTrack> track;
data.writeInterfaceToken(IAudioFlinger::getInterfaceDescriptor());
data.writeInt32(pid);
data.writeInt32(streamType);
data.writeInt32(sampleRate);
data.writeInt32(format);
data.writeInt32(channelCount);
data.writeInt32(frameCount);
data.writeInt32(flags);
data.writeStrongBinder(sharedBuffer->asBinder());
data.writeInt32(output);
int lSessionId = 0;
if (sessionId != NULL) {
lSessionId = *sessionId;
}
data.writeInt32(lSessionId);
// This createTrack lives in class BpAudioFlinger.
// Through remote() it ends up in BnAudioFlinger's createTrack.
// Binder again; Binder really deserves a proper write-up of its own some time.
status_t lStatus = remote()->transact(CREATE_TRACK, data, &reply);
if (lStatus != NO_ERROR) {
LOGE("createTrack error: %s", strerror(-lStatus));
} else {
lSessionId = reply.readInt32();
if (sessionId != NULL) {
*sessionId = lSessionId;
}
lStatus = reply.readInt32();
track = interface_cast<IAudioTrack>(reply.readStrongBinder());
}
if (status) {
*status = lStatus;
}
return track;
}
----------------------------------------------------------------
sp<IAudioTrack> track = audioFlinger->createTrack(getpid(),
streamType,
sampleRate,
format,
channelCount,
frameCount,
((uint16_t)flags) << 16,
sharedBuffer,
output,
&mSessionId,
&status);
if (track == 0) {
LOGE("AudioFlinger could not create track, status: %d", status);
return status;
}
// The cblk here is the audio_track_cblk_t that holds the playback data.
// It is created inside audio flinger; here its address is handed up so the upper layer can write data into it.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// getCblk is implemented in class BpAudioTrack, so Binder is involved here as well.
virtual sp<IMemory> getCblk() const
{
Parcel data, reply;
sp<IMemory> cblk;
data.writeInterfaceToken(IAudioTrack::getInterfaceDescriptor());
// Sure enough, remote()->transact reaches BnAudioTrack, and what ultimately runs is BnAudioTrack's getCblk.
status_t status = remote()->transact(GET_CBLK, data, &reply);
if (status == NO_ERROR) {
cblk = interface_cast<IMemory>(reply.readStrongBinder());
}
return cblk;
}
----------------------------------------------------------------
sp<IMemory> cblk = track->getCblk();
if (cblk == 0) {
LOGE("Could not get control block");
return NO_INIT;
}
mAudioTrack.clear();
mAudioTrack = track;
mCblkMemory.clear();
mCblkMemory = cblk;
// audio_track_cblk_t holds the data and is quite important; worth a careful look
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
struct audio_track_cblk_t
{
// The data members are grouped so that members accessed frequently and in the same context
// are in the same line of data cache.
Mutex lock;
Condition cv;
volatile uint64_t user; // producer (write) position, in frames
volatile uint64_t server; // consumer (read) position, in frames
uint64_t userBase; // base of the producer position
uint64_t serverBase; // base of the consumer position
void* buffers;
uint32_t frameCount;
// Cache line boundary
uint64_t loopStart;
uint64_t loopEnd;
int loopCount;
volatile union {
uint16_t volume[2];
uint32_t volumeLR;
};
uint32_t sampleRate;
// NOTE: audio_track_cblk_t::frameSize is not equal to AudioTrack::frameSize() for
// 8 bit PCM data: in this case, mCblk->frameSize is based on a sample size of
// 16 bit because data is converted to 16 bit before being stored in buffer
uint8_t frameSize;
uint8_t channelCount;
uint16_t flags;
uint16_t bufferTimeoutMs; // Maximum cumulated timeout before restarting audioflinger
uint16_t waitTimeMs; // Cumulated wait time
uint16_t sendLevel;
uint16_t reserved;
// Cache line boundary (32 bytes)
audio_track_cblk_t();
uint64_t stepUser(uint32_t frameCount);
bool stepServer(uint32_t frameCount);
void* buffer(uint64_t offset) const;
uint32_t framesAvailable();
uint32_t framesAvailable_l();
uint32_t framesReady();
};
----------------------------------------------------------------
mCblk = static_cast<audio_track_cblk_t*>(cblk->pointer());
mCblk->flags |= CBLK_DIRECTION_OUT;
if (sharedBuffer == 0) {
// sharedBuffer is 0, i.e. stream mode; the buffer is whatever follows the control-block header
mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t);
} else {
mCblk->buffers = sharedBuffer->pointer();
// Force buffer full condition as data is already present in shared memory
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The implementation of stepUser:
uint64_t audio_track_cblk_t::stepUser(uint32_t frameCount)
{
uint64_t u = this->user;
u += frameCount;
// Ensure that user is never ahead of server for AudioRecord
if (flags & CBLK_DIRECTION_MSK) {
// If stepServer() has been called once, switch to normal obtainBuffer() timeout period
if (bufferTimeoutMs == MAX_STARTUP_TIMEOUT_MS-1) {
bufferTimeoutMs = MAX_RUN_TIMEOUT_MS;
}
} else if (u > this->server) {
LOGW("stepServer occured after track reset");
u = this->server;
}
if (u >= userBase + this->frameCount) {
userBase += this->frameCount;
}
this->user = u;
// Clear flow control error condition as new data has been written/read to/from buffer.
flags &= ~CBLK_UNDERRUN_MSK;
return u;
}
----------------------------------------------------------------
mCblk->stepUser(mCblk->frameCount);
}
mCblk->volumeLR = (uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) | uint16_t(mVolume[LEFT] * 0x1000);
mCblk->sendLevel = uint16_t(mSendLevel * 0x1000);
mAudioTrack->attachAuxEffect(mAuxEffectId);
mCblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
mCblk->waitTimeMs = 0;
mRemainingFrames = mNotificationFramesAct;
mLatency = afLatency + (1000*mCblk->frameCount) / sampleRate;
return NO_ERROR;
}
----------------------------------------------------------------
// create the IAudioTrack
status_t status = createTrack(streamType, sampleRate, format, channelCount,
frameCount, flags, sharedBuffer, output, true);
if (status != NO_ERROR) {
return status;
}
// If cbf is not 0, an AudioTrackThread is created.
// What is cbf? It is the 7th argument passed to set(), namely audioCallback.
// It should be a callback, and indeed its implementation is:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The implementation of audioCallback:
static void audioCallback(int event, void* user, void *info) {
if (event == AudioTrack::EVENT_MORE_DATA) {
// set size to 0 to signal we're not using the callback to write more data
AudioTrack::Buffer* pBuff = (AudioTrack::Buffer*)info;
pBuff->size = 0;
} else if (event == AudioTrack::EVENT_MARKER) {
audiotrack_callback_cookie *callbackInfo = (audiotrack_callback_cookie *)user;
JNIEnv *env = AndroidRuntime::getJNIEnv();
if (user && env) {
env->CallStaticVoidMethod(
callbackInfo->audioTrack_class,
javaAudioTrackFields.postNativeEventInJava,
callbackInfo->audioTrack_ref, event, 0,0, NULL);
if (env->ExceptionCheck()) {
env->ExceptionDescribe();
env->ExceptionClear();
}
}
} else if (event == AudioTrack::EVENT_NEW_POS) {
audiotrack_callback_cookie *callbackInfo = (audiotrack_callback_cookie *)user;
JNIEnv *env = AndroidRuntime::getJNIEnv();
if (user && env) {
env->CallStaticVoidMethod(
callbackInfo->audioTrack_class,
javaAudioTrackFields.postNativeEventInJava,
callbackInfo->audioTrack_ref, event, 0,0, NULL);
if (env->ExceptionCheck()) {
env->ExceptionDescribe();
env->ExceptionClear();
}
}
}
}
----------------------------------------------------------------
if (cbf != 0) {
// Look at the arguments used to create the AudioTrackThread.
// *this needs no explanation.
// threadCanCallJava is the boolean passed into set(); both static and stream mode pass true.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The AudioTrackThread constructor:
// receiver here is the AudioTrack object that is creating the thread
AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
: Thread(bCanCallJava), mReceiver(receiver)
{
}
// And the definition of class AudioTrackThread:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
/* a small internal class to handle the callback */
class AudioTrackThread : public Thread
{
public:
AudioTrackThread(AudioTrack& receiver, bool bCanCallJava = false);
private:
friend class AudioTrack;
virtual bool threadLoop();
virtual status_t readyToRun();
virtual void onFirstRef();
AudioTrack& mReceiver;
Mutex mLock;
};
----------------------------------------------------------------
// Since AudioTrackThread is a thread, let's look at its threadLoop:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
bool AudioTrack::AudioTrackThread::threadLoop()
{
// It calls the AudioTrack object's processAudioBuffer function:
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
bool AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
{
Buffer audioBuffer;
uint32_t frames;
size_t writtenSize;
// Manage underrun callback
if (mActive && (mCblk->framesReady() == 0)) {
LOGV("Underrun user: %x, server: %x, flags %04x", mCblk->user, mCblk->server, mCblk->flags);
if ((mCblk->flags & CBLK_UNDERRUN_MSK) == CBLK_UNDERRUN_OFF) {
mCbf(EVENT_UNDERRUN, mUserData, 0);
if (mCblk->server == mCblk->frameCount) {
mCbf(EVENT_BUFFER_END, mUserData, 0);
}
mCblk->flags |= CBLK_UNDERRUN_ON;
if (mSharedBuffer != 0) return false;
}
}
// Manage loop end callback
// mCblk is assigned in AudioTrack::createTrack:
// mCblk = static_cast<audio_track_cblk_t*>(cblk->pointer());
// and cblk comes from: sp<IMemory> cblk = track->getCblk();
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// track, in turn, comes from:
sp<IAudioTrack> track = audioFlinger->createTrack(getpid(),
streamType,
sampleRate,
format,
channelCount,
frameCount,
((uint16_t)flags) << 16,
sharedBuffer,
output,
&mSessionId,
&status);
// So track is an IAudioTrack object created through AudioFlinger.
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
sp<IAudioTrack> AudioFlinger::createTrack(
pid_t pid,
int streamType,
uint32_t sampleRate,
int format,
int channelCount,
int frameCount,
uint32_t flags,
const sp& sharedBuffer,
int output,
int *sessionId,
status_t *status)
{
sp<PlaybackThread::Track> track;
sp<TrackHandle> trackHandle;
sp<Client> client;
wp<Client> wclient;
status_t lStatus;
int lSessionId;
if (streamType >= AudioSystem::NUM_STREAM_TYPES) {
LOGE("invalid stream type");
lStatus = BAD_VALUE;
goto Exit;
}
{
Mutex::Autolock _l(mLock);
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// checkPlaybackThread_l returns the PlaybackThread that corresponds to the given output handle:
// checkPlaybackThread_l() must be called with AudioFlinger::mLock held
AudioFlinger::PlaybackThread *AudioFlinger::checkPlaybackThread_l(int output) const
{
PlaybackThread *thread = NULL;
if (mPlaybackThreads.indexOfKey(output) >= 0) {
thread = (PlaybackThread *)mPlaybackThreads.valueFor(output).get();
}
return thread;
}
// PlaybackThread objects are kept in a map keyed by output, the member variable mPlaybackThreads;
// AudioFlinger::openOutput and AudioFlinger::openDuplicateOutput add entries to it:
// mPlaybackThreads.add(id, thread);
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// Now the implementation of AudioFlinger::openOutput:
int AudioFlinger::openOutput(uint32_t *pDevices,
uint32_t *pSamplingRate,
uint32_t *pFormat,
uint32_t *pChannels,
uint32_t *pLatencyMs,
uint32_t flags)
{
status_t status;
PlaybackThread *thread = NULL;
mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
uint32_t format = pFormat ? *pFormat : 0;
uint32_t channels = pChannels ? *pChannels : 0;
uint32_t latency = pLatencyMs ? *pLatencyMs : 0;
LOGV("openOutput(), Device %x, SamplingRate %d, Format %d, Channels %x, flags %x",
pDevices ? *pDevices : 0,
samplingRate,
format,
channels,
flags);
if (pDevices == NULL || *pDevices == 0) {
return 0;
}
Mutex::Autolock _l(mLock);
// This calls down to the hardware abstraction layer's openOutputStream.
// AudioHardwareALSA::openOutputStream is not covered here (we ultimately use ALSA, hence the AudioHardwareALSA class);
// it creates an AudioStreamOutALSA object and returns it.
AudioStreamOut *output = mAudioHardware->openOutputStream(*pDevices,
(int *)&format,
&channels,
&samplingRate,
&status);
LOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %d, Channels %x, status %d",
output,
samplingRate,
format,
channels,
status);
mHardwareStatus = AUDIO_HW_IDLE;
if (output != 0) {
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The implementation of AudioFlinger::nextUniqueId:
int AudioFlinger::nextUniqueId()
{
return android_atomic_inc(&mNextUniqueId);
}
// It simply returns a unique id.
----------------------------------------------------------------
int id = nextUniqueId();
// Depending on the flags, format and channels, create either a DirectOutputThread or a MixerThread.
if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) ||
(format != AudioSystem::PCM_16_BIT) ||
(channels != AudioSystem::CHANNEL_OUT_STEREO)) {
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The declaration of class DirectOutputThread:
class DirectOutputThread : public PlaybackThread {
public:
DirectOutputThread (const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output, int id, uint32_t device);
~DirectOutputThread();
// Thread virtuals
virtual bool threadLoop();
virtual bool checkForNewParameters_l();
protected:
virtual int getTrackName_l();
virtual void deleteTrackName_l(int name);
virtual uint32_t activeSleepTimeUs();
virtual uint32_t idleSleepTimeUs();
virtual uint32_t suspendSleepTimeUs();
private:
void applyVolume(uint16_t leftVol, uint16_t rightVol, bool ramp);
float mLeftVolFloat;
float mRightVolFloat;
uint16_t mLeftVolShort;
uint16_t mRightVolShort;
};
----------------------------------------------------------------
thread = new DirectOutputThread(this, output, id, *pDevices);
LOGV("openOutput() created direct output: ID %d thread %p", id, thread);
} else {
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// The declaration of class MixerThread:
class MixerThread : public PlaybackThread {
public:
MixerThread (const sp<AudioFlinger>& audioFlinger,
AudioStreamOut* output,
int id,
uint32_t device);
virtual ~MixerThread();
// Thread virtuals
virtual bool threadLoop();
void invalidateTracks(int streamType);
virtual bool checkForNewParameters_l();
virtual status_t dumpInternals(int fd, const Vector<String16>& args);
protected:
uint32_t prepareTracks_l(const SortedVector< wp