status_t AudioTrack::createTrack(
        int streamType,
        uint32_t sampleRate,
        int format,
        int channelCount,
        int frameCount,
        uint32_t flags,
        const sp<IMemory>& sharedBuffer,
        audio_io_handle_t output,
        bool enforceFrameCount)
{
    status_t status;
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        LOGE("Could not get audioflinger");
        return NO_INIT;
    }

    int afSampleRate;
    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
        return NO_INIT;
    }
    int afFrameCount;
    if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
        return NO_INIT;
    }
    uint32_t afLatency;
    if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
        return NO_INIT;
    }

    mNotificationFramesAct = mNotificationFramesReq;
    if (!AudioSystem::isLinearPCM(format)) {
        if (sharedBuffer != 0) {
            frameCount = sharedBuffer->size();
        }
    } else {
        // Ensure that buffer depth covers at least audio hardware latency
        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
        if (minBufCount < 2) minBufCount = 2;

        int minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;

        if (sharedBuffer == 0) {
            if (frameCount == 0) {
                frameCount = minFrameCount;
            }
            if (mNotificationFramesAct == 0) {
                mNotificationFramesAct = frameCount/2;
            }
            // Make sure that application is notified with sufficient margin
            // before underrun
            if (mNotificationFramesAct > (uint32_t)frameCount/2) {
                mNotificationFramesAct = frameCount/2;
            }
            if (frameCount < minFrameCount) {
                if (enforceFrameCount) {
                    LOGE("Invalid buffer size: minFrameCount %d, frameCount %d", minFrameCount, frameCount);
                    return BAD_VALUE;
                } else {
                    frameCount = minFrameCount;
                }
            }
        } else {
            // Ensure that buffer alignment matches channel count
            if (((uint32_t)sharedBuffer->pointer() & (channelCount | 1)) != 0) {
                LOGE("Invalid buffer alignment: address %p, channelCount %d", sharedBuffer->pointer(), channelCount);
                return BAD_VALUE;
            }
            frameCount = sharedBuffer->size()/channelCount/sizeof(int16_t);
        }
    }

    sp<IAudioTrack> track = audioFlinger->createTrack(getpid(),
                                                      streamType,
                                                      sampleRate,
                                                      format,
                                                      channelCount,
                                                      frameCount,
                                                      ((uint16_t)flags) << 16,
                                                      sharedBuffer,
                                                      output,
                                                      &mSessionId,
                                                      &status);

    if (track == 0) {
        LOGE("AudioFlinger could not create track, status: %d", status);
        return status;
    }
    sp<IMemory> cblk = track->getCblk();
    if (cblk == 0) {
        LOGE("Could not get control block");
        return NO_INIT;
    }
    mAudioTrack.clear();
    mAudioTrack = track;
    mCblkMemory.clear();
    mCblkMemory = cblk;
    mCblk = static_cast<audio_track_cblk_t*>(cblk->pointer());
    mCblk->flags |= CBLK_DIRECTION_OUT;
    if (sharedBuffer == 0) {
        mCblk->buffers = (char*)mCblk + sizeof(audio_track_cblk_t);
    } else {
        mCblk->buffers = sharedBuffer->pointer();
        // Force buffer full condition as data is already present in shared memory
        mCblk->stepUser(mCblk->frameCount);
    }
    mCblk->volumeLR = (uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) | uint16_t(mVolume[LEFT] * 0x1000);
    mCblk->sendLevel = uint16_t(mSendLevel * 0x1000);
    mAudioTrack->attachAuxEffect(mAuxEffectId);
    mCblk->bufferTimeoutMs = MAX_STARTUP_TIMEOUT_MS;
    mCblk->waitTimeMs = 0;
    mRemainingFrames = mNotificationFramesAct;
    mLatency = afLatency + (1000*mCblk->frameCount) / sampleRate;
    return NO_ERROR;
}
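// A standalone sketch (mine, not part of AudioTrack.cpp) of the 4.12 fixed-point
// packing used above for mCblk->volumeLR: each gain is scaled by 0x1000 (so 1.0f
// becomes 0x1000) and the right/left pair is packed into a single uint32_t, right
// channel in the high 16 bits, left channel in the low 16 bits.
#include <stdio.h>
#include <stdint.h>

static uint32_t packVolumeLR(float left, float right) {
    return (uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000);
}

int main() {
    uint32_t v = packVolumeLR(1.0f, 0.5f);
    printf("volumeLR = 0x%08x (left=0x%04x, right=0x%04x)\n",
           v, v & 0xFFFF, v >> 16);   // 0x08001000, left=0x1000, right=0x0800
    return 0;
}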
//--> client buffer
//AudioFlinger moves data from the client buffer to the hardware buffer
//--> hardware buffer
//the hardware playback process pulls data from the hardware buffer
//what is a "frame"?
//-----------------------------------------------------------------------------------------------------
//A frame is a set of samples, one per channel, at a particular instant in time. For stereophonic audio,
//a frame consists of two samples. For Dolby 5.1 Surround Sound, a frame would consist of six samples
//(left channel, center channel, right channel, rear right, rear left, and the low frequency channel).
//For monophonic audio, a frame is equivalent to one sample.
//Retrieved from "http://alsa.opensrc.org/Frame"
//A single sample belongs to only one channel, but in practice there may be one or more channels.
//Since no single unit can express the amount of data produced by sampling all channels once, the concept of a frame was introduced.
//The size of a frame is the number of bytes per sample multiplied by the number of channels.
//In addition, today's sound card drivers also allocate and manage their internal buffers in units of frames.
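// A quick worked example of the definition above (my own illustration, assuming
// 16-bit linear PCM, i.e. 2 bytes per sample): frame size = bytes per sample * channels.
#include <stdio.h>

int main() {
    const int bytesPerSample = 2;                            // 16-bit PCM
    printf("mono   frame: %d bytes\n", bytesPerSample * 1);  // 2
    printf("stereo frame: %d bytes\n", bytesPerSample * 2);  // 4
    printf("5.1    frame: %d bytes\n", bytesPerSample * 6);  // 12
    return 0;
}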
//what is "bufCount"
//-------------------------------------------------------------------------------------------------------
//bufCount 表示一个frame里面的字节数,用来描述一个时域的采样点所占的字节宽度,(注意:不等同于channel数.)
//what is "frame count"
//-------------------------------------------------------------------------------------------------------
//frameFount 表示一个buffer所能够缓冲的frame的个数,也就是时域的点数
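// A small sketch (mine) of that relationship, mirroring the shared-buffer case in
// createTrack(): frameCount = buffer size in bytes / (channelCount * sizeof(int16_t)).
// The 8192-byte buffer size is just an arbitrary example value.
#include <stdio.h>
#include <stdint.h>

int main() {
    const size_t bufferSizeBytes = 8192;    // hypothetical shared buffer size
    const int    channelCount    = 2;       // stereo, 16-bit PCM assumed
    size_t frameCount = bufferSizeBytes / channelCount / sizeof(int16_t);
    printf("frameCount = %zu\n", frameCount);   // 2048 frames
    return 0;
}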
//So:
//afFrameCount -- the size of the hardware buffer, in frames.
//afSampleRate -- the hardware sample rate.
//afFrameCount/afSampleRate -- how long the data held in the hardware buffer takes to play; converted to ms this is
// 1000*afFrameCount/afSampleRate
//The hardware buffer size is fixed, so if it alone cannot provide the latency the hardware requires, several software buffers must be used to hold more data and cover a longer playback time.
//If the latency required by the hardware is afLatency (ms),
//then the minimum number of software buffers needed to cover that latency is:
//minBufCount = afLatency / (1000*afFrameCount/afSampleRate)
//But this does not yet say how large each software buffer is.
//Write the size of each software buffer (in frames) as bufsize, and let sampleRate be the sample rate before sample-rate conversion.
//Since the data in each software buffer plays for the same length of time as the data in the hardware buffer:
//bufsize/sampleRate = afFrameCount/afSampleRate;
//bufsize = afFrameCount*sampleRate/afSampleRate;
//So the total size of all the software buffers combined is
//minFrameCount = minBufCount*afFrameCount*sampleRate/afSampleRate;
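// Putting the derivation together: a standalone sketch (mine) that reproduces the
// buffer-sizing math from createTrack() with hypothetical hardware values
// (afSampleRate = 44100 Hz, afFrameCount = 1024 frames, afLatency = 100 ms) and a
// client sample rate of 22050 Hz. Real values come from AudioSystem at run time.
#include <stdio.h>
#include <stdint.h>

int main() {
    const int      afSampleRate = 44100;   // hardware sample rate (hypothetical)
    const int      afFrameCount = 1024;    // hardware buffer size in frames (hypothetical)
    const uint32_t afLatency    = 100;     // hardware latency in ms (hypothetical)
    const uint32_t sampleRate   = 22050;   // client sample rate before resampling

    // One hardware buffer lasts 1000*afFrameCount/afSampleRate ms.
    uint32_t bufMs = (1000 * afFrameCount) / afSampleRate;

    // Same expression as the code above: how many such buffers cover afLatency.
    uint32_t minBufCount = afLatency / bufMs;
    if (minBufCount < 2) minBufCount = 2;

    // Total client-side frames: minBufCount buffers, rescaled to the client rate.
    int minFrameCount = (afFrameCount * sampleRate * minBufCount) / afSampleRate;

    printf("per-buffer duration = %u ms\n", bufMs);               // 23 ms
    printf("minBufCount         = %u\n", minBufCount);            // 4
    printf("minFrameCount       = %d frames\n", minFrameCount);   // 2048
    return 0;
}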