The code we have walked through so far frequently calls functions of the AudioSystem class such as getOutputSamplingRate, getOutputFrameCount and getOutputLatency.
Their implementations are all very similar, so today let's take a close look at AudioSystem::getOutputSamplingRate.
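Before diving into the implementation, here is a minimal caller-side sketch of how the function is typically used (hypothetical usage; it only assumes the public AudioSystem API shown below):

#include <media/AudioSystem.h>
#include <utils/Errors.h>

using namespace android;

// Query the sampling rate of whatever output the MUSIC stream is routed to.
// Returns 0 if the query fails.
static int musicOutputSamplingRate()
{
    int rate = 0;
    if (AudioSystem::getOutputSamplingRate(&rate, AudioSystem::MUSIC) != NO_ERROR) {
        return 0;
    }
    return rate;
}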
*****************************************Source code*************************************************
status_t AudioSystem::getOutputSamplingRate(int* samplingRate, int streamType)
{
    OutputDescriptor *outputDesc;
    audio_io_handle_t output;

    if (streamType == DEFAULT) {
        streamType = MUSIC;
    }

    output = getOutput((stream_type)streamType);
    if (output == 0) {
        return PERMISSION_DENIED;
    }

    gLock.lock();
    outputDesc = AudioSystem::gOutputs.valueFor(output);
    if (outputDesc == 0) {
        LOGV("getOutputSamplingRate() no output descriptor for output %d in gOutputs", output);
        gLock.unlock();
        const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
        if (af == 0) return PERMISSION_DENIED;
        *samplingRate = af->sampleRate(output);
    } else {
        LOGV("getOutputSamplingRate() reading from output desc");
        *samplingRate = outputDesc->samplingRate;
        gLock.unlock();
    }

    LOGV("getOutputSamplingRate() streamType %d, output %d, sampling rate %d", streamType, output, *samplingRate);

    return NO_ERROR;
}
**********************************************************************************************
Source path:
frameworks/base/media/libmedia/AudioSystem.cpp
#########################################Walkthrough###########################################
status_t AudioSystem::getOutputSamplingRate(int* samplingRate, int streamType)
{
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// audio output descriptor used to cache output configurations in client process to avoid frequent calls
// through IAudioFlinger
class OutputDescriptor {
public:
OutputDescriptor()
: samplingRate(0), format(0), channels(0), frameCount(0), latency(0) {}
uint32_t samplingRate;
int32_t format;
int32_t channels;
size_t frameCount;
uint32_t latency;
};
----------------------------------------------------------------
OutputDescriptor *outputDesc;
// typedef int audio_io_handle_t;
audio_io_handle_t output;
// A DEFAULT stream type is forced to MUSIC
if (streamType == DEFAULT) {
streamType = MUSIC;
}
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
audio_io_handle_t AudioSystem::getOutput(stream_type stream,
uint32_t samplingRate,
uint32_t format,
uint32_t channels,
output_flags flags)
{
audio_io_handle_t output = 0;
// If the direct flag is set, or a direct output may end up being used, the output cached here cannot be used
// Do not use stream to output map cache if the direct output
// flag is set or if we are likely to use a direct output
// (e.g voice call stream @ 8kHz could use BT SCO device and be routed to
// a direct output on some platforms).
// TODO: the output cache and stream to output mapping implementation needs to
// be reworked for proper operation with direct outputs. This code is too specific
// to the first use case we want to cover (Voice Recognition and Voice Dialer over
// Bluetooth SCO
if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) == 0 &&
((stream != AudioSystem::VOICE_CALL && stream != AudioSystem::BLUETOOTH_SCO) ||
channels != AudioSystem::CHANNEL_OUT_MONO ||
(samplingRate != 8000 && samplingRate != 16000))) {
Mutex::Autolock _l(gLock);
// gStreamOutputMap caches the mapping from stream type to output handle.
// If getOutput() finds nothing here, it asks AudioPolicyService's getOutput() for an output
// and then adds the stream-to-output mapping to gStreamOutputMap (which is what the rest of this function does).
output = AudioSystem::gStreamOutputMap.valueFor(stream);
LOGV_IF((output != 0), "getOutput() read %d from cache for stream %d", output, stream);
}
// If no output for this stream was found in the cache
if (output == 0) {
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// establish binder interface to AudioFlinger service
const sp<IAudioPolicyService>& AudioSystem::get_audio_policy_service()
{
gLock.lock();
// If gAudioPolicyService has not been created yet, create it; otherwise just return it
if (gAudioPolicyService.get() == 0) {
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
The ServiceManager is the entity that keeps track of all registered services.
sp<IServiceManager> defaultServiceManager()
{
// If gDefaultServiceManager already exists, return it directly
if (gDefaultServiceManager != NULL) return gDefaultServiceManager;
{
AutoMutex _l(gDefaultServiceManagerLock);
if (gDefaultServiceManager == NULL) {
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
sp<ProcessState> ProcessState::self()
{
if (gProcess != NULL) return gProcess;
AutoMutex _l(gProcessMutex);
if (gProcess == NULL) gProcess = new ProcessState;
return gProcess;
}
----------------------------------------------------------------
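defaultServiceManager() above and ProcessState::self() here both use the same lazily-created, mutex-guarded process-global singleton idiom: check without the lock, take the lock, check again, then create. A generic sketch of the pattern (illustrative only, not Android source; Foo is a hypothetical type):

#include <utils/threads.h>
#include <utils/RefBase.h>

using namespace android;

class Foo : public RefBase { };          // hypothetical singleton type

static Mutex    gFooLock;
static sp<Foo>  gFoo;

static sp<Foo> fooInstance()
{
    if (gFoo != NULL) return gFoo;       // fast path: already created, no lock taken
    AutoMutex _l(gFooLock);              // slow path: serialize creation
    if (gFoo == NULL) gFoo = new Foo();  // re-check under the lock before creating
    return gFoo;
}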
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
sp<IBinder> ProcessState::getContextObject(const sp<IBinder>& caller)
{
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
bool ProcessState::supportsProcesses() const
{
return mDriverFD >= 0;
}
----------------------------------------------------------------
if (supportsProcesses()) {
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
sp<IBinder> ProcessState::getStrongProxyForHandle(int32_t handle)
{
sp<IBinder> result;
AutoMutex _l(mLock);
handle_entry* e = lookupHandleLocked(handle);
if (e != NULL) {
// We need to create a new BpBinder if there isn't currently one, OR we
// are unable to acquire a weak reference on this current one. See comment
// in getWeakProxyForHandle() for more info about this.
IBinder* b = e->binder;
if (b == NULL || !e->refs->attemptIncWeak(this)) {
b = new BpBinder(handle);
e->binder = b;
if (b) e->refs = b->getWeakRefs();
result = b;
} else {
// This little bit of nastyness is to allow us to add a primary
// reference to the remote proxy when this team doesn't have one
// but another team is sending the handle to us.
result.force_set(b);
e->refs->decWeak(this);
}
}
return result;
}
----------------------------------------------------------------
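// Handle 0 is special: it always refers to the Binder context manager, i.e. the servicemanager process.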
return getStrongProxyForHandle(0);
} else {
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
sp<IBinder> ProcessState::getContextObject(const String16& name, const sp<IBinder>& caller)
{
mLock.lock();
sp<IBinder> object(
mContexts.indexOfKey(name) >= 0 ? mContexts.valueFor(name) : NULL);
mLock.unlock();
//printf("Getting context object %s for %p\n", String8(name).string(), caller.get());
if (object != NULL) return object;
// Don't attempt to retrieve contexts if we manage them
if (mManagesContexts) {
LOGE("getContextObject(%s) failed, but we manage the contexts!\n",
String8(name).string());
return NULL;
}
IPCThreadState* ipc = IPCThreadState::self();
{
Parcel data, reply;
// no interface token on this magic transaction
data.writeString16(name);
data.writeStrongBinder(caller);
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t IPCThreadState::transact(int32_t handle,
uint32_t code, const Parcel& data,
Parcel* reply, uint32_t flags)
{
status_t err = data.errorCheck();
flags |= TF_ACCEPT_FDS;
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BC_TRANSACTION thr " << (void*)pthread_self() << " / hand "
<< handle << " / code " << TypeCode(code) << ": "
<< indent << data << dedent << endl;
}
if (err == NO_ERROR) {
LOG_ONEWAY(">>>> SEND from pid %d uid %d %s", getpid(), getuid(),
(flags & TF_ONE_WAY) == 0 ? "READ REPLY" : "ONE WAY");
err = writeTransactionData(BC_TRANSACTION, flags, handle, code, data, NULL);
}
if (err != NO_ERROR) {
if (reply) reply->setError(err);
return (mLastError = err);
}
if ((flags & TF_ONE_WAY) == 0) {
#if 0
if (code == 4) { // relayout
LOGI(">>>>>> CALLING transaction 4");
} else {
LOGI(">>>>>> CALLING transaction %d", code);
}
#endif
if (reply) {
err = waitForResponse(reply);
} else {
Parcel fakeReply;
err = waitForResponse(&fakeReply);
}
#if 0
if (code == 4) { // relayout
LOGI("<<<<<< RETURNING transaction 4");
} else {
LOGI("<<<<<< RETURNING transaction %d", code);
}
#endif
IF_LOG_TRANSACTIONS() {
TextOutput::Bundle _b(alog);
alog << "BR_REPLY thr " << (void*)pthread_self() << " / hand "
<< handle << ": ";
if (reply) alog << indent << *reply << dedent << endl;
else alog << "(none requested)" << endl;
}
} else {
err = waitForResponse(NULL, NULL);
}
return err;
}
----------------------------------------------------------------
status_t result = ipc->transact(0 /*magic*/, 0, data, &reply, 0);
if (result == NO_ERROR) {
object = reply.readStrongBinder();
}
}
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
void IPCThreadState::flushCommands()
{
if (mProcess->mDriverFD <= 0)
return;
// Talking to the binder driver?
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t IPCThreadState::talkWithDriver(bool doReceive)
{
LOG_ASSERT(mProcess->mDriverFD >= 0, "Binder driver is not opened");
binder_write_read bwr;
// Is the read buffer empty?
const bool needRead = mIn.dataPosition() >= mIn.dataSize();
// We don't want to write anything if we are still reading
// from data left in the input buffer and the caller
// has requested to read the next data.
const size_t outAvail = (!doReceive || needRead) ? mOut.dataSize() : 0;
bwr.write_size = outAvail;
bwr.write_buffer = (long unsigned int)mOut.data();
// This is what we'll read.
if (doReceive && needRead) {
bwr.read_size = mIn.dataCapacity();
bwr.read_buffer = (long unsigned int)mIn.data();
} else {
bwr.read_size = 0;
}
IF_LOG_COMMANDS() {
TextOutput::Bundle _b(alog);
if (outAvail != 0) {
alog << "Sending commands to driver: " << indent;
const void* cmds = (const void*)bwr.write_buffer;
const void* end = ((const uint8_t*)cmds)+bwr.write_size;
alog << HexDump(cmds, bwr.write_size) << endl;
while (cmds < end) cmds = printCommand(alog, cmds);
alog << dedent;
}
alog << "Size of receive buffer: " << bwr.read_size
<< ", needRead: " << needRead << ", doReceive: " << doReceive << endl;
}
// Return immediately if there is nothing to do.
if ((bwr.write_size == 0) && (bwr.read_size == 0)) return NO_ERROR;
bwr.write_consumed = 0;
bwr.read_consumed = 0;
status_t err;
do {
IF_LOG_COMMANDS() {
alog << "About to read/write, write size = " << mOut.dataSize() << endl;
}
#if defined(HAVE_ANDROID_OS)
// Talk to the binder driver via ioctl()
if (ioctl(mProcess->mDriverFD, BINDER_WRITE_READ, &bwr) >= 0)
err = NO_ERROR;
else
err = -errno;
#else
err = INVALID_OPERATION;
#endif
IF_LOG_COMMANDS() {
alog << "Finished read/write, write size = " << mOut.dataSize() << endl;
}
} while (err == -EINTR);
IF_LOG_COMMANDS() {
alog << "Our err: " << (void*)err << ", write consumed: "
<< bwr.write_consumed << " (of " << mOut.dataSize()
<< "), read consumed: " << bwr.read_consumed << endl;
}
if (err >= NO_ERROR) {
if (bwr.write_consumed > 0) {
if (bwr.write_consumed < (ssize_t)mOut.dataSize())
mOut.remove(0, bwr.write_consumed);
else
mOut.setDataSize(0);
}
if (bwr.read_consumed > 0) {
mIn.setDataSize(bwr.read_consumed);
mIn.setDataPosition(0);
}
IF_LOG_COMMANDS() {
TextOutput::Bundle _b(alog);
alog << "Remaining data size: " << mOut.dataSize() << endl;
alog << "Received commands from driver: " << indent;
const void* cmds = mIn.data();
const void* end = mIn.data() + mIn.dataSize();
alog << HexDump(cmds, mIn.dataSize()) << endl;
while (cmds < end) cmds = printReturnCommand(alog, cmds);
alog << dedent;
}
return NO_ERROR;
}
return err;
}
----------------------------------------------------------------
talkWithDriver(false);
}
----------------------------------------------------------------
ipc->flushCommands();
if (object != NULL) setContextObject(object, name);
return object;
}
----------------------------------------------------------------
return getContextObject(String16("default"), caller);
}
}
----------------------------------------------------------------
gDefaultServiceManager = interface_cast<IServiceManager>(
ProcessState::self()->getContextObject(NULL));
}
}
return gDefaultServiceManager;
}
----------------------------------------------------------------
sp<IServiceManager> sm = defaultServiceManager();
sp<IBinder> binder;
do {
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
public IBinder getService(String name) throws RemoteException {
Parcel data = Parcel.obtain();
Parcel reply = Parcel.obtain();
data.writeInterfaceToken(IServiceManager.descriptor);
data.writeString(name);
// This is the Binder mechanism at work: transact() should eventually land in some Bn-side class.
// Sure enough... it lands in ServiceManagerNative
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
public boolean onTransact(int code, Parcel data, Parcel reply, int flags)
{
try {
switch (code) {
case IServiceManager.GET_SERVICE_TRANSACTION: {
data.enforceInterface(IServiceManager.descriptor);
String name = data.readString();
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
/**
* Returns a reference to a service with the given name.
*
* @param name the name of the service to get
* @return a reference to the service, or <code>null</code> if the service doesn't exist
*/
public static IBinder getService(String name) {
try {
IBinder service = sCache.get(name);
if (service != null) {
return service;
} else {
return getIServiceManager().getService(name);
}
} catch (RemoteException e) {
Log.e(TAG, "error in getService", e);
}
return null;
}
----------------------------------------------------------------
IBinder service = getService(name);
reply.writeStrongBinder(service);
return true;
}
case IServiceManager.CHECK_SERVICE_TRANSACTION: {
data.enforceInterface(IServiceManager.descriptor);
String name = data.readString();
IBinder service = checkService(name);
reply.writeStrongBinder(service);
return true;
}
case IServiceManager.ADD_SERVICE_TRANSACTION: {
data.enforceInterface(IServiceManager.descriptor);
String name = data.readString();
IBinder service = data.readStrongBinder();
addService(name, service);
return true;
}
case IServiceManager.LIST_SERVICES_TRANSACTION: {
data.enforceInterface(IServiceManager.descriptor);
String[] list = listServices();
reply.writeStringArray(list);
return true;
}
case IServiceManager.SET_PERMISSION_CONTROLLER_TRANSACTION: {
data.enforceInterface(IServiceManager.descriptor);
IPermissionController controller
= IPermissionController.Stub.asInterface(
data.readStrongBinder());
setPermissionController(controller);
return true;
}
}
} catch (RemoteException e) {
}
return false;
}
----------------------------------------------------------------
mRemote.transact(GET_SERVICE_TRANSACTION, data, reply, 0);
IBinder binder = reply.readStrongBinder();
reply.recycle();
data.recycle();
return binder;
}
----------------------------------------------------------------
binder = sm->getService(String16("media.audio_policy"));
if (binder != 0)
break;
LOGW("AudioPolicyService not published, waiting...");
usleep(500000); // 0.5 s
} while(true);
if (gAudioPolicyServiceClient == NULL) {
gAudioPolicyServiceClient = new AudioPolicyServiceClient();
}
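gAudioPolicyServiceClient is AudioSystem's IBinder::DeathRecipient: it gets notified if the process hosting AudioPolicyService dies, so the cached proxy can be invalidated and re-fetched later. A generic sketch of what such a recipient looks like (illustrative only; the real AudioPolicyServiceClient lives in AudioSystem):

#include <binder/IBinder.h>
#include <utils/RefBase.h>

using namespace android;

// Hypothetical death recipient: the framework calls binderDied() when the
// remote process that owns the binder we linked to goes away.
class MyDeathRecipient : public IBinder::DeathRecipient {
    virtual void binderDied(const wp<IBinder>& who) {
        // Typical reaction: clear any cached interface pointers so the next
        // call re-fetches the service from the service manager.
    }
};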
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
status_t BpBinder::linkToDeath(
const sp<DeathRecipient>& recipient, void* cookie, uint32_t flags)
{
Obituary ob;
ob.recipient = recipient;
ob.cookie = cookie;
ob.flags = flags;
LOG_ALWAYS_FATAL_IF(recipient == NULL,
"linkToDeath(): recipient must be non-NULL");
{
AutoMutex _l(mLock);
if (!mObitsSent) {
if (!mObituaries) {
mObituaries = new Vector<Obituary>;
if (!mObituaries) {
return NO_MEMORY;
}
LOGV("Requesting death notification: %p handle %d\n", this, mHandle);
getWeakRefs()->incWeak(this);
IPCThreadState* self = IPCThreadState::self();
self->requestDeathNotification(mHandle, this);
self->flushCommands();
}
ssize_t res = mObituaries->add(ob);
return res >= (ssize_t)NO_ERROR ? (status_t)NO_ERROR : res;
}
}
return DEAD_OBJECT;
}
----------------------------------------------------------------
binder->linkToDeath(gAudioPolicyServiceClient);
gAudioPolicyService = interface_cast<IAudioPolicyService>(binder);
gLock.unlock();
} else {
gLock.unlock();
}
return gAudioPolicyService;
}
----------------------------------------------------------------
const sp<IAudioPolicyService>& aps = AudioSystem::get_audio_policy_service();
if (aps == 0) return 0;
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
In class BpAudioPolicyService (class BpAudioPolicyService : public BpInterface<IAudioPolicyService>):
virtual audio_io_handle_t getOutput(
AudioSystem::stream_type stream,
uint32_t samplingRate,
uint32_t format,
uint32_t channels,
AudioSystem::output_flags flags)
{
Parcel data, reply;
data.writeInterfaceToken(IAudioPolicyService::getInterfaceDescriptor());
data.writeInt32(static_cast <uint32_t>(stream));
data.writeInt32(samplingRate);
data.writeInt32(static_cast <uint32_t>(format));
data.writeInt32(channels);
data.writeInt32(static_cast <uint32_t>(flags));
// Binder again: this should land in the native-side class,
// namely AudioPolicyService (class AudioPolicyService : public BnAudioPolicyService,
// public AudioPolicyClientInterface, public IBinder::DeathRecipient)
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
audio_io_handle_t AudioPolicyService::getOutput(AudioSystem::stream_type stream,
uint32_t samplingRate,
uint32_t format,
uint32_t channels,
AudioSystem::output_flags flags)
{
if (mpPolicyManager == NULL) {
return 0;
}
LOGV("getOutput() tid %d", gettid());
Mutex::Autolock _l(mLock);
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
// This looks like the place where the real work gets done
audio_io_handle_t AudioPolicyManagerBase::getOutput(AudioSystem::stream_type stream,
uint32_t samplingRate,
uint32_t format,
uint32_t channels,
AudioSystem::output_flags flags)
{
// Create an AudioOutputDescriptor and open an output
audio_io_handle_t output = 0;
uint32_t latency = 0;
routing_strategy strategy = getStrategy((AudioSystem::stream_type)stream);
uint32_t device = getDeviceForStrategy(strategy);
LOGV("getOutput() stream %d, samplingRate %d, format %d, channels %x, flags %x", stream, samplingRate, format, channels, flags);
#ifdef AUDIO_POLICY_TEST
if (mCurOutput != 0) {
LOGV("getOutput() test output mCurOutput %d, samplingRate %d, format %d, channels %x, mDirectOutput %d",
mCurOutput, mTestSamplingRate, mTestFormat, mTestChannels, mDirectOutput);
if (mTestOutputs[mCurOutput] == 0) {
LOGV("getOutput() opening test output");
AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
outputDesc->mDevice = mTestDevice;
outputDesc->mSamplingRate = mTestSamplingRate;
outputDesc->mFormat = mTestFormat;
outputDesc->mChannels = mTestChannels;
outputDesc->mLatency = mTestLatencyMs;
outputDesc->mFlags = (AudioSystem::output_flags)(mDirectOutput ? AudioSystem::OUTPUT_FLAG_DIRECT : 0);
outputDesc->mRefCount[stream] = 0;
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
audio_io_handle_t AudioPolicyService::openOutput(uint32_t *pDevices,
uint32_t *pSamplingRate,
uint32_t *pFormat,
uint32_t *pChannels,
uint32_t *pLatencyMs,
AudioSystem::output_flags flags)
{
sp<IAudioFlinger> af = AudioSystem::get_audio_flinger();
if (af == 0) {
LOGW("openOutput() could not get AudioFlinger");
return 0;
}
// And this in turn calls into AudioFlinger
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
int AudioFlinger::openOutput(uint32_t *pDevices,
uint32_t *pSamplingRate,
uint32_t *pFormat,
uint32_t *pChannels,
uint32_t *pLatencyMs,
uint32_t flags)
{
status_t status;
PlaybackThread *thread = NULL;
mHardwareStatus = AUDIO_HW_OUTPUT_OPEN;
uint32_t samplingRate = pSamplingRate ? *pSamplingRate : 0;
uint32_t format = pFormat ? *pFormat : 0;
uint32_t channels = pChannels ? *pChannels : 0;
uint32_t latency = pLatencyMs ? *pLatencyMs : 0;
LOGV("openOutput(), Device %x, SamplingRate %d, Format %d, Channels %x, flags %x",
pDevices ? *pDevices : 0,
samplingRate,
format,
channels,
flags);
if (pDevices == NULL || *pDevices == 0) {
return 0;
}
Mutex::Autolock _l(mLock);
// In the end, the real work is done by the hardware abstraction layer
AudioStreamOut *output = mAudioHardware->openOutputStream(*pDevices,
(int *)&format,
&channels,
&samplingRate,
&status);
LOGV("openOutput() openOutputStream returned output %p, SamplingRate %d, Format %d, Channels %x, status %d",
output,
samplingRate,
format,
channels,
status);
mHardwareStatus = AUDIO_HW_IDLE;
if (output != 0) {
int id = nextUniqueId();
if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) ||
(format != AudioSystem::PCM_16_BIT) ||
(channels != AudioSystem::CHANNEL_OUT_STEREO)) {
thread = new DirectOutputThread(this, output, id, *pDevices);
LOGV("openOutput() created direct output: ID %d thread %p", id, thread);
} else {
thread = new MixerThread(this, output, id, *pDevices);
LOGV("openOutput() created mixer output: ID %d thread %p", id, thread);
#ifdef LVMX
unsigned bitsPerSample =
(format == AudioSystem::PCM_16_BIT) ? 16 :
((format == AudioSystem::PCM_8_BIT) ? 8 : 0);
unsigned channelCount = (channels == AudioSystem::CHANNEL_OUT_STEREO) ? 2 : 1;
int audioOutputType = LifeVibes::threadIdToAudioOutputType(thread->id());
LifeVibes::init_aot(audioOutputType, samplingRate, bitsPerSample, channelCount);
LifeVibes::setDevice(audioOutputType, *pDevices);
#endif
}
mPlaybackThreads.add(id, thread);
if (pSamplingRate) *pSamplingRate = samplingRate;
if (pFormat) *pFormat = format;
if (pChannels) *pChannels = channels;
if (pLatencyMs) *pLatencyMs = thread->latency();
// notify client processes of the new output creation
thread->audioConfigChanged_l(AudioSystem::OUTPUT_OPENED);
return id;
}
return 0;
}
----------------------------------------------------------------
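As the comment inside AudioFlinger::openOutput says, the real work happens in the hardware abstraction layer: mAudioHardware->openOutputStream() returns an AudioStreamOut whose parameters (sampling rate, format, channels, latency) the layers above merely report back. For reference, the legacy HAL interface looks roughly like this (a sketch of AudioHardwareInterface.h from this era, trimmed to the relevant members; not verbatim source):

class AudioStreamOut {
public:
    virtual uint32_t sampleRate() const = 0;   // the rate getOutputSamplingRate() ultimately reports
    virtual int      format() const = 0;
    virtual uint32_t channels() const = 0;
    virtual uint32_t latency() const = 0;      // hardware + driver latency in ms
    // ... write(), bufferSize(), setParameters(), etc.
};

class AudioHardwareInterface {
public:
    // Opens and configures an output stream on the given device(s); the in/out
    // parameters are updated with the values the hardware actually accepted.
    virtual AudioStreamOut* openOutputStream(
            uint32_t devices,
            int *format = 0,
            uint32_t *channels = 0,
            uint32_t *sampleRate = 0,
            status_t *status = 0) = 0;
    // ...
};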
return af->openOutput(pDevices,
pSamplingRate,
(uint32_t *)pFormat,
pChannels,
pLatencyMs,
flags);
}
----------------------------------------------------------------
mTestOutputs[mCurOutput] = mpClientInterface->openOutput(&outputDesc->mDevice,
&outputDesc->mSamplingRate,
&outputDesc->mFormat,
&outputDesc->mChannels,
&outputDesc->mLatency,
outputDesc->mFlags);
if (mTestOutputs[mCurOutput]) {
AudioParameter outputCmd = AudioParameter();
outputCmd.addInt(String8("set_id"),mCurOutput);
mpClientInterface->setParameters(mTestOutputs[mCurOutput],outputCmd.toString());
addOutput(mTestOutputs[mCurOutput], outputDesc);
}
}
return mTestOutputs[mCurOutput];
}
#endif //AUDIO_POLICY_TEST
// open a direct output if required by specified parameters
if (needsDirectOuput(stream, samplingRate, format, channels, flags, device)) {
LOGV("getOutput() opening direct output device %x", device);
AudioOutputDescriptor *outputDesc = new AudioOutputDescriptor();
outputDesc->mDevice = device;
outputDesc->mSamplingRate = samplingRate;
outputDesc->mFormat = format;
outputDesc->mChannels = channels;
outputDesc->mLatency = 0;
outputDesc->mFlags = (AudioSystem::output_flags)(flags | AudioSystem::OUTPUT_FLAG_DIRECT);
outputDesc->mRefCount[stream] = 0;
output = mpClientInterface->openOutput(&outputDesc->mDevice,
&outputDesc->mSamplingRate,
&outputDesc->mFormat,
&outputDesc->mChannels,
&outputDesc->mLatency,
outputDesc->mFlags);
// only accept an output with the requested parameters
if (output == 0 ||
(samplingRate != 0 && samplingRate != outputDesc->mSamplingRate) ||
(format != 0 && format != outputDesc->mFormat) ||
(channels != 0 && channels != outputDesc->mChannels)) {
LOGV("getOutput() failed opening direct output: samplingRate %d, format %d, channels %d",
samplingRate, format, channels);
if (output != 0) {
mpClientInterface->closeOutput(output);
}
delete outputDesc;
return 0;
}
addOutput(output, outputDesc);
return output;
}
if (channels != 0 && channels != AudioSystem::CHANNEL_OUT_MONO &&
channels != AudioSystem::CHANNEL_OUT_STEREO) {
return 0;
}
// open a non direct output
// get which output is suitable for the specified stream. The actual routing change will happen
// when startOutput() will be called
uint32_t a2dpDevice = device & AudioSystem::DEVICE_OUT_ALL_A2DP;
if (AudioSystem::popCount((AudioSystem::audio_devices)device) == 2) {
#ifdef WITH_A2DP
if (a2dpUsedForSonification() && a2dpDevice != 0) {
// if playing on 2 devices among which one is A2DP, use duplicated output
LOGV("getOutput() using duplicated output");
LOGW_IF((mA2dpOutput == 0), "getOutput() A2DP device in multiple %x selected but A2DP output not opened", device);
output = mDuplicatedOutput;
} else
#endif
{
// if playing on 2 devices among which none is A2DP, use hardware output
output = mHardwareOutput;
}
LOGV("getOutput() using output %d for 2 devices %x", output, device);
} else {
#ifdef WITH_A2DP
if (a2dpDevice != 0) {
// if playing on A2DP device, use a2dp output
LOGW_IF((mA2dpOutput == 0), "getOutput() A2DP device %x selected but A2DP output not opened", device);
output = mA2dpOutput;
} else
#endif
{
// if playing on not A2DP device, use hardware output
output = mHardwareOutput;
}
}
LOGW_IF((output ==0), "getOutput() could not find output for stream %d, samplingRate %d, format %d, channels %x, flags %x",
stream, samplingRate, format, channels, flags);
return output;
}
----------------------------------------------------------------
return mpPolicyManager->getOutput(stream, samplingRate, format, channels, flags);
}
----------------------------------------------------------------
remote()->transact(GET_OUTPUT, data, &reply);
return static_cast <audio_io_handle_t> (reply.readInt32());
}
----------------------------------------------------------------
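To complete the Binder round trip sketched in the comments above: on the server side, BnAudioPolicyService::onTransact() unpacks the parcel for GET_OUTPUT in the same order the proxy wrote it, then calls the virtual getOutput(), which AudioPolicyService implements. A hedged sketch of that case (from memory, not verbatim IAudioPolicyService.cpp):

case GET_OUTPUT: {
    CHECK_INTERFACE(IAudioPolicyService, data, reply);
    AudioSystem::stream_type stream =
            static_cast<AudioSystem::stream_type>(data.readInt32());
    uint32_t samplingRate = data.readInt32();
    uint32_t format = data.readInt32();
    uint32_t channels = data.readInt32();
    AudioSystem::output_flags flags =
            static_cast<AudioSystem::output_flags>(data.readInt32());
    // dispatches to AudioPolicyService::getOutput(), shown above
    audio_io_handle_t output = getOutput(stream, samplingRate, format, channels, flags);
    reply->writeInt32(static_cast<int>(output));
    return NO_ERROR;
} break;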
output = aps->getOutput(stream, samplingRate, format, channels, flags);
if ((flags & AudioSystem::OUTPUT_FLAG_DIRECT) == 0) {
Mutex::Autolock _l(gLock);
AudioSystem::gStreamOutputMap.add(stream, output);
}
}
return output;
}
----------------------------------------------------------------
output = getOutput((stream_type)streamType);
if (output == 0) {
return PERMISSION_DENIED;
}
// The function is named getOutputSamplingRate, so now we go and fetch exactly that
gLock.lock();
// Check whether AudioSystem has a cached descriptor for this output
outputDesc = AudioSystem::gOutputs.valueFor(output);
if (outputDesc == 0) {
// If not, query AudioFlinger for it
LOGV("getOutputSamplingRate() no output descriptor for output %d in gOutputs", output);
gLock.unlock();
const sp<IAudioFlinger>& af = AudioSystem::get_audio_flinger();
if (af == 0) return PERMISSION_DENIED;
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
uint32_t AudioFlinger::sampleRate(int output) const
{
Mutex::Autolock _l(mLock);
PlaybackThread *thread = checkPlaybackThread_l(output);
if (thread == NULL) {
LOGW("sampleRate() unknown thread %d", output);
return 0;
}
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
uint32_t AudioFlinger::ThreadBase::sampleRate() const
{
// mSampleRate is assigned in AudioFlinger::PlaybackThread::readOutputParameters
// (AudioFlinger::RecordThread::readInputParameters assigns it on the recording side)
return mSampleRate;
}
----------------------------------------------------------------
return thread->sampleRate();
}
----------------------------------------------------------------
*samplingRate = af->sampleRate(output);
} else {
// If it is cached, read it straight from the descriptor
LOGV("getOutputSamplingRate() reading from output desc");
*samplingRate = outputDesc->samplingRate;
gLock.unlock();
}
LOGV("getOutputSamplingRate() streamType %d, output %d, sampling rate %d", streamType, output, *samplingRate);
return NO_ERROR;
}
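One last loose end: the comment above notes that mSampleRate is assigned in AudioFlinger::PlaybackThread::readOutputParameters(). That function simply reads the parameters back from the HAL stream obtained when the output was opened, which is why the value returned here ultimately originates in the hardware abstraction layer. A trimmed sketch of the assignment (hedged, not verbatim AudioFlinger.cpp):

void AudioFlinger::PlaybackThread::readOutputParameters()
{
    // mOutput is the AudioStreamOut* returned by the HAL's openOutputStream()
    mSampleRate = mOutput->sampleRate();
    mChannelCount = AudioSystem::popCount(mOutput->channels());
    mFormat = mOutput->format();
    // frame count, mix buffer allocation, etc. follow
}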