I recently ran into a problem on Android 8.0.
In the products we develop, volume control usually only needs to act on the speaker, and the volume control logic lives in our own middleware, so the volume curves also follow the middleware's logic. Bluetooth devices, however, still go through Android's own path, so when the volume curve for a Bluetooth device needs to change, the curve inside Android has to be modified.
In earlier versions (4.x) this was done directly in the code, so I made the change in the same kind of place; it had no effect, and I wasted some time going down the wrong path. I then traced the volume-setting logic again from the start (the volume-setting flow is described in another article) and finally figured out what was going on.
In AudioPolicyManager.cpp:
status_t AudioPolicyManager::setStreamVolumeIndex(audio_stream_type_t stream,
int index,
audio_devices_t device)
{
if ((index < mVolumeCurves->getVolumeIndexMin(stream)) ||
(index > mVolumeCurves->getVolumeIndexMax(stream))) {
return BAD_VALUE;
}
if (!audio_is_output_device(device)) {
return BAD_VALUE;
}
// Force max volume if stream cannot be muted
if (!mVolumeCurves->canBeMuted(stream)) index = mVolumeCurves->getVolumeIndexMax(stream);
// update other private stream volumes which follow this one
for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
mVolumeCurves->addCurrentVolumeIndex((audio_stream_type_t)curStream, device, index);
}
// update volume on all outputs and streams matching the following:
// - The requested stream (or a stream matching for volume control) is active on the output
// - The device (or devices) selected by the strategy corresponding to this stream includes
// the requested device
// - For non default requested device, currently selected device on the output is either the
// requested device or one of the devices selected by the strategy
// - For default requested device (AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME), apply volume only if
// no specific device volume value exists for currently selected device.
status_t status = NO_ERROR;
for (size_t i = 0; i < mOutputs.size(); i++) {
sp<SwAudioOutputDescriptor> desc = mOutputs.valueAt(i);
audio_devices_t curDevice = Volume::getDeviceForVolume(desc->device());
for (int curStream = 0; curStream < AUDIO_STREAM_FOR_POLICY_CNT; curStream++) {
if (!streamsMatchForvolume(stream, (audio_stream_type_t)curStream)) {
continue;
}
if (!(desc->isStreamActive((audio_stream_type_t)curStream) ||
(isInCall() && (curStream == AUDIO_STREAM_VOICE_CALL)))) {
continue;
}
routing_strategy curStrategy = getStrategy((audio_stream_type_t)curStream);
audio_devices_t curStreamDevice = getDeviceForStrategy(curStrategy, false /*fromCache*/);
if ((device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) &&
((curStreamDevice & device) == 0)) {
continue;
}
bool applyVolume;
if (device != AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME) {
curStreamDevice |= device;
applyVolume = (curDevice & curStreamDevice) != 0;
} else {
applyVolume = !mVolumeCurves->hasVolumeIndexForDevice(
stream, Volume::getDeviceForVolume(curStreamDevice));
}
if (applyVolume) {
//FIXME: workaround for truncated touch sounds
// delayed volume change for system stream to be removed when the problem is
// handled by system UI
//call checkAndSetVolume to apply the volume
status_t volStatus =
checkAndSetVolume((audio_stream_type_t)curStream, index, desc, curDevice,
(stream == AUDIO_STREAM_SYSTEM) ? TOUCH_SOUND_FIXED_DELAY_MS : 0);
if (volStatus != NO_ERROR) {
status = volStatus;
}
}
}
}
return status;
}
setStreamVolumeIndex calls checkAndSetVolume to continue applying the volume:
status_t AudioPolicyManager::checkAndSetVolume(audio_stream_type_t stream,
int index,
const sp<AudioOutputDescriptor>& outputDesc,
audio_devices_t device,
int delayMs,
bool force)
{
// do not change actual stream volume if the stream is muted
if (outputDesc->mMuteCount[stream] != 0) {
ALOGVV("checkAndSetVolume() stream %d muted count %d",
stream, outputDesc->mMuteCount[stream]);
return NO_ERROR;
}
audio_policy_forced_cfg_t forceUseForComm =
mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_COMMUNICATION);
// do not change in call volume if bluetooth is connected and vice versa
if ((stream == AUDIO_STREAM_VOICE_CALL && forceUseForComm == AUDIO_POLICY_FORCE_BT_SCO) ||
(stream == AUDIO_STREAM_BLUETOOTH_SCO && forceUseForComm != AUDIO_POLICY_FORCE_BT_SCO)) {
ALOGV("checkAndSetVolume() cannot set stream %d volume with force use = %d for comm",
stream, forceUseForComm);
return INVALID_OPERATION;
}
if (device == AUDIO_DEVICE_NONE) {
device = outputDesc->device();
}
//call computeVolume to convert the index into a dB value
float volumeDb = computeVolume(stream, index, device);
if (outputDesc->isFixedVolume(device)) {
volumeDb = 0.0f;
}
outputDesc->setVolume(volumeDb, stream, device, delayMs, force);
if (stream == AUDIO_STREAM_VOICE_CALL ||
stream == AUDIO_STREAM_BLUETOOTH_SCO) {
float voiceVolume;
// Force voice volume to max for bluetooth SCO as volume is managed by the headset
if (stream == AUDIO_STREAM_VOICE_CALL) {
voiceVolume = (float)index/(float)mVolumeCurves->getVolumeIndexMax(stream);
} else {
voiceVolume = 1.0;
}
if (voiceVolume != mLastVoiceVolume) {
mpClientInterface->setVoiceVolume(voiceVolume, delayMs);
mLastVoiceVolume = voiceVolume;
}
}
return NO_ERROR;
}
Here computeVolume is called to convert the index into a dB value.
Step into computeVolume:
float AudioPolicyManager::computeVolume(audio_stream_type_t stream,
int index,
audio_devices_t device)
{
//call mVolumeCurves->volIndexToDb
float volumeDB = mVolumeCurves->volIndexToDb(stream, Volume::getDeviceCategory(device), index);
// handle the case of accessibility active while a ringtone is playing: if the ringtone is much
// louder than the accessibility prompt, the prompt cannot be heard, thus masking the touch
// exploration of the dialer UI. In this situation, bring the accessibility volume closer to
// the ringtone volume
if ((stream == AUDIO_STREAM_ACCESSIBILITY)
&& (AUDIO_MODE_RINGTONE == mEngine->getPhoneState())
&& isStreamActive(AUDIO_STREAM_RING, 0)) {
const float ringVolumeDB = computeVolume(AUDIO_STREAM_RING, index, device);
return ringVolumeDB - 4 > volumeDB ? ringVolumeDB - 4 : volumeDB;
}
// if a headset is connected, apply the following rules to ring tones and notifications
// to avoid sound level bursts in user's ears:
// - always attenuate notifications volume by 6dB
// - attenuate ring tones volume by 6dB unless music is not playing and
// speaker is part of the select devices
// - if music is playing, always limit the volume to current music volume,
// with a minimum threshold at -36dB so that notification is always perceived.
const routing_strategy stream_strategy = getStrategy(stream);
if ((device & (AUDIO_DEVICE_OUT_BLUETOOTH_A2DP |
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES |
AUDIO_DEVICE_OUT_WIRED_HEADSET |
AUDIO_DEVICE_OUT_WIRED_HEADPHONE |
AUDIO_DEVICE_OUT_USB_HEADSET)) &&
((stream_strategy == STRATEGY_SONIFICATION)
|| (stream_strategy == STRATEGY_SONIFICATION_RESPECTFUL)
|| (stream == AUDIO_STREAM_SYSTEM)
|| ((stream_strategy == STRATEGY_ENFORCED_AUDIBLE) &&
(mEngine->getForceUse(AUDIO_POLICY_FORCE_FOR_SYSTEM) == AUDIO_POLICY_FORCE_NONE))) &&
mVolumeCurves->canBeMuted(stream)) {
// when the phone is ringing we must consider that music could have been paused just before
// by the music application and behave as if music was active if the last music track was
// just stopped
if (isStreamActive(AUDIO_STREAM_MUSIC, SONIFICATION_HEADSET_MUSIC_DELAY) ||
mLimitRingtoneVolume) {
volumeDB += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
audio_devices_t musicDevice = getDeviceForStrategy(STRATEGY_MEDIA, true /*fromCache*/);
float musicVolDB = computeVolume(AUDIO_STREAM_MUSIC,
mVolumeCurves->getVolumeIndex(AUDIO_STREAM_MUSIC,
musicDevice),
musicDevice);
float minVolDB = (musicVolDB > SONIFICATION_HEADSET_VOLUME_MIN_DB) ?
musicVolDB : SONIFICATION_HEADSET_VOLUME_MIN_DB;
if (volumeDB > minVolDB) {
volumeDB = minVolDB;
ALOGV("computeVolume limiting volume to %f musicVol %f", minVolDB, musicVolDB);
}
if (device & (AUDIO_DEVICE_OUT_BLUETOOTH_A2DP |
AUDIO_DEVICE_OUT_BLUETOOTH_A2DP_HEADPHONES)) {
// on A2DP, also ensure notification volume is not too low compared to media when
// intended to be played
if ((volumeDB > -96.0f) &&
(musicVolDB - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB > volumeDB)) {
ALOGV("computeVolume increasing volume for stream=%d device=0x%X from %f to %f",
stream, device,
volumeDB, musicVolDB - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB);
volumeDB = musicVolDB - SONIFICATION_A2DP_MAX_MEDIA_DIFF_DB;
}
}
} else if ((Volume::getDeviceForVolume(device) != AUDIO_DEVICE_OUT_SPEAKER) ||
stream_strategy != STRATEGY_SONIFICATION) {
volumeDB += SONIFICATION_HEADSET_VOLUME_FACTOR_DB;
}
}
return volumeDB;
}
computeVolume in turn calls mVolumeCurves->volIndexToDb.
!!!! Here is an extremely important point, and the one I spent some time stumbling over.
First look at the Android.mk under frameworks\av\services\audiopolicy\common\managerdefinitions, which contains this fragment:
ifeq ($(USE_XML_AUDIO_POLICY_CONF), 1)
LOCAL_SRC_FILES += src/Serializer.cpp
LOCAL_SHARED_LIBRARIES += libicuuc libxml2
LOCAL_C_INCLUDES += \
$(TOPDIR)external/libxml2/include \
$(TOPDIR)external/icu/icu4c/source/common
else
LOCAL_SRC_FILES += \
src/ConfigParsingUtils.cpp \
src/StreamDescriptor.cpp \
src/Gains.cpp
Judging from the name, USE_XML_AUDIO_POLICY_CONF controls whether the XML configuration is used (it is normally set in the device/board makefiles).
I had not noticed this mk file, so I naturally made the change in the code just like on the old versions. Replacing the freshly built .so had no effect at all; only after further code changes did I realize that the file was not even being compiled in, which is what led me to this mk and to the discovery that there are two mechanisms.
Since the second one (the else branch, i.e. editing the code like on the old versions) had no effect, I did not even need to check the value of USE_XML_AUDIO_POLICY_CONF: the build was clearly using the XML path, so the change simply has to follow the ifeq branch.
Still, let's start with the easier-to-understand legacy method under else.
Back to mVolumeCurves->volIndexToDb: what kind of object is mVolumeCurves? The AudioPolicyManager constructor
AudioPolicyManager::AudioPolicyManager(AudioPolicyClientInterface *clientInterface)
contains this fragment:
#ifdef USE_XML_AUDIO_POLICY_CONF
mVolumeCurves = new VolumeCurvesCollection();
AudioPolicyConfig config(mHwModules, mAvailableOutputDevices, mAvailableInputDevices,
mDefaultOutputDevice, speakerDrcEnabled,
static_cast<VolumeCurvesCollection *>(mVolumeCurves));
if (deserializeAudioPolicyXmlConfig(config) != NO_ERROR) {
#else
mVolumeCurves = new StreamDescriptorCollection();
AudioPolicyConfig config(mHwModules, mAvailableOutputDevices, mAvailableInputDevices,
mDefaultOutputDevice, speakerDrcEnabled);
if ((ConfigParsingUtils::loadConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE, config) != NO_ERROR) &&
(ConfigParsingUtils::loadConfig(AUDIO_POLICY_CONFIG_FILE, config) != NO_ERROR)) {
This again involves USE_XML_AUDIO_POLICY_CONF. We already know its value is 1, but let's look at the else branch first: mVolumeCurves = new StreamDescriptorCollection();
StreamDescriptorCollection is defined in StreamDescriptor.h and StreamDescriptor.cpp.
So let's look at StreamDescriptor.h and StreamDescriptor.cpp:
class StreamDescriptorCollection : public DefaultKeyedVector<audio_stream_type_t, StreamDescriptor>,
public IVolumeCurvesCollection
StreamDescriptorCollection::StreamDescriptorCollection()
{
for (size_t stream = 0 ; stream < AUDIO_STREAM_CNT; stream++) {
add(static_cast<audio_stream_type_t>(stream), StreamDescriptor());
}
}
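Note that StreamDescriptorCollection derives from IVolumeCurvesCollection, and so does the VolumeCurvesCollection we will meet later on the XML path. That shared interface is why AudioPolicyManager can hold a single mVolumeCurves pointer and call volIndexToDb on it regardless of which branch was compiled. A simplified sketch of the relationship (stand-in types, not the real AOSP declarations):

typedef int audio_stream_type_t; // stand-in for the real AOSP enum
typedef int device_category;     // stand-in for the real AOSP enum

class IVolumeCurvesCollection {
public:
    virtual ~IVolumeCurvesCollection() {}
    // convert a UI volume index into a dB attenuation for a given stream/device category
    virtual float volIndexToDb(audio_stream_type_t stream, device_category cat,
                               int indexInUi) const = 0;
};

class StreamDescriptorCollection : public IVolumeCurvesCollection { /* legacy path, curves from Gains.cpp */ };
class VolumeCurvesCollection : public IVolumeCurvesCollection { /* XML path, curves parsed from xml */ };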
So a DefaultKeyedVector is used to map each stream (i.e. each volume type) to a StreamDescriptor().
Now look at StreamDescriptor(), which is also defined in StreamDescriptor.cpp:
StreamDescriptor::StreamDescriptor()
: mIndexMin(0), mIndexMax(1), mCanBeMuted(true)
{
// Initialize the current stream's index to mIndexMax so volume isn't 0 in
// cases where the Java layer doesn't call into the audio policy service to
// set the default volume.
mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, mIndexMax);
}
This just does some simple initialization; we can ignore it for now.
Next, StreamDescriptorCollection::volIndexToDb:
float StreamDescriptorCollection::volIndexToDb(audio_stream_type_t stream, device_category category,
int indexInUi) const
{
const StreamDescriptor &streamDesc = valueAt(stream);
return Gains::volIndexToDb(streamDesc.getVolumeCurvePoint(category),
streamDesc.getVolumeIndexMin(), streamDesc.getVolumeIndexMax(),
indexInUi);
}
This calls Gains::volIndexToDb, so let's go into Gains.cpp.
//static
float Gains::volIndexToDb(const VolumeCurvePoint *curve, int indexMin, int indexMax, int indexInUi)
{
// the volume index in the UI is relative to the min and max volume indices for this stream type
int nbSteps = 1 + curve[Volume::VOLMAX].mIndex - curve[Volume::VOLMIN].mIndex;
int volIdx = (nbSteps * (indexInUi - indexMin)) / (indexMax - indexMin);
// find what part of the curve this index volume belongs to, or if it's out of bounds
int segment = 0;
if (volIdx < curve[Volume::VOLMIN].mIndex) { // out of bounds
return VOLUME_MIN_DB;
} else if (volIdx < curve[Volume::VOLKNEE1].mIndex) {
segment = 0;
} else if (volIdx < curve[Volume::VOLKNEE2].mIndex) {
segment = 1;
} else if (volIdx <= curve[Volume::VOLMAX].mIndex) {
segment = 2;
} else { // out of bounds
return 0.0f;
}
// linear interpolation in the attenuation table in dB
float decibels = curve[segment].mDBAttenuation +
((float)(volIdx - curve[segment].mIndex)) *
( (curve[segment+1].mDBAttenuation -
curve[segment].mDBAttenuation) /
((float)(curve[segment+1].mIndex -
curve[segment].mIndex)) );
ALOGVV("VOLUME vol index=[%d %d %d], dB=[%.1f %.1f %.1f]",
curve[segment].mIndex, volIdx,
curve[segment+1].mIndex,
curve[segment].mDBAttenuation,
decibels,
curve[segment+1].mDBAttenuation);
return decibels;
}
This is where the real computation happens and a decibels value is returned; it is the value we get back from our call to
float volumeDB = mVolumeCurves->volIndexToDb(stream, Volume::getDeviceCategory(device), index);
If you are interested it is worth studying how the value is computed (it is actually quite simple: the passed-in index is located on the curve and linearly interpolated between the two points on either side of it).
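To make the interpolation concrete, here is a small standalone sketch of the same calculation using the sSpeakerMediaVolumeCurve values from Gains.cpp. The 0..15 UI index range is only an assumption for the example (the real min/max indices come from the Java layer), and the out-of-range branch above VOLMAX is omitted for brevity:

#include <cstdio>

struct CurvePt { int idx; float dB; };

// {curve index in 0..100, attenuation in dB} -- values of Gains::sSpeakerMediaVolumeCurve
static const CurvePt kCurve[4] = { {1, -56.0f}, {20, -34.0f}, {60, -11.0f}, {100, 0.0f} };

static float volIndexToDb(int indexInUi, int indexMin, int indexMax) {
    int nbSteps = 1 + kCurve[3].idx - kCurve[0].idx;                  // 1 + 100 - 1 = 100
    int volIdx = (nbSteps * (indexInUi - indexMin)) / (indexMax - indexMin);
    if (volIdx < kCurve[0].idx) return -96.0f;   // below the first point: treated as silence here
    int seg = (volIdx < kCurve[1].idx) ? 0 : (volIdx < kCurve[2].idx) ? 1 : 2;
    // linear interpolation between the two curve points surrounding volIdx
    return kCurve[seg].dB + (float)(volIdx - kCurve[seg].idx) *
           (kCurve[seg + 1].dB - kCurve[seg].dB) /
           (float)(kCurve[seg + 1].idx - kCurve[seg].idx);
}

int main() {
    // music at UI index 8 of 15 on the speaker: volIdx = 100*8/15 = 53, which falls between
    // {20,-34} and {60,-11}, giving roughly -15.0 dB of attenuation
    std::printf("%.2f dB\n", volIndexToDb(8, 0, 15));
    return 0;
}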
So the result depends entirely on the values passed in; let's see what exactly gets passed.
Back to StreamDescriptorCollection::volIndexToDb:
float StreamDescriptorCollection::volIndexToDb(audio_stream_type_t stream, device_category category,
int indexInUi) const
{
const StreamDescriptor &streamDesc = valueAt(stream);
return Gains::volIndexToDb(streamDesc.getVolumeCurvePoint(category),
streamDesc.getVolumeIndexMin(), streamDesc.getVolumeIndexMax(),
indexInUi);
}
The object passed in is streamDesc, a StreamDescriptor.
That matches what we saw before: one StreamDescriptor per stream, and
streamDesc = valueAt(stream) simply fetches the StreamDescriptor that corresponds to the current stream.
In StreamDescriptor.h, the StreamDescriptor class has these members:
private:
const VolumeCurvePoint *mVolumeCurve[DEVICE_CATEGORY_CNT];
KeyedVector<audio_devices_t, int> mIndexCur; /**< current volume index per device. */
int mIndexMin; /**< min volume index. */
int mIndexMax; /**< max volume index. */
bool mCanBeMuted; /**< true is the stream can be muted. */
And the corresponding getters:
int getVolumeIndexMin() const { return mIndexMin; }
int getVolumeIndexMax() const { return mIndexMax; }
const VolumeCurvePoint *getVolumeCurvePoint(device_category deviceCategory) const
{
return mVolumeCurve[deviceCategory];
}
These getters simply return those members. Start with mVolumeCurve: its definition shows it is indexed by
deviceCategory, so the curve parameters are defined per combination of device category and stream; put simply, the same stream can have a different volume curve on different devices.
Since there is a getVolumeCurvePoint, there must also be a setVolumeCurvePoint:
void StreamDescriptor::setVolumeCurvePoint(device_category deviceCategory,
const VolumeCurvePoint *point)
{
mVolumeCurve[deviceCategory] = point;
}
mVolumeCurve is assigned here. Now, where is setVolumeCurvePoint called?
It turns out StreamDescriptorCollection::setVolumeCurvePoint calls StreamDescriptor's
setVolumeCurvePoint:
void StreamDescriptorCollection::setVolumeCurvePoint(audio_stream_type_t stream,
device_category deviceCategory,
const VolumeCurvePoint *point)
{
editValueAt(stream).setVolumeCurvePoint(deviceCategory, point);
}
So StreamDescriptorCollection calls setVolumeCurvePoint on the StreamDescriptor corresponding to the stream, forwarding the deviceCategory and the point. We still need to know where this point comes from.
Next, who calls StreamDescriptorCollection's setVolumeCurvePoint?
Searching for callers leads to StreamDescriptorCollection::initializeVolumeCurves:
void StreamDescriptorCollection::initializeVolumeCurves(bool isSpeakerDrcEnabled)
{
for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
for (int j = 0; j < DEVICE_CATEGORY_CNT; j++) {
setVolumeCurvePoint(static_cast<audio_stream_type_t>(i),
static_cast<device_category>(j),
Gains::sVolumeProfiles[i][j]);
}
}
// Check availability of DRC on speaker path: if available, override some of the speaker curves
if (isSpeakerDrcEnabled) {
setVolumeCurvePoint(AUDIO_STREAM_SYSTEM, DEVICE_CATEGORY_SPEAKER,
Gains::sDefaultSystemVolumeCurveDrc);
setVolumeCurvePoint(AUDIO_STREAM_RING, DEVICE_CATEGORY_SPEAKER,
Gains::sSpeakerSonificationVolumeCurveDrc);
setVolumeCurvePoint(AUDIO_STREAM_ALARM, DEVICE_CATEGORY_SPEAKER,
Gains::sSpeakerSonificationVolumeCurveDrc);
setVolumeCurvePoint(AUDIO_STREAM_NOTIFICATION, DEVICE_CATEGORY_SPEAKER,
Gains::sSpeakerSonificationVolumeCurveDrc);
setVolumeCurvePoint(AUDIO_STREAM_MUSIC, DEVICE_CATEGORY_SPEAKER,
Gains::sSpeakerMediaVolumeCurveDrc);
setVolumeCurvePoint(AUDIO_STREAM_ACCESSIBILITY, DEVICE_CATEGORY_SPEAKER,
Gains::sSpeakerMediaVolumeCurveDrc);
}
}
Ignore the if block for now and look at the loop:
for (int i = 0; i < AUDIO_STREAM_CNT; i++) {
for (int j = 0; j < DEVICE_CATEGORY_CNT; j++) {
setVolumeCurvePoint(static_cast<audio_stream_type_t>(i),
static_cast<device_category>(j),
Gains::sVolumeProfiles[i][j]);
}
}
This double loop passes in the curve points for every stream and device category. Where do these points come from?
In Gains.cpp:
const VolumeCurvePoint *Gains::sVolumeProfiles[AUDIO_STREAM_CNT]
[DEVICE_CATEGORY_CNT] = {
{ // AUDIO_STREAM_VOICE_CALL
Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
Gains::sDefaultMediaVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
},
{ // AUDIO_STREAM_SYSTEM
Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
},
{ // AUDIO_STREAM_RING
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_EARPIECE
Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
},
{ // AUDIO_STREAM_MUSIC
Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
Gains::sDefaultMediaVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
},
{ // AUDIO_STREAM_ALARM
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_EARPIECE
Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
},
{ // AUDIO_STREAM_NOTIFICATION
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerSonificationVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultVolumeCurve, // DEVICE_CATEGORY_EARPIECE
Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
},
{ // AUDIO_STREAM_BLUETOOTH_SCO
Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerVoiceVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultVoiceVolumeCurve, // DEVICE_CATEGORY_EARPIECE
Gains::sDefaultMediaVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
},
{ // AUDIO_STREAM_ENFORCED_AUDIBLE
Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
},
{ // AUDIO_STREAM_DTMF
Gains::sHeadsetSystemVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultSystemVolumeCurve, // DEVICE_CATEGORY_EARPIECE
Gains::sExtMediaSystemVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
},
{ // AUDIO_STREAM_TTS
// "Transmitted Through Speaker": always silent except on DEVICE_CATEGORY_SPEAKER
Gains::sSilentVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sSilentVolumeCurve, // DEVICE_CATEGORY_EARPIECE
Gains::sSilentVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
},
{ // AUDIO_STREAM_ACCESSIBILITY
Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sSpeakerMediaVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sDefaultMediaVolumeCurve, // DEVICE_CATEGORY_EARPIECE
Gains::sDefaultMediaVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
},
{ // AUDIO_STREAM_REROUTING
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
Gains::sFullScaleVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
},
{ // AUDIO_STREAM_PATCH
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_HEADSET
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_SPEAKER
Gains::sFullScaleVolumeCurve, // DEVICE_CATEGORY_EARPIECE
Gains::sFullScaleVolumeCurve // DEVICE_CATEGORY_EXT_MEDIA
},
};
Here we can see which curve is assigned to each device category and stream. The curves themselves are also defined in Gains.cpp:
// Enginedefault
const VolumeCurvePoint
Gains::sDefaultVolumeCurve[Volume::VOLCNT] = {
{1, -49.5f}, {33, -33.5f}, {66, -17.0f}, {100, 0.0f}
};
const VolumeCurvePoint
Gains::sDefaultMediaVolumeCurve[Volume::VOLCNT] = {
{1, -58.0f}, {20, -40.0f}, {60, -17.0f}, {100, 0.0f}
};
const VolumeCurvePoint
Gains::sExtMediaSystemVolumeCurve[Volume::VOLCNT] = {
{1, -58.0f}, {20, -40.0f}, {60, -21.0f}, {100, -10.0f}
};
const VolumeCurvePoint
Gains::sSpeakerMediaVolumeCurve[Volume::VOLCNT] = {
{1, -56.0f}, {20, -34.0f}, {60, -11.0f}, {100, 0.0f}
};
const VolumeCurvePoint
Gains::sSpeakerMediaVolumeCurveDrc[Volume::VOLCNT] = {
{1, -55.0f}, {20, -43.0f}, {86, -12.0f}, {100, 0.0f}
};
const VolumeCurvePoint
Gains::sSpeakerSonificationVolumeCurve[Volume::VOLCNT] = {
{1, -29.7f}, {33, -20.1f}, {66, -10.2f}, {100, 0.0f}
};
const VolumeCurvePoint
Gains::sSpeakerSonificationVolumeCurveDrc[Volume::VOLCNT] = {
{1, -35.7f}, {33, -26.1f}, {66, -13.2f}, {100, 0.0f}
};
// AUDIO_STREAM_SYSTEM, AUDIO_STREAM_ENFORCED_AUDIBLE and AUDIO_STREAM_DTMF volume tracks
// AUDIO_STREAM_RING on phones and AUDIO_STREAM_MUSIC on tablets.
// AUDIO_STREAM_DTMF tracks AUDIO_STREAM_VOICE_CALL while in call (See AudioService.java).
// The range is constrained between -24dB and -6dB over speaker and -30dB and -18dB over headset.
const VolumeCurvePoint
Gains::sDefaultSystemVolumeCurve[Volume::VOLCNT] = {
{1, -24.0f}, {33, -18.0f}, {66, -12.0f}, {100, -6.0f}
};
const VolumeCurvePoint
Gains::sDefaultSystemVolumeCurveDrc[Volume::VOLCNT] = {
{1, -34.0f}, {33, -24.0f}, {66, -15.0f}, {100, -6.0f}
};
const VolumeCurvePoint
Gains::sHeadsetSystemVolumeCurve[Volume::VOLCNT] = {
{1, -30.0f}, {33, -26.0f}, {66, -22.0f}, {100, -18.0f}
};
const VolumeCurvePoint
Gains::sDefaultVoiceVolumeCurve[Volume::VOLCNT] = {
{0, -42.0f}, {33, -28.0f}, {66, -14.0f}, {100, 0.0f}
};
const VolumeCurvePoint
Gains::sSpeakerVoiceVolumeCurve[Volume::VOLCNT] = {
{0, -24.0f}, {33, -16.0f}, {66, -8.0f}, {100, 0.0f}
};
const VolumeCurvePoint
Gains::sLinearVolumeCurve[Volume::VOLCNT] = {
{0, -96.0f}, {33, -68.0f}, {66, -34.0f}, {100, 0.0f}
};
const VolumeCurvePoint
Gains::sSilentVolumeCurve[Volume::VOLCNT] = {
{0, -96.0f}, {1, -96.0f}, {2, -96.0f}, {100, -96.0f}
};
const VolumeCurvePoint
Gains::sFullScaleVolumeCurve[Volume::VOLCNT] = {
{0, 0.0f}, {1, 0.0f}, {2, 0.0f}, {100, 0.0f}
};
Now the mystery is solved: it all comes back to the tables in Gains.cpp. So, when XML is not used, changing the data here for the stream and device you care about is enough to change the curve.
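For example, if you wanted the speaker media curve to attenuate less at low volume steps, you would only need to change the points of sSpeakerMediaVolumeCurve. The numbers below are made up purely for illustration, not a recommendation; keep the index values ascending, and the last point normally stays {100, 0.0f}:

// Gains.cpp -- illustrative edit only, values are not a recommendation
const VolumeCurvePoint
Gains::sSpeakerMediaVolumeCurve[Volume::VOLCNT] = {
    {1, -48.0f}, {20, -30.0f}, {60, -10.0f}, {100, 0.0f}   // was {1,-56} {20,-34} {60,-11} {100,0}
};

After changing the table, rebuild the audio policy library and replace it on the device; since the curves are compiled in on this path, no configuration file needs to change.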
Next, the XML approach.
Back in AudioPolicyManager.cpp:
#ifdef USE_XML_AUDIO_POLICY_CONF
mVolumeCurves = new VolumeCurvesCollection();
AudioPolicyConfig config(mHwModules, mAvailableOutputDevices, mAvailableInputDevices,
mDefaultOutputDevice, speakerDrcEnabled,
static_cast<VolumeCurvesCollection *>(mVolumeCurves));
if (deserializeAudioPolicyXmlConfig(config) != NO_ERROR) {
#else
mVolumeCurves = new StreamDescriptorCollection();
AudioPolicyConfig config(mHwModules, mAvailableOutputDevices, mAvailableInputDevices,
mDefaultOutputDevice, speakerDrcEnabled);
if ((ConfigParsingUtils::loadConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE, config) != NO_ERROR) &&
(ConfigParsingUtils::loadConfig(AUDIO_POLICY_CONFIG_FILE, config) != NO_ERROR)) {
When XML is used, mVolumeCurves = new VolumeCurvesCollection();
In VolumeCurve.h:
class VolumeCurvesCollection : public KeyedVector<audio_stream_type_t, VolumeCurvesForStream>,
public IVolumeCurvesCollection
{
public:
VolumeCurvesCollection()
{
// Create an empty collection of curves
for (ssize_t i = 0 ; i < AUDIO_STREAM_CNT; i++) {
audio_stream_type_t stream = static_cast<audio_stream_type_t>(i);
KeyedVector::add(stream, VolumeCurvesForStream());
}
}
VolumeCurvesCollection derives from KeyedVector<audio_stream_type_t, VolumeCurvesForStream>,
so again a KeyedVector is used to map each stream to a VolumeCurvesForStream.
Also in VolumeCurve.h:
class VolumeCurvesForStream : public KeyedVector<device_category, sp<VolumeCurve> >
{
public:
VolumeCurvesForStream() : mIndexMin(0), mIndexMax(1), mCanBeMuted(true)
{
mIndexCur.add(AUDIO_DEVICE_OUT_DEFAULT_FOR_VOLUME, 0);
}
VolumeCurvesForStream derives from KeyedVector<device_category, sp<VolumeCurve> >. The approach is the same as before: first look up the stream, then the device category under that stream, and finally arrive at the corresponding VolumeCurve.
Back to VolumeCurvesCollection's volIndexToDb method:
virtual float volIndexToDb(audio_stream_type_t stream, device_category cat, int indexInUi) const
{
return getCurvesFor(stream).volIndexToDb(cat, indexInUi);
}
This calls volIndexToDb on getCurvesFor(stream):
const VolumeCurvesForStream &getCurvesFor(audio_stream_type_t stream) const
{
ALOG_ASSERT(indexOfKey(stream) >= 0, "Invalid stream type for Volume Curve");
return valueFor(stream);
}
getCurvesFor(stream) returns the KeyedVector value for the stream, i.e. the VolumeCurvesForStream object corresponding to that stream.
So look at volIndexToDb inside VolumeCurvesForStream:
float volIndexToDb(device_category deviceCat, int indexInUi) const
{
return getCurvesFor(deviceCat)->volIndexToDb(indexInUi, mIndexMin, mIndexMax);
}
This calls volIndexToDb on getCurvesFor(deviceCat):
sp<VolumeCurve> getCurvesFor(device_category device) const
{
if (indexOfKey(device) < 0) {
return 0;
}
return valueFor(device);
}
getCurvesFor(deviceCat) returns a VolumeCurve object, so finally look at
VolumeCurve's volIndexToDb method.
VolumeCurve::volIndexToDb is defined in VolumeCurve.cpp:
float VolumeCurve::volIndexToDb(int indexInUi, int volIndexMin, int volIndexMax) const
{
ALOG_ASSERT(!mCurvePoints.isEmpty(), "Invalid volume curve");
size_t nbCurvePoints = mCurvePoints.size();
// the volume index in the UI is relative to the min and max volume indices for this stream
int nbSteps = 1 + mCurvePoints[nbCurvePoints - 1].mIndex - mCurvePoints[0].mIndex;
int volIdx = (nbSteps * (indexInUi - volIndexMin)) / (volIndexMax - volIndexMin);
// Where would this volume index been inserted in the curve point
size_t indexInUiPosition = mCurvePoints.orderOf(CurvePoint(volIdx, 0));
if (indexInUiPosition >= nbCurvePoints) {
//use last point of table
return mCurvePoints[nbCurvePoints - 1].mAttenuationInMb / 100.0f;
}
if (indexInUiPosition == 0) {
if (indexInUiPosition != mCurvePoints[0].mIndex) {
return VOLUME_MIN_DB; // out of bounds
}
return mCurvePoints[0].mAttenuationInMb / 100.0f;
}
// linear interpolation in the attenuation table in dB
float decibels = (mCurvePoints[indexInUiPosition - 1].mAttenuationInMb / 100.0f) +
((float)(volIdx - mCurvePoints[indexInUiPosition - 1].mIndex)) *
( ((mCurvePoints[indexInUiPosition].mAttenuationInMb / 100.0f) -
(mCurvePoints[indexInUiPosition - 1].mAttenuationInMb / 100.0f)) /
((float)(mCurvePoints[indexInUiPosition].mIndex -
mCurvePoints[indexInUiPosition - 1].mIndex)) );
ALOGV("VOLUME mDeviceCategory %d, mStreamType %d vol index=[%d %d %d], dB=[%.1f %.1f %.1f]",
mDeviceCategory, mStreamType,
mCurvePoints[indexInUiPosition - 1].mIndex, volIdx,
mCurvePoints[indexInUiPosition].mIndex,
((float)mCurvePoints[indexInUiPosition - 1].mAttenuationInMb / 100.0f), decibels,
((float)mCurvePoints[indexInUiPosition].mAttenuationInMb / 100.0f));
return decibels;
}
Again the dB value is computed from the curve and returned as decibels; note that the XML points store attenuation in millibels (for example -4950 means -49.5 dB), hence the division by 100.0f.
Following the same reasoning as before, there must be a place that parses the xml and stores the volume-curve points into mCurvePoints, which is a member of the VolumeCurve class:
class VolumeCurve : public RefBase
{
public:
VolumeCurve(device_category device, audio_stream_type_t stream) :
mDeviceCategory(device), mStreamType(stream) {}
device_category getDeviceCategory() const { return mDeviceCategory; }
audio_stream_type_t getStreamType() const { return mStreamType; }
void add(const CurvePoint &point) { mCurvePoints.add(point); }
float volIndexToDb(int indexInUi, int volIndexMin, int volIndexMax) const;
void dump(int fd) const;
private:
SortedVector<CurvePoint> mCurvePoints;
device_category mDeviceCategory;
audio_stream_type_t mStreamType;
};
Now go back to
AudioPolicyManager.cpp
#ifdef USE_XML_AUDIO_POLICY_CONF
mVolumeCurves = new VolumeCurvesCollection();
AudioPolicyConfig config(mHwModules, mAvailableOutputDevices, mAvailableInputDevices,
mDefaultOutputDevice, speakerDrcEnabled,
static_cast<VolumeCurvesCollection *>(mVolumeCurves));
if (deserializeAudioPolicyXmlConfig(config) != NO_ERROR) {
#else
mVolumeCurves = new StreamDescriptorCollection();
AudioPolicyConfig config(mHwModules, mAvailableOutputDevices, mAvailableInputDevices,
mDefaultOutputDevice, speakerDrcEnabled);
if ((ConfigParsingUtils::loadConfig(AUDIO_POLICY_VENDOR_CONFIG_FILE, config) != NO_ERROR) &&
(ConfigParsingUtils::loadConfig(AUDIO_POLICY_CONFIG_FILE, config) != NO_ERROR)) {
Note this part:
AudioPolicyConfig config(mHwModules, mAvailableOutputDevices, mAvailableInputDevices,
mDefaultOutputDevice, speakerDrcEnabled,
static_cast<VolumeCurvesCollection *>(mVolumeCurves));
The AudioPolicyConfig constructor is handed mVolumeCurves, so whatever the XML parsing later fills into config will end up in mVolumeCurves. Let's trace further.
Still in AudioPolicyManager.cpp:
#ifdef USE_XML_AUDIO_POLICY_CONF
// Treblized audio policy xml config will be located in /odm/etc or /vendor/etc.
static const char *kConfigLocationList[] =
{"/odm/etc", "/vendor/etc", "/system/etc"};
static const int kConfigLocationListSize =
(sizeof(kConfigLocationList) / sizeof(kConfigLocationList[0]));
static status_t deserializeAudioPolicyXmlConfig(AudioPolicyConfig &config) {
char audioPolicyXmlConfigFile[AUDIO_POLICY_XML_CONFIG_FILE_PATH_MAX_LENGTH];
status_t ret;
for (int i = 0; i < kConfigLocationListSize; i++) {
PolicySerializer serializer;
snprintf(audioPolicyXmlConfigFile,
sizeof(audioPolicyXmlConfigFile),
"%s/%s",
kConfigLocationList[i],
AUDIO_POLICY_XML_CONFIG_FILE_NAME);
ret = serializer.deserialize(audioPolicyXmlConfigFile, config);
if (ret == NO_ERROR) {
break;
}
}
return ret;
}
#endif
Reading this code carefully, we see a loop that joins each of the three directories "/odm/etc", "/vendor/etc" and "/system/etc" with AUDIO_POLICY_XML_CONFIG_FILE_NAME.
Looking at the definition of AUDIO_POLICY_XML_CONFIG_FILE_NAME:
#define AUDIO_POLICY_XML_CONFIG_FILE_NAME "audio_policy_configuration.xml"
So serializer.deserialize(audioPolicyXmlConfigFile, config) parses
"/odm/etc/audio_policy_configuration.xml",
"/vendor/etc/audio_policy_configuration.xml",
"/system/etc/audio_policy_configuration.xml"
in that order, stopping at the first one that parses. In the source tree there is only one copy of this xml; it is copied into those directories at build time. Let's open it:
audio_policy_configuration.xml
<audioPolicyConfiguration version="1.0" xmlns:xi="http://www.w3.org/2001/XInclude">
<globalConfiguration speaker_drc_enabled="true"/>
<modules>
<module name="primary" halVersion="3.0">
<attachedDevices>
<item>Speaker</item>
<item>Built-In Mic</item>
<item>Built-In Back Mic</item>
</attachedDevices>
<defaultOutputDevice>Speaker</defaultOutputDevice>
<mixPorts>
<mixPort name="primary output" role="source" flags="AUDIO_OUTPUT_FLAG_PRIMARY">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
</mixPort>
<mixPort name="deep_buffer" role="source"
flags="AUDIO_OUTPUT_FLAG_DEEP_BUFFER">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
</mixPort>
<mixPort name="compressed_offload" role="source"
flags="AUDIO_OUTPUT_FLAG_DIRECT|AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD|AUDIO_OUTPUT_FLAG_NON_BLOCKING">
<profile name="" format="AUDIO_FORMAT_MP3"
samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
<profile name="" format="AUDIO_FORMAT_AAC"
samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
<profile name="" format="AUDIO_FORMAT_AAC_LC"
samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
channelMasks="AUDIO_CHANNEL_OUT_STEREO,AUDIO_CHANNEL_OUT_MONO"/>
</mixPort>
<mixPort name="voice_tx" role="source">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
</mixPort>
<mixPort name="primary input" role="sink">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
</mixPort>
<mixPort name="voice_rx" role="sink">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
</mixPort>
</mixPorts>
<devicePorts>
<devicePort tagName="Earpiece" type="AUDIO_DEVICE_OUT_EARPIECE" role="sink">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="48000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
</devicePort>
<devicePort tagName="Speaker" role="sink" type="AUDIO_DEVICE_OUT_SPEAKER" address="">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
<gains>
<gain name="gain_1" mode="AUDIO_GAIN_MODE_JOINT"
minValueMB="-8400"
maxValueMB="4000"
defaultValueMB="0"
stepValueMB="100"/>
</gains>
</devicePort>
<devicePort tagName="Wired Headset" type="AUDIO_DEVICE_OUT_WIRED_HEADSET" role="sink">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
</devicePort>
<devicePort tagName="Wired Headphones" type="AUDIO_DEVICE_OUT_WIRED_HEADPHONE" role="sink">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
</devicePort>
<devicePort tagName="BT SCO" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO" role="sink">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
</devicePort>
<devicePort tagName="BT SCO Headset" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_HEADSET" role="sink">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
</devicePort>
<devicePort tagName="BT SCO Car Kit" type="AUDIO_DEVICE_OUT_BLUETOOTH_SCO_CARKIT" role="sink">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
</devicePort>
<devicePort tagName="Telephony Tx" type="AUDIO_DEVICE_OUT_TELEPHONY_TX" role="sink">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_OUT_MONO"/>
</devicePort>
<devicePort tagName="Built-In Mic" type="AUDIO_DEVICE_IN_BUILTIN_MIC" role="source">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
</devicePort>
<devicePort tagName="Built-In Back Mic" type="AUDIO_DEVICE_IN_BACK_MIC" role="source">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
</devicePort>
<devicePort tagName="Wired Headset Mic" type="AUDIO_DEVICE_IN_WIRED_HEADSET" role="source">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="8000,11025,12000,16000,22050,24000,32000,44100,48000"
channelMasks="AUDIO_CHANNEL_IN_MONO,AUDIO_CHANNEL_IN_STEREO,AUDIO_CHANNEL_IN_FRONT_BACK"/>
</devicePort>
<devicePort tagName="BT SCO Headset Mic" type="AUDIO_DEVICE_IN_BLUETOOTH_SCO_HEADSET" role="source">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
</devicePort>
<devicePort tagName="Telephony Rx" type="AUDIO_DEVICE_IN_TELEPHONY_RX" role="source">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="8000,16000" channelMasks="AUDIO_CHANNEL_IN_MONO"/>
</devicePort>
</devicePorts>
<routes>
<route type="mix" sink="Earpiece"
sources="primary output,deep_buffer,BT SCO Headset Mic"/>
<route type="mix" sink="Speaker"
sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
<route type="mix" sink="Wired Headset"
sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
<route type="mix" sink="Wired Headphones"
sources="primary output,deep_buffer,compressed_offload,BT SCO Headset Mic,Telephony Rx"/>
<route type="mix" sink="Telephony Tx"
sources="voice_tx"/>
<route type="mix" sink="primary input"
sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic"/>
<route type="mix" sink="Telephony Tx"
sources="Built-In Mic,Built-In Back Mic,Wired Headset Mic,BT SCO Headset Mic"/>
<route type="mix" sink="voice_rx"
sources="Telephony Rx"/>
</routes>
</module>
<module description="HDMI Audio HAL" name="hdmi" version="2.0">
<mixPorts>
<mixPort name="hdmi output" role="source">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT" samplingRates="48000"/>
</mixPort>
</mixPorts>
<devicePorts>
<devicePort tagName="HDMI Out" type="AUDIO_DEVICE_OUT_AUX_DIGITAL" role="sink">
<profile name="" format="AUDIO_FORMAT_PCM_16_BIT"
samplingRates="48000" channelMasks="AUDIO_CHANNEL_OUT_STEREO"/>
</devicePort>
</devicePorts>
<routes>
<route type="mix" sink="HDMI Out"
sources="hdmi output"/>
</routes>
</module>
<xi:include href="a2dp_audio_policy_configuration.xml"/>
<xi:include href="usb_audio_policy_configuration.xml"/>
<xi:include href="r_submix_audio_policy_configuration.xml"/>
</modules>
<xi:include href="audio_policy_volumes.xml"/>
<xi:include href="default_volume_tables.xml"/>
</audioPolicyConfiguration>
This file defines quite a few things, but not the curve data we are after.
Note these two lines:
<xi:include href="audio_policy_volumes.xml"/>
<xi:include href="default_volume_tables.xml"/>
They include two more xml files.
audio_policy_volumes.xml
<volumes>
<volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_HEADSET">
<point>0,-4200</point>
<point>33,-2800</point>
<point>66,-1400</point>
<point>100,0</point>
</volume>
<volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_SPEAKER">
<point>0,-2400</point>
<point>33,-1600</point>
<point>66,-800</point>
<point>100,0</point>
</volume>
<volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_EARPIECE">
<point>0,-2400</point>
<point>33,-1600</point>
<point>66,-800</point>
<point>100,0</point>
</volume>
<volume stream="AUDIO_STREAM_VOICE_CALL" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_HEADSET">
<point>1,-3000</point>
<point>33,-2600</point>
<point>66,-2200</point>
<point>100,-1800</point>
</volume>
<volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_SPEAKER"
ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_EARPIECE"
ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_SYSTEM" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_SPEAKER">
<point>1,-2970</point>
<point>33,-2010</point>
<point>66,-1020</point>
<point>100,0</point>
</volume>
<volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_EARPIECE"
ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_RING" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_SPEAKER"
ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_EARPIECE"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_MUSIC" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_SPEAKER">
<point>1,-2970</point>
<point>33,-2010</point>
<point>66,-1020</point>
<point>100,0</point>
</volume>
<volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_EARPIECE"
ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ALARM" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_SPEAKER">
<point>1,-2970</point>
<point>33,-2010</point>
<point>66,-1020</point>
<point>100,0</point>
</volume>
<volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_EARPIECE"
ref="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_NOTIFICATION" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_BLUETOOTH_SCO" deviceCategory="DEVICE_CATEGORY_HEADSET">
<point>0,-4200</point>
<point>33,-2800</point>
<point>66,-1400</point>
<point>100,0</point>
</volume>
<volume stream="AUDIO_STREAM_BLUETOOTH_SCO" deviceCategory="DEVICE_CATEGORY_SPEAKER">
<point>0,-2400</point>
<point>33,-1600</point>
<point>66,-800</point>
<point>100,0</point>
</volume>
<volume stream="AUDIO_STREAM_BLUETOOTH_SCO" deviceCategory="DEVICE_CATEGORY_EARPIECE">
<point>0,-4200</point>
<point>33,-2800</point>
<point>66,-1400</point>
<point>100,0</point>
</volume>
<volume stream="AUDIO_STREAM_BLUETOOTH_SCO" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ENFORCED_AUDIBLE" deviceCategory="DEVICE_CATEGORY_HEADSET">
<point>1,-3000</point>
<point>33,-2600</point>
<point>66,-2200</point>
<point>100,-1800</point>
</volume>
<volume stream="AUDIO_STREAM_ENFORCED_AUDIBLE" deviceCategory="DEVICE_CATEGORY_SPEAKER"
ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ENFORCED_AUDIBLE" deviceCategory="DEVICE_CATEGORY_EARPIECE"
ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ENFORCED_AUDIBLE" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_DTMF" deviceCategory="DEVICE_CATEGORY_HEADSET">
<point>1,-3000</point>
<point>33,-2600</point>
<point>66,-2200</point>
<point>100,-1800</point>
</volume>
<volume stream="AUDIO_STREAM_DTMF" deviceCategory="DEVICE_CATEGORY_SPEAKER"
ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_DTMF" deviceCategory="DEVICE_CATEGORY_EARPIECE"
ref="DEFAULT_SYSTEM_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_DTMF" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="SILENT_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_SPEAKER"
ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_EARPIECE"
ref="SILENT_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_TTS" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="SILENT_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_SPEAKER"
ref="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_EARPIECE"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_ACCESSIBILITY" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="DEFAULT_MEDIA_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_SPEAKER"
ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_EARPIECE"
ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_REROUTING" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_HEADSET"
ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_SPEAKER"
ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_EARPIECE"
ref="FULL_SCALE_VOLUME_CURVE"/>
<volume stream="AUDIO_STREAM_PATCH" deviceCategory="DEVICE_CATEGORY_EXT_MEDIA"
ref="FULL_SCALE_VOLUME_CURVE"/>
</volumes>
default_volume_tables.xml
<volumes>
<reference name="FULL_SCALE_VOLUME_CURVE">
<point>0,0</point>
<point>100,0</point>
</reference>
<reference name="SILENT_VOLUME_CURVE">
<point>0,-9600</point>
<point>100,-9600</point>
</reference>
<reference name="DEFAULT_SYSTEM_VOLUME_CURVE">
<point>1,-2400</point>
<point>33,-1800</point>
<point>66,-1200</point>
<point>100,-600</point>
</reference>
<reference name="DEFAULT_MEDIA_VOLUME_CURVE">
<point>1,-5800</point>
<point>20,-2600</point>
<point>60,-900</point>
<point>100,0</point>
</reference>
<reference name="DEFAULT_DEVICE_CATEGORY_HEADSET_VOLUME_CURVE">
<point>1,-4950</point>
<point>33,-3350</point>
<point>66,-1700</point>
<point>100,0</point>
</reference>
<reference name="DEFAULT_DEVICE_CATEGORY_SPEAKER_VOLUME_CURVE">
<point>1,-5800</point>
<point>20,-4000</point>
<point>60,-1700</point>
<point>100,0</point>
</reference>
<reference name="DEFAULT_DEVICE_CATEGORY_EARPIECE_VOLUME_CURVE">
<point>1,-4950</point>
<point>33,-3350</point>
<point>66,-1700</point>
<point>100,0</point>
</reference>
<reference name="DEFAULT_DEVICE_CATEGORY_EXT_MEDIA_VOLUME_CURVE">
<point>1,-5800</point>
<point>20,-4000</point>
<point>60,-2100</point>
<point>100,-1000</point>
</reference>
</volumes>
There they are: these values are exactly the ones we saw in Gains.cpp. So with the XML configuration, this is the place to modify the curves.
For the sake of learning, let's still look at how the xml actually gets parsed.
PolicySerializer::deserialize is defined in Serializer.cpp:
status_t PolicySerializer::deserialize(const char *configFile, AudioPolicyConfig &config)
{
ALOGI("%s: %s document. HXA", __FUNCTION__, configFile);
xmlDocPtr doc;
doc = xmlParseFile(configFile);
if (doc == NULL) {
ALOGE("%s: Could not parse %s document.", __FUNCTION__, configFile);
return BAD_VALUE;
}
xmlNodePtr cur = xmlDocGetRootElement(doc);
if (cur == NULL) {
ALOGE("%s: Could not parse %s document: empty.", __FUNCTION__, configFile);
xmlFreeDoc(doc);
return BAD_VALUE;
}
if (xmlXIncludeProcess(doc) < 0) {
ALOGE("%s: libxml failed to resolve XIncludes on %s document.", __FUNCTION__, configFile);
}
if (xmlStrcmp(cur->name, (const xmlChar *) mRootElementName.c_str())) {
ALOGE("%s: No %s root element found in xml data %s.", __FUNCTION__, mRootElementName.c_str(),
(const char *)cur->name);
xmlFreeDoc(doc);
return BAD_VALUE;
}
string version = getXmlAttribute(cur, versionAttribute);
if (version.empty()) {
ALOGE("%s: No version found in root node %s", __FUNCTION__, mRootElementName.c_str());
return BAD_VALUE;
}
if (version != mVersion) {
ALOGE("%s: Version does not match; expect %s got %s", __FUNCTION__, mVersion.c_str(),
version.c_str());
return BAD_VALUE;
}
// Lets deserialize children
// Modules
ModuleTraits::Collection modules;
deserializeCollection<ModuleTraits>(doc, cur, modules, &config);
config.setHwModules(modules);
// deserialize volume section
VolumeTraits::Collection volumes;
deserializeCollection<VolumeTraits>(doc, cur, volumes, &config);
config.setVolumes(volumes);
// Global Configuration
GlobalConfigTraits::deserialize(cur, config);
xmlFreeDoc(doc);
return android::OK;
}
This code uses the libxml2 APIs pulled in through
#include <libxml/parser.h>
#include <libxml/xinclude.h>
to parse the xml. I will not go into the parsing details; let's only look at the part we care about:
// deserialize volume section
VolumeTraits::Collection volumes;
deserializeCollection<VolumeTraits>(doc, cur, volumes, &config);
config.setVolumes(volumes);
This calls
config.setVolumes(volumes)
Following it:
void setVolumes(const VolumeCurvesCollection &volumes)
{
if (mVolumeCurves != nullptr) {
*mVolumeCurves = volumes;
}
}
So this is where mVolumeCurves gets its values.
Next look at
VolumeTraits::Collection volumes;
which declares a volumes object of type VolumeTraits::Collection.
Tracing that type into Serializer.h, inside
struct VolumeTraits
there is this typedef:
typedef VolumeCurvesCollection Collection;
So volumes is really a VolumeCurvesCollection object.
Now it all becomes clear!
In these lines:
// deserialize volume section
VolumeTraits::Collection volumes;
deserializeCollection<VolumeTraits>(doc, cur, volumes, &config);
config.setVolumes(volumes);
a VolumeCurvesCollection object named volumes is declared first;
deserializeCollection then parses the xml and stores the values into volumes;
finally config.setVolumes stores volumes into mVolumeCurves, which is also a VolumeCurvesCollection.
So when AudioPolicyManager.cpp later calls
volumeDB = mVolumeCurves->volIndexToDb(stream, Volume::getDeviceCategory(device), index);
the index is converted to a dB value using the volume-curve points stored in mVolumeCurves.
At this point the whole chain of logic is essentially complete.
To learn a little more, let's also step into deserializeCollection:
template <class Trait>
static status_t deserializeCollection(_xmlDoc *doc, const _xmlNode *cur,
typename Trait::Collection &collection,
typename Trait::PtrSerializingCtx serializingContext)
{
const xmlNode *root = cur->xmlChildrenNode;
while (root != NULL) {
if (xmlStrcmp(root->name, (const xmlChar *)Trait::collectionTag) &&
xmlStrcmp(root->name, (const xmlChar *)Trait::tag)) {
root = root->next;
continue;
}
const xmlNode *child = root;
if (!xmlStrcmp(child->name, (const xmlChar *)Trait::collectionTag)) {
child = child->xmlChildrenNode;
}
while (child != NULL) {
if (!xmlStrcmp(child->name, (const xmlChar *)Trait::tag)) {
typename Trait::PtrElement element;
status_t status = Trait::deserialize(doc, child, element, serializingContext);
if (status != NO_ERROR) {
return status;
}
if (collection.add(element) < 0) {
ALOGE("%s: could not add element to %s collection", __FUNCTION__,
Trait::collectionTag);
}
}
child = child->next;
}
if (!xmlStrcmp(root->name, (const xmlChar *)Trait::tag)) {
return NO_ERROR;
}
root = root->next;
}
return NO_ERROR;
}
A template is used for the parsing; in our case the Trait parameter is VolumeTraits:
struct VolumeTraits
{
static const char *const tag;
static const char *const collectionTag;
static const char *const volumePointTag;
struct Attributes
{
static const char stream[];
static const char deviceCategory[];
static const char reference[];
};
typedef VolumeCurve Element;
typedef sp<VolumeCurve> PtrElement;
typedef VolumeCurvesCollection Collection;
typedef void *PtrSerializingCtx;
static status_t deserialize(_xmlDoc *doc, const _xmlNode *root, PtrElement &element,
PtrSerializingCtx serializingContext);
// No Child
};
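If the template indirection feels confusing, here is a generic sketch of the same traits pattern with hypothetical names: the trait struct only supplies typedefs and a static deserialize function, and deserializeCollection<SomeTraits> picks everything up from those typedefs, exactly as deserializeCollection<VolumeTraits> does with VolumeCurve and VolumeCurvesCollection:

#include <cstdio>
#include <vector>

struct ExampleTraits {
    typedef int Element;                  // what one parsed node becomes
    typedef std::vector<int> Collection;  // what the whole section becomes
    static bool deserialize(const char *node, Element &out) {  // parse a single node
        return std::sscanf(node, "%d", &out) == 1;
    }
};

template <class Trait>
static void deserializeCollection(const char *const *nodes, int count,
                                  typename Trait::Collection &collection) {
    for (int i = 0; i < count; i++) {
        typename Trait::Element element;
        if (Trait::deserialize(nodes[i], element)) {
            collection.push_back(element);   // the real code calls collection.add(element)
        }
    }
}

int main() {
    const char *nodes[] = { "1", "33", "66", "100" };
    ExampleTraits::Collection values;
    deserializeCollection<ExampleTraits>(nodes, 4, values);
    std::printf("parsed %zu values\n", values.size());
    return 0;
}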
deserializeCollection mainly compares node names against collectionTag and tag to locate the elements.
The tag values are also defined in Serializer.cpp:
const char *const VolumeTraits::tag = "volume";
const char *const VolumeTraits::collectionTag = "volumes";
const char *const VolumeTraits::volumePointTag = "point";
const char VolumeTraits::Attributes::stream[] = "stream";
const char VolumeTraits::Attributes::deviceCategory[] = "deviceCategory";
const char VolumeTraits::Attributes::reference[] = "ref";
So this is what matches the volumes, volume and point tags in the two xml files above.
For each matching node it calls a further function to continue parsing:
typename Trait::PtrElement element;
status_t status = Trait::deserialize(doc, child, element, serializingContext);
With Trait = VolumeTraits, this resolves to VolumeTraits::deserialize:
status_t VolumeTraits::deserialize(_xmlDoc *doc, const _xmlNode *root, PtrElement &element,
PtrSerializingCtx /*serializingContext*/)
{
string streamTypeLiteral = getXmlAttribute(root, Attributes::stream);
if (streamTypeLiteral.empty()) {
ALOGE("%s: No %s found", __FUNCTION__, Attributes::stream);
return BAD_VALUE;
}
audio_stream_type_t streamType;
if (!StreamTypeConverter::fromString(streamTypeLiteral, streamType)) {
ALOGE("%s: Invalid %s", __FUNCTION__, Attributes::stream);
return BAD_VALUE;
}
string deviceCategoryLiteral = getXmlAttribute(root, Attributes::deviceCategory);
if (deviceCategoryLiteral.empty()) {
ALOGE("%s: No %s found", __FUNCTION__, Attributes::deviceCategory);
return BAD_VALUE;
}
device_category deviceCategory;
if (!DeviceCategoryConverter::fromString(deviceCategoryLiteral, deviceCategory)) {
ALOGE("%s: Invalid %s=%s", __FUNCTION__, Attributes::deviceCategory,
deviceCategoryLiteral.c_str());
return BAD_VALUE;
}
string referenceName = getXmlAttribute(root, Attributes::reference);
const _xmlNode *ref = NULL;
if (!referenceName.empty()) {
getReference(root->parent, ref, referenceName);
if (ref == NULL) {
ALOGE("%s: No reference Ptr found for %s", __FUNCTION__, referenceName.c_str());
return BAD_VALUE;
}
}
element = new Element(deviceCategory, streamType);
const xmlNode *child = referenceName.empty() ? root->xmlChildrenNode : ref->xmlChildrenNode;
while (child != NULL) {
if (!xmlStrcmp(child->name, (const xmlChar *)volumePointTag)) {
xmlChar *pointDefinition = xmlNodeListGetString(doc, child->xmlChildrenNode, 1);;
if (pointDefinition == NULL) {
return BAD_VALUE;
}
ALOGV("%s: %s=%s", __FUNCTION__, tag, (const char*)pointDefinition);
Vector<int32_t> point;
collectionFromString<DefaultTraits<int32_t> >((const char*)pointDefinition, point, ",");
if (point.size() != 2) {
ALOGE("%s: Invalid %s: %s", __FUNCTION__, volumePointTag,
(const char*)pointDefinition);
return BAD_VALUE;
}
element->add(CurvePoint(point[0], point[1]));
xmlFree(pointDefinition);
}
child = child->next;
}
return NO_ERROR;
}
Note this line:
element->add(CurvePoint(point[0], point[1]));
Here a CurvePoint is added, which looks very familiar by now.
What is element? Its parameter type is
PtrElement &element,
and following the definition of PtrElement brings us back to the struct we have already seen:
struct VolumeTraits
{
static const char *const tag;
static const char *const collectionTag;
static const char *const volumePointTag;
struct Attributes
{
static const char stream[];
static const char deviceCategory[];
static const char reference[];
};
typedef VolumeCurve Element;
typedef sp<VolumeCurve> PtrElement;
typedef VolumeCurvesCollection Collection;
typedef void *PtrSerializingCtx;
static status_t deserialize(_xmlDoc *doc, const _xmlNode *root, PtrElement &element,
PtrSerializingCtx serializingContext);
// No Child
};
So element is (a strong pointer to) a VolumeCurve object, and add is the VolumeCurve::add we saw earlier.
Everything finally connects.
Let's recap!
1. First, in AudioPolicyManager.cpp, deserializeAudioPolicyXmlConfig is called to parse the contents of audio_policy_configuration.xml into the AudioPolicyConfig &config object, whose members are:
private:
HwModuleCollection &mHwModules; /**< Collection of Module, with Profiles, i.e. Mix Ports. */
DeviceVector &mAvailableOutputDevices;
DeviceVector &mAvailableInputDevices;
sp<DeviceDescriptor> &mDefaultOutputDevices;
VolumeCurvesCollection *mVolumeCurves;
bool &mIsSpeakerDrcEnabled;
};
The VolumeCurvesCollection member is the one we need.
2. serializer.deserialize is called. In Serializer.cpp, the function
status_t PolicySerializer::deserialize(const char *configFile, AudioPolicyConfig &config) contains the following lines, which parse the volume-related content and store it into config via setVolumes:
VolumeTraits::Collection volumes;
deserializeCollection<VolumeTraits>(doc, cur, volumes, &config);
config.setVolumes(volumes);
Here VolumeTraits::Collection volumes is simply a VolumeCurvesCollection object: the VolumeCurvesCollection data is first extracted from the xml into volumes and then handed to config.
What makes this code feel roundabout is the use of templates; once the template mechanics are understood, the rest is straightforward.
3. This calls
deserializeCollection(_xmlDoc *doc, const _xmlNode *cur,
typename Trait::Collection &collection,
typename Trait::PtrSerializingCtx serializingContext)
4. Inside the function from step 3, the following is called:
Trait::deserialize(doc, child, element, serializingContext);
This again goes through the template, and what actually gets invoked is
VolumeTraits::deserialize(_xmlDoc *doc, const _xmlNode *root, PtrElement &element,
PtrSerializingCtx /*serializingContext*/)
Inside that function, each point read from the tags is stored with
element->add(CurvePoint(point[0], point[1]));
into element. This element is of type PtrElement, which VolumeTraits defines as
typedef sp<VolumeCurve> PtrElement;
so it is really a VolumeCurve.
5. Back in the deserializeCollection function of step 3:
deserializeCollection(_xmlDoc *doc, const _xmlNode *cur,
typename Trait::Collection &collection,
typename Trait::PtrSerializingCtx serializingContext)
the loop uses
if (collection.add(element) < 0) {
ALOGE("%s: could not add element to %s collection", __FUNCTION__,
Trait::collectionTag);
}
to store each element into the collection. Then, back in the step-2 code,
VolumeTraits::Collection volumes;
deserializeCollection<VolumeTraits>(doc, cur, volumes, &config);
config.setVolumes(volumes);
the volumes object (which is the collection parameter inside deserializeCollection; match the names in the function signature to keep the objects straight) is handed to config.
At this point config holds the volume curve for every stream and device combination, and the computation code we located earlier can use them directly.