Hal层分析主要涉及的接口文件
DeviceHalInterface.h
AudioHwDevice.h
其中DeviceHalInterface.h是链接Hal层的接口, AudioHwDevice.h是对hw dev的包装,下面看下这两个接口文件中相关方法的作用
[–>DeviceHalInterface.h]
namespace android {
//输入流Hal接口
class StreamInHalInterface;
//输出流Hal接口
class StreamOutHalInterface;
//设备操作接口
// Abstraction over one loaded audio HAL device ("hw device"). It is the
// framework-side entry point for opening input/output streams and for
// setting device-global audio state (volumes, mode, parameters, patches).
class DeviceHalInterface : public RefBase
{
public:
// Sets the value of 'devices' to a bitmask of 1 or more values of audio_devices_t.
virtual status_t getSupportedDevices(uint32_t *devices) = 0;
// Checks whether the hardware initialized successfully.
virtual status_t initCheck() = 0;
// Sets the voice-call volume; range is 0.0 to 1.0.
// Set the audio volume of a voice call. Range is between 0.0 and 1.0.
virtual status_t setVoiceVolume(float volume) = 0;
// Sets the volume of all audio stream types except voice call, range 0.0-1.0.
// If the hardware does not support it, the software mixer performs the scaling.
virtual status_t setMasterVolume(float volume) = 0;
// Gets the device master volume.
virtual status_t getMasterVolume(float *volume) = 0;
// Sets the audio mode: NORMAL for regular use, RINGTONE while an incoming
// call is ringing (ringtone is audible), IN_CALL during a voice call
// (call audio is audible).
virtual status_t setMode(audio_mode_t mode) = 0;
// Microphone mute control.
virtual status_t setMicMute(bool state) = 0;
virtual status_t getMicMute(bool *state) = 0;
......
// Sets global audio parameters as key/value pairs.
virtual status_t setParameters(const String8& kvPairs) = 0;
// Gets global audio parameters.
virtual status_t getParameters(const String8& keys, String8 *values) = 0;
// Returns the audio input buffer size for the given parameters; a returned
// size of 0 means the HAL does not support one of the parameter values.
// Returns audio input buffer size according to parameters passed.
virtual status_t getInputBufferSize(const struct audio_config *config,
size_t *size) = 0;
// Creates an audio output stream object (conceptually, opens the audio output
// device); AudioFlinger writes playback data through it. The pointer args
// return the format, channel mask and sample rate the stream supports.
// The stream is closed by releasing all references to the returned object.
virtual status_t openOutputStream(
audio_io_handle_t handle,
audio_devices_t devices,
audio_output_flags_t flags,
struct audio_config *config,
const char *address,
sp<StreamOutHalInterface> *outStream) = 0;
// Creates an audio input stream object (conceptually, opens the audio input
// device); AudioFlinger reads capture data from it.
virtual status_t openInputStream(
audio_io_handle_t handle,
audio_devices_t devices,
struct audio_config *config,
audio_input_flags_t flags,
const char *address,
audio_source_t source,
sp<StreamInHalInterface> *inStream) = 0;
// Returns whether createAudioPatch and releaseAudioPatch operations are supported.
virtual status_t supportsAudioPatches(bool *supportsPatches) = 0;
// An AudioPatch describes an end-to-end connection between audio sources and
// sinks. A source can be a real input device such as the MIC, or an
// already-mixed audio stream; a sink is an output such as speaker or headset.
virtual status_t createAudioPatch(
unsigned int num_sources,
const struct audio_port_config *sources,
unsigned int num_sinks,
const struct audio_port_config *sinks,
audio_patch_handle_t *patch) = 0;
......
// Applies a configuration to an audio port.
virtual status_t setAudioPortConfig(const struct audio_port_config *config) = 0;
// Gets the list of microphones.
virtual status_t getMicrophones(std::vector<media::MicrophoneInfo> *microphones) = 0;
virtual status_t dump(int fd) = 0;
protected:
// Subclasses can not be constructed directly by clients.
DeviceHalInterface() {}
// The destructor automatically closes the device.
virtual ~DeviceHalInterface() {}
};
} // namespace android
#endif // ANDROID_HARDWARE_DEVICE_HAL_INTERFACE_H
[–>AudioHwDevice.h]
namespace android {
//音频输出流
class AudioStreamOut;
// Wrapper around one loaded audio HAL module: pairs the DeviceHalInterface
// with the module's handle, name, and capability flags queried at load time.
class AudioHwDevice {
public:
    enum Flags {
        AHWD_CAN_SET_MASTER_VOLUME = 0x1,
        AHWD_CAN_SET_MASTER_MUTE = 0x2,
    };

    AudioHwDevice(audio_module_handle_t handle,
            const char *moduleName,
            sp<DeviceHalInterface> hwDevice,
            Flags flags)
        : mHandle(handle)
        , mModuleName(strdup(moduleName))
        , mHwDevice(hwDevice)
        , mFlags(flags) { }

    // strdup'ed name must be released with free().
    virtual ~AudioHwDevice() { free((void *)mModuleName); }

    // True when the HAL module advertised hardware master-volume support.
    bool canSetMasterVolume() const {
        return (mFlags & AHWD_CAN_SET_MASTER_VOLUME) != 0;
    }

    // True when the HAL module advertised hardware master-mute support.
    bool canSetMasterMute() const {
        return (mFlags & AHWD_CAN_SET_MASTER_MUTE) != 0;
    }

    audio_module_handle_t handle() const { return mHandle; }
    const char *moduleName() const { return mModuleName; }
    sp<DeviceHalInterface> hwDevice() const { return mHwDevice; }

    /** This method creates and opens the audio hardware output stream.
     * The "address" parameter qualifies the "devices" audio device type if needed.
     * Its format depends on the device type:
     * - Bluetooth devices use the MAC address of the device in the form "00:11:22:AA:BB:CC"
     * - USB devices use the ALSA card and device numbers in the form "card=X;device=Y"
     * - Other devices may use a number or any other string.
     */
    status_t openOutputStream(
            AudioStreamOut **ppStreamOut,
            audio_io_handle_t handle,
            audio_devices_t devices,
            audio_output_flags_t flags,
            struct audio_config *config,
            const char *address);

    bool supportsAudioPatches() const;

private:
    const audio_module_handle_t mHandle;
    const char * const mModuleName;
    sp<DeviceHalInterface> mHwDevice;
    const Flags mFlags;
};
} // namespace android
#endif // ANDROID_AUDIO_HW_DEVICE_H
HAL层初始化在AF初始化时完成。
AF初始化时,使用DevicesFactoryHalInterface
静态方法创建Hal工厂对象
[–>AudioFlinger.cpp]
// AudioFlinger constructor (excerpt): among other things it creates the
// devices factory used later to open audio HAL modules.
AudioFlinger::AudioFlinger() {
......
mDevicesFactoryHal = DevicesFactoryHalInterface::create();
......
}
[–>DevicesFactoryHalInterface.cpp]
// Picks the devices-factory implementation matching the newest audio HAL
// service registered with hwservicemanager: V4.0 is preferred, then V2.0;
// nullptr is returned when no audio HAL service is available.
sp<DevicesFactoryHalInterface> DevicesFactoryHalInterface::create() {
    auto v4Factory = hardware::audio::V4_0::IDevicesFactory::getService();
    if (v4Factory != nullptr) {
        return new V4_0::DevicesFactoryHalHybrid();
    }
    auto v2Factory = hardware::audio::V2_0::IDevicesFactory::getService();
    if (v2Factory != nullptr) {
        return new DevicesFactoryHalHybrid();
    }
    return nullptr;
}
这里假设系统中注册的是 V4.0 版本的音频 HAL 服务,即走 V4_0::DevicesFactoryHalHybrid 分支。
[–>DevicesFactoryHalHybrid.cpp]
// The hybrid factory owns both a local (legacy audio_hw library) factory and
// a HIDL factory; openDevice() routes between them per module name.
DevicesFactoryHalHybrid::DevicesFactoryHalHybrid()
: mLocalFactory(new DevicesFactoryHalLocal()),
mHidlFactory(new DevicesFactoryHalHidl()) {
}
status_t DevicesFactoryHalHybrid::openDevice(const char *name, sp<DeviceHalInterface> *device) {
// Every module EXCEPT A2DP and the hearing-aid module goes through the HIDL
// factory; only those two keep using the local (legacy) factory. The
// DevicesFactoryHalLocal::openDevice path is what this article walks through.
if (mHidlFactory != 0 && strcmp(AUDIO_HARDWARE_MODULE_ID_A2DP, name) != 0 &&
strcmp(AUDIO_HARDWARE_MODULE_ID_HEARING_AID, name) != 0) {
return mHidlFactory->openDevice(name, device);
}
return mLocalFactory->openDevice(name, device);
}
[–>DevicesFactoryHalHidl.cpp]
// Obtains the IDevicesFactory HIDL service and registers a death handler so
// a HAL-service crash is observed by AudioFlinger.
DevicesFactoryHalHidl::DevicesFactoryHalHidl() {
mDevicesFactory = IDevicesFactory::getService();
if (mDevicesFactory != 0) {
// It is assumed that DevicesFactory is owned by AudioFlinger
// and thus have the same lifespan.
mDevicesFactory->linkToDeath(HalDeathHandler::getInstance(), 0 /*cookie*/);
} else {
......
}
......
}
初始化流程将得到 mDevicesFactoryHal 对象,其接口原型为 DevicesFactoryHalInterface。HAL 初始化完成后,我们继续分析 AT 流程中的 openOutput 过程:openOutput 流程中会涉及 audio HAL 层设备的打开、outputStream 的创建等。
// Opens an output on the HAL module identified by 'module': locates (or
// loads) a suitable hw device, then opens an output stream on it.
sp<AudioFlinger::ThreadBase> AudioFlinger::openOutput_l(audio_module_handle_t module,
audio_io_handle_t *output,
...) {
......
// Find a suitable hw device for the requested module/devices.
AudioHwDevice *outHwDev = findSuitableHwDev_l(module, devices);
......
//(1) Open the output stream on that device.
AudioStreamOut *outputStream = NULL;
status_t status = outHwDev->openOutputStream(
&outputStream,
*output,
devices,
flags,
config,
address.string());
......
}
// HAL module ids probed when no specific module handle is requested.
static const char * const audio_interfaces[] = {
AUDIO_HARDWARE_MODULE_ID_PRIMARY, // primary
AUDIO_HARDWARE_MODULE_ID_A2DP, // a2dp
AUDIO_HARDWARE_MODULE_ID_USB, // usb
};
AudioHwDevice* AudioFlinger::findSuitableHwDev_l(
audio_module_handle_t module,
audio_devices_t devices)
{
// Acts as a cache over mAudioHwDevs: load modules on demand, return the
// cached AudioHwDevice when one already exists for 'module'.
if (module == 0) {
// Legacy path: no module specified, so load every known audio interface.
for (size_t i = 0; i < arraysize(audio_interfaces); i++) {
loadHwModule_l(audio_interfaces[i]);
}
// NOTE(review): this excerpt elides the code that then scans the loaded
// devices for one supporting 'devices' and returns it.
} else {
AudioHwDevice *audioHwDevice = mAudioHwDevs.valueFor(module);
if (audioHwDevice != NULL) {
return audioHwDevice;
}
}
}
// Loads the named audio HAL module, verifies it, and registers it under a
// freshly allocated module handle in mAudioHwDevs.
audio_module_handle_t AudioFlinger::loadHwModule_l(const char *name)
{
......
sp<DeviceHalInterface> dev;
//(2) Open the HAL device via mDevicesFactoryHal->openDevice.
int rc = mDevicesFactoryHal->openDevice(name, &dev);
......
rc = dev->initCheck();
......
//(3) Wrap the HAL device in an AudioHwDevice and add it to the list.
audio_module_handle_t handle = (audio_module_handle_t) nextUniqueId(AUDIO_UNIQUE_ID_USE_MODULE);
mAudioHwDevs.add(handle, new AudioHwDevice(handle, name, dev, flags));
}
上面有三个关注点(1) (2) (3), 标号(2) 涉及DeviceHalInterface
类型设备的创建,代表Hal层的设备,标号(3) 是对标号(2)设备的封装,标号(1)通过标号(2) 创建的AudioHwDevice
对象打开输出流。
下面分析 openOutput 流程中标号(2)的 HAL 层设备创建流程。在 HAL初始化 章节中,我们得到的 mDevicesFactoryHal 实际指向 DevicesFactoryHalHybrid,其 openDevice 会按模块类型转发给 DevicesFactoryHalHidl 或 DevicesFactoryHalLocal;下面以 DevicesFactoryHalLocal 的 openDevice 实现为例进行分析。
[–> DevicesFactoryHalLocal.cpp]
// Opens a legacy (in-process) audio HAL device.
// 'name' is the module id: primary, usb, a2dp, ...
// On success, *device is set to a DeviceHalLocal wrapping the raw HAL device.
status_t DevicesFactoryHalLocal::openDevice(const char *name, sp<DeviceHalInterface> *device) {
    audio_hw_device_t *dev;
    // (1) Load the audio hw module; 'dev' implements hardware/audio.h. On the
    // RK platform analysed here the implementation lives in
    // hardware/rockchip/audio/legacy_hal/audio_hw_hal.cpp (debug build) or
    // hardware/rockchip/audio/tinyalsa_hal/audio_hw.c (tinyalsa build).
    status_t rc = load_audio_interface(name, &dev);
    if (rc == OK) {
        // (2) Wrap the raw audio hw device in a DeviceHalLocal.
        *device = new DeviceHalLocal(dev);
    }
    return rc;
}
分析openOutput
流程中标号(3)中 AudioHwDevice初始化流程
看下构造方法, 基本没做什么事,只是一些变量的初始化
[–>AudioHwDevice.h]
AudioHwDevice(audio_module_handle_t handle,
const char *moduleName,
sp<DeviceHalInterface> hwDevice,
Flags flags)
: mHandle(handle)
, mModuleName(strdup(moduleName))
, mHwDevice(hwDevice)
, mFlags(flags) { }
AudioHwDevice 主要实现了 openOutputStream 与 supportsAudioPatches 两个方法。我们重点分析 openOutputStream 方法,其涉及 AudioStreamOut 对象的创建。
[–>AudioHwDevice.cpp]
status_t AudioHwDevice::openOutputStream(
AudioStreamOut **ppStreamOut,
audio_io_handle_t handle,
audio_devices_t devices,
audio_output_flags_t flags,
struct audio_config *config,
const char *address)
{
......
// Create the AudioStreamOut wrapper bound to this hw device.
AudioStreamOut *outputStream = new AudioStreamOut(this, flags);
......
// Open the stream on the underlying HAL device.
status_t status = outputStream->open(handle, devices, config, address);
// NOTE(review): the excerpt elides the error handling, the *ppStreamOut
// assignment, and the return statement.
}
在AF中, openOutput
流程中通过调用AudioHwDevice
的openOutputStream
方法初始化AudioStreamOut
, 后续放音过程中,相关数据流体的写入操作便是通过AudioStreamOut
完成。
继续跟进AudioStreamOut
中open
方法
[–>AudioStreamOut.cpp]
// Opens the underlying HAL output stream by delegating to the
// DeviceHalInterface obtained through hwDev().
status_t AudioStreamOut::open(
audio_io_handle_t handle,
audio_devices_t devices,
struct audio_config *config,
const char *address)
{
sp<StreamOutHalInterface> outStream;
......
// hwDev() returns the DeviceHalInterface backing this stream's AudioHwDevice.
int status = hwDev()->openOutputStream(
handle,
devices,
customFlags,
config,
address,
&outStream);
......
}
// Returns the HAL device interface backing this stream.
sp<DeviceHalInterface> AudioStreamOut::hwDev() const
{
// audioHwDev is an AudioHwDevice; hwDevice() returns its sp<DeviceHalInterface>.
return audioHwDev->hwDevice();
}
在 Hal层设备创建 章节中我们得知 hwDev()
方法获取的实例为DeviceHalLocal
, 看其openOutputStream
方法
[–>DeviceHalLocal.cpp]
// Opens an output stream on the wrapped legacy HAL device and wraps the
// resulting raw stream in a StreamOutHalLocal.
status_t DeviceHalLocal::openOutputStream(
        audio_io_handle_t handle,
        audio_devices_t devices,
        audio_output_flags_t flags,
        struct audio_config *config,
        const char *address,
        sp<StreamOutHalInterface> *outStream) {
    // audio_stream_out is declared in hardware/libhardware/include/hardware/audio.h
    audio_stream_out_t *halStream;
    ......
    // mDev is an audio_hw_device_t; this call crosses into the HAL layer,
    // implemented by audio_hw_hal.cpp (debug) or audio_hw.c (tinyalsa).
    int openResult = mDev->open_output_stream(
            mDev, handle, devices, flags, config, &halStream, address);
    if (openResult == OK) {
        // Wrap the raw HAL stream as a StreamOutHalLocal.
        *outStream = new StreamOutHalLocal(halStream, this);
    }
    ALOGV("open_output_stream status %d stream %p", openResult, halStream);
    return openResult;
}
进入Hal层的open_output_stream
方法,看其实现
[–> audio_hw.c]
// HAL module open entry point (partial excerpt; a fuller version appears
// later in this article): fills the audio_hw_device function table, then
// initializes routing and device state.
static int adev_open(const hw_module_t* module, const char* name,
hw_device_t** device)
{
.......
adev->hw_device.init_check = adev_init_check;
adev->hw_device.set_voice_volume = adev_set_voice_volume;
adev->hw_device.set_master_volume = adev_set_master_volume;
// Mode setter.
adev->hw_device.set_mode = adev_set_mode;
......
// Input buffer size query.
adev->hw_device.get_input_buffer_size = adev_get_input_buffer_size;
// Output stream open hook.
adev->hw_device.open_output_stream = adev_open_output_stream;
......
// Routing initialization (implemented in alsa_route.c).
route_init(); //Alsa_route.c
......
// Device state initialization.
adev_open_init(adev);
}
// HAL implementation of open_output_stream: selects the PCM configuration
// and device according to the requested flags, using tinyalsa underneath.
static int adev_open_output_stream(struct audio_hw_device *dev,
audio_io_handle_t handle,
audio_devices_t devices,
audio_output_flags_t flags,
struct audio_config *config,
struct audio_stream_out **stream_out,
const char *address __unused)
{
// Uses tinyalsa interfaces to set up the device and initialize stream_out.
// NOTE(review): the excerpt elides the declarations of 'out' and 'type'.
......
init_hdmi_audio(&out->hdmi_audio);
if(devices & AUDIO_DEVICE_OUT_AUX_DIGITAL) {
......
}
if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
} else if (flags & AUDIO_OUTPUT_FLAG_DEEP_BUFFER) {
// Deep-buffer output path.
out->config = pcm_config_deep;
out->pcm_device = PCM_DEVICE_DEEP;
type = OUTPUT_DEEP_BUF;
}else {
// Default low-latency output path.
out->config = pcm_config;
out->pcm_device = PCM_DEVICE;
type = OUTPUT_LOW_LATENCY;
}
}
AF同System HAL建立关联主要是通过AudioHwDevice
类完成
Android 8 以后,Google 引入了 HIDL 接口(类似 AIDL 的接口实现),将 HAL 层实现迁移到 vendor 分区,使 system framework 可以独立升级。audio 也接入了 HIDL 支持。
system下HIDL接口,路径如下
frameworks/av/media/libaudiohal
HAL层Hidl接口相关定义路径如下
hardware/interfaces/audio
audio HAL层接口定义在
hardware/libhardware/include/hardware/audio.h
RK 平台 HAL层实现在hardware/rockchip/audio
, 提供了legacy_hal和tinyalsa_hal两种实现,其中legacy_hal用于debug, tinyalsa_hal用于product。
hardware/rockchip/audio/tinyalsa_hal
看下目录结构
重点分析文件列表
audio_hw.h
audio_hw.c
其中, hardware/audio.h
的实现主要在audio_hw.c
中, 看下一些重要方法的实现
[–>audio_hw.c]
// Module method table: adev_open is this HAL module's open entry point.
static struct hw_module_methods_t hal_module_methods = {
.open = adev_open,
};
/* HAL module descriptor: common metadata the Android HAL loader looks up
 * through the well-known HAL_MODULE_INFO_SYM symbol. */
struct audio_module HAL_MODULE_INFO_SYM = {
    .common = {
        .tag = HARDWARE_MODULE_TAG,
        .module_api_version = AUDIO_MODULE_API_VERSION_0_1,
        .hal_api_version = HARDWARE_HAL_API_VERSION,
        .id = AUDIO_HARDWARE_MODULE_ID,
        .name = "Manta audio HW HAL",
        .author = "The Android Open Source Project",
        .methods = &hal_module_methods,
    },
};
// open entry point implementation: fills the audio_device vtable, sets up
// ALSA routing, and hands the common hw_device handle back to the caller.
static int adev_open(const hw_module_t* module, const char* name,
hw_device_t** device)
{
struct audio_device *adev;
int ret;
......
adev->hw_device.common.tag = HARDWARE_DEVICE_TAG;
adev->hw_device.common.version = AUDIO_DEVICE_API_VERSION_2_0;
adev->hw_device.common.module = (struct hw_module_t *) module;
adev->hw_device.common.close = adev_close;
adev->hw_device.init_check = adev_init_check;
......
adev->hw_device.open_output_stream = adev_open_output_stream;
adev->hw_device.close_output_stream = adev_close_output_stream;
adev->hw_device.open_input_stream = adev_open_input_stream;
adev->hw_device.close_input_stream = adev_close_input_stream;
.......
// Calls into alsa_route.c to set up the routing tables.
route_init();
......
// Return the embedded common hw_device handle to the caller.
*device = &adev->hw_device.common;
adev_open_init(adev);
return 0;
}
.......
/*
 * adev_open_init() - one-time initialization of audio_device state after open:
 * clears mute/screen flags, resets output-stream and sound-card bookkeeping,
 * and applies period-size overrides from vendor system properties.
 */
static void adev_open_init(struct audio_device *adev)
{
    ALOGD("%s",__func__);

    int idx = 0;

    adev->mic_mute = false;
    adev->screenOff = false;
#ifdef AUDIO_3A
    adev->voice_api = NULL;
#endif
    adev->input_source = AUDIO_SOURCE_DEFAULT;

    /* No output streams exist yet. */
    for (idx = 0; idx < OUTPUT_TOTAL; idx++) {
        adev->outputs[idx] = NULL;
    }

    /* Sound cards are probed later; mark every slot unknown for now. */
    for (idx = 0; idx < SND_OUT_SOUND_CARD_MAX; idx++) {
        adev->out_card[idx] = (int)SND_OUT_SOUND_CARD_UNKNOWN;
    }
    for (idx = 0; idx < SND_IN_SOUND_CARD_MAX; idx++) {
        adev->in_card[idx] = (int)SND_IN_SOUND_CARD_UNKNOWN;
    }

    adev->owner[0] = NULL;
    adev->owner[1] = NULL;

    /* Period sizes may be tuned through vendor properties; the input period
     * size follows the output one unless explicitly overridden. */
    char value[PROPERTY_VALUE_MAX];
    if (property_get("vendor.audio.period_size", value, NULL) > 0) {
        pcm_config.period_size = atoi(value);
        pcm_config_in.period_size = pcm_config.period_size;
    }
    if (property_get("vendor.audio.in_period_size", value, NULL) > 0) {
        pcm_config_in.period_size = atoi(value);
    }
}
说明:
AudioStreamOut、AudioStreamIn 及 AudioHardwareBase 定义在 AudioHardwareInterface.cpp 中;
AudioStreamOutALSA、AudioStreamInALSA 实现在 AudioHardware.cpp 中。