之前已经确定,当打开pcm设备的时候,会执行到snd_pcm_open,这个open会导致substream->ops->open(substream)的调用。
这个open函数在snd_pcm_set_ops函数设置了snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &rtd->ops)
也就对应到了soc_pcm_open中产生一系列的动作,这个会在我的另一篇博文中详细说到。
现在说这个snd_pcm_open是如何被调用的呢?
可以发现,这个snd_pcm_open被这两个函数调用:snd_pcm_playback_open、snd_pcm_capture_open
而这两个函数正是赋给snd_pcm_f_ops结构体中.open函数指针字段的函数。
已知ops字段被调用的方式是:通过alsa驱动主设备号为116的字符设备,经由ops字段回调过来的。
/*
 * snd_open() - open handler for the ALSA character device (major 116).
 *
 * Looks up the per-minor file_operations registered by each ALSA
 * component (PCM playback/capture, control, timer, ...), swaps this
 * file's fops for them, and forwards the open to the component's own
 * handler -- e.g. snd_pcm_playback_open / snd_pcm_capture_open for a
 * PCM device node.
 *
 * NOTE(review): the excerpt originally quoted here dereferenced mptr
 * and new_fops without checking them; the real kernel code fails with
 * -ENODEV for an unregistered minor, restored below.
 */
static int snd_open(struct inode *inode, struct file *file)
{
	unsigned int minor = iminor(inode);
	struct snd_minor *mptr = NULL;
	const struct file_operations *new_fops;
	int err = 0;

	mptr = snd_minors[minor];
	/* Unregistered minor: no component owns this device node. */
	if (mptr == NULL)
		return -ENODEV;
	new_fops = fops_get(mptr->f_ops);
	if (new_fops == NULL)
		return -ENODEV;
	/* From now on every syscall on this fd goes straight to the
	 * component's own file_operations. */
	replace_fops(file, new_fops);
	if (file->f_op->open)
		err = file->f_op->open(inode, file);
	return err;
}
所以要看snd_pcm_open如何被调用,那么需要看谁调用了alsa的字符设备。
这里可以知道,在\external\tinyalsa\pcm.c下,存在
struct pcm *pcm_open(unsigned int card, unsigned int device, unsigned int flags, struct pcm_config *config)
这里会通过相应的card和device和flags打开对应的playback或者capture设备。
struct pcm *pcm_open(unsigned int card, unsigned int device,
unsigned int flags, struct pcm_config *config)
{
struct pcm *pcm;
struct snd_pcm_info info;
struct snd_pcm_hw_params params;
struct snd_pcm_sw_params sparams;
char fn[256];
int rc;
pcm = calloc(1, sizeof(struct pcm));
if (!pcm || !config)
return &bad_pcm; /* TODO: could support default config here */
pcm->config = *config;
snprintf(fn, sizeof(fn), "/dev/snd/pcmC%uD%u%c", card, device,
flags & PCM_IN ? 'c' : 'p');
pcm->flags = flags;
pcm->fd = open(fn, O_RDWR);
if (pcm->fd < 0) {
oops(pcm, errno, "cannot open device '%s'", fn);
return pcm;
}
。。。。。。
。。。。。。
。。。。。。
并对config结构体进行参数的设置等一些设置操作。
那么又是谁调用了这个open函数来打开对应的设备文件呢?
这里我通过搜索 #include <tinyalsa/asoundlib.h> 的引用,找到了调用pcm_open的地方。
地方在:vendor\xxxx\modules\audio\normal\audio_hw.c
可以发现,这个C文件对每个设备想要调用到pcm设备之前,会先进行硬件参数的设置,然后调用pcm_open来打开底层pcm设备。
可以看到static int start_output_stream(struct tiny_stream_out *out)函数中
#ifdef USE_PCM_IRQ_MODE
out->pcm = pcm_open(card, port, PCM_OUT | PCM_MONOTONIC, &out->cur_config);
#else
out->pcm = pcm_open(card, port, PCM_OUT | PCM_MMAP | PCM_NOIRQ | PCM_MONOTONIC, &out->cur_config);
#endif
注意到这条log信息,显示了一些主要的信息。
ALOGD("start output stream mode:%x devices:%x call_start:%d, call_connected:%d, is_voip:%d, voip_state:%x, is_bt_sco:%d"
继续往上跟踪,可以知道在static ssize_t out_write(struct audio_stream_out *stream, const void* buffer, size_t bytes)中调用了start_output_stream函数。
继续跟踪,
static int adev_open_output_stream(struct audio_hw_device *dev,
audio_io_handle_t handle,
audio_devices_t devices,
audio_output_flags_t flags,
struct audio_config *config,
struct audio_stream_out **stream_out)
注意struct tiny_stream_out *out结构,这个函数的stream结构赋值了各种函数指针,这给上层调用这些函数提供了接口。
out->stream.common.get_sample_rate = out_get_sample_rate;
out->stream.common.set_sample_rate = out_set_sample_rate;
out->stream.common.get_buffer_size = out_get_buffer_size;
out->stream.common.get_channels = out_get_channels;
out->stream.common.get_format = out_get_format;
out->stream.common.set_format = out_set_format;
out->stream.common.dump = out_dump;
out->stream.common.set_parameters = out_set_parameters;
out->stream.common.get_parameters = out_get_parameters;
out->stream.common.add_audio_effect = out_add_audio_effect;
out->stream.common.remove_audio_effect = out_remove_audio_effect;
out->stream.get_latency = out_get_latency;
out->stream.set_volume = out_set_volume;
out->stream.write = out_write;
out->stream.get_render_position = out_get_render_position;
out->stream.get_presentation_position = out_get_presentation_position;
再往上跟踪,在adev_open函数中
adev->hw_device.common.tag = HARDWARE_DEVICE_TAG;
adev->hw_device.common.version = AUDIO_DEVICE_API_VERSION_2_0;
adev->hw_device.common.module = (struct hw_module_t *) module;
adev->hw_device.common.close = adev_close;
adev->hw_device.get_supported_devices = adev_get_supported_devices;
adev->hw_device.init_check = adev_init_check;
adev->hw_device.set_voice_volume = adev_set_voice_volume;
adev->hw_device.set_master_volume = adev_set_master_volume;
adev->hw_device.set_mode = adev_set_mode;
adev->hw_device.set_master_mute = adev_set_master_mute;
adev->hw_device.get_master_mute = adev_get_master_mute;
adev->hw_device.set_mic_mute = adev_set_mic_mute;
adev->hw_device.get_mic_mute = adev_get_mic_mute;
adev->hw_device.set_parameters = adev_set_parameters;
adev->hw_device.get_parameters = adev_get_parameters;
adev->hw_device.get_input_buffer_size = adev_get_input_buffer_size;
adev->hw_device.open_output_stream = adev_open_output_stream;
adev->hw_device.close_output_stream = adev_close_output_stream;
adev->hw_device.open_input_stream = adev_open_input_stream;
adev->hw_device.close_input_stream = adev_close_input_stream;
adev->hw_device.dump = adev_dump;
adev->realCall = false;
/* Entry points the Android HAL loader uses for this module: open()
 * instantiates the audio device via adev_open. */
static struct hw_module_methods_t hal_module_methods = {
.open = adev_open,
};
/* HAL_MODULE_INFO_SYM is the well-known symbol name ("HMI") that
 * libhardware's hw_get_module() dlopen()s and looks up to discover
 * this audio module; .id ties it to the "audio" hardware module. */
struct audio_module HAL_MODULE_INFO_SYM = {
.common = {
.tag = HARDWARE_MODULE_TAG,
.module_api_version = AUDIO_MODULE_API_VERSION_0_1,
.hal_api_version = HARDWARE_HAL_API_VERSION,
.id = AUDIO_HARDWARE_MODULE_ID,
.name = "Spreadtrum Audio HW HAL",
.author = "The Android Open Source Project",
.methods = &hal_module_methods,
},
};
留意一下这个结构体
struct tiny_audio_device {
struct audio_hw_device hw_device;
pthread_mutex_t lock; /* see note below on mutex acquisition order */
struct mixer *mixer;
struct audio_control *dev_ctl;
struct dev_test_t dev_test;
audio_mode_t call_mode;
voice_status_t call_status;
bool low_power;
bool mic_mute;
bool bluetooth_nrec;
int bluetooth_type;
bool master_mute;
int audio_outputs_state;
int voice_volume;
其中 struct audio_hw_device hw_device结构体就存储了这个adev_open_output_stream函数指针。
所以需要使用这个函数指针,那么必须调用这个结构体的函数指针!
答案在frameworks\av\media\libnbaio\AudioStreamOutSink.cpp中,存在方法如下
// Write up to `count` frames from `buffer` to the wrapped HAL output
// stream. Returns the number of frames written, NEGOTIATE if format
// negotiation has not completed yet, or a negative HAL error code.
ssize_t AudioStreamOutSink::write(const void *buffer, size_t count)
{
if (!mNegotiated) {
return NEGOTIATE;
}
ALOG_ASSERT(mFormat != Format_Invalid);
// The HAL write() takes a byte count; frames -> bytes via mBitShift.
ssize_t ret = mStream->write(mStream, buffer, count << mBitShift);
if (ret > 0) {
// Convert bytes actually written back to frames and account for them.
ret >>= mBitShift;
mFramesWritten += ret;
} else {
// FIXME verify HAL implementations are returning the correct error codes e.g. WOULD_BLOCK
}
return ret;
}
注意到ssize_t ret = mStream->write(mStream, buffer, count << mBitShift)
他调用了write函数指针。
而这个函数指针正好是out->stream.write = out_write。
为什么呢?
为什么 mStream->write 就正好是out_write呢?
原因在这儿!
mOutputSink = new AudioStreamOutSink(output->stream)!!!
函数位置在:
AudioFlinger::MixerThread::MixerThread(const sp<AudioFlinger>& audioFlinger, AudioStreamOut* output,
audio_io_handle_t id, audio_devices_t device, type_t type)
好,现在已经知道了在cpp中,如果要实现打开pcm设备,那么必须先调用这个方法。下面找找谁调用了这个方法。
通过查看log的方式,我找到了调用这个write的地方。
在frameworks\av\services\audioflinger\Threads.cpp
方法:ssize_t AudioFlinger::PlaybackThread::threadLoop_write()
存在这么一句: ssize_t framesWritten = mNormalSink->write(mMixBuffer + offset, count)
在Threads.h中可以知道,这个mNormalSink
其实是 sp<NBAIO_Sink> 类型的智能指针。
找一下NBAIO_Sink的class在哪里。
// NBAIO_Sink: abstract non-blocking audio sink. Concrete subclasses
// (e.g. AudioStreamOutSink) implement the pure virtual write(); the
// mixer thread only talks to this interface via mNormalSink.
class NBAIO_Sink : public NBAIO_Port {
public:
    // Frames written successfully since construction; callers poll it
    // periodically. (NOTE(review): the quoted excerpt had the bare
    // stray word "periodically." here -- the comment markup was eaten
    // during extraction; restored as a comment so the class compiles.)
    virtual size_t framesWritten() const { return mFramesWritten; }
    virtual size_t framesUnderrun() const { return 0; }
    virtual size_t underruns() const { return 0; }
    virtual ssize_t availableToWrite() const { return SSIZE_MAX; }
    // Pure virtual: the actual transfer is defined by the subclass.
    virtual ssize_t write(const void *buffer, size_t count) = 0;
    virtual ssize_t writeVia(writeVia_t via, size_t total, void *user, size_t block = 0);
    virtual status_t getNextWriteTimestamp(int64_t *ts) { return INVALID_OPERATION; }
    virtual status_t getTimestamp(AudioTimestamp& timestamp) { return INVALID_OPERATION; }
protected:
    NBAIO_Sink(NBAIO_Format format = Format_Invalid) : NBAIO_Port(format), mFramesWritten(0) { }
    virtual ~NBAIO_Sink() { }
    size_t mFramesWritten;
};
可以发现这是一个纯虚函数,所以看其实现在哪里。
回过头看class AudioStreamOutSink : public NBAIO_Sink {
可以发现,这个write真正实现在
// Push up to `count` frames from `buffer` into the wrapped HAL stream.
// Returns frames written, NEGOTIATE if negotiate() has not finished,
// or a negative HAL error code passed straight through.
ssize_t AudioStreamOutSink::write(const void *buffer, size_t count)
{
    if (!mNegotiated) {
        return NEGOTIATE;
    }
    ALOG_ASSERT(mFormat != Format_Invalid);
    // The HAL write() works in bytes; frames -> bytes via mBitShift.
    const ssize_t written = mStream->write(mStream, buffer, count << mBitShift);
    if (written <= 0) {
        // FIXME verify HAL implementations are returning the correct error codes e.g. WOULD_BLOCK
        return written;
    }
    // Bytes actually written back to frames, then account for them.
    const ssize_t frames = written >> mBitShift;
    mFramesWritten += frames;
    return frames;
}
继续往前看,threadLoop_write方法,查找一下,发现
ssize_t AudioFlinger::PlaybackThread::threadLoop_write()在ssize_t AudioFlinger::MixerThread::threadLoop_write()中被调用了。
下面是代码
// Per-cycle write hook for the mixer thread: if a fast mixer exists,
// drive its state machine into MIX_WRITE (waking it from COLD_IDLE via
// a futex if needed), then delegate the actual sink write to
// PlaybackThread::threadLoop_write().
ssize_t AudioFlinger::MixerThread::threadLoop_write()
{
// FIXME we should only do one push per cycle; confirm this is true
// Start the fast mixer if it's not already running
if (mFastMixer != NULL) {
FastMixerStateQueue *sq = mFastMixer->sq();
FastMixerState *state = sq->begin();
if (state->mCommand != FastMixerState::MIX_WRITE &&
(kUseFastMixer != FastMixer_Dynamic || state->mTrackMask > 1)) {
if (state->mCommand == FastMixerState::COLD_IDLE) {
// Leaving COLD_IDLE: bump the futex counter and, if the fast
// mixer was actually parked (old == -1), wake it up.
int32_t old = android_atomic_inc(&mFastMixerFutex);
if (old == -1) {
__futex_syscall3(&mFastMixerFutex, FUTEX_WAKE_PRIVATE, 1);
}
#ifdef AUDIO_WATCHDOG
if (mAudioWatchdog != 0) {
mAudioWatchdog->resume();
}
#endif
}
state->mCommand = FastMixerState::MIX_WRITE;
// Low-RAM devices get a smaller dump-state sampling buffer.
mFastMixerDumpState.increaseSamplingN(mAudioFlinger->isLowRamDevice() ?
FastMixerDumpState::kSamplingNforLowRamDevice : FastMixerDumpState::kSamplingN);
sq->end();
// Block until the fast mixer has observed the new state.
sq->push(FastMixerStateQueue::BLOCK_UNTIL_PUSHED);
if (kUseFastMixer == FastMixer_Dynamic) {
// Dynamic mode: route normal-mixer output through the pipe that
// feeds the fast mixer instead of writing the HAL directly.
mNormalSink = mPipeSink;
}
} else {
// Nothing to change; close the state transaction without a push.
sq->end(false /*didModify*/);
}
}
// Shared (blocking) write path: ends up in mNormalSink->write().
return PlaybackThread::threadLoop_write();
}
现在跟踪,ssize_t AudioFlinger::MixerThread::threadLoop_write()方法。
搜索一下,可以知道它在bool AudioFlinger::PlaybackThread::threadLoop()方法中被调用了。
那么如果再继续往上跟踪,肯定是有人启动了bool AudioFlinger::PlaybackThread::threadLoop()方法了!
这里我没时间再继续跟踪了。不过可以确定的是,pcm设备被打开,一定是调用了这个方法。
还有一个注意点,这里new了一些类!。
audio_io_handle_t AudioFlinger::openOutput(audio_module_handle_t module,
audio_devices_t *pDevices,
uint32_t *pSamplingRate,
audio_format_t *pFormat,
audio_channel_mask_t *pChannelMask,
uint32_t *pLatencyMs,
audio_output_flags_t flags,
const audio_offload_info_t *offloadInfo)
AudioStreamOut *output = new AudioStreamOut(outHwDev, outStream, flags);
if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
thread = new OffloadThread(this, output, id, *pDevices);
ALOGV("openOutput() created offload output: ID %d thread %p", id, thread);
} else if ((flags & AUDIO_OUTPUT_FLAG_DIRECT) ||
(config.format != AUDIO_FORMAT_PCM_16_BIT) ||
(config.channel_mask != AUDIO_CHANNEL_OUT_STEREO)) {
thread = new DirectOutputThread(this, output, id, *pDevices);
ALOGV("openOutput() created direct output: ID %d thread %p", id, thread);
} else {
thread = new MixerThread(this, output, id, *pDevices);
ALOGV("openOutput() created mixer output: ID %d thread %p", id, thread);
}
暂时到这儿,后续有时间再更新,继续往上跟踪
目前从下往上跟踪看来,可以初步了解整个框架,到时候深入了解的时候,还是需要从上往下逐步分析,这样在debug的时候才能胸有成竹。