FFMPEG录屏(5)---- 捕获扬声器和麦克风(基于WASAPI)


最近微软已经公布了对Windows7维护的截止时间,考虑到捕获基于dshow-capture的依赖还需要安装额外的软件,所以还是实现了基于Windows CoreAudio API的音频捕获以及GDI的桌面捕获。

如下为微软官方给出的demo:

CoreAudio Loopback Recording
CoreAudio Capturing a Stream
GDI Capturing an Image
Desktop Duplication API

在本篇中,请仔细阅读CoreAudio相关文章,会帮助你更快的上手。

头文件引用

首先这里有一个坑,请务必新建一个头文件并将CoreAudio相关头文件定义在内,否则你会发现一些意想不到的编译错误

//header_mmdevice.h
#ifndef MMDEVICE_DEFINE
#define MMDEVICE_DEFINE

#ifdef _WIN32

#include <windows.h>
#include <mmdeviceapi.h>
#include <propsys.h> //must include before functiondiscoverykeys_devpkey
#include <functiondiscoverykeys_devpkey.h>
#include <audioclient.h>
#include <audiopolicy.h>
#include <endpointvolume.h>
#include <avrt.h>
#include <string>

class com_initialize {
public:
	com_initialize() {
		CoInitializeEx(NULL, COINIT_MULTITHREADED);
	}
	~com_initialize() {
		CoUninitialize();
	}
};

#define DEFAULT_AUDIO_INOUTPUT_NAME "Default"
#define DEFAULT_AUDIO_INOUTPUT_ID "Default"

#endif // _WIN32

#endif //MMDEVICE_DEFINE

使用CoreAudio API是有一个initguid.h文件必须引入在cpp文件中且全工程只能引用一次

//header_mmdevice.cpp
#include <initguid.h> //must define in a cpp file only once
#include "header_mmdevice.h"

初始化COM

CoInitializeEx(NULL, COINIT_MULTITHREADED);//multi thread version,can init several times

获取设备枚举器

IMMDeviceEnumerator *_enumerator;
HRESULT hr = CoCreateInstance(
				__uuidof(MMDeviceEnumerator),
				NULL,
				CLSCTX_ALL,
				__uuidof(IMMDeviceEnumerator),
				(void **)&_enumerator);
根据设备ID获取设备实例或获取默认设备实例
bool _is_default = (utils_string::ascii_utf8(DEFAULT_AUDIO_INOUTPUT_ID).compare(_device_id) == 0);

IMMDevice *_device;
if (_is_default) {
	hr = _enumerator->GetDefaultAudioEndpoint(
		is_input ? eCapture : eRender,
		is_input ? eCommunications : eConsole, &_device);
}
else {
	hr = _enumerator->GetDevice(utils_string::utf8_unicode(_device_id).c_str(), &_device);
}
激活client
IAudioClient *_capture_client;
_device->Activate(__uuidof(IAudioClient), CLSCTX_ALL, NULL, (void**)&_capture_client);
获取数据格式
WAVEFORMATEX *_wfex;
_capture_client->GetMixFormat(&_wfex);

int _sample_rate = _wfex->nSamplesPerSec;
int _bit_rate = _wfex->nAvgBytesPerSec;
int _bit_per_sample = _wfex->wBitsPerSample;
int _channel_num = _wfex->nChannels;
AVSampleFormat _fmt = AV_SAMPLE_FMT_FLT;//wasapi is always flt
根据设备类型设置捕获标志

在此采用EventCallBack模式,当capture内数据准备完成时将触发我们所设置的event,并为扬声器设备设置LoopBack标志。

DWORD flags = AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
if (_is_input == false)
	flags |= AUDCLNT_STREAMFLAGS_LOOPBACK;
初始化client

hnsBufferDuration

The buffer capacity as a time value. This parameter is of type REFERENCE_TIME and is expressed in 100-nanosecond units. This parameter contains the buffer size that the caller requests for the buffer that the audio application will share with the audio engine (in shared mode) or with the endpoint device (in exclusive mode). If the call succeeds, the method allocates a buffer that is a least this large. For more information about REFERENCE_TIME, see the Windows SDK documentation. For more information about buffering requirements, see Remarks.

根据API说明,hnsBufferDuration以100纳秒(REFERENCE_TIME)为单位,这里换算后请求约1秒的缓冲区容量

#define NS_PER_SEC 1000000000
#define REFTIMES_PER_SEC  NS_PER_SEC/100            //100ns per buffer unit
hr = _capture_client->Initialize(
				AUDCLNT_SHAREMODE_SHARED,
				flags,
				REFTIMES_PER_SEC,
				0,
				_wfex,
				NULL);
获取一次捕获的Sample数量
uint32_t _capture_sample_count;
_capture_client->GetBufferSize(&_capture_sample_count);
获取capture
IAudioCaptureClient *_capture;
 _capture_client->GetService(__uuidof(IAudioCaptureClient), (void**)&_capture);
创建Event用以接收数据capture事件
HANDLE _ready_event = CreateEvent(NULL, FALSE, FALSE, NULL);
_capture_client->SetEventHandle(_ready_event);
启动捕获线程
// Drain all packets currently queued in the WASAPI capture client and
// forward the PCM payload of each to process_data(). Called after the
// ready event fires.
//
// @param frame  reusable AVFrame the decoded samples are written into
// @return AE_NO when the queue is fully drained (normal case), otherwise
//         an AE_* error code. AUDCLNT_E_DEVICE_INVALIDATED is not logged
//         because it is the expected result of unplugging the endpoint.
int record_audio_wasapi::do_record(AVFrame *frame)
{
	HRESULT res = S_OK;
	LPBYTE buffer = NULL;
	DWORD flags = 0;
	uint32_t sample_count = 0;
	int error = AE_NO;

	while (_running) {
		res = _capture->GetNextPacketSize(&sample_count);

		if (FAILED(res)) {
			if (res != AUDCLNT_E_DEVICE_INVALIDATED)
				al_error("GetNextPacketSize failed: %lX", res);
			error = AE_CO_GET_PACKET_FAILED;
			break;
		}

		// queue drained — go back to waiting for the next ready event
		if (!sample_count)
			break;

		buffer = NULL;
		res = _capture->GetBuffer(&buffer, &sample_count, &flags, NULL, NULL);
		if (FAILED(res)) {
			if (res != AUDCLNT_E_DEVICE_INVALIDATED)
				al_error("GetBuffer failed: %lX", res);
			error = AE_CO_GET_BUFFER_FAILED;
			break;
		}

		// input (microphone) streams do not set the silent flag; for
		// loopback streams we only log it — the frames are still consumed
		if (flags & AUDCLNT_BUFFERFLAGS_SILENT) {
			al_warn("on silent data %d", sample_count);
		}

		if (buffer) {
			process_data(frame, buffer, sample_count);
		}
		else {
			al_error("invalid capture buffer");
		}

		// every successful GetBuffer must be paired with a ReleaseBuffer
		_capture->ReleaseBuffer(sample_count);
	}

	return error;
}

void record_audio_wasapi::record_loop()
{
	AVFrame *frame = av_frame_alloc();

	HANDLE events[2] = { _stop_event,_ready_event };

	int error = AE_NO;
	while (_running)
	{
		if (WaitForMultipleObjects(2, events, FALSE, INFINITE) == WAIT_OBJECT_0)
			break;

		if ((error = do_record(frame)) != AE_NO) {
			if (_on_error) _on_error(error, _cb_extra_index);
			break;
		}
	}//while(_running)

	av_frame_free(&frame);
}

至此对系统扬声器和麦克风的捕获流程已经结束。

重要的补充(否则AMIX无法正常合流)

在调试时发现,麦克风的捕获一切正常,基本以10ms的频率输出数据。
而扬声器捕获则有一个大坑等着:发现当系统没有声音在渲染时,ready event永远也不会被设置。只有当真正有声音在播放时才会被设置,而且声音播放完成后会发送一连串的silent数据,随后又恢复了宁静。
尝试通过以一个自适应的方式(计算Sample数量并动态的调整Sleep间隔),根据PCM格式去补全音频数据,但效果不佳,总会有爆破音,标准的数据丢失的现象。最后采用了在捕获扬声器数据时同步播放静音的方法。
但其实不这样做也应该是可以的,只是采用了amix进行混音,必须保证麦克风和扬声器的音频数据在同一时间线上,否则会造成扬声器声音提前或者麦克风声音被截断(取决于amix的drop和duration参数)
Sample – WASAPI loopback capture (record what you hear)

There are a couple of oddities for WASAPI loopback capture. One is that “event mode” doesn’t work for loopback capture; you can call pAudioClient->Initialize(… AUDCLNT_STREAMFLAGS_LOOPBACK | AUDCLNT_STREAMFLAGS_EVENTCALLBACK, … ), you can call pAudioClient->SetEventHandle(…), and everything will succeed… but the “data is ready” event will never fire. So this app creates its own waitable timer.

Another oddity is that WASAPI will only push data down to the render endpoint when there are active streams. When nothing is playing, there is nothing to capture.
For example, play a song, and then run loopback-capture. While loopback-capture is running, stop the song, and then start it again. You’ll get this output when you start it back up:

But my particular favorite way of handling this is to run silence.exe. That way there are never any “nothing is playing” glitches, because there’s always something playing.

所以在初始化函数中,判断为扬声器捕获时,创建一个AudioRender

int record_audio_wasapi::init_render()
{
	int error = AE_NO;

	do {
		HRESULT res = _device->Activate(__uuidof(IAudioClient),
			CLSCTX_ALL, 
			nullptr,
			(void **)&_render_client
		);

		if (FAILED(res)) {
			error = AE_CO_ACTIVE_DEVICE_FAILED;
			break;
		}

		WAVEFORMATEX *wfex;
		res = _render_client->GetMixFormat(&wfex);
		if (FAILED(res)) {
			error = AE_CO_GET_FORMAT_FAILED;
			break;
		}

		res = _render_client->Initialize(AUDCLNT_SHAREMODE_SHARED,
			AUDCLNT_STREAMFLAGS_EVENTCALLBACK,
			REFTIMES_PER_SEC,
			0, wfex, nullptr);

		CoTaskMemFree(wfex);
		if (FAILED(res)) {
			error = AE_CO_AUDIOCLIENT_INIT_FAILED;
			break;
		}

		/* Silent loopback fix. Prevents audio stream from stopping and */
		/* messing up timestamps and other weird glitches during silence */
		/* by playing a silent sample all over again. */

		res = _render_client->GetService(__uuidof(IAudioRenderClient),
			(void **)&_render);
		if (FAILED(res)) {
			error = AE_CO_GET_CAPTURE_FAILED;
			break;
		}

		_render_event = CreateEvent(NULL, FALSE, FALSE, NULL);
		if (!_render_event) {
			error = AE_CO_CREATE_EVENT_FAILED;
			break;
		}
			
		res = _render_client->SetEventHandle(_render_event);
		if (FAILED(res)) {
			error = AE_CO_SET_EVENT_FAILED;
			break;
		}

		//pre fill a single silent buffer
		res = _render_client->GetBufferSize(&_render_sample_count);
		if (FAILED(res)) {
			error = AE_CO_GET_VALUE_FAILED;
			break;
		}

		uint8_t *buffer = NULL;
		res = _render->GetBuffer(_render_sample_count, &buffer);
		if (FAILED(res)) {
			error = AE_CO_GET_VALUE_FAILED;
			break;
		}

		res = _render->ReleaseBuffer(_render_sample_count, AUDCLNT_BUFFERFLAGS_SILENT);
		if (FAILED(res)) {
			error = AE_CO_RELEASE_BUFFER_FAILED;
			break;
		}
	} while (0);

	return error;
}

在启动扬声器捕获线程时,首先启动Render线程

void record_audio_wasapi::render_loop()
{
	HANDLE events[2] = { _stop_event,_render_event };

	HRESULT res = S_OK;
	uint8_t *pData = NULL;
	uint32_t padding_count = 0;

	while (_running && 
		WaitForMultipleObjects(2, events, FALSE, INFINITE) != WAIT_OBJECT_0
		) {
			
		res = _render_client->GetCurrentPadding(&padding_count);
		if (FAILED(res)) {
			break;
		}

		if (padding_count == _render_sample_count) {
			if (_on_error) _on_error(AE_CO_PADDING_UNEXPECTED, _cb_extra_index);
			break;
		}

		res = _render->GetBuffer(_render_sample_count - padding_count, &pData);
		if (FAILED(res)) {
			if (_on_error) _on_error(AE_CO_GET_BUFFER_FAILED, _cb_extra_index);
			break;
		}

		res = _render->ReleaseBuffer(_render_sample_count - padding_count, AUDCLNT_BUFFERFLAGS_SILENT);
		if (FAILED(res)) {
			if (_on_error) _on_error(AE_CO_RELEASE_BUFFER_FAILED, _cb_extra_index);
			break;
		}
	}
}

至此已经完成了基于WASAPI对扬声器和麦克风进行捕获。

补充

win7下的采集略有不同,event不会被触发,可以查看Github中源码进行修改。


GitHub传送门

screen-recorder

你可能感兴趣的:(录屏软件,ffmpeg)