本文要介绍的是一个耳返功能,就是实现插入usb mic之后就打通一个usb mic到speaker的回环,把拿到的usb mic的数据直接写到下行。有两种方案,一种方式是audio hal拿到mic数据之后创建一个新的线程把数据写入到playback的pcm节点中,这种方式有个缺点就是需要在hal层写一个简单的混音算法(一般就是保证不越界的情况下两个数据相加),以及没办法处理重采样的问题。另一种方式是数据上报到AudioFlinger,这种方式就解决了重采样和混音的问题,但是这个方式的缺点就是延时相对来说会大一点。
通过调查代码我们发现Android是有一个createAudioPatch函数可以实现这个功能。首先我们先来看一下调用的代码
1. createAudioPatch的使用
我们知道当有外部设备插入的时候会调用AudioService的setWiredDeviceConnectionState函数,然后通过MSG_SET_WIRED_DEVICE_CONNECTION_STATE消息走到onSetWiredDeviceConnectionState函数,所以我们可以在onSetWiredDeviceConnectionState这个函数中去使用createAudioPatch函数去实现,代码如下:
// USB mic plugged in (state == 1): build a device-to-device audio patch from
// the USB input port to the speaker output port.
if(AudioSystem.DEVICE_IN_USB_DEVICE == device && 1 == state) {
try {
// Give the USB audio device time to finish enumerating before querying its port.
// NOTE(review): a fixed 2 s sleep on this path is fragile — confirm it cannot
// delay other device-connection handling.
Thread.sleep(2000);
} catch (InterruptedException e) {
e.printStackTrace();
}
// createAudioPatch() takes an in/out array; slot 0 carries the previous patch handle (if any)
// and receives the newly created one.
AudioPatch[] audioPatchArray = new AudioPatch[] { mAudioPatch };
usbMicPort = findAudioDevicePort(device, address);
speakerPort = findAudioDevicePort(AudioSystem.DEVICE_OUT_SPEAKER, "");
Log.d(TAG, "createAudioPatch usbMicPort ===== " +usbMicPort);
Log.d(TAG, "createAudioPatch speakerPort ===== " +speakerPort);
if(null != usbMicPort) {
// Source config: keep the port's currently active sample rate.
// NOTE(review): the magic arguments (12, 1) are presumably channel-mask and
// format constants — verify against AudioFormat before relying on them.
AudioPortConfig sourceConfig = usbMicPort.activeConfig();
sourceConfig = usbMicPort.buildConfig(sourceConfig.samplingRate(), 12, 1, null);
AudioPortConfig sinkConfig = speakerPort.activeConfig();
sinkConfig = speakerPort.buildConfig(sinkConfig.samplingRate(), 1, 1, null);
int result = AudioSystem.createAudioPatch(audioPatchArray, new AudioPortConfig[] {sourceConfig},
new AudioPortConfig[] {sinkConfig});
// Remember the handle so the patch can be released when the mic is unplugged.
mAudioPatch = audioPatchArray[0];
Log.d(TAG, "createAudioPatch result ===== " + result);
}
} else if(AudioSystem.DEVICE_IN_USB_DEVICE == device && 0 == state && null != mAudioPatch) {
// USB mic removed (state == 0): tear the loopback patch down.
AudioSystem.releaseAudioPatch(mAudioPatch);
mAudioPatch = null;
Log.d(TAG, "releaseAudioPatch---------");
}
通过上面的代码可以看到是通过findAudioDevicePort函数获取输入输出设备的deviceport,findAudioDevicePort函数的实现
/**
 * Looks up the {@link AudioDevicePort} matching the given device type and address.
 *
 * @param type    device type constant (e.g. AudioSystem.DEVICE_IN_USB_DEVICE)
 * @param address device address; "" matches ports with an empty address
 * @return the matching port, or null when type is DEVICE_NONE, the port list
 *         cannot be fetched, or no port matches
 */
private AudioDevicePort findAudioDevicePort(int type, String address) {
if (type == AudioManager.DEVICE_NONE) {
return null;
}
// Fix: the element type had been lost (raw ArrayList) — the enhanced-for
// below does not compile without ArrayList<AudioDevicePort>.
ArrayList<AudioDevicePort> devicePorts = new ArrayList<>();
if (AudioManager.listAudioDevicePorts(devicePorts) != AudioManager.SUCCESS) {
return null;
}
for (AudioDevicePort port : devicePorts) {
if (port.type() == type && port.address().equals(address)) {
return port;
}
}
return null;
}
这块主要是对于api的使用,这个不是本文的重点,我们不做过多关注,我们重点要关注createAudioPatch的实现,通过上面的代码我们知道这个函数是AudioSystem的方法,继续追查就是到native层AudioSystem.cpp中,然后通过binder会走到AudioPolicyManager::createAudioPatch函数中。
2. AudioPolicyManager::createAudioPatch函数分析
/*
 * Validates the requested patch and delegates to AudioFlinger's patch panel.
 * Only the device -> device branch (the one taken for the USB-mic -> speaker
 * loopback) is shown in full; the mix-source branches are elided.
 *
 * Fix: template arguments (sp<...>, SortedVector<...>) had been stripped by
 * HTML escaping in this listing and are restored below.
 * NOTE(review): patchDesc / index come from elided code at the top of the
 * original function (lookup of an existing patch by *handle).
 */
status_t AudioPolicyManager::createAudioPatch(const struct audio_patch *patch,
                                              audio_patch_handle_t *handle,
                                              uid_t uid)
{
    ALOGV("createAudioPatch()");
    if (patch->sources[0].type == AUDIO_PORT_TYPE_MIX) {
        // source is a mix (output re-routing) -- not used for this loopback, elided
    } else if (patch->sources[0].type == AUDIO_PORT_TYPE_DEVICE) {
        if (patch->sinks[0].type == AUDIO_PORT_TYPE_MIX) {
            // source is a device, sink is a mix (capture re-routing) -- elided
        } else if (patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) {
            // device to device connection
            if (patchDesc != 0) {
                // an existing patch may only be updated, not re-pointed to a new source
                if (patchDesc->mPatch.sources[0].id != patch->sources[0].id) {
                    return BAD_VALUE;
                }
            }
            sp<DeviceDescriptor> srcDeviceDesc =
                    mAvailableInputDevices.getDeviceFromId(patch->sources[0].id);
            if (srcDeviceDesc == 0) {
                return BAD_VALUE;
            }
            // update source and sink with our own data as the data passed in the patch may
            // be incomplete.
            struct audio_patch newPatch = *patch;
            srcDeviceDesc->toAudioPortConfig(&newPatch.sources[0], &patch->sources[0]);
            for (size_t i = 0; i < patch->num_sinks; i++) {
                if (patch->sinks[i].type != AUDIO_PORT_TYPE_DEVICE) {
                    ALOGV("createAudioPatch() source device but one sink is not a device");
                    return INVALID_OPERATION;
                }
                sp<DeviceDescriptor> sinkDeviceDesc =
                        mAvailableOutputDevices.getDeviceFromId(patch->sinks[i].id);
                if (sinkDeviceDesc == 0) {
                    return BAD_VALUE;
                }
                sinkDeviceDesc->toAudioPortConfig(&newPatch.sinks[i], &patch->sinks[i]);
                // create a software bridge in PatchPanel if:
                // - source and sink devices are on different HW modules OR
                // - audio HAL version is < 3.0
                if (!srcDeviceDesc->hasSameHwModuleAs(sinkDeviceDesc) ||
                        (srcDeviceDesc->mModule->getHalVersionMajor() < 3)) {
                    // support only one sink device for now to simplify output selection logic
                    if (patch->num_sinks > 1) {
                        return INVALID_OPERATION;
                    }
                    SortedVector<audio_io_handle_t> outputs =
                            getOutputsForDevice(sinkDeviceDesc->type(), mOutputs);
                    // if the sink device is reachable via an opened output stream, request to go via
                    // this output stream by adding a second source to the patch description
                    audio_io_handle_t output = selectOutput(outputs,
                                                            AUDIO_OUTPUT_FLAG_NONE,
                                                            AUDIO_FORMAT_INVALID);
                    if (output != AUDIO_IO_HANDLE_NONE) {
                        sp<SwAudioOutputDescriptor> outputDesc = mOutputs.valueFor(output);
                        if (outputDesc->isDuplicated()) {
                            return INVALID_OPERATION;
                        }
                        outputDesc->toAudioPortConfig(&newPatch.sources[1], &patch->sources[0]);
                        newPatch.sources[1].ext.mix.usecase.stream = AUDIO_STREAM_PATCH;
                        newPatch.num_sources = 2;
                    }
                }
            }
            // TODO: check from routing capabilities in config file and other conflicting patches
            audio_patch_handle_t afPatchHandle = AUDIO_PATCH_HANDLE_NONE;
            if (index >= 0) {
                afPatchHandle = patchDesc->mAfPatchHandle;
            }
            status_t status = mpClientInterface->createAudioPatch(&newPatch,
                                                                  &afPatchHandle,
                                                                  0);
            ALOGV("createAudioPatch() patch panel returned %d patchHandle %d",
                    status, afPatchHandle);
            if (status == NO_ERROR) {
                if (index < 0) {
                    patchDesc = new AudioPatch(&newPatch, uid);
                    addAudioPatch(patchDesc->mHandle, patchDesc);
                } else {
                    patchDesc->mPatch = newPatch;
                }
                patchDesc->mAfPatchHandle = afPatchHandle;
                *handle = patchDesc->mHandle;
                nextAudioPortGeneration();
                mpClientInterface->onAudioPatchListUpdate();
            } else {
                ALOGW("createAudioPatch() patch panel could not connect device patch, error %d",
                        status);
                return INVALID_OPERATION;
            }
        } else {
            return BAD_VALUE;
        }
    } else {
        return BAD_VALUE;
    }
    return NO_ERROR;
}
通过上面的代码我们看到这个函数通过binder走到AudioFlinger的createAudioPatch函数。
3. AudioFlinger::PatchPanel::createAudioPatch函数分析
/*
 * AudioFlinger side of patch creation for a device source: validates the
 * sinks, reuses (num_sources == 2) or opens a playback thread for the sink,
 * opens an input stream / record thread for the source device, then wires
 * the two together with createPatchConnections().
 *
 * Fix: sp<ThreadBase> template arguments had been stripped by HTML escaping
 * and are restored; the missing `status` declaration is added.
 * NOTE(review): `audioflinger` and `ifUsbSource` are declared/initialized in
 * parts of the original function elided from this excerpt.
 */
status_t AudioFlinger::PatchPanel::createAudioPatch(const struct audio_patch *patch,
                                                    audio_patch_handle_t *handle)
{
    status_t status = NO_ERROR;
    Patch *newPatch = new Patch(patch);
    switch (patch->sources[0].type) {
    case AUDIO_PORT_TYPE_DEVICE: {
        audio_module_handle_t srcModule = patch->sources[0].ext.device.hw_module;
        ssize_t index = audioflinger->mAudioHwDevs.indexOfKey(srcModule);
        if (index < 0) {
            ALOGW("createAudioPatch() bad src hw module %d", srcModule);
            status = BAD_VALUE;
            goto exit;
        }
        AudioHwDevice *audioHwDevice = audioflinger->mAudioHwDevs.valueAt(index);
        for (unsigned int i = 0; i < patch->num_sinks; i++) {
            // support only one sink if connection to a mix or across HW modules
            if ((patch->sinks[i].type == AUDIO_PORT_TYPE_MIX ||
                    patch->sinks[i].ext.mix.hw_module != srcModule) &&
                    patch->num_sinks > 1) {
                status = INVALID_OPERATION;
                goto exit;
            }
            // reject connection to different sink types
            if (patch->sinks[i].type != patch->sinks[0].type) {
                ALOGW("createAudioPatch() different sink types in same patch not supported");
                status = BAD_VALUE;
                goto exit;
            }
        }
        // manage patches requiring a software bridge
        // - special patch request with 2 sources (reuse one existing output mix) OR
        // - Device to device AND
        // - source HW module != destination HW module OR
        // - audio HAL does not support audio patches creation
        if ((patch->num_sources == 2) ||
                ((patch->sinks[0].type == AUDIO_PORT_TYPE_DEVICE) &&
                 ((patch->sinks[0].ext.device.hw_module != srcModule) ||
                  !audioHwDevice->supportsAudioPatches()))) {
            if (patch->num_sources == 2) {
                // Two sources: AudioPolicyManager already selected an opened output
                // mix, so look up its playback thread instead of opening a new output.
                if (patch->sources[1].type != AUDIO_PORT_TYPE_MIX ||
                        (patch->num_sinks != 0 && patch->sinks[0].ext.device.hw_module !=
                                patch->sources[1].ext.mix.hw_module)) {
                    ALOGW("createAudioPatch() invalid source combination");
                    status = INVALID_OPERATION;
                    goto exit;
                }
                sp<ThreadBase> thread =
                        audioflinger->checkPlaybackThread_l(patch->sources[1].ext.mix.handle);
                newPatch->mPlaybackThread = (MixerThread *)thread.get();
                if (thread == 0) {
                    ALOGW("createAudioPatch() cannot get playback thread");
                    status = INVALID_OPERATION;
                    goto exit;
                }
            } else {
                audio_config_t config = AUDIO_CONFIG_INITIALIZER;
                audio_devices_t device = patch->sinks[0].ext.device.type;
                String8 address = String8(patch->sinks[0].ext.device.address);
                audio_io_handle_t output = AUDIO_IO_HANDLE_NONE;
                sp<ThreadBase> thread = audioflinger->openOutput_l(
                        patch->sinks[0].ext.device.hw_module,
                        &output,
                        &config,
                        device,
                        address,
                        AUDIO_OUTPUT_FLAG_NONE);
                newPatch->mPlaybackThread = (PlaybackThread *)thread.get();
                ALOGV("audioflinger->openOutput_l() returned %p",
                        newPatch->mPlaybackThread.get());
                if (newPatch->mPlaybackThread == 0) {
                    status = NO_MEMORY;
                    goto exit;
                }
            }
            audio_devices_t device = patch->sources[0].ext.device.type;
            String8 address = String8(patch->sources[0].ext.device.address);
            audio_config_t config = AUDIO_CONFIG_INITIALIZER;
            // open input stream with source device audio properties if provided or
            // default to peer output stream properties otherwise.
            if (patch->sources[0].config_mask & AUDIO_PORT_CONFIG_SAMPLE_RATE) {
                config.sample_rate = patch->sources[0].sample_rate;
            } else {
                config.sample_rate = newPatch->mPlaybackThread->sampleRate();
            }
            if (patch->sources[0].config_mask & AUDIO_PORT_CONFIG_CHANNEL_MASK) {
                config.channel_mask = patch->sources[0].channel_mask;
            } else {
                config.channel_mask =
                        audio_channel_in_mask_from_count(newPatch->mPlaybackThread->channelCount());
            }
            if (patch->sources[0].config_mask & AUDIO_PORT_CONFIG_FORMAT) {
                config.format = patch->sources[0].format;
            } else {
                config.format = newPatch->mPlaybackThread->format();
            }
            audio_io_handle_t input = AUDIO_IO_HANDLE_NONE;
            // Local change: capture from the USB mic through the dedicated source so
            // the loopback does not take over the normal recording input selection.
            if (device == AUDIO_DEVICE_IN_USB_DEVICE)
                ifUsbSource = AUDIO_SOURCE_USB_MIC;
            sp<ThreadBase> thread = audioflinger->openInput_l(srcModule,
                                                              &input,
                                                              &config,
                                                              device,
                                                              address,
                                                              ifUsbSource,
                                                              AUDIO_INPUT_FLAG_NONE);
            newPatch->mRecordThread = (RecordThread *)thread.get();
            ALOGV("audioflinger->openInput_l() returned %p inChannelMask %08x",
                    newPatch->mRecordThread.get(), config.channel_mask);
            if (newPatch->mRecordThread == 0) {
                status = NO_MEMORY;
                goto exit;
            }
            status = createPatchConnections(newPatch, patch);
            if (status != NO_ERROR) {
                goto exit;
            }
        } else {
            // HAL supports patches within one module -- elided in this excerpt
        }
    } break;
    case AUDIO_PORT_TYPE_MIX: {
        // mix source (re-routing an output) -- elided in this excerpt
    } break;
    default:
        status = BAD_VALUE;
        goto exit;
    }
exit:
    return status;
}
通过上面的代码我们知道这个函数主要是打开输入输出设备,然后创建对应的播放和录音线程,最后再调用createPatchConnections函数,接下来我们来跟踪一下这个函数
4. AudioFlinger::PatchPanel::createPatchConnections函数分析
// Wires the record and playback halves of a software patch together: sizes a
// shared buffer from both threads' frame counts, creates a PatchRecord on the
// record thread and a PatchTrack on the playback thread sharing that buffer,
// ties them as peers and starts both.
// NOTE(review): `status` is declared in a part of the original function elided
// from this excerpt.
status_t AudioFlinger::PatchPanel::createPatchConnections(Patch *patch,
const struct audio_patch *audioPatch)
{
// use a pseudo LCM between input and output framecount
size_t playbackFrameCount = patch->mPlaybackThread->frameCount();
int playbackShift = __builtin_ctz(playbackFrameCount);
size_t recordFramecount = patch->mRecordThread->frameCount();
int shift = __builtin_ctz(recordFramecount);
if (playbackShift < shift) {
shift = playbackShift;
}
// product >> min(ctz) keeps the result a multiple of both frame counts
size_t frameCount = (playbackFrameCount * recordFramecount) >> shift;
ALOGI("createPatchConnections() playframeCount %zu recordFramecount %zu frameCount %zu",
playbackFrameCount, recordFramecount, frameCount);
// create a special record track to capture from record thread
uint32_t channelCount = patch->mPlaybackThread->channelCount();
audio_channel_mask_t inChannelMask = audio_channel_in_mask_from_count(channelCount);
audio_channel_mask_t outChannelMask = patch->mPlaybackThread->channelMask();
uint32_t sampleRate = patch->mPlaybackThread->sampleRate();
audio_format_t format = patch->mPlaybackThread->format();
patch->mPatchRecord = new RecordThread::PatchRecord(
patch->mRecordThread.get(),
sampleRate,
inChannelMask,
format,
frameCount,
NULL,
(size_t)0 /* bufferSize */,
AUDIO_INPUT_FLAG_NONE);
if (patch->mPatchRecord == 0) {
return NO_MEMORY;
}
status = patch->mPatchRecord->initCheck();
if (status != NO_ERROR) {
return status;
}
patch->mRecordThread->addPatchRecord(patch->mPatchRecord);
// create a special playback track to render to playback thread.
// this track is given the same buffer as the PatchRecord buffer
// Local change: when the source is the USB mic, request the FAST
// (low-latency) output path for the PatchTrack.
if(audioPatch->sources[0].ext.device.type == AUDIO_DEVICE_IN_USB_DEVICE) {
patch->mPatchTrack = new PlaybackThread::PatchTrack(
patch->mPlaybackThread.get(),
audioPatch->sources[1].ext.mix.usecase.stream,
sampleRate,
outChannelMask,
format,
frameCount,
patch->mPatchRecord->buffer(),
patch->mPatchRecord->bufferSize(),
AUDIO_OUTPUT_FLAG_FAST);
} else {
patch->mPatchTrack = new PlaybackThread::PatchTrack(
patch->mPlaybackThread.get(),
audioPatch->sources[1].ext.mix.usecase.stream,
sampleRate,
outChannelMask,
format,
frameCount,
patch->mPatchRecord->buffer(),
patch->mPatchRecord->bufferSize(),
AUDIO_OUTPUT_FLAG_NONE);
}
if (patch->mPatchTrack == 0) {
return NO_MEMORY;
}
status = patch->mPatchTrack->initCheck();
if (status != NO_ERROR) {
return status;
}
patch->mPlaybackThread->addPatchTrack(patch->mPatchTrack);
// tie playback and record tracks together
patch->mPatchRecord->setPeerProxy(patch->mPatchTrack.get());
patch->mPatchTrack->setPeerProxy(patch->mPatchRecord.get());
// start capture and playback
patch->mPatchRecord->start(AudioSystem::SYNC_EVENT_NONE, AUDIO_SESSION_NONE);
patch->mPatchTrack->start();
return status;
}
通过上面的代码可以看到这个函数首先是根据playbackFrameCount和recordFramecount计算出这个回环所需要的frameCount。然后分别创建record track和playback track,并把这两个track加到对应的tracks容器中去,接下来就是分别start playback和record,这样通路就连接成功了。这里面有个问题,frameCount的大小是怎么获取到的,因为这个值的大小是影响延时的直接因素。我们就以playbackFrameCount为线索跟踪一下,跟踪代码发现playbackFrameCount的值取决于AudioFlinger::PlaybackThread::readOutputParameters_l函数从hal层读取的buffersize和framesize。
调试这个功能遇到的最大的问题就是延时的问题,所以我们需要好好看一下hal层对于buffer的配置,因为buffer的大小是对延时最直接的影响。上面的代码可以看到我们做了修改,判断 if(audioPatch->sources[0].ext.device.type == AUDIO_DEVICE_IN_USB_DEVICE),当输入源是usb mic的时候PatchTrack的时候flag传一个AUDIO_OUTPUT_FLAG_FAST,对应的就是高通的low-latency模式,也就是低延时播放usecase。
看一下audio_hw.c对于latency的period_size的定义
/* ALSA pcm_config used by the HAL for the low-latency output path.
 * The small period_size/period_count keep the amount of buffered audio
 * (and therefore the output latency) low. */
struct pcm_config pcm_config_low_latency = {
.channels = 2,
.rate = DEFAULT_OUTPUT_SAMPLING_RATE,
.period_size = LOW_LATENCY_OUTPUT_PERIOD_SIZE,
.period_count = LOW_LATENCY_OUTPUT_PERIOD_COUNT,
.format = PCM_FORMAT_S16_LE,
/* start playback once a quarter period is queued */
.start_threshold = LOW_LATENCY_OUTPUT_PERIOD_SIZE / 4,
.stop_threshold = INT_MAX,
.avail_min = LOW_LATENCY_OUTPUT_PERIOD_SIZE / 4,
};
这里面宏的定义如下:
/* 240 frames x 2 periods = 480 frames of buffering; at a 48 kHz rate that is
 * about 10 ms (depends on DEFAULT_OUTPUT_SAMPLING_RATE — confirm). */
#define LOW_LATENCY_OUTPUT_PERIOD_SIZE 240
#define LOW_LATENCY_OUTPUT_PERIOD_COUNT 2
所以对于low-latency的buffer就已经很小了,理论上是可以达到延时的要求的,因为我们的车机music播放全车发声使用的device是AUDIO_DEVICE_OUT_BUS,然后实测发现当我们在AudioService.java中使用AUDIO_DEVICE_OUT_BUS的时候延时很大,通过log以及dump audioflinger发现走的并不是low-latency而是deep-buffer,由此可见hal层的usecase并不取决于createPatchConnections传入的AUDIO_OUTPUT_FLAG_FAST这个flag。那么这个又是从哪设置下去的呢?跟踪一下pcm_config_low_latency这个结构体的使用,发现是adev_open_output_stream函数会根据flag和devices在设置out->config的时候使用这个结构体,这也就清晰明了了,因为在开机的时候audio就会打开device,所以在framework上创建track的时候再设置flag就会不生效了。
但是我们又怎么解决延时问题,从而去使用low-latency这条通路呢,dump audio_policy发现AUDIO_DEVICE_OUT_SPEAKER使用的是PCM_CONFIG_AUDIO_PLAYBACK_PRIMARY,定义如下:
/* The primary playback usecase is aliased to the low-latency one, so routing
 * to AUDIO_DEVICE_OUT_SPEAKER picks pcm_config_low_latency. */
#define USECASE_AUDIO_PLAYBACK_PRIMARY USECASE_AUDIO_PLAYBACK_LOW_LATENCY
#define PCM_CONFIG_AUDIO_PLAYBACK_PRIMARY pcm_config_low_latency
所以我们可以在AudioService中createAudioPatch的时候使用AUDIO_DEVICE_OUT_SPEAKER。在调试的过程中发现使用low-latency之后会出现underrun,这时候就只需要适当调整period_size以及period_count的大小来找到一个underrun和延时在100ms以内的值就可以。
到这里K歌的耳返功能就已经打通了,但是这里有个问题就是Android只允许一个实例录音,也就是这条路通了之后录音功能以及车机的语音功能都不能用了,所以这就需要一种解决方案,找到的一种方式是在audiopolicy中添加一个source给这条通路使用。
这个比较简单,就简单说一下思路。首先需要在audio-base.h中的audio_source_t枚举中添加上自己添加的source,我这里定义为AUDIO_SOURCE_USB_MIC。然后在AudioPolicyManager的checkInputsForDevice函数去调用mpClientInterface->openInput之前判断是usb mic的话把source由默认的AUDIO_SOURCE_MIC改为自己定义的AUDIO_SOURCE_USB_MIC。因为Android原生的做法是录音的时候getInput的时候会优先选择活动中的外设,最后才会选择主mic,所以需要在Engine.cpp的getDeviceForInputSource函数中把所有source的case选择设备的时候,把usb mic删除或者是优先级放到最低(必须保证优先级在主mic之后)。最后一步就是在AudioFlinger的PatchPanel类的createAudioPatch函数去audioflinger->openInput_l之前判断是usb mic把source改成AUDIO_SOURCE_USB_MIC。
到这里K歌的usb mic到speaker的一个回路就已经完成了。
下面再介绍一下第一种方案(hal层直写)的实现:主要是在usb hal中检测到usb mic插入的时候new两个thread一边录音一边往播放的pcm中写数据。但是在这之前要做准备工作,首先要分别打开上下行的声卡,然后还要使能下行的pcm device。核心代码如下:
int init_audio(unsigned int output_card, unsigned int output_device)
{
int i;
struct pcm_config config_play;
struct pcm_config config_record;
struct pcm *play_pcm = NULL;
struct pcm *record_pcm = NULL;
char mixer_path[MIXER_PATH_MAX_LENGTH];
ALOGV("%s: usb_mic-speaker loopback started!!!", __func__);
for (i = 0; i < BUFFER_COUNT; i++) {
buffers_loop[i] = malloc(BUFFER_SIZE);
buffer_states[i] = BUFFER_EMPTY;
}
mixer = mixer_open(0);
if (!mixer) {
ALOGD("Failed to open mixer last time,try again");
mixer = mixer_open(0);
}
memset(&config_record, 0, sizeof(config_record));
config_record.channels = 2;
config_record.rate = 48000;
config_record.period_size = BUFFER_SIZE / 4;
config_record.period_count = 2;
config_record.format = PCM_FORMAT_S16_LE;
memset(&config_play, 0, sizeof(config_play));
config_play.channels = 2;
config_play.rate = 48000;
config_play.period_size = BUFFER_SIZE / 4;
config_play.period_count = 4;
config_play.start_threshold = config_play.period_size / 4;
config_play.avail_min = config_play.period_size / 4;
config_play.format = PCM_FORMAT_S16_LE;
tinymix_set_value(mixer, tinymix_control_Channels, &tinymix_control_ChannelsValues, 1);
tinymix_set_value(mixer, tinymix_control_Mixer, &tinymix_control_MixerValues, 1);
record_pcm = pcm_open(1, 0, PCM_IN, &config_record);
if (record_pcm && !pcm_is_ready(record_pcm)) {
ALOGE("%s: capture_thread pcm_open error", __func__);
pcm_close(record_pcm);
record_pcm = NULL;
return -1;
}
play_pcm = pcm_open(output_card, output_device, PCM_OUT, &config_play);
if (!play_pcm || !pcm_is_ready(play_pcm)) {
ALOGE("%s: Unable to open PCM device %d/%d for output (%s)\n",
__func__, output_card, output_device, pcm_get_error(play_pcm));
pcm_close(play_pcm);
play_pcm = NULL;
return -1;
}
pthread_create(&play_tid, NULL, capture_thread, record_pcm);
pthread_create(&play_tid, NULL, play_thread, play_pcm);
return 0;
}
tinymix_set_value是仿照alsa架构的tinymix工具写的一个使能pcm device的函数,记得我们创建的pthread要提高优先级,可以用androidSetThreadPriority函数。所有代码如下:
diff --git a/modules/usbaudio/Android.mk b/modules/usbaudio/Android.mk
index e319899..bfc3f76 100644
--- a/modules/usbaudio/Android.mk
+++ b/modules/usbaudio/Android.mk
@@ -24,7 +24,7 @@
external/tinyalsa/include \
$(call include-path-for, audio-utils) \
$(call include-path-for, alsa-utils)
-LOCAL_SHARED_LIBRARIES := liblog libcutils libtinyalsa libaudioutils libalsautils
+LOCAL_SHARED_LIBRARIES := liblog libcutils libtinyalsa libaudioutils libalsautils libutils
LOCAL_MODULE_TAGS := optional
LOCAL_CFLAGS := -Wno-unused-parameter
diff --git a/modules/usbaudio/audio_hal.c b/modules/usbaudio/audio_hal.c
index 61f7371..eaf02ec 100755
--- a/modules/usbaudio/audio_hal.c
+++ b/modules/usbaudio/audio_hal.c
@@ -49,6 +49,8 @@
#include
#include
#include "audio_hal.h"
+#include
+#include
#define DEFAULT_INPUT_BUFFER_SIZE_MS 20
@@ -58,6 +60,32 @@
#define MAX_LISTEN_QUEUE 5
#define SEND_BUFFER_SIZE 20
+#define BUFFER_COUNT 4
+#define BUFFER_SIZE 1920
+
+#define BUFFER_EMPTY 0
+#define BUFFER_BUSY 1
+#define BUFFER_FULL 2
+
+#define MIXER_PATH_MAX_LENGTH 100
+static char* buffers_loop[BUFFER_COUNT];
+static int buffer_states[BUFFER_COUNT];
+static int empty_index = 0;
+static int full_index = -1;
+
+static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_cond_t empty_cond = PTHREAD_COND_INITIALIZER;
+static pthread_cond_t full_cond = PTHREAD_COND_INITIALIZER;
+
+static pthread_t play_tid;
+static struct pcm *pcm_played = NULL;
+static struct pcm *pcm_recorded = NULL;
+struct mixer *mixer;
+static const char* tinymix_control_Channels = "QUAT_TDM_RX_0 Channels";
+static const char* tinymix_control_ChannelsValues = "Two";
+static const char* tinymix_control_Mixer = "QUAT_TDM_RX_0 Audio Mixer MultiMedia3";
+static const char* tinymix_control_MixerValues = "1";
+
typedef enum {
AUDIO_A2DP_STATE_STARTING,
AUDIO_A2DP_STATE_STARTED,
@@ -887,6 +915,357 @@
}
}
+/* Writes a byte-array (optionally TLV-wrapped) mixer control: parses each
+ * string in `values` as an integer in [0, 0xff] and pushes the array via
+ * mixer_ctl_set_array(). Exits the whole process on parse or set failure
+ * (behavior inherited from the tinymix tool). */
+static void tinymix_set_byte_ctl(struct mixer_ctl *ctl,
+ char **values, unsigned int num_values)
+{
+ int ret;
+ char *buf;
+ char *end;
+ unsigned int i;
+ long n;
+ unsigned int *tlv, tlv_size;
+ unsigned int tlv_header_size = 0;
+
+ if (mixer_ctl_is_access_tlv_rw(ctl)) {
+ tlv_header_size = TLV_HEADER_SIZE;
+ }
+
+ tlv_size = num_values + tlv_header_size;
+
+ buf = calloc(1, tlv_size);
+ if (buf == NULL) {
+ ALOGE("%s: set_byte_ctl: Failed to alloc mem for bytes %d", __func__, num_values);
+ exit(EXIT_FAILURE);
+ }
+
+ tlv = (unsigned int *)buf;
+ tlv[0] = 0;
+ tlv[1] = num_values;
+
+ for (i = 0; i < num_values; i++) {
+ errno = 0;
+ n = strtol(values[i], &end, 0);
+ if (*end) {
+ ALOGE("%s: %s not an integer", __func__, values[i]);
+ goto fail;
+ }
+ if (errno) {
+ ALOGE("%s: strtol: %s: %s", __func__, values[i], strerror(errno));
+ goto fail;
+ }
+ if (n < 0 || n > 0xff) {
+ ALOGE("%s: %s should be between [0, 0xff]", __func__, values[i]);
+ goto fail;
+ }
+ /* start filling after the TLV header */
+ buf[i + tlv_header_size] = n;
+ }
+
+ ret = mixer_ctl_set_array(ctl, buf, tlv_size);
+ if (ret < 0) {
+ ALOGE("%s: Failed to set binary control", __func__);
+ goto fail;
+ }
+
+ free(buf);
+ return;
+
+fail:
+ free(buf);
+ exit(EXIT_FAILURE);
+}
+
+/* tinymix-style "set control" helper: resolves `control` by numeric id or
+ * by name, then sets integer, enum or byte values from the given strings.
+ * Returns 0 on success or an errno-style value on failure. */
+static int tinymix_set_value(struct mixer *mixer, const char *control,
+ char **values, unsigned int num_values)
+{
+ ALOGV("tinymix_set_value, control: %s; values: %s; num_values %d:", control, *values, num_values);
+ struct mixer_ctl *ctl;
+ enum mixer_ctl_type type;
+ unsigned int num_ctl_values;
+ unsigned int i;
+
+ if (isdigit(control[0]))
+ ctl = mixer_get_ctl(mixer, atoi(control));
+ else
+ ctl = mixer_get_ctl_by_name(mixer, control);
+
+ if (!ctl) {
+ ALOGE("%s: Invalid mixer control: %s", __func__, control);
+ return ENOENT;
+ }
+
+ type = mixer_ctl_get_type(ctl);
+ num_ctl_values = mixer_ctl_get_num_values(ctl);
+
+ if (type == MIXER_CTL_TYPE_BYTE) {
+ tinymix_set_byte_ctl(ctl, values, num_values);
+ /* NOTE(review): returns ENOENT even when the byte control was set
+ * successfully — this probably should be `return 0;`. */
+ return ENOENT;
+ }
+
+ if (isdigit(values[0][0])) {
+ if (num_values == 1) {
+ /* Set all values the same */
+ int value = atoi(values[0]);
+
+ for (i = 0; i < num_ctl_values; i++) {
+ if (mixer_ctl_set_value(ctl, i, value)) {
+ ALOGE("%s: Error: invalid value", __func__);
+ return EINVAL;
+ }
+ }
+ } else {
+ /* Set multiple values */
+ if (num_values > num_ctl_values) {
+ ALOGE("%s: Error: %d values given, but control only takes %d",
+ __func__, num_values, num_ctl_values);
+ return EINVAL;
+ }
+ for (i = 0; i < num_values; i++) {
+ if (mixer_ctl_set_value(ctl, i, atoi(values[i]))) {
+ ALOGE("%s: Error: invalid value for index %d", __func__, i);
+ return EINVAL;
+ }
+ }
+ }
+ } else {
+ if (type == MIXER_CTL_TYPE_ENUM) {
+ if (num_values != 1) {
+ ALOGE("%s: Enclose strings in quotes and try again", __func__);
+ return EINVAL;
+ }
+ if (mixer_ctl_set_enum_by_string(ctl, values[0])) {
+ ALOGE("%s: Error: invalid enum value", __func__);
+ return EINVAL;
+ }
+ } else {
+ ALOGE("%s: Error: only enum types can be set with strings", __func__);
+ return EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/* Blocks until a slot in the BUFFER_COUNT-slot ring is EMPTY, marks it BUSY
+ * and returns its index. empty_index is advanced to the next slot if that
+ * one is also EMPTY, otherwise set to -1 until put_empty() republishes one. */
+static int get_empty()
+{
+ int index, other;
+
+ pthread_mutex_lock(&mutex);
+
+ while (empty_index == -1)
+ pthread_cond_wait(&empty_cond, &mutex);
+
+ index = empty_index;
+ /* wrap around at the last slot (BUFFER_COUNT - 1) */
+ if(index == 3)
+ other = 0;
+ else
+ other = index + 1;
+ //other = (index == 0 ? 1 : 0);
+ buffer_states[index] = BUFFER_BUSY;
+ if (buffer_states[other] == BUFFER_EMPTY)
+ empty_index = other;
+ else
+ empty_index = -1;
+
+ pthread_mutex_unlock(&mutex);
+ return index;
+}
+
+/* Returns slot `index` to the EMPTY state and wakes the capture thread if
+ * it was waiting for a free buffer. */
+static void put_empty(int index)
+{
+ pthread_mutex_lock(&mutex);
+
+ buffer_states[index] = BUFFER_EMPTY;
+ if (empty_index == -1) {
+ empty_index = index;
+ pthread_cond_signal(&empty_cond);
+ }
+
+ pthread_mutex_unlock(&mutex);
+}
+
+/* Blocks until a ring slot is FULL (filled by the capture thread), marks it
+ * BUSY and returns its index; mirror image of get_empty(). */
+static int get_full()
+{
+ int index, other;
+
+ pthread_mutex_lock(&mutex);
+
+ while (full_index == -1)
+ pthread_cond_wait(&full_cond, &mutex);
+
+ index = full_index;
+ /* wrap around at the last slot (BUFFER_COUNT - 1) */
+ if(index == 3)
+ other = 0;
+ else
+ other = index + 1;
+ //index = full_index;
+ //other = (index == 0 ? 1 : 0);
+ buffer_states[index] = BUFFER_BUSY;
+ if (buffer_states[other] == BUFFER_FULL)
+ full_index = other;
+ else
+ full_index = -1;
+
+ pthread_mutex_unlock(&mutex);
+ return index;
+}
+
+/* Marks slot `index` FULL and wakes the play thread if it was waiting for
+ * data to write. */
+static void put_full(int index)
+{
+ pthread_mutex_lock(&mutex);
+
+ buffer_states[index] = BUFFER_FULL;
+ if (full_index == -1) {
+ full_index = index;
+ pthread_cond_signal(&full_cond);
+ }
+
+ pthread_mutex_unlock(&mutex);
+}
+
+/* Capture loop: reads BUFFER_SIZE bytes from the USB mic PCM into an EMPTY
+ * ring slot and publishes it to the play thread. On a read error it tears
+ * down both PCM streams and the mixer, which also ends both loops.
+ * NOTE(review): pcm_recorded/pcm_played are shared between threads without
+ * atomics or the ring mutex — closing pcm_played here can race with the
+ * play thread's pcm_write(); confirm the shutdown ordering. */
+static void* capture_thread(void* arg)
+{
+ pcm_recorded = arg;
+ ALOGV("%s: capture_thread start pid:%d, tid:%d", __func__, getpid(), gettid());
+
+ androidSetThreadPriority(0, ANDROID_PRIORITY_URGENT_AUDIO);
+
+ while (pcm_recorded) {
+ int index = get_empty();
+ if (pcm_read(pcm_recorded, buffers_loop[index], BUFFER_SIZE)) {
+ ALOGE("%s: capture_thread pcm_read error", __func__);
+ put_empty(index);
+ if(pcm_recorded != NULL) {
+ pcm_close(pcm_recorded);
+ pcm_recorded = NULL;
+ }
+ if(pcm_played != NULL) {
+ pcm_close(pcm_played);
+ pcm_played = NULL;
+ }
+ mixer_close(mixer);
+ } else {
+ put_full(index);
+ }
+ }
+
+ ALOGV("%s: capture_thread done", __func__);
+ pthread_exit(NULL);
+
+ return NULL;
+}
+/* Playback loop: takes a FULL ring slot and pcm_write()s it to the output
+ * PCM, then recycles the slot. On a write failure the output stream is
+ * closed, which ends the loop. The commented-out block below was an
+ * experiment that delayed playback by remapping buffer indices. */
+static void* play_thread(void* arg)
+{
+ int index, err;
+ int first = 0;
+ pcm_played = arg;
+ ALOGV("%s: play_thread start pid:%d, tid:%d", __func__, getpid(), gettid());
+
+ androidSetThreadPriority(0, ANDROID_PRIORITY_URGENT_AUDIO);
+
+ while (pcm_played) {
+ index = get_full();
+ /*if (first < 1) {
+ first ++;
+ continue;
+ }
+ switch (index) {
+ case 0:
+ index = 3;
+ break;
+ case 1:
+ index = 0;
+ break;
+ case 2:
+ index = 1;
+ break;
+ case 3:
+ index = 2;
+ break;
+ }*/
+ err = pcm_write(pcm_played, buffers_loop[index], BUFFER_SIZE);
+ if (err) {
+ ALOGE("%s: pcm_write faild: %s", __func__, pcm_get_error(pcm_played));
+ if(pcm_played != NULL) {
+ pcm_close(pcm_played);
+ pcm_played = NULL;
+ }
+ }
+
+ put_empty(index);
+ }
+
+ ALOGV("%s: play_thread done", __func__);
+
+ pthread_exit(NULL);
+
+ return NULL;
+}
+
+/* Creates the loopback: allocates the ring buffers, enables the downlink
+ * path via two mixer controls, opens the capture PCM (card 1 / device 0)
+ * and the playback PCM (output_card/output_device), then starts the
+ * capture and play threads. Returns 0 on success, -1 on failure.
+ * NOTE(review): both pthread_create() calls store into play_tid, so the
+ * capture thread's id is lost; malloc()/mixer_open() results are not
+ * checked; buffers are leaked on the error paths; the record_pcm check
+ * does not mirror the play_pcm one (a NULL pcm_open() result slips
+ * through); mixer_path is unused. */
+int init_audio(unsigned int output_card, unsigned int output_device)
+{
+ int i;
+ struct pcm_config config_play;
+ struct pcm_config config_record;
+ struct pcm *play_pcm = NULL;
+ struct pcm *record_pcm = NULL;
+ char mixer_path[MIXER_PATH_MAX_LENGTH];
+
+ ALOGV("%s: usb_mic-speaker loopback started!!!", __func__);
+
+ for (i = 0; i < BUFFER_COUNT; i++) {
+ buffers_loop[i] = malloc(BUFFER_SIZE);
+ buffer_states[i] = BUFFER_EMPTY;
+ }
+
+ mixer = mixer_open(0);
+ if (!mixer) {
+ ALOGD("Failed to open mixer last time,try again");
+ mixer = mixer_open(0);
+ }
+
+ memset(&config_record, 0, sizeof(config_record));
+ config_record.channels = 2;
+ config_record.rate = 48000;
+ config_record.period_size = BUFFER_SIZE / 4;
+ config_record.period_count = 2;
+ config_record.format = PCM_FORMAT_S16_LE;
+
+ memset(&config_play, 0, sizeof(config_play));
+ config_play.channels = 2;
+ config_play.rate = 48000;
+ config_play.period_size = BUFFER_SIZE / 4;
+ config_play.period_count = 4;
+ config_play.start_threshold = config_play.period_size / 4;
+ config_play.avail_min = config_play.period_size / 4;
+ config_play.format = PCM_FORMAT_S16_LE;
+
+ tinymix_set_value(mixer, tinymix_control_Channels, &tinymix_control_ChannelsValues, 1);
+ tinymix_set_value(mixer, tinymix_control_Mixer, &tinymix_control_MixerValues, 1);
+ record_pcm = pcm_open(1, 0, PCM_IN, &config_record);
+ if (record_pcm && !pcm_is_ready(record_pcm)) {
+ ALOGE("%s: capture_thread pcm_open error", __func__);
+ pcm_close(record_pcm);
+ record_pcm = NULL;
+
+ return -1;
+ }
+ play_pcm = pcm_open(output_card, output_device, PCM_OUT, &config_play);
+ if (!play_pcm || !pcm_is_ready(play_pcm)) {
+ ALOGE("%s: Unable to open PCM device %d/%d for output (%s)\n",
+ __func__, output_card, output_device, pcm_get_error(play_pcm));
+ pcm_close(play_pcm);
+ play_pcm = NULL;
+
+ return -1;
+ }
+
+ pthread_create(&play_tid, NULL, capture_thread, record_pcm);
+ pthread_create(&play_tid, NULL, play_thread, play_pcm);
+
+ return 0;
+}
+
static size_t adev_get_input_buffer_size(const struct audio_hw_device *hw_dev,
const struct audio_config *config)
{
@@ -1285,6 +1664,8 @@
free(in);
}
+ init_audio(0, 4);
+
return ret;
}