1. Interface functions: as with local file playout, the main difference between the two overloads is what they take as the source, a file name or a stream (InStream). The remaining parameters control whether playback loops, whether the file is mixed with the microphone capture or replaces it, the file format/codec to use, and the volume scaling (a minimal InStream sketch follows the declarations below).
int StartPlayingFileAsMicrophone(int channel,
const char fileNameUTF8[1024],
bool loop = false,
bool mixWithMicrophone = false,
FileFormats format = kFileFormatPcm16kHzFile,
float volumeScaling = 1.0) override;
int StartPlayingFileAsMicrophone(int channel,
InStream* stream,
bool mixWithMicrophone = false,
FileFormats format = kFileFormatPcm16kHzFile,
float volumeScaling = 1.0) override;
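For the stream overload, the caller supplies an object implementing WebRTC's InStream interface. The adapter below is only a hedged sketch, assuming InStream declares Read(void*, size_t) and Rewind() as in common_types.h; the class name and include path are placeholders, not part of the original code.

#include <fstream>
#include "webrtc/common_types.h"  // InStream (path may differ per WebRTC revision)

// Hypothetical adapter: exposes a file on disk through the InStream
// interface expected by the stream-based overload.
class IfstreamInStream : public webrtc::InStream {
 public:
  explicit IfstreamInStream(const char* path)
      : file_(path, std::ios::binary) {}

  // Returns the number of bytes actually read, or -1 on error.
  int Read(void* buf, size_t len) override {
    if (!file_.is_open())
      return -1;
    file_.read(static_cast<char*>(buf), static_cast<std::streamsize>(len));
    return static_cast<int>(file_.gcount());
  }

  // Rewind to the beginning so playback can loop or restart.
  int Rewind() override {
    file_.clear();
    file_.seekg(0, std::ios::beg);
    return file_.good() ? 0 : -1;
  }

 private:
  std::ifstream file_;
};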
2. The behavior depends on the channel argument. If it is -1, the implementation calls into transmit_mixer, so the file is mixed into, or replaces, the microphone capture of all sending channels; any other value mixes into, or replaces, the mic capture of that one channel only (a caller-side sketch follows the function below).
int VoEFileImpl::StartPlayingFileAsMicrophone(int channel,
const char fileNameUTF8[1024],
bool loop,
bool mixWithMicrophone,
FileFormats format,
float volumeScaling) {
WEBRTC_TRACE(kTraceApiCall, kTraceVoice, VoEId(_shared->instance_id(), -1),
"StartPlayingFileAsMicrophone(channel=%d, fileNameUTF8=%s, "
"loop=%d, mixWithMicrophone=%d, format=%d, "
"volumeScaling=%5.3f)",
channel, fileNameUTF8, loop, mixWithMicrophone, format,
volumeScaling);
static_assert(1024 == FileWrapper::kMaxFileNameSize, "");
if (!_shared->statistics().Initialized()) {
_shared->SetLastError(VE_NOT_INITED, kTraceError);
return -1;
}
const uint32_t startPointMs(0);
const uint32_t stopPointMs(0);
if (channel == -1) {
int res = _shared->transmit_mixer()->StartPlayingFileAsMicrophone(
fileNameUTF8, loop, format, startPointMs, volumeScaling, stopPointMs,
NULL);
if (res) {
WEBRTC_TRACE(
kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
"StartPlayingFileAsMicrophone() failed to start playing file");
return (-1);
} else {
_shared->transmit_mixer()->SetMixWithMicStatus(mixWithMicrophone);
return (0);
}
} else {
// Add file after demultiplexing <=> affects one channel only
voe::ChannelOwner ch = _shared->channel_manager().GetChannel(channel);
voe::Channel* channelPtr = ch.channel();
if (channelPtr == NULL) {
_shared->SetLastError(
VE_CHANNEL_NOT_VALID, kTraceError,
"StartPlayingFileAsMicrophone() failed to locate channel");
return -1;
}
int res = channelPtr->StartPlayingFileAsMicrophone(
fileNameUTF8, loop, format, startPointMs, volumeScaling, stopPointMs,
NULL);
if (res) {
WEBRTC_TRACE(
kTraceError, kTraceVoice, VoEId(_shared->instance_id(), -1),
"StartPlayingFileAsMicrophone() failed to start playing file");
return -1;
} else {
channelPtr->SetMixWithMicStatus(mixWithMicrophone);
return 0;
}
}
}
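To make the -1 vs. per-channel dispatch concrete, here is a hedged caller-side sketch. It assumes the usual VoE sub-API pattern (VoiceEngine::Create(), GetInterface() on VoEBase/VoEFile); the include paths, file names, and return-value handling are simplified placeholders.

#include "webrtc/voice_engine/include/voe_base.h"
#include "webrtc/voice_engine/include/voe_file.h"

void PlayFileAsMicExample() {
  webrtc::VoiceEngine* voe = webrtc::VoiceEngine::Create();
  webrtc::VoEBase* base = webrtc::VoEBase::GetInterface(voe);
  webrtc::VoEFile* file = webrtc::VoEFile::GetInterface(voe);
  base->Init();
  int channel = base->CreateChannel();

  // channel == -1: handled by transmit_mixer, affects every sending channel.
  // Here the file is mixed with (not replacing) the real microphone signal.
  file->StartPlayingFileAsMicrophone(-1, "background_16k.pcm",
                                     true /* loop */,
                                     true /* mixWithMicrophone */);

  // Specific channel: handled by that channel only; with
  // mixWithMicrophone == false the file replaces the mic capture.
  file->StartPlayingFileAsMicrophone(channel, "announcement_16k.pcm",
                                     false /* loop */,
                                     false /* mixWithMicrophone */);

  // ... later ...
  file->StopPlayingFileAsMicrophone(channel);
  file->StopPlayingFileAsMicrophone(-1);

  file->Release();
  base->Release();
  webrtc::VoiceEngine::Delete(voe);
}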
3. A FilePlayer instance is then created to decode the given file or stream (a standalone FilePlayer sketch follows the function below).
int Channel::StartPlayingFileAsMicrophone(const char* fileName,
bool loop,
FileFormats format,
int startPosition,
float volumeScaling,
int stopPosition,
const CodecInst* codecInst) {
WEBRTC_TRACE(kTraceInfo, kTraceVoice, VoEId(_instanceId, _channelId),
"Channel::StartPlayingFileAsMicrophone(fileNameUTF8[]=%s, "
"loop=%d, format=%d, volumeScaling=%5.3f, startPosition=%d, "
"stopPosition=%d)",
fileName, loop, format, volumeScaling, startPosition,
stopPosition);
rtc::CritScope cs(&_fileCritSect);
if (channel_state_.Get().input_file_playing) {
_engineStatisticsPtr->SetLastError(
VE_ALREADY_PLAYING, kTraceWarning,
"StartPlayingFileAsMicrophone() filePlayer is playing");
return 0;
}
// Destroy the old instance
if (input_file_player_) {
input_file_player_->RegisterModuleFileCallback(NULL);
input_file_player_.reset();
}
// Create the instance
input_file_player_ = FilePlayer::CreateFilePlayer(_inputFilePlayerId,
(const FileFormats)format);
if (!input_file_player_) {
_engineStatisticsPtr->SetLastError(
VE_INVALID_ARGUMENT, kTraceError,
"StartPlayingFileAsMicrophone() filePlayer format isnot correct");
return -1;
}
const uint32_t notificationTime(0);
if (input_file_player_->StartPlayingFile(
fileName, loop, startPosition, volumeScaling, notificationTime,
stopPosition, (const CodecInst*)codecInst) != 0) {
_engineStatisticsPtr->SetLastError(
VE_BAD_FILE, kTraceError,
"StartPlayingFile() failed to start file playout");
input_file_player_->StopPlayingFile();
input_file_player_.reset();
return -1;
}
input_file_player_->RegisterModuleFileCallback(this);
channel_state_.SetInputFilePlaying(true);
return 0;
}
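The FilePlayer calls that Channel uses above (CreateFilePlayer, StartPlayingFile, Get10msAudioFromFile, StopPlayingFile) can also be driven directly. Below is a minimal sketch assuming a 16 kHz PCM file; the include path, instance id, and file name are placeholders, and auto is used because the return type of CreateFilePlayer has changed across WebRTC revisions.

#include "webrtc/modules/utility/include/file_player.h"

// Pull decoded 10 ms frames straight out of a FilePlayer, the same way
// Channel::MixOrReplaceAudioWithFile() does on the capture path.
void DecodeTenMilliseconds() {
  auto player = webrtc::FilePlayer::CreateFilePlayer(
      /*instanceID=*/1, webrtc::kFileFormatPcm16kHzFile);
  if (!player)
    return;

  if (player->StartPlayingFile("prompt_16k.pcm", /*loop=*/false,
                               /*startPosition=*/0, /*volumeScaling=*/1.0f,
                               /*notification=*/0, /*stopPosition=*/0,
                               /*codecInst=*/nullptr) != 0) {
    return;
  }

  int16_t buffer[640];  // Large enough for one 10 ms mono frame.
  size_t samples = 0;   // Filled with the number of decoded samples.
  player->Get10msAudioFromFile(buffer, &samples, /*frequencyInHz=*/16000);

  player->StopPlayingFile();
}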
4. Fetching the decoded PCM data: every 10 ms the decoded samples are either mixed with the microphone frame or replace it (a simplified saturating-mix sketch follows the function below):
// TODO(andrew): refactor Mix functions here and in transmit_mixer.cc to use
// a shared helper.
int32_t Channel::MixOrReplaceAudioWithFile(int mixingFrequency) {
std::unique_ptr<int16_t[]> fileBuffer(new int16_t[640]);
size_t fileSamples(0);
{
rtc::CritScope cs(&_fileCritSect);
if (!input_file_player_) {
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
"Channel::MixOrReplaceAudioWithFile() fileplayer"
" doesnt exist");
return -1;
}
if (input_file_player_->Get10msAudioFromFile(fileBuffer.get(), &fileSamples,
mixingFrequency) == -1) {
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
"Channel::MixOrReplaceAudioWithFile() file mixing "
"failed");
return -1;
}
if (fileSamples == 0) {
WEBRTC_TRACE(kTraceWarning, kTraceVoice, VoEId(_instanceId, _channelId),
"Channel::MixOrReplaceAudioWithFile() file is ended");
return 0;
}
}
assert(_audioFrame.samples_per_channel_ == fileSamples);
if (_mixFileWithMicrophone) {
// Currently file stream is always mono.
// TODO(xians): Change the code when FilePlayer supports real stereo.
MixWithSat(_audioFrame.data_, _audioFrame.num_channels_, fileBuffer.get(),
1, fileSamples);
} else {
// Replace ACM audio with file.
// Currently file stream is always mono.
// TODO(xians): Change the code when FilePlayer supports real stereo.
_audioFrame.UpdateFrame(
_channelId, 0xFFFFFFFF, fileBuffer.get(), fileSamples, mixingFrequency,
AudioFrame::kNormalSpeech, AudioFrame::kVadUnknown, 1);
}
return 0;
}
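MixWithSat() adds the mono file samples into the (possibly multi-channel) microphone frame with saturation, so each sum is clamped to the int16_t range instead of wrapping around. The helper below is a simplified, hypothetical illustration of that idea, not WebRTC's actual utility implementation.

#include <algorithm>
#include <cstddef>
#include <cstdint>

// Simplified sketch of a saturating mix: add a mono source into an
// interleaved target and clamp each sum to the int16_t range.
void MixMonoIntoFrameWithSat(int16_t* target,
                             size_t target_channels,
                             const int16_t* mono_source,
                             size_t samples_per_channel) {
  for (size_t i = 0; i < samples_per_channel; ++i) {
    for (size_t ch = 0; ch < target_channels; ++ch) {
      int32_t sum = static_cast<int32_t>(target[i * target_channels + ch]) +
                    static_cast<int32_t>(mono_source[i]);
      sum = std::max<int32_t>(INT16_MIN, std::min<int32_t>(INT16_MAX, sum));
      target[i * target_channels + ch] = static_cast<int16_t>(sum);
    }
  }
}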