本文背景是需要通过浏览器进行录音,生成可供百度语音识别api使用的pcm音频文件,根据百度语音识别api文档要求,音频文件为pcm格式,单声道,16k采样率,16位深。本文仅就以上功能做最基本的实现。
音频源,也就是音频输入,可以是直接从设备输入的音频,也可以是远程获取的音频文件。
处理节点,分析器和处理器,比如音调节点,音量节点,声音处理节点。
输出源,指音频渲染设备,一般情况下是用户设备的扬声器,即context.destination。其实,音频源和输出源也都可以视为节点,这三者的关系可以用这张图表示:
注:1.AudioContext的详细介绍可以参考AudioContext说明文档
2.实际使用中可以存在多个处理节点,都可以使用connect依次关联起来
3.输入源和输出源实质上也是节点,整个音频处理的过程就是每经过一个节点,就对音频信号做一次处理
按照上述流程顺序,依次讲解录音过程
2.1 输入源 getUserMedia
首先调用用户的麦克风设备,需要用到navigator.mediaDevices.getUserMedia()
这个方法,该方法会返回一个promise,成功回调的参数是一个MediaStream对象,该对象就是麦克风采集的语音。
然后该MediaStream流媒体对象需要传入createMediaStreamSource()方法中,用于创建一个新的MediaStreamAudioSourceNode对象,也就是创建了一个输入源节点,从而使得来自MediaStream的音频可以被播放和操作。
// Request microphone access; the fulfilled MediaStream carries the live audio.
navigator.mediaDevices
  .getUserMedia({ audio: true })
  .then(function (stream) {
    var audioContext = new AudioContext()
    // Wrap the stream in a source node so the Web Audio graph can consume it.
    var sourceNode = audioContext.createMediaStreamSource(stream)
  })
2.2 处理节点createScriptProcessor
createScriptProcessor()方法用于创建一个处理节点,该处理节点有三个参数:
// Create a processing node: 4096-sample buffer, 1 input channel, 1 output channel.
var scriptNode = audioContext.createScriptProcessor(4096, 1, 1)
// Fires every time a 4096-sample buffer fills; channel 0 is the mono signal.
scriptNode.onaudioprocess = function (event) {
  var mono = event.inputBuffer.getChannelData(0)
  audioData.input(mono)
}
//对音频信号进行处理
// Accumulates raw Float32 samples and converts them into a 16 kHz,
// 16-bit, mono PCM Blob (the format Baidu's speech API expects).
var audioData = {
  size: 0,    // total number of Float32 samples buffered so far
  buffer: [], // list of Float32Array chunks, one per audioprocess event
  // Append one chunk of mono samples. Copied into a fresh Float32Array,
  // since the audioprocess event reuses its underlying buffer.
  input: function (data) {
    this.buffer.push(new Float32Array(data))
    this.size += data.length
  },
  // Build the final PCM file: downsample to 16 kHz, quantize to 16-bit,
  // wrap in a Blob. Browsers cannot play raw PCM directly.
  getData: function () {
    var sampleBits = 16
    // NOTE(review): assumes the capture rate is 44.1 kHz; the real rate is
    // audioContext.sampleRate (often 48000) — confirm against the caller.
    var inputSampleRate = 44100
    var outputSampleRate = 16000
    var bytes = this.decompress(this.buffer, this.size, inputSampleRate, outputSampleRate)
    var dataLen = bytes.length * (sampleBits / 8)
    var buffer = new ArrayBuffer(dataLen)
    var data = new DataView(buffer)
    var offset = 0
    data = this.reshapeData(sampleBits, offset, bytes, data)
    return new Blob([data], { type: 'audio/pcm' })
  },
  // Flatten the chunk list into one Float32Array, then downsample by keeping
  // one sample every `interval` input samples (decimation, no low-pass filter).
  decompress: function (buffer, size, inputSampleRate, outputSampleRate) {
    var data = new Float32Array(size)
    var offset = 0
    for (var i = 0; i < buffer.length; i++) {
      data.set(buffer[i], offset)
      offset += buffer[i].length
    }
    // BUG FIX: use Math.floor, not parseInt, and floor the output length —
    // a fractional length makes the Float32Array constructor throw RangeError.
    var interval = Math.floor(inputSampleRate / outputSampleRate)
    var length = Math.floor(data.length / interval)
    var result = new Float32Array(length)
    for (var index = 0, j = 0; index < length; index++, j += interval) {
      result[index] = data[j]
    }
    return result
  },
  // Quantize [-1, 1] floats to little-endian signed 16-bit integers.
  reshapeData: function (sampleBits, offset, bytes, data) {
    var s
    for (var i = 0; i < bytes.length; i++, offset += (sampleBits / 8)) {
      s = Math.max(-1, Math.min(1, bytes[i]))
      data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true)
    }
    return data
  }
}
2.3 输出源 audioContext.destination
输出源为connect的最终节点
以上三个节点Source Node,scriptNode 和 audioContext.destination通过connect相连
// Wire the graph: microphone source -> script processor -> speakers.
sourceNode.connect(scriptNode) // connect the input source node to the processing node
scriptNode.connect(audioContext.destination) // connect the processing node to the output (destination) node
test.vue
<template>
  <div>
    <audio id="player" controls autoplay></audio>
    <button id="btn-start-recording" @click="startRecording">录音</button>
    <button id="btn-stop-recording" @click="stopRecording">停止</button>
    <button id="btn-play-recording" @click="getRecording">获取文件</button>
  </div>
</template>
<script>
import { Recorder } from '../utils/recorder'
// Module-level handle to the active Recorder instance, shared by all methods.
var recorder = null
export default {
  name: 'test',
  data: function () {
    return {
      area: 'province',
      recorder: null,
      temp: null
    }
  },
  methods: {
    // Ask for the microphone, then start capturing audio.
    startRecording: function () {
      Recorder.get(function (rec) {
        recorder = rec
        recorder.start()
      })
    },
    // Stop capturing; guarded so "stop before start" does not throw.
    stopRecording: function () {
      if (recorder !== null) {
        recorder.stop()
      }
    },
    // Retrieve the recorded audio as a 16 kHz / 16-bit mono PCM Blob.
    getRecording: function () {
      var file = recorder.getBlob()
      console.log(file)
    }
  },
  // BUG FIX: beforeDestroy is a lifecycle hook and must be a sibling of
  // `methods`, not nested inside it — otherwise Vue never invokes it and
  // the recorder keeps running after the component is torn down.
  beforeDestroy () {
    if (recorder !== null) {
      recorder.stop()
      recorder = null
    }
  }
}
</script>
recorder.js
// Records a MediaStream (microphone) and exposes the captured audio as a
// 16 kHz / 16-bit / mono PCM Blob. Usage: start() -> stop() -> getBlob().
function Recorder (stream) {
  var audioContext = new AudioContext()
  var sourceNode = audioContext.createMediaStreamSource(stream)
  // 4096-sample buffer, 1 input channel, 1 output channel.
  var scriptNode = audioContext.createScriptProcessor(4096, 1, 1)
  var audioData = {
    size: 0,    // total number of Float32 samples buffered so far
    buffer: [], // list of Float32Array chunks, one per audioprocess event
    // Append one chunk of mono samples (copied; the event reuses its buffer).
    input: function (data) {
      this.buffer.push(new Float32Array(data))
      this.size += data.length
    },
    // Downsample to 16 kHz, quantize to 16-bit, wrap in a Blob.
    getData: function () {
      var sampleBits = 16
      // BUG FIX: use the context's actual capture rate instead of a
      // hard-coded 44100 — many devices capture at 48000, and a wrong input
      // rate makes the downsampled audio play at the wrong speed/pitch.
      var inputSampleRate = audioContext.sampleRate
      var outputSampleRate = 16000
      var bytes = this.decompress(this.buffer, this.size, inputSampleRate, outputSampleRate)
      var dataLen = bytes.length * (sampleBits / 8)
      var buffer = new ArrayBuffer(dataLen) // raw PCM; browsers cannot play it directly
      var data = new DataView(buffer)
      data = this.reshapeData(sampleBits, 0, bytes, data)
      return new Blob([data], { type: 'audio/pcm' })
    },
    // Flatten the chunk list into one Float32Array, then decimate: keep one
    // sample every `interval` input samples (no low-pass filtering).
    decompress: function (buffer, size, inputSampleRate, outputSampleRate) {
      var data = new Float32Array(size)
      var offset = 0
      for (var i = 0; i < buffer.length; i++) {
        data.set(buffer[i], offset)
        offset += buffer[i].length
      }
      // BUG FIX: Math.floor (not parseInt) on numbers, and floor the output
      // length — a fractional length makes new Float32Array() throw.
      var interval = Math.floor(inputSampleRate / outputSampleRate)
      var length = Math.floor(data.length / interval)
      var result = new Float32Array(length)
      for (var index = 0, j = 0; index < length; index++, j += interval) {
        result[index] = data[j]
      }
      return result
    },
    // Quantize [-1, 1] floats to little-endian signed 16-bit integers.
    reshapeData: function (sampleBits, offset, bytes, data) {
      var s
      for (var i = 0; i < bytes.length; i++, offset += (sampleBits / 8)) {
        s = Math.max(-1, Math.min(1, bytes[i]))
        data.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true)
      }
      return data
    }
  }
  // Collect each processed buffer; channel 0 is the mono signal.
  scriptNode.onaudioprocess = function (event) {
    audioData.input(event.inputBuffer.getChannelData(0))
  }
  // Begin capturing: source -> processor -> destination. The processor only
  // fires while it is connected to the destination.
  this.start = function () {
    sourceNode.connect(scriptNode)
    scriptNode.connect(audioContext.destination)
  }
  // Pause capturing by detaching the graph. The MediaStream itself is left
  // running so start() can resume recording.
  this.stop = function () {
    sourceNode.disconnect()
    scriptNode.disconnect()
  }
  // Return everything captured so far as a PCM Blob.
  this.getBlob = function () {
    return audioData.getData()
  }
}
// Request microphone access and hand a ready Recorder to `callback`.
// `errorCallback` (optional, backward-compatible) receives the rejection
// reason, e.g. when the user denies the permission prompt.
Recorder.get = function (callback, errorCallback) {
  navigator.mediaDevices.getUserMedia({ audio: true })
    .then(function (stream) {
      callback(new Recorder(stream))
    })
    .catch(function (err) {
      // BUG FIX: the original left the rejection unhandled, so a denied
      // permission (or missing device) failed silently.
      if (errorCallback) {
        errorCallback(err)
      } else {
        console.error('getUserMedia failed:', err)
      }
    })
}
export { Recorder }