Learning HTML5 Audio Recording


I have played with WebRTC before: HTML5 makes it easy to build voice and video chat, and there are plenty of examples online. Recently I wanted to find out whether HTML5 can also record audio, and which APIs make that possible. Here are my study notes.

Browsers

To capture audio and video you need getUserMedia. On the desktop it is supported by Chrome, Firefox, Opera, and Edge. On mobile it is supported by Chrome, Firefox, and Opera, but only on Android; iOS does not support it. Microsoft seems to have abandoned IE in favor of Edge, while Apple's HTML5 support has never been strong. A while ago there was even news of a developer suing Apple over its HTML5 support.
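
Given the uneven support, it is worth feature-detecting the API before relying on it. A minimal check might look like this:

if (navigator.mediaDevices && navigator.mediaDevices.getUserMedia) {
   console.log('getUserMedia supported.');
} else {
   console.log('getUserMedia not supported on your browser!');
}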

Getting the Audio/Video Stream

There are many examples online; this is the simplest snippet, taken from Mozilla's documentation:

var p = navigator.mediaDevices.getUserMedia({ audio: true, video: true });

p.then(function(mediaStream) {
  var video = document.querySelector('video');
  video.src = window.URL.createObjectURL(mediaStream);
  video.onloadedmetadata = function(e) {
    // Do something with the video here.
    video.play();
  };
});

p.catch(function(err) { console.log(err.name); }); // always check for errors at the end.
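
Note that createObjectURL with a MediaStream has since been deprecated; in current browsers you would assign the stream directly with video.srcObject = mediaStream instead.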

Recording Audio

Mozilla's HTML5 documentation describes MediaRecorder. This API is simple and convenient, but it is fairly new and browser compatibility is limited.

(The desktop and mobile browser compatibility tables from MDN are not reproduced here.)
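
Where it is available, usage is straightforward. A minimal sketch, assuming an audio-only recording of a few seconds:

navigator.mediaDevices.getUserMedia({ audio: true }).then(function(stream) {
  var recorder = new MediaRecorder(stream);
  var chunks = [];
  recorder.ondataavailable = function(e) { chunks.push(e.data); };
  recorder.onstop = function() {
    // Assemble the recorded chunks into a single blob for saving or uploading
    var blob = new Blob(chunks, { type: recorder.mimeType });
  };
  recorder.start();
  setTimeout(function() { recorder.stop(); }, 3000); // stop after 3 seconds
});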

There is an alternative, though: the Web Audio API's AudioNodes. The basic steps are as follows:

1. Get the audio/video stream via getUserMedia.

2. Create a MediaStreamAudioSourceNode via createMediaStreamSource.

var audioCtx = new (window.AudioContext || window.webkitAudioContext)();
var video = document.querySelector('video');
var source; // MediaStreamAudioSourceNode, used again in step 3

if (navigator.getUserMedia) {
   console.log('getUserMedia supported.');
   navigator.getUserMedia(
      // constraints: audio and video for this app
      {
         audio: true,
         video: true
      },

      // Success callback
      function(stream) {
         video.src = (window.URL && window.URL.createObjectURL(stream)) || stream;
         video.onloadedmetadata = function(e) {
            video.play();
            video.muted = true; // mute playback to avoid feedback
         };

         // Create a MediaStreamAudioSourceNode
         // and feed the stream into it
         source = audioCtx.createMediaStreamSource(stream);
      },

      // Error callback
      function(err) {
         console.log('The following gUM error occurred: ' + err);
      }
   );
} else {
   console.log('getUserMedia not supported on your browser!');
}

3. Connect the AudioNodes: create a ScriptProcessorNode and grab the audio data in its onaudioprocess callback.

var scriptNode = audioCtx.createScriptProcessor(4096, 1, 1);

scriptNode.onaudioprocess = function(audioProcessingEvent) {
  // The input buffer holds the microphone samples for this block
  var inputBuffer = audioProcessingEvent.inputBuffer;

  // Loop through the input channels (in this case there is only one)
  for (var channel = 0; channel < inputBuffer.numberOfChannels; channel++) {
    // inputData is a Float32Array of PCM samples; copy or buffer it here
    var inputData = inputBuffer.getChannelData(channel);
  }
};

source.connect(scriptNode);
scriptNode.connect(audioCtx.destination);

4. Send the blob data via XHR or WebSockets; a sketch of both options follows.
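
For example (the '/upload' endpoint and the WebSocket URL below are placeholders):

function uploadBlob(blob) {
  // POST the blob via XHR; '/upload' is a hypothetical endpoint
  var xhr = new XMLHttpRequest();
  xhr.open('POST', '/upload', true);
  xhr.setRequestHeader('Content-Type', 'audio/wav');
  xhr.onload = function() { console.log('Upload finished: ' + xhr.status); };
  xhr.send(blob);
}

function sendBlob(blob) {
  // Or stream it over a WebSocket; send() accepts a Blob directly
  var ws = new WebSocket('ws://example.com/audio'); // placeholder URL
  ws.onopen = function() { ws.send(blob); };
}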

JavaScript Libraries

The flow is that simple, but writing it all yourself is still fairly involved, so it helps to study existing code. On GitHub you can find RecordRTC and Recorder.js. The former records both video and audio; if you only want to record audio, the latter is simpler.

For RecordRTC, there is a site where you can try video and audio recording: online demo.

Now let's walk through the Recorder.js source.

Grabbing the audio buffers in onaudioprocess:

this.context = source.context;
this.node = (this.context.createScriptProcessor ||
             this.context.createJavaScriptNode).call(this.context,
             this.config.bufferLen, this.config.numChannels, this.config.numChannels);

this.node.onaudioprocess = function(e) {
    if (!_this.recording) return;

    var buffer = [];
    for (var channel = 0; channel < _this.config.numChannels; channel++) {
        buffer.push(e.inputBuffer.getChannelData(channel));
    }
    // Hand the raw channel data off to a Web Worker for buffering
    _this.worker.postMessage({
        command: 'record',
        buffer: buffer
    });
};

source.connect(this.node);
this.node.connect(this.context.destination); // this should not be necessary
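
The final connection to this.context.destination is there because some browsers (notably Chrome at the time) would not fire onaudioprocess unless the ScriptProcessorNode was connected to a destination, hence the "should not be necessary" comment.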

Storing the audio buffers in arrays (this runs in the worker):

function record(inputBuffer) {
    for (var channel = 0; channel < numChannels; channel++) {
        recBuffers[channel].push(inputBuffer[channel]);
    }
    recLength += inputBuffer[0].length;
}

Encoding into WAV format:

function encodeWAV(samples) {
    var buffer = new ArrayBuffer(44 + samples.length * 2);
    var view = new DataView(buffer);

    /* RIFF identifier */
    writeString(view, 0, 'RIFF');
    /* RIFF chunk length */
    view.setUint32(4, 36 + samples.length * 2, true);
    /* RIFF type */
    writeString(view, 8, 'WAVE');
    /* format chunk identifier */
    writeString(view, 12, 'fmt ');
    /* format chunk length */
    view.setUint32(16, 16, true);
    /* sample format (raw PCM) */
    view.setUint16(20, 1, true);
    /* channel count */
    view.setUint16(22, numChannels, true);
    /* sample rate */
    view.setUint32(24, sampleRate, true);
    /* byte rate (sample rate * block align; the upstream source hardcodes
       sampleRate * 4, which is only correct for 16-bit stereo) */
    view.setUint32(28, sampleRate * numChannels * 2, true);
    /* block align (channel count * bytes per sample) */
    view.setUint16(32, numChannels * 2, true);
    /* bits per sample */
    view.setUint16(34, 16, true);
    /* data chunk identifier */
    writeString(view, 36, 'data');
    /* data chunk length */
    view.setUint32(40, samples.length * 2, true);

    floatTo16BitPCM(view, 44, samples);

    return view;
}
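
encodeWAV relies on two small helpers, essentially as they appear in the Recorder.js source:

function writeString(view, offset, string) {
    for (var i = 0; i < string.length; i++) {
        view.setUint8(offset + i, string.charCodeAt(i));
    }
}

function floatTo16BitPCM(output, offset, input) {
    for (var i = 0; i < input.length; i++, offset += 2) {
        // Clamp to [-1, 1] and scale to signed 16-bit
        var s = Math.max(-1, Math.min(1, input[i]));
        output.setInt16(offset, s < 0 ? s * 0x8000 : s * 0x7FFF, true);
    }
}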

Merging all the buffers, encoding them as WAV, and finally converting the result to a blob:

function exportWAV(type) {
    var buffers = [];
    for (var channel = 0; channel < numChannels; channel++) {
        buffers.push(mergeBuffers(recBuffers[channel], recLength));
    }
    var interleaved = undefined;
    if (numChannels === 2) {
        interleaved = interleave(buffers[0], buffers[1]);
    } else {
        interleaved = buffers[0];
    }
    var dataview = encodeWAV(interleaved);
    var audioBlob = new Blob([dataview], { type: type });
    // (in the full source, the worker posts audioBlob back to the main thread here)
}

With that, the recorded audio file can be saved or uploaded.
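
Putting it all together, a typical Recorder.js session looks roughly like this (a sketch assuming the promise-based getUserMedia; triggering a download is just one option):

var audioCtx = new (window.AudioContext || window.webkitAudioContext)();

navigator.mediaDevices.getUserMedia({ audio: true }).then(function(stream) {
  var source = audioCtx.createMediaStreamSource(stream);
  var rec = new Recorder(source);

  rec.record(); // start buffering samples

  setTimeout(function() {
    rec.stop(); // stop buffering
    rec.exportWAV(function(blob) { // encode to WAV in the worker
      var a = document.createElement('a');
      a.href = window.URL.createObjectURL(blob);
      a.download = 'recording.wav';
      a.click(); // or upload the blob as shown earlier
    });
  }, 3000);
});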

References

  • WebRTC roadmap
  • Using the MediaRecorder API
  • getUserMedia
  • Web Audio API

Reposted from: https://my.oschina.net/yushulx/blog/759919
