Starting today I'm learning audio and video development. Let's go, cheering myself on!!
The first step: capture video data from the system camera and save it as H.264.
In other words, using MediaCodec as an encoder. Before creating a MediaCodec there are some parameters that need to be set up first, via a MediaFormat:
MediaFormat
val videoFormat = MediaFormat.createVideoFormat(VCODEC_MIME, height, width) // width/height swapped: frames are rotated 90° before encoding
videoFormat.setInteger(MediaFormat.KEY_BIT_RATE, bitrate)
videoFormat.setInteger(MediaFormat.KEY_FRAME_RATE, frameRate)
videoFormat.setInteger(
    MediaFormat.KEY_COLOR_FORMAT,
    MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420Flexible
)
videoFormat.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 1)
There are a few video-related concepts in here:
1. KEY_BIT_RATE
The bit rate. The larger this value, the better the video quality, up to a limit. A common rule of thumb is x × width × height, where x can be 1, 3, 5, etc. to control quality (see the sketch after this list).
2. KEY_FRAME_RATE
The frame rate, i.e. how many frames per second.
3. KEY_COLOR_FORMAT
The color format of the data fed into the encoder. For Surface input pass COLOR_FormatSurface; for camera data, which is YUV, pass COLOR_FormatYUV420Flexible.
4. KEY_I_FRAME_INTERVAL
The key frame (I-frame) interval, in seconds.
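As a quick illustration of the bit-rate rule of thumb in item 1, here is a tiny helper of my own (bitrateFor is not part of the original code):

// Rough bit-rate rule of thumb: quality factor x times width * height,
// where x = 1 is low, 3 is medium, 5 is high quality.
fun bitrateFor(width: Int, height: Int, x: Int): Int = x * width * height

// e.g. bitrateFor(1280, 720, 2) ≈ 1.8 Mbps, matching the
// 2 * width * height used in init() below.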
Complete code
fun init(width: Int, height: Int, frameRate: Int) {
    if (!::mediaCodec.isInitialized) {
        val bitrate = 2 * width * height
        this.width = width
        this.height = height
        val mediaCodecInfo = selectCodec(VCODEC_MIME)
        mediaCodec = MediaCodec.createByCodecName(mediaCodecInfo!!.name)
        // width and height are swapped because frames are rotated 90° before encoding
        val videoFormat = MediaFormat.createVideoFormat(VCODEC_MIME, height, width)
        videoFormat.setInteger(MediaFormat.KEY_BIT_RATE, bitrate)
        videoFormat.setInteger(MediaFormat.KEY_FRAME_RATE, frameRate)
        videoFormat.setInteger(
            MediaFormat.KEY_COLOR_FORMAT,
            MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420Flexible
        )
        videoFormat.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 1)
        mediaCodec.configure(videoFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE)
        mediaCodec.start()
        createFile()
    }
}
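init() calls a selectCodec() helper that isn't shown in the post. A minimal sketch of what it presumably does, i.e. scan the codec list for the first encoder that supports the given MIME type, using the same older MediaCodecList API style as the rest of the code:

import android.media.MediaCodecInfo
import android.media.MediaCodecList

private fun selectCodec(mimeType: String): MediaCodecInfo? {
    for (i in 0 until MediaCodecList.getCodecCount()) {
        val codecInfo = MediaCodecList.getCodecInfoAt(i)
        if (!codecInfo.isEncoder) continue
        // return the first encoder that supports the requested MIME type
        if (codecInfo.supportedTypes.any { it.equals(mimeType, ignoreCase = true) }) {
            return codecInfo
        }
    }
    return null
}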
Start encoding
fun startEncoder() {
    Thread {
        isRuning = true
        var input: ByteArray? = null
        while (isRuning) {
            if (yuv420Queue.size > 0) {
                input = yuv420Queue.poll()
                val yuv420sp = ByteArray(getYuvBuffer(width, height))
                // convert NV21 to NV12
                nv212nv12(input, yuv420sp, width, height)
                // rotate 90° clockwise, writing the result back into input
                YUV420spRotate90Clockwise(yuv420sp, input, width, height)
                // without rotation you would just use: input = yuv420sp
            }
            if (input != null) {
                try {
                    val inputBuffers = mediaCodec.inputBuffers
                    val outputBuffers = mediaCodec.outputBuffers
                    val inputBufferIndex = mediaCodec.dequeueInputBuffer(-1)
                    if (inputBufferIndex >= 0) { // 0 is a valid index, so >= not >
                        // copy the frame into the input buffer
                        val inputBuffer = inputBuffers[inputBufferIndex]
                        inputBuffer.clear()
                        inputBuffer.put(input, 0, input.size)
                        // hand the data to the encoder
                        mediaCodec.queueInputBuffer(
                            inputBufferIndex, 0,
                            input.size,
                            System.nanoTime() / 1000, 0
                        )
                        val bufferInfo = MediaCodec.BufferInfo()
                        // dequeue an output buffer; returns the index of a filled buffer
                        var outputBufferIndex = mediaCodec.dequeueOutputBuffer(bufferInfo, 11000)
                        while (outputBufferIndex >= 0) {
                            val outputBuffer = outputBuffers[outputBufferIndex]
                            // respect offset/size so no stray bytes end up in the stream
                            outputBuffer.position(bufferInfo.offset)
                            outputBuffer.limit(bufferInfo.offset + bufferInfo.size)
                            val outData = ByteArray(bufferInfo.size)
                            outputBuffer.get(outData)
                            when {
                                bufferInfo.flags and MediaCodec.BUFFER_FLAG_CODEC_CONFIG != 0 -> {
                                    // codec config data: SPS and PPS
                                    spsppsByte = outData
                                    println("BUFFER_FLAG_CODEC_CONFIG")
                                }
                                bufferInfo.flags and MediaCodec.BUFFER_FLAG_KEY_FRAME != 0 -> {
                                    println("BUFFER_FLAG_KEY_FRAME")
                                    // prepend SPS/PPS to every key frame
                                    val keyframe = ByteArray(bufferInfo.size + spsppsByte.size)
                                    System.arraycopy(spsppsByte, 0, keyframe, 0, spsppsByte.size)
                                    System.arraycopy(outData, 0, keyframe, spsppsByte.size, outData.size)
                                    outputStream.write(keyframe, 0, keyframe.size)
                                }
                                else -> {
                                    // ordinary frame data
                                    outputStream.write(outData, 0, outData.size)
                                }
                            }
                            mediaCodec.releaseOutputBuffer(outputBufferIndex, false)
                            outputBufferIndex = mediaCodec.dequeueOutputBuffer(bufferInfo, 0)
                        }
                    }
                } catch (e: Exception) {
                    e.printStackTrace()
                }
            } else {
                Thread.sleep(500)
            }
        }
        // stop the codec and release resources
        try {
            mediaCodec.stop()
            mediaCodec.release()
        } catch (e: Exception) {
            e.printStackTrace()
        }
        // close the output stream
        try {
            outputStream.flush()
            outputStream.close()
        } catch (e: IOException) {
            e.printStackTrace()
        }
    }.start()
}
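startEncoder() polls a yuv420Queue that is filled elsewhere. A hypothetical sketch of how it might be fed from the legacy Camera preview callback (yuv420Queue and putYUVData are my names, not shown in the original post; the default preview format of the old Camera API is NV21):

import android.hardware.Camera
import java.util.concurrent.ArrayBlockingQueue

private val yuv420Queue = ArrayBlockingQueue<ByteArray>(10)

fun putYUVData(buffer: ByteArray) {
    if (yuv420Queue.size >= 10) {
        yuv420Queue.poll() // drop the oldest frame if the encoder falls behind
    }
    yuv420Queue.add(buffer)
}

// wherever the camera preview is set up:
camera.setPreviewCallback { data, _ -> putYUVData(data) }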
A few notes on the parts of this core code that deserve attention.

The YUV buffer size can't simply be hard-coded as width × height × 3/2; the stride has to be aligned, so it is computed as follows. For example, with width = 1080 the stride is aligned up to 1088, so the Y plane is 1088 × height bytes, not 1080 × height.
fun getYuvBuffer(width: Int, height: Int): Int {
    // stride = ALIGN(width, 16)
    val stride = Math.ceil(width / 16.0).toInt() * 16
    // y_size = stride * height
    val ySize = stride * height
    // c_stride = ALIGN(stride / 2, 16)
    val cStride = Math.ceil(stride / 32.0).toInt() * 16
    // c_size = c_stride * height / 2
    val cSize = cStride * height / 2
    // size = y_size + c_size * 2
    return ySize + cSize * 2
}
The camera outputs NV21, which has to be converted to NV12 before MediaCodec will accept it:
private fun nv212nv12(nv21: ByteArray?, nv12: ByteArray?, width: Int, height: Int) {
    if (nv21 == null || nv12 == null) return
    val framesize = width * height
    // the Y plane is identical in both formats
    System.arraycopy(nv21, 0, nv12, 0, framesize)
    // NV21 interleaves chroma as VUVU...; NV12 wants UVUV..., so swap each pair
    var j = 0
    while (j < framesize / 2) {
        nv12[framesize + j] = nv21[framesize + j + 1]     // U
        nv12[framesize + j + 1] = nv21[framesize + j]     // V
        j += 2
    }
}
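The YUV420spRotate90Clockwise() helper called in startEncoder() isn't shown in the post either. A minimal sketch of a 90° clockwise rotation for YUV420SP (NV12) data, matching the (src, dst, width, height) call and nullable-parameter style above:

private fun YUV420spRotate90Clockwise(src: ByteArray?, dst: ByteArray?, width: Int, height: Int) {
    if (src == null || dst == null) return
    val frameSize = width * height
    var k = 0
    // Y plane: each source column, read bottom-up, becomes a destination row
    for (x in 0 until width) {
        for (y in height - 1 downTo 0) {
            dst[k++] = src[y * width + x]
        }
    }
    // UV plane: rotate the same way, keeping each U/V byte pair together
    for (x in 0 until width step 2) {
        for (y in height / 2 - 1 downTo 0) {
            dst[k++] = src[frameSize + y * width + x]
            dst[k++] = src[frameSize + y * width + x + 1]
        }
    }
}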
The first buffer the encoder outputs is the SPS/PPS (codec config) data, so the code saves it: every key frame must have the SPS/PPS written in front of it. The code above handles this; see the comments.
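If you want to verify this, note that in an Annex-B H.264 stream each NAL unit starts with a 0x00000001 start code, and the low five bits of the following byte give the NAL type: 7 = SPS, 8 = PPS, 5 = IDR key frame. A tiny sketch of my own (assuming 4-byte start codes) to check what the encoder emitted:

// returns the NAL type of the first NAL unit in the buffer
fun firstNalType(data: ByteArray): Int = data[4].toInt() and 0x1F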
And that's it: the recorded video is saved as a raw H.264 stream, which you can play back with VLC.
Source code
Reference
http://www.cnblogs.com/renhui/p/7520690.html