视频采集会得到格式为CMSampleBufferRef的视频包,视频推流一般把视频流转换成FLV格式
(1)首先将视频流转换成YUV的数据格式
//获取yuv数据
// Converts a captured CMSampleBufferRef (bi-planar NV12 pixel format) into a
// single contiguous NSData blob: Y plane followed by the interleaved UV plane,
// then rotated to compensate for camera orientation.
// NOTE(review): the copies below assume bytes-per-row == pixelWidth for both
// planes; CVPixelBuffer rows are often padded — if stride can differ, copy row
// by row using CVPixelBufferGetBytesPerRowOfPlane. TODO confirm with the
// session's pixel format settings.
- (NSData*)convertVideoSmapleBufferToYuvData:(CMSampleBufferRef)videoSample {
    // The image buffer inside the sample holds the raw YUV420 (NV12) planes.
    CVImageBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(videoSample);
    // Base address must be locked before reading plane memory.
    CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    // Image dimensions in pixels.
    size_t pixelWidth = CVPixelBufferGetWidth(pixelBuffer);
    size_t pixelHeight = CVPixelBufferGetHeight(pixelBuffer);
    // Y plane: one byte per pixel.
    size_t y_size = pixelWidth * pixelHeight;
    // U and V are each a quarter of Y; NV12 stores them interleaved in one plane.
    size_t uv_size = y_size / 4;
    uint8_t *yuv_frame = aw_alloc(uv_size * 2 + y_size);
    // Copy the Y plane (plane 0).
    uint8_t *y_frame = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
    memcpy(yuv_frame, y_frame, y_size);
    // Copy the interleaved UV plane (plane 1).
    uint8_t *uv_frame = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1);
    memcpy(yuv_frame + y_size, uv_frame, uv_size * 2);
    // Done reading; unlock before returning the buffer to the pool.
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
    // Wrap the malloc'd bytes without copying.
    // NOTE(review): dataWithBytesNoCopy:length: frees the buffer with free()
    // on dealloc — verify aw_alloc is malloc-compatible, otherwise use the
    // deallocator: variant with aw_free.
    NSData *yuvData = [NSData dataWithBytesNoCopy:yuv_frame length:y_size + uv_size * 2];
    // BUG FIX: the original returned [self rotateNV12Data:nv12Data], but no
    // `nv12Data` exists in this scope — rotate the data just built instead.
    return [self rotateNV12Data:yuvData];
}
(2)由于相机偏转,我们需要对得到的视频画面进行旋转
// Rotates an NV12 frame in place to compensate for camera orientation:
// landscape-left → 90°, landscape-right → 270°, anything else passes through.
// Pipeline: NV12 → rotated I420 (scratch buffer) → NV12 back into the input
// bytes, so the returned NSData is the same object that was passed in.
- (NSData*)rotateNV12Data:(NSData*)nv12Data {
    int degree = 0;
    switch (self.videoConfig.orientation) {
        case UIInterfaceOrientationLandscapeLeft:
            degree = 90;
            break;
        case UIInterfaceOrientationLandscapeRight:
            degree = 270;
            break;
        default:
            // Portrait orientations need no rotation.
            break;
    }
    if (degree != 0) {
        // BUG FIX: the original declared `uint8_ t` (stray space inside the
        // type name), which does not compile.
        uint8_t *src_nv12_bytes = (uint8_t *)nv12Data.bytes;
        uint32_t width = (uint32_t)self.videoConfig.width;
        uint32_t height = (uint32_t)self.videoConfig.height;
        uint32_t w_x_h = (uint32_t)(self.videoConfig.width * self.videoConfig.height);
        // Scratch buffer holding the rotated frame as planar I420.
        uint8_t *rotatedI420Bytes = aw_alloc(nv12Data.length);
        // After a 90°/270° rotation the image is height × width, so the
        // destination Y stride is `height` and each chroma stride height/2.
        NV12ToI420Rotate(src_nv12_bytes, width,
                         src_nv12_bytes + w_x_h, width,
                         rotatedI420Bytes, height,
                         rotatedI420Bytes + w_x_h, height / 2,
                         rotatedI420Bytes + w_x_h + w_x_h / 4, height / 2,
                         width, height, (RotationModeEnum)degree);
        // Convert the rotated I420 back to NV12, overwriting the input bytes.
        I420ToNV12(rotatedI420Bytes, height,
                   rotatedI420Bytes + w_x_h, height / 2,
                   rotatedI420Bytes + w_x_h + w_x_h / 4, height / 2,
                   src_nv12_bytes, height, src_nv12_bytes + w_x_h, height,
                   height, width);
        aw_free(rotatedI420Bytes);
    }
    return nv12Data;
}
(3)将nv12格式的数据合成为flv格式
// Hardware-encodes one NV12 frame (`yuvData`) to H.264 via VideoToolbox and
// wraps the resulting length-prefixed NALU in an FLV video tag ready to send
// to the streaming server. Returns NULL if the encoder session is missing,
// buffer setup fails, or encoding produces no data.
- (aw_flv_video_tag*)encodeYUVDataToFlvTag:(NSData*)yuvData {
    if (!_vEnSession) {
        return NULL;
    }
    OSStatus status = noErr;
    // Output (push-stream) dimensions.
    size_t pixelWidth = self.videoConfig.pushStreamWidth;
    size_t pixelHeight = self.videoConfig.pushStreamHeight;
    // VTCompressionSessionEncodeFrame consumes a CVPixelBufferRef, so copy the
    // raw NV12 bytes into a freshly created pixel buffer.
    CVPixelBufferRef pixelBuf = NULL;
    CVReturn cvRet = CVPixelBufferCreate(NULL, pixelWidth, pixelHeight,
                                         kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange,
                                         NULL, &pixelBuf);
    // BUG FIX: the original ignored the creation result and could pass NULL on.
    if (cvRet != kCVReturnSuccess || pixelBuf == NULL) {
        NSLog(@"encode video create pixel buffer failed");
        return NULL;
    }
    if (CVPixelBufferLockBaseAddress(pixelBuf, 0) != kCVReturnSuccess) {
        NSLog(@"encode video lock base address failed");
        // BUG FIX: release the buffer on this early-exit path (was leaked).
        CFRelease(pixelBuf);
        return NULL;
    }
    // Fill the pixel buffer: Y plane first, then the interleaved UV plane.
    // NOTE(review): y_size uses aw_stride(pixelWidth) while yuvData was built
    // from the capture width — confirm the two agree, or copy row by row.
    size_t y_size = aw_stride(pixelWidth) * pixelHeight;
    size_t uv_size = y_size / 4;
    uint8_t *yuv_frame = (uint8_t *)yuvData.bytes;
    uint8_t *y_frame = CVPixelBufferGetBaseAddressOfPlane(pixelBuf, 0);
    memcpy(y_frame, yuv_frame, y_size);
    uint8_t *uv_frame = CVPixelBufferGetBaseAddressOfPlane(pixelBuf, 1);
    memcpy(uv_frame, yuv_frame + y_size, uv_size * 2);
    // Presentation timestamp in milliseconds. Frames are ordered, so a
    // monotonically increasing counter scaled by the frame rate is sufficient.
    // BUG FIX: the original read an undefined `timestamp` variable; this uses
    // the expression its adjacent comment indicated.
    uint32_t ptsMs = self.vFrameCount++ * 1000.f / self.videoConfig.fps;
    CMTime pts = CMTimeMake(ptsMs, 1000);
    // Submit the frame to the hardware encoder; the encoder callback fills
    // _naluData and signals vSemaphore when the H.264 frame is ready.
    status = VTCompressionSessionEncodeFrame(_vEnSession, pixelBuf, pts,
                                             kCMTimeInvalid, NULL, pixelBuf, NULL);
    if (status == noErr) {
        // Block until the asynchronous encoder callback delivers the NALU.
        dispatch_semaphore_wait(self.vSemaphore, DISPATCH_TIME_FOREVER);
        if (_naluData) {
            // Prefix the frame with its 4-byte length in big-endian (network)
            // byte order, as FLV/AVCC framing requires.
            uint32_t naluLen = (uint32_t)_naluData.length;
            uint8_t naluLenArr[4] = {naluLen >> 24 & 0xff, naluLen >> 16 & 0xff,
                                     naluLen >> 8 & 0xff, naluLen & 0xff};
            NSMutableData *mutableData = [NSMutableData dataWithBytes:naluLenArr length:4];
            [mutableData appendData:_naluData];
            // Wrap the length-prefixed NALU into an FLV video tag; the tag can
            // be sent to the server as-is.
            aw_flv_video_tag *video_tag =
                aw_encoder_create_video_tag((int8_t *)mutableData.bytes,
                                            mutableData.length, ptsMs, 0,
                                            self.isKeyFrame);
            // Encoding done — clear per-frame state and release the buffer.
            _naluData = nil;
            _isKeyFrame = NO;
            CVPixelBufferUnlockBaseAddress(pixelBuf, 0);
            CFRelease(pixelBuf);
            return video_tag;
        }
    } else {
        NSLog(@"encode video frame error");
    }
    CVPixelBufferUnlockBaseAddress(pixelBuf, 0);
    CFRelease(pixelBuf);
    return NULL;
}