Extracting and playing audio with ffmpeg 1.0

 

For a long time I have wanted to write a repeat-playback program for Android, mainly to play the episodes of Friends that I downloaded from the internet. Writing an MP3 player is easy enough, but other formats need a separate decoder, so I went looking online and collected a pile of material about ffmpeg. Now that basic audio playback works, I plan to write a few posts: partly to record what I have done over the past few days, and partly in the hope that they will help others working on the same thing.

My development environment is Ubuntu, using ffmpeg 1.0; much of the material online still targets very old versions.
There are already plenty of articles about downloading ffmpeg and building it under Linux, so I will not repeat that here. As for building it with the Android NDK, I will share my own thoughts on that when I have time.

For extracting and decoding audio you can refer to ffmpeg-1.0/doc/examples/filtering_audio.c and ffmpeg-1.0/doc/examples/decoding_encoding.c; ffmpeg.c and ffplay are far too complex for an impatient outsider like me. There are a few good articles online, for example:
http://ushertechblog.sinaapp.com/post-24.html

That article is itself based on a foreign blog post, http://dranger.com/ffmpeg/tutorial03.html , but both of them use a very old version of ffmpeg. The article below uses a recent ffmpeg:

http://blog.chinaunix.net/uid-26009923-id-3384770.html

Below is the code I wrote based on the articles above. I only changed some of the contents of the audio_decode_frame and audio_callback functions, which may make the code feel a little clearer.

#include <stdio.h>
#include <stdlib.h>		/* exit */
#include <string.h>		/* memset, memcpy */
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <SDL.h>
#define SDL_AUDIO_BUFFER_SIZE 1024
typedef struct PacketQueue {
	AVPacketList * first_pkt, *last_pkt;
	int nb_packets;
	int size;
	SDL_mutex *mutex;		// The mutex protects the queue from concurrent access, much like
							// Java's synchronized keyword. This program creates no threads of
							// its own, but SDL runs the audio callback on a separate thread.
	SDL_cond * cond;
} PacketQueue;
PacketQueue audioq;

void packet_queue_init(PacketQueue *q) {
	memset(q, 0, sizeof(PacketQueue));
	q->mutex = SDL_CreateMutex();
	q->cond = SDL_CreateCond();
}

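/* Append a packet to the tail of the queue; called from the main thread for
   every audio packet read from the file. */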
int packet_queue_put(PacketQueue *q, AVPacket *pkt) {

	AVPacketList *pkt1;
	if (av_dup_packet(pkt) < 0)
		return -1;
	pkt1 = av_malloc(sizeof(AVPacketList));
	if (!pkt1)
		return -1;
	pkt1->pkt = *pkt;
	pkt1->next = NULL;

	SDL_LockMutex(q->mutex);

	if (!q->last_pkt)
		q->first_pkt = pkt1;
	else
		q->last_pkt->next = pkt1;
	q->last_pkt = pkt1;
	q->nb_packets++;
	q->size += pkt1->pkt.size;
	SDL_CondSignal(q->cond);

	SDL_UnlockMutex(q->mutex);
	return 0;
}

int quit = 0;
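
/* Pop a packet from the head of the queue. If block is non-zero, wait on the
   condition variable until a packet arrives or quit is set. Returns 1 on
   success, 0 if non-blocking and the queue is empty, -1 when quitting. */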
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block) {
	AVPacketList *pkt1;
	int ret;

	SDL_LockMutex(q->mutex);

	for (;;) {

		if (quit) {
			ret = -1;
			break;
		}

		pkt1 = q->first_pkt;
		if (pkt1) {
			q->first_pkt = pkt1->next;
			if (!q->first_pkt)
				q->last_pkt = NULL;
			q->nb_packets--;
			q->size -= pkt1->pkt.size;
			*pkt = pkt1->pkt;
			av_free(pkt1);		// A nice touch: the list node is freed right here after its
								// packet has been copied out, so the caller never has to free it.
			ret = 1;
			break;
		} else if (!block) {
			ret = 0;
			break;
		} else {
			SDL_CondWait(q->cond, q->mutex);
		}
	}
	SDL_UnlockMutex(q->mutex);
	return ret;
}

//int audio_decode_frame(AVCodecContext *aCodecCtx, uint8_t *audio_buf, int buf_size) {
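/* Decode queued packets until one complete audio frame is produced, copy its
   samples into audio_buf, and return the number of bytes copied, or a negative
   value on error. This runs on SDL's audio thread, via audio_callback below. */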
int audio_decode_frame(AVCodecContext *aCodecCtx, AVFrame *frame,
		uint8_t *audio_buf) {
	static AVPacket pkt, pkt_temp;	/* pkt owns the packet data; pkt_temp tracks what is left to decode */
	int len1, data_size, got_frame;
	int new_packet = 0;				/* start at 0 so the very first pass fetches a packet from the queue */
	for (;;) {
		while (pkt_temp.size > 0 || (!pkt_temp.data && new_packet)) {
			if (!frame) {
				if (!(frame = avcodec_alloc_frame()))
					return AVERROR(ENOMEM);
			} else {
				avcodec_get_frame_defaults(frame);
			}
			new_packet = 0;

			len1 = avcodec_decode_audio4(aCodecCtx, frame, &got_frame,
					&pkt_temp);
			if (len1 < 0) {
				/* if error, skip frame */
				pkt_temp.size = 0;
				break;
			}
			pkt_temp.data += len1;
			pkt_temp.size -= len1;

			if (got_frame <= 0) /* No data yet, get more frames */
				continue;
			data_size = av_samples_get_buffer_size(NULL, aCodecCtx->channels,
					frame->nb_samples, aCodecCtx->sample_fmt, 1);
			/* Copy exactly data_size bytes; this assumes a packed (interleaved)
			   sample format, where frame->data[0] holds all channels. */
			memcpy(audio_buf, frame->data[0], data_size);
			/* We have data, return it and come back for more later */
			return data_size;
		}
		if (quit)
			return -1;

		/* Release the packet we just finished decoding before fetching the
		   next one; otherwise every consumed packet's data would leak. */
		if (pkt.data)
			av_free_packet(&pkt);
		if ((new_packet = packet_queue_get(&audioq, &pkt, 1)) < 0)
			return -1;
		pkt_temp.data = pkt.data;
		pkt_temp.size = pkt.size;

	}
}

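/* SDL audio callback: SDL calls this from its own audio thread whenever the
   device needs len more bytes. The loop keeps pulling decoded data until the
   whole stream buffer has been filled. */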
void audio_callback(void *userdata, Uint8 *stream, int len) {
	AVCodecContext *aCodecCtx = (AVCodecContext *) userdata;
	// These variables are static so their values survive between callback invocations.
	static uint8_t audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2];
	static unsigned int audio_buf_remain_size = 0;	// bytes of decoded data still left in audio_buf
	static unsigned int audio_buf_total_size = 0;	// total bytes of decoded data currently in audio_buf
	static unsigned int audio_buf_index = 0;
	static AVFrame *frame = NULL;	// reused across callbacks instead of leaking one per call

	int read_size;		// bytes copied into stream on each pass; at most len, and possibly
						// less on the last copy taken from a buffer
	int decoded_size;	// return value of audio_decode_frame: a byte count, or negative on error

	if (!frame)
		frame = avcodec_alloc_frame();
	while(len){

		if (audio_buf_index >= audio_buf_total_size) {
			// The buffer is exhausted; decode the next chunk of audio.
			decoded_size = audio_decode_frame(aCodecCtx, frame, audio_buf);
			audio_buf_index = 0;
			if (decoded_size < 0) {
				// Decoding failed: output a short block of silence instead.
				audio_buf_remain_size = audio_buf_total_size = 1024;
				memset(audio_buf, 0, audio_buf_total_size);
				continue;
			}
			audio_buf_remain_size = audio_buf_total_size = decoded_size;
		}

		read_size = (audio_buf_remain_size > (unsigned int) len) ? len : audio_buf_remain_size;


		memcpy(stream, (uint8_t *) audio_buf + audio_buf_index, read_size);

		audio_buf_index += read_size;
		audio_buf_remain_size -= read_size;
		stream += read_size;
		len -= read_size;
	}


}


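/* Open the input file, find the first video and audio streams, open their
   decoders, hand SDL an audio spec that matches the source, then read packets
   and push the audio ones onto the queue for the callback to consume. */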
int main(int argc, char *argv[]) {
	AVFormatContext *pFormatCtx;
	int i, videoStream, audioStream;
	AVCodecContext *pCodecCtx, *aCodecCtx;
	AVCodec *pCodec, *aCodec;
	AVPacket packet;
	SDL_Event event;

	if (argc < 2) {
		printf("Please provide a movie file\n");
		return -1;
	}

	if (SDL_Init(SDL_INIT_AUDIO)) {
		fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
		exit(1);
	}
	av_register_all();

	pFormatCtx = avformat_alloc_context();
	// Open video file
	//if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
	if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL ) != 0)
		return -1; // Couldn't open file

	// Retrieve stream information
	if (avformat_find_stream_info(pFormatCtx, NULL ) < 0)
		return -1; // Couldn't find stream information

	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, argv[1], 0);

	// Find the first video and audio streams
	videoStream = -1;
	audioStream = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++) {
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO
				&& videoStream < 0)
			videoStream = i;
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
				&& audioStream < 0)
			audioStream = i;
	}
	if (videoStream == -1)
		return -1; // Didn't find a video stream
	if (audioStream == -1)
		return -1;
	// Get a pointer to the codec context for the video&audio stream
	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	aCodecCtx = pFormatCtx->streams[audioStream]->codec;

	SDL_AudioSpec wanted_spec, spec;
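	// Describe the format we want SDL to play: sample rate and channel count are
	// taken from the decoder, samples are signed 16-bit in native byte order, and
	// audio_callback is invoked whenever SDL needs its buffer refilled.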
	wanted_spec.freq = aCodecCtx->sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = aCodecCtx->channels;
	wanted_spec.silence = 0;
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
	wanted_spec.callback = audio_callback;
	wanted_spec.userdata = aCodecCtx;

	if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
		fprintf(stderr, "SDL_OpenAudio:%s\n", SDL_GetError());
		return -1;
	}

	// Find the decoder for the video stream
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL ) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1; // Codec not found
	}
	// Find the decoder for the audio stream
	aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
	if (aCodec == NULL ) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1; // Codec not found
	}
	// Open codec
	if (avcodec_open2(aCodecCtx, aCodec, NULL ) < 0)
		return -1; // Could not open codec
	// Open codec
	if (avcodec_open2(pCodecCtx, pCodec, NULL ) < 0)
		return -1; // Could not open codec

	packet_queue_init(&audioq);
	SDL_PauseAudio(0);	// start playback; SDL begins calling audio_callback on its audio thread

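	// Demuxing loop: read packets from the file, queue the audio ones for the
	// decoder, and free everything else right away.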
	while (av_read_frame(pFormatCtx, &packet) >= 0) {
		// Is this a packet from the audio stream?
		if (packet.stream_index == audioStream) {
			packet_queue_put(&audioq, &packet);
		} else {
			av_free_packet(&packet);
		}
		if (SDL_PollEvent(&event)) {
			switch (event.type) {
			case SDL_QUIT:
				quit = 1;
				SDL_Quit();
				exit(0);
				break;
			default:
				break;
			}
		}

	}
	// Every packet has already been queued (and is now owned by the queue) or
	// freed inside the loop above, so nothing is left to free here.

	// Block the main thread while the audio thread keeps playing.
	for (;;) {
		getchar();	// could also be used to read keyboard commands, e.g. to pause playback
		break;
	}
	// Close the codec
	avcodec_close(pCodecCtx);
	avcodec_close(aCodecCtx);

	// Close the video file
	avformat_close_input(&pFormatCtx);

	return 0;
}
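
One caveat worth noting: the memcpy in audio_decode_frame and the AUDIO_S16SYS format handed to SDL assume the decoder produces packed (interleaved) 16-bit samples. With ffmpeg 1.0 some decoders output planar or float formats (AV_SAMPLE_FMT_FLTP, for example), in which case frame->data[0] only holds the first channel and playback sounds wrong. Below is a minimal sketch, not part of the program above, of how libswresample could convert a decoded frame to packed S16 before it is copied into audio_buf; the function name convert_frame_to_s16 and its error handling are my own illustration, so treat it as a starting point rather than tested code.

#include <libavcodec/avcodec.h>
#include <libswresample/swresample.h>

/* Convert one decoded frame to packed signed 16-bit samples in out.
 * Returns the number of bytes written, or a negative value on error.
 * For brevity a converter is created and destroyed per call; a real player
 * would keep one SwrContext around for the whole stream. Assumes
 * ctx->channel_layout is set; if it is 0, derive it from ctx->channels
 * with av_get_default_channel_layout(). */
static int convert_frame_to_s16(AVCodecContext *ctx, AVFrame *frame,
		uint8_t *out, int out_size)
{
	uint8_t *out_planes[1] = { out };	/* packed output uses a single plane */
	int out_samples;
	SwrContext *swr = swr_alloc_set_opts(NULL,
			ctx->channel_layout, AV_SAMPLE_FMT_S16, ctx->sample_rate,	/* output */
			ctx->channel_layout, ctx->sample_fmt, ctx->sample_rate,		/* input  */
			0, NULL);

	if (!swr || swr_init(swr) < 0) {
		swr_free(&swr);
		return -1;
	}

	/* out_size bytes can hold out_size / (2 * channels) S16 samples per channel */
	out_samples = swr_convert(swr, out_planes, out_size / (2 * ctx->channels),
			(const uint8_t **) frame->data, frame->nb_samples);
	swr_free(&swr);
	if (out_samples < 0)
		return -1;

	return out_samples * 2 * ctx->channels;	/* bytes of packed S16 data */
}

In audio_decode_frame this would take the place of the plain memcpy: pass audio_buf and its capacity, and return whatever convert_frame_to_s16 returns instead of data_size.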



 
