(2) Learning FFmpeg filters: implementing audio mixing

Audio mixing implementation

Adapted from the native FFmpeg example code doc/examples/filtering_audio.c.
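For reference, the same two-input mix that the code below builds with "[in0][in1]amix=inputs=2[out]" can also be run directly from the ffmpeg command line (the input and output file names here are only placeholders):

ffmpeg -i input1.mp3 -i input2.mp3 -filter_complex "[0:a][1:a]amix=inputs=2[out]" -map "[out]" mixed.wav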

FFmpeg version information:

 


ffmpeg version N-82997-g557c0df Copyright (c) 2000-2017 the FFmpeg developers

  built with gcc 5.4.0 (Ubuntu 5.4.0-6ubuntu1~16.04.4) 20160609

  configuration: --enable-libx264 --enable-gpl --enable-decoder=h264 --enable-encoder=libx264 --enable-shared --enable-static --disable-yasm --enable-nonfree --enable-libfdk-aac --enable-shared --enable-ffplay

  libavutil      55. 43.100 / 55. 43.100

  libavcodec     57. 70.101 / 57. 70.101

  libavformat    57. 61.100 / 57. 61.100

  libavdevice    57.  2.100 / 57.  2.100

  libavfilter     6. 68.100 /  6. 68.100

  libswscale      4.  3.101 /  4.  3.101

  libswresample   2.  4.100 /  2.  4.100

  libpostproc    54.  2.100 / 54.  2.100

 

Code implementation:

 


/*

 * Copyright (c) 2010 Nicolas George

 * Copyright (c) 2011 Stefano Sabatini

 * Copyright (c) 2012 Clément Bœsch

 *

 * Permission is hereby granted, free of charge, to any person obtaining a copy

 * of this software and associated documentation files (the "Software"), to deal

 * in the Software without restriction, including without limitation the rights

 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell

 * copies of the Software, and to permit persons to whom the Software is

 * furnished to do so, subject to the following conditions:

 *

 * The above copyright notice and this permission notice shall be included in

 * all copies or substantial portions of the Software.

 *

 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR

 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,

 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL

 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER

 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,

 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN

 * THE SOFTWARE.

 */

 

/**

 * @file

 * API example for audio decoding and filtering

 * @example filtering_audio.c

 */

 

#include <unistd.h>

 

#include <libavcodec/avcodec.h>

#include <libavformat/avformat.h>

#include <libavfilter/avfiltergraph.h>

#include <libavfilter/buffersink.h>

#include <libavfilter/buffersrc.h>

#include <libavutil/opt.h>

 

#define ENABLE_FILTERS 1

 

static const char *filter_descr = "[in0][in1]amix=inputs=2[out]";//"aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";

static const char *player       = "ffplay -f s16le -ar 8000 -ac 1 -";

 

static AVFormatContext *fmt_ctx1;

static AVFormatContext *fmt_ctx2;

 

static AVCodecContext *dec_ctx1;

static AVCodecContext *dec_ctx2;

 

AVFilterContext *buffersink_ctx;

AVFilterContext *buffersrc_ctx1;

AVFilterContext *buffersrc_ctx2;

 

AVFilterGraph *filter_graph;

static int audio_stream_index_1 = -1;

static int audio_stream_index_2 = -1;

 

 

static int open_input_file_1(const char *filename)

{

    int ret;

    AVCodec *dec;

 

    if ((ret = avformat_open_input(&fmt_ctx1, filename, NULL, NULL)) < 0) {

        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");

        return ret;

    }

 

    if ((ret = avformat_find_stream_info(fmt_ctx1, NULL)) < 0) {

        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");

        return ret;

    }

 

    /* select the audio stream */

    ret = av_find_best_stream(fmt_ctx1, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);

    if (ret < 0) {

        av_log(NULL, AV_LOG_ERROR, "Cannot find an audio stream in the input file\n");

        return ret;

    }

    audio_stream_index_1 = ret;

    dec_ctx1 = fmt_ctx1->streams[audio_stream_index_1]->codec;

    av_opt_set_int(dec_ctx1, "refcounted_frames", 1, 0);

 

    /* init the audio decoder */

    if ((ret = avcodec_open2(dec_ctx1, dec, NULL)) < 0) {

        av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");

        return ret;

    }

 

    return 0;

}

 

static int open_input_file_2(const char *filename)

{

    int ret;

    AVCodec *dec;

 

    if ((ret = avformat_open_input(&fmt_ctx2, filename, NULL, NULL)) < 0) {

        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");

        return ret;

    }

 

    if ((ret = avformat_find_stream_info(fmt_ctx2, NULL)) < 0) {

        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");

        return ret;

    }

 

    /* select the audio stream */

    ret = av_find_best_stream(fmt_ctx2, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);

    if (ret < 0) {

        av_log(NULL, AV_LOG_ERROR, "Cannot find an audio stream in the input file\n");

        return ret;

    }

    audio_stream_index_2 = ret;

    dec_ctx2 = fmt_ctx2->streams[audio_stream_index_2]->codec;

    av_opt_set_int(dec_ctx2, "refcounted_frames", 1, 0);

 

    /* init the audio decoder */

    if ((ret = avcodec_open2(dec_ctx2, dec, NULL)) < 0) {

        av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");

        return ret;

    }

 

    return 0;

}

 

static int init_filters(const char *filters_descr)

{

    char args1[512];

    char args2[512];

    int ret = 0;

    AVFilter *abuffersrc1  = avfilter_get_by_name("abuffer");

    AVFilter *abuffersrc2  = avfilter_get_by_name("abuffer");

    AVFilter *abuffersink = avfilter_get_by_name("abuffersink");

 

    AVFilterInOut *outputs1 = avfilter_inout_alloc();

    AVFilterInOut *outputs2 = avfilter_inout_alloc();

    AVFilterInOut *inputs  = avfilter_inout_alloc();

 

    static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };

    static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };

    static const int out_sample_rates[] = { 8000, -1 };

    const AVFilterLink *outlink;

 

    AVRational time_base_1 = fmt_ctx1->streams[audio_stream_index_1]->time_base;

    AVRational time_base_2 = fmt_ctx2->streams[audio_stream_index_2]->time_base;

 

    filter_graph = avfilter_graph_alloc();

    if (!outputs1 || !outputs2 || !inputs || !filter_graph) {

        ret = AVERROR(ENOMEM);

        goto end;

    }

 

    /* buffer audio source: the decoded frames from the decoder will be inserted here. */

    if (!dec_ctx1->channel_layout)

        dec_ctx1->channel_layout = av_get_default_channel_layout(dec_ctx1->channels);

    snprintf(args1, sizeof(args1),

            "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,

             time_base_1.num, time_base_1.den, dec_ctx1->sample_rate,

             av_get_sample_fmt_name(dec_ctx1->sample_fmt), dec_ctx1->channel_layout);

    ret = avfilter_graph_create_filter(&buffersrc_ctx1, abuffersrc1, "in1",

                                       args1, NULL, filter_graph);

    if (ret < 0) {

        av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");

        goto end;

    }

 

#if (ENABLE_FILTERS)

    /* buffer audio source: the decoded frames from the decoder will be inserted here. */

    if (!dec_ctx2->channel_layout)

        dec_ctx2->channel_layout = av_get_default_channel_layout(dec_ctx2->channels);

    snprintf(args2, sizeof(args2),

            "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,

             time_base_2.num, time_base_2.den, dec_ctx2->sample_rate,

             av_get_sample_fmt_name(dec_ctx2->sample_fmt), dec_ctx2->channel_layout);

    ret = avfilter_graph_create_filter(&buffersrc_ctx2, abuffersrc2, "in2",

                                       args2, NULL, filter_graph);

    if (ret < 0) {

        av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");

        goto end;

    }

#endif

    /* buffer audio sink: to terminate the filter chain. */

    ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",

                                       NULL, NULL, filter_graph);

    if (ret < 0) {

        av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");

        goto end;

    }

 

    ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,

                              AV_OPT_SEARCH_CHILDREN);

    if (ret < 0) {

        av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");

        goto end;

    }

 

    ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,

                              AV_OPT_SEARCH_CHILDREN);

    if (ret < 0) {

        av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");

        goto end;

    }

 

    ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,

                              AV_OPT_SEARCH_CHILDREN);

    if (ret < 0) {

        av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");

        goto end;

    }

 

    /*

     * Set the endpoints for the filter graph. The filter_graph will

     * be linked to the graph described by filters_descr.

     */

 

    /*

     * The buffer source output must be connected to the input pad of

     * the first filter described by filters_descr; since the first

     * filter input label is not specified, it is set to "in" by

     * default.

     */

    outputs1->name       = av_strdup("in0");

    outputs1->filter_ctx = buffersrc_ctx1;

    outputs1->pad_idx    = 0;

#if (ENABLE_FILTERS)

    outputs1->next       = outputs2;

 

    outputs2->name       = av_strdup("in1");

    outputs2->filter_ctx = buffersrc_ctx2;

    outputs2->pad_idx    = 0;

    outputs2->next       = NULL;

#else

    outputs1->next       = NULL;

#endif

    /*

     * The buffer sink input must be connected to the output pad of

     * the last filter described by filters_descr; since the last

     * filter output label is not specified, it is set to "out" by

     * default.

     */

    inputs->name       = av_strdup("out");

    inputs->filter_ctx = buffersink_ctx;

    inputs->pad_idx    = 0;

    inputs->next       = NULL;

 

 

    AVFilterInOut* filter_outputs[2];

    filter_outputs[0] = outputs1;

#if (ENABLE_FILTERS)

    filter_outputs[1] = outputs2;

#endif

 

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,

                                        &inputs, &outputs1, NULL)) < 0)//filter_outputs

    {

        av_log(NULL, AV_LOG_ERROR, "parse ptr fail, ret: %d\n", ret);

        goto end;

    }

 

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)

    {

        av_log(NULL, AV_LOG_ERROR, "config graph fail, ret: %d\n", ret);

        goto end;

    }

 

    /* Print summary of the sink buffer

     * Note: args buffer is reused to store channel layout string */

    outlink = buffersink_ctx->inputs[0];

    av_get_channel_layout_string(args1, sizeof(args1), -1, outlink->channel_layout);

    av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",

           (int)outlink->sample_rate,

           (char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),

           args1);

 

end:

    avfilter_inout_free(&inputs);

    avfilter_inout_free(&outputs1);

 

    return ret;

}

 

static void print_frame(const AVFrame *frame)

#if 0

{

    FILE *file = NULL;

    const int n = frame->nb_samples * av_get_channel_layout_nb_channels(av_frame_get_channel_layout(frame));

    const uint16_t *p     = (uint16_t*)frame->data[0];

    const uint16_t *p_end = p + n;

 

    file = fopen("tmp.pcm", "ab+");

    if (NULL == file) {

      perror("fopen tmp.pcm error");

      return;

    } else {

      fprintf(stderr, "fopen tmp.pcm successful\n");

    }

    fwrite(frame->data[0], n * 2, 1, file);

    fclose(file);

    file = NULL;

}

#else

{

    const int n = frame->nb_samples * av_get_channel_layout_nb_channels(av_frame_get_channel_layout(frame));

    const uint16_t *p     = (uint16_t*)frame->data[0];

    const uint16_t *p_end = p + n;

 

    while (p < p_end) {

        fputc(*p    & 0xff, stdout);

        fputc(*p>>8 & 0xff, stdout);

        p++;

    }

    fflush(stdout);

}

#endif

 

int main(int argc, char **argv)

{

    int ret;

    AVFrame *frame = av_frame_alloc();

    AVFrame *filt_frame = av_frame_alloc();

    int got_frame;

 

    if (!frame || !filt_frame) {

        perror("Could not allocate frame");

        exit(1);

    }

    /*

    if (argc != 2) {

        fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);

        exit(1);

    }

    */

 

    av_register_all();

    avfilter_register_all();

 

    if ((ret = open_input_file_1(argv[1])) < 0)

    {

        av_log(NULL, AV_LOG_ERROR, "open input file fail, ret: %d\n", ret);

        goto end;

    }

    if ((ret = open_input_file_2(argv[2])) < 0)

    {

        av_log(NULL, AV_LOG_ERROR, "open input file fail, ret: %d\n", ret);

        goto end;

    }

    if ((ret = init_filters(filter_descr)) < 0)

    {

        av_log(NULL, AV_LOG_ERROR, "init filters fail, ret: %d\n", ret);

        goto end;

    }

 

    AVPacket packet0, packet;

    AVPacket _packet0, _packet;

 

    /* read all packets */

    packet0.data = NULL;

    packet.data = NULL;

 

    _packet0.data = NULL;

    _packet.data = NULL;

    while (1) {

        if (!packet0.data) {

            if ((ret = av_read_frame(fmt_ctx1, &packet)) < 0)

                break;

            packet0 = packet;

        }

 

        if (packet.stream_index == audio_stream_index_1) {

            got_frame = 0;

            ret = avcodec_decode_audio4(dec_ctx1, frame, &got_frame, &packet);

            if (ret < 0) {

                av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");

                continue;

            }

            packet.size -= ret;

            packet.data += ret;

 

            if (got_frame) {

                av_log(NULL, AV_LOG_ERROR, "push frame\n");

                /* push the audio data from decoded frame into the filtergraph */

                if (av_buffersrc_add_frame_flags(buffersrc_ctx1, frame, 0) < 0) {

                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");

                    break;

                }

                av_log(NULL, AV_LOG_ERROR, "pull frame\n");

            }

 

            if (packet.size <= 0)

                av_packet_unref(&packet0);

        } else {

            /* discard non-wanted packets */

            av_packet_unref(&packet0);

        }

 

        if (!_packet0.data) {

            if ((ret = av_read_frame(fmt_ctx2, &_packet)) < 0)

                break;

            _packet0 = _packet;

        }

 

        if (_packet.stream_index == audio_stream_index_2) {

            got_frame = 0;

            ret = avcodec_decode_audio4(dec_ctx2, frame, &got_frame, &_packet);

            if (ret < 0) {

                av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");

                continue;

            }

            _packet.size -= ret;

            _packet.data += ret;

 

            if (got_frame) {

                av_log(NULL, AV_LOG_ERROR, "push frame\n");

                /* push the audio data from decoded frame into the filtergraph */

                if (av_buffersrc_add_frame_flags(buffersrc_ctx2, frame, 0) < 0) {

                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");

                    break;

                }

                av_log(NULL, AV_LOG_ERROR, "pull frame\n");

            }

 

            if (_packet.size <= 0)

                av_packet_unref(&_packet0);

        } else {

            /* discard non-wanted packets */

            av_packet_unref(&_packet0);

        }

        /* pull filtered audio from the filtergraph */

        if (got_frame)

        {

            while (1) {

                ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);

                if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)

                    break;

                if (ret < 0)

                {

                    av_log(NULL, AV_LOG_ERROR, "buffersink get frame fail, ret: %d\n", ret);

                    goto end;

                }

                print_frame(filt_frame);

                av_frame_unref(filt_frame);

            }

        }

    }

end:

    avfilter_graph_free(&filter_graph);

    avcodec_close(dec_ctx1);

    avformat_close_input(&fmt_ctx1);

    avcodec_close(dec_ctx2);

    avformat_close_input(&fmt_ctx2);

    av_frame_free(&frame);

    av_frame_free(&filt_frame);

 

    if (ret < 0 && ret != AVERROR_EOF) {

        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));

        exit(1);

    }

 

    exit(0);

}
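A minimal build-and-run sketch for this example (the source file name filter_amix.c is hypothetical, and the linker flags assume a pkg-config-enabled install of the FFmpeg build listed above):

gcc filter_amix.c -o filter_amix $(pkg-config --cflags --libs libavformat libavcodec libavfilter libavutil)

./filter_amix input1.mp3 input2.mp3 | ffplay -f s16le -ar 8000 -ac 1 -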

  

The filter graph does its work recursively; the recursion is implemented mainly in the ff_filter_graph_run_once function.
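As a rough illustration of that recursive pull (a toy sketch of the idea only, not the actual FFmpeg internals; all names here are made up):

/* Toy model of the recursive pull: asking the sink for a frame propagates
 * the request upstream until a filter (ultimately a buffer source) can
 * produce output. */
struct toy_filter {
    struct toy_filter *input;                 /* upstream filter, NULL for a source  */
    int (*produce)(struct toy_filter *self);  /* try to emit one frame, 0 on success */
};

static int toy_request_frame(struct toy_filter *f)
{
    /* First try to produce output from what this filter has already buffered. */
    if (f->produce && f->produce(f) == 0)
        return 0;
    /* Not enough input yet: recursively ask the upstream filter for more,
     * then try again -- this is the recursion that walks the whole graph. */
    if (f->input && toy_request_frame(f->input) == 0 && f->produce)
        return f->produce(f);
    return -1; /* nothing left to pull (EOF/EAGAIN in the real API) */
}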

A few supplementary diagrams:

The filter pipeline:

[Figure 1: filter pipeline]

The filter add-frame flow:

[Figure 2: filter add-frame flow]

The filter get-frame flow:

[Figure 3: filter get-frame flow]

 

Attention:

The mixing principle of amix can be glimpsed from the pipeline above: the two PCM streams are first resampled to a common format, then summed together, and finally resampled to an output format.

The PCM summing principle: assuming PCM1 and PCM2 are being mixed, MIX_PCM = PCM1/2 + PCM2/2.

So the quality of the resampling determines the quality of the mix.
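A minimal sketch of that summing formula applied to interleaved signed 16-bit PCM (the function and buffer names are hypothetical):

#include <stdint.h>
#include <stddef.h>

/* Mix two interleaved s16 PCM buffers of equal length by averaging:
 * mix[i] = pcm1[i]/2 + pcm2[i]/2. Halving each input first keeps the
 * sum inside the 16-bit range, so the result cannot overflow. */
static void mix_pcm_s16(const int16_t *pcm1, const int16_t *pcm2,
                        int16_t *mix, size_t nb_samples)
{
    for (size_t i = 0; i < nb_samples; i++)
        mix[i] = (int16_t)(pcm1[i] / 2 + pcm2[i] / 2);
}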

Source article: http://blog.csdn.net/dancing_night/article/details/53080385

Source article: http://blog.csdn.net/langsim/article/details/50947747
