annotate ffmpeg/libavfilter/af_amix.c @ 13:844d341cf643 tip

Back up before ISMIR
author Yading Song <yading.song@eecs.qmul.ac.uk>
date Thu, 31 Oct 2013 13:17:06 +0000
parents 6840f77b83aa
children
/*
 * Audio Mix Filter
 * Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Audio Mix Filter
 *
 * Mixes audio from multiple sources into a single output. The channel layout,
 * sample rate, and sample format will be the same for all inputs and the
 * output.
 */

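/*
 * Illustrative command line (the input/output file names are hypothetical):
 * mixing three inputs using the options defined in amix_options[] below:
 *
 *   ffmpeg -i a.wav -i b.wav -i c.wav \
 *          -filter_complex amix=inputs=3:duration=longest:dropout_transition=3 \
 *          out.wav
 */
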
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/float_dsp.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"

#define INPUT_OFF      0    /**< input has reached EOF */
#define INPUT_ON       1    /**< input is active */
#define INPUT_INACTIVE 2    /**< input is on, but is currently inactive */

#define DURATION_LONGEST  0
#define DURATION_SHORTEST 1
#define DURATION_FIRST    2


typedef struct FrameInfo {
    int nb_samples;
    int64_t pts;
    struct FrameInfo *next;
} FrameInfo;

/**
 * Linked list used to store timestamps and frame sizes of all frames in the
 * FIFO for the first input.
 *
 * This is needed to keep timestamps synchronized for the case where multiple
 * input frames are pushed to the filter for processing before a frame is
 * requested by the output link.
 */
typedef struct FrameList {
    int nb_frames;
    int nb_samples;
    FrameInfo *list;
    FrameInfo *end;
} FrameList;

static void frame_list_clear(FrameList *frame_list)
{
    if (frame_list) {
        while (frame_list->list) {
            FrameInfo *info = frame_list->list;
            frame_list->list = info->next;
            av_free(info);
        }
        frame_list->nb_frames = 0;
        frame_list->nb_samples = 0;
        frame_list->end = NULL;
    }
}

static int frame_list_next_frame_size(FrameList *frame_list)
{
    if (!frame_list->list)
        return 0;
    return frame_list->list->nb_samples;
}

static int64_t frame_list_next_pts(FrameList *frame_list)
{
    if (!frame_list->list)
        return AV_NOPTS_VALUE;
    return frame_list->list->pts;
}

static void frame_list_remove_samples(FrameList *frame_list, int nb_samples)
{
    if (nb_samples >= frame_list->nb_samples) {
        frame_list_clear(frame_list);
    } else {
        int samples = nb_samples;
        while (samples > 0) {
            FrameInfo *info = frame_list->list;
            av_assert0(info != NULL);
            if (info->nb_samples <= samples) {
                samples -= info->nb_samples;
                frame_list->list = info->next;
                if (!frame_list->list)
                    frame_list->end = NULL;
                frame_list->nb_frames--;
                frame_list->nb_samples -= info->nb_samples;
                av_free(info);
            } else {
                info->nb_samples -= samples;
                info->pts += samples;
                frame_list->nb_samples -= samples;
                samples = 0;
            }
        }
    }
}
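
/*
 * Worked example of the bookkeeping above (illustrative values): if frames of
 * 512 and 1024 samples are queued with pts 0 and 512 (the output time base is
 * 1/sample_rate, see config_output()), frame_list_next_frame_size() returns
 * 512 and frame_list_next_pts() returns 0. After removing 768 samples with
 * frame_list_remove_samples(), the first entry is freed and the second is
 * trimmed to 768 samples with its pts advanced to 768.
 */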

static int frame_list_add_frame(FrameList *frame_list, int nb_samples, int64_t pts)
{
    FrameInfo *info = av_malloc(sizeof(*info));
    if (!info)
        return AVERROR(ENOMEM);
    info->nb_samples = nb_samples;
    info->pts = pts;
    info->next = NULL;

    if (!frame_list->list) {
        frame_list->list = info;
        frame_list->end = info;
    } else {
        av_assert0(frame_list->end != NULL);
        frame_list->end->next = info;
        frame_list->end = info;
    }
    frame_list->nb_frames++;
    frame_list->nb_samples += nb_samples;

    return 0;
}


typedef struct MixContext {
    const AVClass *class;       /**< class for AVOptions */
    AVFloatDSPContext fdsp;

    int nb_inputs;              /**< number of inputs */
    int active_inputs;          /**< number of inputs currently active */
    int duration_mode;          /**< mode for determining duration */
    float dropout_transition;   /**< transition time when an input drops out */

    int nb_channels;            /**< number of channels */
    int sample_rate;            /**< sample rate */
    int planar;
    AVAudioFifo **fifos;        /**< audio fifo for each input */
    uint8_t *input_state;       /**< current state of each input */
    float *input_scale;         /**< mixing scale factor for each input */
    float scale_norm;           /**< normalization factor for all inputs */
    int64_t next_pts;           /**< calculated pts for next output frame */
    FrameList *frame_list;      /**< list of frame info for the first input */
} MixContext;

#define OFFSET(x) offsetof(MixContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
static const AVOption amix_options[] = {
    { "inputs", "Number of inputs.",
            OFFSET(nb_inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, 32, A|F },
    { "duration", "How to determine the end-of-stream.",
            OFFSET(duration_mode), AV_OPT_TYPE_INT, { .i64 = DURATION_LONGEST }, 0, 2, A|F, "duration" },
        { "longest",  "Duration of longest input.",  0, AV_OPT_TYPE_CONST, { .i64 = DURATION_LONGEST  }, INT_MIN, INT_MAX, A|F, "duration" },
        { "shortest", "Duration of shortest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_SHORTEST }, INT_MIN, INT_MAX, A|F, "duration" },
        { "first",    "Duration of first input.",    0, AV_OPT_TYPE_CONST, { .i64 = DURATION_FIRST    }, INT_MIN, INT_MAX, A|F, "duration" },
    { "dropout_transition", "Transition time, in seconds, for volume "
                            "renormalization when an input stream ends.",
            OFFSET(dropout_transition), AV_OPT_TYPE_FLOAT, { .dbl = 2.0 }, 0, INT_MAX, A|F },
    { NULL },
};

AVFILTER_DEFINE_CLASS(amix);

/**
 * Update the scaling factors to apply to each input during mixing.
 *
 * This balances the full volume range between active inputs and handles
 * volume transitions when EOF is encountered on an input but mixing continues
 * with the remaining inputs.
 */
static void calculate_scales(MixContext *s, int nb_samples)
{
    int i;

    if (s->scale_norm > s->active_inputs) {
        s->scale_norm -= nb_samples / (s->dropout_transition * s->sample_rate);
        s->scale_norm = FFMAX(s->scale_norm, s->active_inputs);
    }

    for (i = 0; i < s->nb_inputs; i++) {
        if (s->input_state[i] == INPUT_ON)
            s->input_scale[i] = 1.0f / s->scale_norm;
        else
            s->input_scale[i] = 0.0f;
    }
}
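
/*
 * Numerically (illustrative figures): with three active inputs each input is
 * scaled by 1/3. When one input reaches EOF, active_inputs drops to 2 while
 * scale_norm still holds 3, so scale_norm is reduced by
 * nb_samples / (dropout_transition * sample_rate) on every call until it
 * reaches 2; the surviving inputs therefore ramp smoothly from a gain of 1/3
 * up to 1/2 over roughly dropout_transition seconds instead of jumping.
 */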

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;
    int i;
    char buf[64];

    s->planar = av_sample_fmt_is_planar(outlink->format);
    s->sample_rate = outlink->sample_rate;
    outlink->time_base = (AVRational){ 1, outlink->sample_rate };
    s->next_pts = AV_NOPTS_VALUE;

    s->frame_list = av_mallocz(sizeof(*s->frame_list));
    if (!s->frame_list)
        return AVERROR(ENOMEM);

    s->fifos = av_mallocz(s->nb_inputs * sizeof(*s->fifos));
    if (!s->fifos)
        return AVERROR(ENOMEM);

    s->nb_channels = av_get_channel_layout_nb_channels(outlink->channel_layout);
    for (i = 0; i < s->nb_inputs; i++) {
        s->fifos[i] = av_audio_fifo_alloc(outlink->format, s->nb_channels, 1024);
        if (!s->fifos[i])
            return AVERROR(ENOMEM);
    }

    s->input_state = av_malloc(s->nb_inputs);
    if (!s->input_state)
        return AVERROR(ENOMEM);
    memset(s->input_state, INPUT_ON, s->nb_inputs);
    s->active_inputs = s->nb_inputs;

    s->input_scale = av_mallocz(s->nb_inputs * sizeof(*s->input_scale));
    if (!s->input_scale)
        return AVERROR(ENOMEM);
    s->scale_norm = s->active_inputs;
    calculate_scales(s, 0);

    av_get_channel_layout_string(buf, sizeof(buf), -1, outlink->channel_layout);

    av_log(ctx, AV_LOG_VERBOSE,
           "inputs:%d fmt:%s srate:%d cl:%s\n", s->nb_inputs,
           av_get_sample_fmt_name(outlink->format), outlink->sample_rate, buf);

    return 0;
}

/**
 * Read samples from the input FIFOs, mix, and write to the output link.
 */
static int output_frame(AVFilterLink *outlink, int nb_samples)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;
    AVFrame *out_buf, *in_buf;
    int i;

    calculate_scales(s, nb_samples);

    out_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!out_buf)
        return AVERROR(ENOMEM);

    in_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!in_buf) {
        av_frame_free(&out_buf);
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < s->nb_inputs; i++) {
        if (s->input_state[i] == INPUT_ON) {
            int planes, plane_size, p;

            av_audio_fifo_read(s->fifos[i], (void **)in_buf->extended_data,
                               nb_samples);

            planes = s->planar ? s->nb_channels : 1;
            plane_size = nb_samples * (s->planar ? 1 : s->nb_channels);
            plane_size = FFALIGN(plane_size, 16);

            for (p = 0; p < planes; p++) {
                s->fdsp.vector_fmac_scalar((float *)out_buf->extended_data[p],
                                           (float *) in_buf->extended_data[p],
                                           s->input_scale[i], plane_size);
            }
        }
    }
    av_frame_free(&in_buf);

    out_buf->pts = s->next_pts;
    if (s->next_pts != AV_NOPTS_VALUE)
        s->next_pts += nb_samples;

    return ff_filter_frame(outlink, out_buf);
}
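
/*
 * In effect, for every input i that is still INPUT_ON, each output plane p
 * accumulates out[p][n] += input_scale[i] * in_i[p][n] via
 * vector_fmac_scalar(); the output is a weighted sum of the inputs with the
 * weights computed in calculate_scales().
 */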

/**
 * Returns the smallest number of samples available in the input FIFOs other
 * than that of the first input.
 */
static int get_available_samples(MixContext *s)
{
    int i;
    int available_samples = INT_MAX;

    av_assert0(s->nb_inputs > 1);

    for (i = 1; i < s->nb_inputs; i++) {
        int nb_samples;
        if (s->input_state[i] == INPUT_OFF)
            continue;
        nb_samples = av_audio_fifo_size(s->fifos[i]);
        available_samples = FFMIN(available_samples, nb_samples);
    }
    if (available_samples == INT_MAX)
        return 0;
    return available_samples;
}

/**
 * Requests a frame, if needed, from each input link other than the first.
 */
static int request_samples(AVFilterContext *ctx, int min_samples)
{
    MixContext *s = ctx->priv;
    int i, ret;

    av_assert0(s->nb_inputs > 1);

    for (i = 1; i < s->nb_inputs; i++) {
        ret = 0;
        if (s->input_state[i] == INPUT_OFF)
            continue;
        while (!ret && av_audio_fifo_size(s->fifos[i]) < min_samples)
            ret = ff_request_frame(ctx->inputs[i]);
        if (ret == AVERROR_EOF) {
            if (av_audio_fifo_size(s->fifos[i]) == 0) {
                s->input_state[i] = INPUT_OFF;
                continue;
            }
        } else if (ret < 0)
            return ret;
    }
    return 0;
}

/**
 * Calculates the number of active inputs and determines EOF based on the
 * duration option.
 *
 * @return 0 if mixing should continue, or AVERROR_EOF if mixing should stop.
 */
static int calc_active_inputs(MixContext *s)
{
    int i;
    int active_inputs = 0;
    for (i = 0; i < s->nb_inputs; i++)
        active_inputs += !!(s->input_state[i] != INPUT_OFF);
    s->active_inputs = active_inputs;

    if (!active_inputs ||
        (s->duration_mode == DURATION_FIRST && s->input_state[0] == INPUT_OFF) ||
        (s->duration_mode == DURATION_SHORTEST && active_inputs != s->nb_inputs))
        return AVERROR_EOF;
    return 0;
}
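
/*
 * For example, with three inputs: duration=longest keeps mixing until all
 * inputs have ended, duration=shortest returns EOF as soon as any one input
 * ends, and duration=first returns EOF once input 0 (the first input) ends,
 * regardless of the others.
 */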

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s = ctx->priv;
    int ret;
    int wanted_samples, available_samples;

    ret = calc_active_inputs(s);
    if (ret < 0)
        return ret;

    if (s->input_state[0] == INPUT_OFF) {
        ret = request_samples(ctx, 1);
        if (ret < 0)
            return ret;

        ret = calc_active_inputs(s);
        if (ret < 0)
            return ret;

        available_samples = get_available_samples(s);
        if (!available_samples)
            return AVERROR(EAGAIN);

        return output_frame(outlink, available_samples);
    }

    if (s->frame_list->nb_frames == 0) {
        ret = ff_request_frame(ctx->inputs[0]);
        if (ret == AVERROR_EOF) {
            s->input_state[0] = INPUT_OFF;
            if (s->nb_inputs == 1)
                return AVERROR_EOF;
            else
                return AVERROR(EAGAIN);
        } else if (ret < 0)
            return ret;
    }
    av_assert0(s->frame_list->nb_frames > 0);

    wanted_samples = frame_list_next_frame_size(s->frame_list);

    if (s->active_inputs > 1) {
        ret = request_samples(ctx, wanted_samples);
        if (ret < 0)
            return ret;

        ret = calc_active_inputs(s);
        if (ret < 0)
            return ret;
    }

    if (s->active_inputs > 1) {
        available_samples = get_available_samples(s);
        if (!available_samples)
            return AVERROR(EAGAIN);
        available_samples = FFMIN(available_samples, wanted_samples);
    } else {
        available_samples = wanted_samples;
    }

    s->next_pts = frame_list_next_pts(s->frame_list);
    frame_list_remove_samples(s->frame_list, available_samples);

    return output_frame(outlink, available_samples);
}

static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    MixContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int i, ret = 0;

    for (i = 0; i < ctx->nb_inputs; i++)
        if (ctx->inputs[i] == inlink)
            break;
    if (i >= ctx->nb_inputs) {
        av_log(ctx, AV_LOG_ERROR, "unknown input link\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (i == 0) {
        int64_t pts = av_rescale_q(buf->pts, inlink->time_base,
                                   outlink->time_base);
        ret = frame_list_add_frame(s->frame_list, buf->nb_samples, pts);
        if (ret < 0)
            goto fail;
    }

    ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
                              buf->nb_samples);

fail:
    av_frame_free(&buf);

    return ret;
}

static int init(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    int i;

    for (i = 0; i < s->nb_inputs; i++) {
        char name[32];
        AVFilterPad pad = { 0 };

        snprintf(name, sizeof(name), "input%d", i);
        pad.type = AVMEDIA_TYPE_AUDIO;
        pad.name = av_strdup(name);
        pad.filter_frame = filter_frame;

        ff_insert_inpad(ctx, i, &pad);
    }

    avpriv_float_dsp_init(&s->fdsp, 0);

    return 0;
}

static void uninit(AVFilterContext *ctx)
{
    int i;
    MixContext *s = ctx->priv;

    if (s->fifos) {
        for (i = 0; i < s->nb_inputs; i++)
            av_audio_fifo_free(s->fifos[i]);
        av_freep(&s->fifos);
    }
    frame_list_clear(s->frame_list);
    av_freep(&s->frame_list);
    av_freep(&s->input_state);
    av_freep(&s->input_scale);

    for (i = 0; i < ctx->nb_inputs; i++)
        av_freep(&ctx->input_pads[i].name);
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    ff_add_format(&formats, AV_SAMPLE_FMT_FLT);
    ff_add_format(&formats, AV_SAMPLE_FMT_FLTP);
    ff_set_common_formats(ctx, formats);
    ff_set_common_channel_layouts(ctx, ff_all_channel_layouts());
    ff_set_common_samplerates(ctx, ff_all_samplerates());
    return 0;
}

static const AVFilterPad avfilter_af_amix_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame
    },
    { NULL }
};

AVFilter avfilter_af_amix = {
    .name          = "amix",
    .description   = NULL_IF_CONFIG_SMALL("Audio mixing."),
    .priv_size     = sizeof(MixContext),
    .priv_class    = &amix_class,

    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,

    .inputs        = NULL,
    .outputs       = avfilter_af_amix_outputs,

    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
};