/*
 * Copyright (c) 2011 Nicolas George
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Audio merging filter
 */

#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libswresample/swresample.h" // only for SWR_CH_MAX
#include "avfilter.h"
#include "audio.h"
#include "bufferqueue.h"
#include "internal.h"

typedef struct {
    const AVClass *class;
    int nb_inputs;
    int route[SWR_CH_MAX]; /**< channels routing, see copy_samples */
    int bps;
    struct amerge_input {
        struct FFBufQueue queue;
        int nb_ch;         /**< number of channels for the input */
        int nb_samples;
        int pos;
    } *in;
} AMergeContext;

#define OFFSET(x) offsetof(AMergeContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption amerge_options[] = {
    { "inputs", "specify the number of inputs", OFFSET(nb_inputs),
      AV_OPT_TYPE_INT, { .i64 = 2 }, 2, SWR_CH_MAX, FLAGS },
    {0}
};

AVFILTER_DEFINE_CLASS(amerge);

static av_cold void uninit(AVFilterContext *ctx)
{
    AMergeContext *am = ctx->priv;
    int i;

    for (i = 0; i < am->nb_inputs; i++) {
        if (am->in)
            ff_bufqueue_discard_all(&am->in[i].queue);
        if (ctx->input_pads)
            av_freep(&ctx->input_pads[i].name);
    }
    av_freep(&am->in);
}
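
/*
 * Routing overview (illustrative example; the authoritative logic is the
 * code in query_formats() and copy_samples() below): am->route maps global
 * input channel i (all inputs concatenated in pad order) to output slot
 * route[i].  With non-overlapping layouts, output slots follow increasing
 * channel designation bits.  E.g. for in0 = mono (FC, bit 2) and
 * in1 = stereo (FL|FR, bits 0 and 1):
 *
 *     bit 0 (FL) -> in1 ch0 -> out 0
 *     bit 1 (FR) -> in1 ch1 -> out 1
 *     bit 2 (FC) -> in0 ch0 -> out 2
 *
 * so am->route = { 2, 0, 1 } and the output layout is FL|FR|FC (3.0).
 * With overlapping layouts the routing is the identity and the output
 * layout is the default layout for the total channel count.
 */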

static int query_formats(AVFilterContext *ctx)
{
    AMergeContext *am = ctx->priv;
    int64_t inlayout[SWR_CH_MAX], outlayout = 0;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    int i, overlap = 0, nb_ch = 0;

    for (i = 0; i < am->nb_inputs; i++) {
        if (!ctx->inputs[i]->in_channel_layouts ||
            !ctx->inputs[i]->in_channel_layouts->nb_channel_layouts) {
            av_log(ctx, AV_LOG_WARNING,
                   "No channel layout for input %d\n", i + 1);
            return AVERROR(EAGAIN);
        }
        inlayout[i] = ctx->inputs[i]->in_channel_layouts->channel_layouts[0];
        if (ctx->inputs[i]->in_channel_layouts->nb_channel_layouts > 1) {
            char buf[256];
            av_get_channel_layout_string(buf, sizeof(buf), 0, inlayout[i]);
            av_log(ctx, AV_LOG_INFO, "Using \"%s\" for input %d\n", buf, i + 1);
        }
        am->in[i].nb_ch = av_get_channel_layout_nb_channels(inlayout[i]);
        if (outlayout & inlayout[i])
            overlap++;
        outlayout |= inlayout[i];
        nb_ch += am->in[i].nb_ch;
    }
    if (nb_ch > SWR_CH_MAX) {
        av_log(ctx, AV_LOG_ERROR, "Too many channels (max %d)\n", SWR_CH_MAX);
        return AVERROR(EINVAL);
    }
    if (overlap) {
        av_log(ctx, AV_LOG_WARNING,
               "Input channel layouts overlap: "
               "output layout will be determined by the number of distinct input channels\n");
        for (i = 0; i < nb_ch; i++)
            am->route[i] = i;
        outlayout = av_get_default_channel_layout(nb_ch);
        if (!outlayout)
            outlayout = ((int64_t)1 << nb_ch) - 1;
    } else {
        int *route[SWR_CH_MAX];
        int c, out_ch_number = 0;

        route[0] = am->route;
        for (i = 1; i < am->nb_inputs; i++)
            route[i] = route[i - 1] + am->in[i - 1].nb_ch;
        for (c = 0; c < 64; c++)
            for (i = 0; i < am->nb_inputs; i++)
                if ((inlayout[i] >> c) & 1)
                    *(route[i]++) = out_ch_number++;
    }
    formats = ff_make_format_list(ff_packed_sample_fmts_array);
    ff_set_common_formats(ctx, formats);
    for (i = 0; i < am->nb_inputs; i++) {
        layouts = NULL;
        ff_add_channel_layout(&layouts, inlayout[i]);
        ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts);
    }
    layouts = NULL;
    ff_add_channel_layout(&layouts, outlayout);
    ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts);
    ff_set_common_samplerates(ctx, ff_all_samplerates());
    return 0;
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AMergeContext *am = ctx->priv;
    AVBPrint bp;
    int i;

    for (i = 1; i < am->nb_inputs; i++) {
        if (ctx->inputs[i]->sample_rate != ctx->inputs[0]->sample_rate) {
            av_log(ctx, AV_LOG_ERROR,
                   "Inputs must have the same sample rate "
                   "%d for in%d vs %d\n",
                   ctx->inputs[i]->sample_rate, i, ctx->inputs[0]->sample_rate);
            return AVERROR(EINVAL);
        }
    }
    am->bps = av_get_bytes_per_sample(ctx->outputs[0]->format);
    outlink->sample_rate = ctx->inputs[0]->sample_rate;
    outlink->time_base = ctx->inputs[0]->time_base;

    av_bprint_init(&bp, 0, 1);
    for (i = 0; i < am->nb_inputs; i++) {
        av_bprintf(&bp, "%sin%d:", i ? " + " : "", i);
        av_bprint_channel_layout(&bp, -1, ctx->inputs[i]->channel_layout);
    }
    av_bprintf(&bp, " -> out:");
    av_bprint_channel_layout(&bp, -1, ctx->outputs[0]->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE, "%s\n", bp.str);

    return 0;
}

static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AMergeContext *am = ctx->priv;
    int i, ret;

    for (i = 0; i < am->nb_inputs; i++)
        if (!am->in[i].nb_samples)
            if ((ret = ff_request_frame(ctx->inputs[i])) < 0)
                return ret;
    return 0;
}

/**
 * Copy samples from several input streams to one output stream.
 * @param nb_inputs number of inputs
 * @param in        inputs; used only for the nb_ch field
 * @param route     routing values;
 *                  input channel i goes to output channel route[i];
 *                  i <  in[0].nb_ch are the channels from the first input;
 *                  i >= in[0].nb_ch are the channels from the second input
 * @param ins       pointer to the samples of each input, in packed format;
 *                  will be left at the end of the copied samples
 * @param outs      pointer to the samples of the output, in packed format;
 *                  must point to a buffer big enough;
 *                  will be left at the end of the copied samples
 * @param ns        number of samples to copy
 * @param bps       bytes per sample
 */
static inline void copy_samples(int nb_inputs, struct amerge_input in[],
                                int *route, uint8_t *ins[],
                                uint8_t **outs, int ns, int bps)
{
    int *route_cur;
    int i, c, nb_ch = 0;

    for (i = 0; i < nb_inputs; i++)
        nb_ch += in[i].nb_ch;
    while (ns--) {
        route_cur = route;
        for (i = 0; i < nb_inputs; i++) {
            for (c = 0; c < in[i].nb_ch; c++) {
                memcpy((*outs) + bps * *(route_cur++), ins[i], bps);
                ins[i] += bps;
            }
        }
        *outs += nb_ch * bps;
    }
}

static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterContext *ctx = inlink->dst;
    AMergeContext *am = ctx->priv;
    AVFilterLink *const outlink = ctx->outputs[0];
    int input_number;
    int nb_samples, ns, i;
    AVFrame *outbuf, *inbuf[SWR_CH_MAX];
    uint8_t *ins[SWR_CH_MAX], *outs;

    for (input_number = 0; input_number < am->nb_inputs; input_number++)
        if (inlink == ctx->inputs[input_number])
            break;
    av_assert1(input_number < am->nb_inputs);
    if (ff_bufqueue_is_full(&am->in[input_number].queue)) {
        av_frame_free(&insamples);
        return AVERROR(ENOMEM);
    }
    ff_bufqueue_add(ctx, &am->in[input_number].queue, av_frame_clone(insamples));
    am->in[input_number].nb_samples += insamples->nb_samples;
    av_frame_free(&insamples);
    nb_samples = am->in[0].nb_samples;
    for (i = 1; i < am->nb_inputs; i++)
        nb_samples = FFMIN(nb_samples, am->in[i].nb_samples);
    if (!nb_samples)
        return 0;

    outbuf = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
    if (!outbuf)
        return AVERROR(ENOMEM);
    outs = outbuf->data[0];
    for (i = 0; i < am->nb_inputs; i++) {
        inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0);
        ins[i] = inbuf[i]->data[0] +
                 am->in[i].pos * am->in[i].nb_ch * am->bps;
    }
    av_frame_copy_props(outbuf, inbuf[0]);
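    /* The output frame inherits its properties from the oldest queued frame
     * of the first input; its PTS is that frame's PTS shifted by the number
     * of samples already consumed from it, rescaled from a 1/sample_rate
     * time base to the output time base.  For instance (illustrative
     * numbers only), at 48000 Hz with an output time base of 1/48000 and
     * in[0].pos == 1024, the offset added below is exactly 1024. */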
    outbuf->pts = inbuf[0]->pts == AV_NOPTS_VALUE ? AV_NOPTS_VALUE :
                  inbuf[0]->pts +
                  av_rescale_q(am->in[0].pos,
                               (AVRational){ 1, ctx->inputs[0]->sample_rate },
                               ctx->outputs[0]->time_base);

    outbuf->nb_samples     = nb_samples;
    outbuf->channel_layout = outlink->channel_layout;
    av_frame_set_channels(outbuf, outlink->channels);

    while (nb_samples) {
        ns = nb_samples;
        for (i = 0; i < am->nb_inputs; i++)
            ns = FFMIN(ns, inbuf[i]->nb_samples - am->in[i].pos);
        /* Unroll the most common sample formats: speed +~350% for the loop,
           +~13% overall (including two common decoders) */
        switch (am->bps) {
        case 1:
            copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 1);
            break;
        case 2:
            copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 2);
            break;
        case 4:
            copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, 4);
            break;
        default:
            copy_samples(am->nb_inputs, am->in, am->route, ins, &outs, ns, am->bps);
            break;
        }

        nb_samples -= ns;
        for (i = 0; i < am->nb_inputs; i++) {
            am->in[i].nb_samples -= ns;
            am->in[i].pos += ns;
            if (am->in[i].pos == inbuf[i]->nb_samples) {
                am->in[i].pos = 0;
                av_frame_free(&inbuf[i]);
                ff_bufqueue_get(&am->in[i].queue);
                inbuf[i] = ff_bufqueue_peek(&am->in[i].queue, 0);
                ins[i] = inbuf[i] ? inbuf[i]->data[0] : NULL;
            }
        }
    }
    return ff_filter_frame(ctx->outputs[0], outbuf);
}

static av_cold int init(AVFilterContext *ctx)
{
    AMergeContext *am = ctx->priv;
    int i;

    am->in = av_calloc(am->nb_inputs, sizeof(*am->in));
    if (!am->in)
        return AVERROR(ENOMEM);
    for (i = 0; i < am->nb_inputs; i++) {
        char *name = av_asprintf("in%d", i);
        AVFilterPad pad = {
            .name             = name,
            .type             = AVMEDIA_TYPE_AUDIO,
            .filter_frame     = filter_frame,
        };
        if (!name)
            return AVERROR(ENOMEM);
        ff_insert_inpad(ctx, i, &pad);
    }
    return 0;
}

static const AVFilterPad amerge_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter avfilter_af_amerge = {
    .name          = "amerge",
    .description   = NULL_IF_CONFIG_SMALL("Merge two or more audio streams into "
                                          "a single multi-channel stream."),
    .priv_size     = sizeof(AMergeContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = NULL,
    .outputs       = amerge_outputs,
    .priv_class    = &amerge_class,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
};
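
/*
 * Illustrative usage (the filtergraph strings below are examples, not part
 * of this file): inside a lavfi graph,
 *
 *     amovie=left.wav [l]; amovie=right.mp3 [r]; [l] [r] amerge
 *
 * merges two audio sources into a single multi-channel stream, and
 *
 *     ffmpeg -i in.mkv -filter_complex "[0:1][0:2] amerge=inputs=2" \
 *            -c:a pcm_s16le out.mkv
 *
 * merges streams #0:1 and #0:2 of in.mkv, using the "inputs" option
 * declared above.
 */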