/*
 * Copyright (c) 2011 Mina Nagy Zaki
 * Copyright (c) 2000 Edward Beingessner And Sundry Contributors.
 * This source code is freely redistributable and may be used for any purpose.
 * This copyright notice must be maintained. Edward Beingessner And Sundry
 * Contributors are not responsible for the consequences of using this
 * software.
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Stereo Widening Effect. Adds audio cues to move stereo image in
 * front of the listener. Adapted from the libsox earwax effect.
 */
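
/* Typical invocation (illustrative note, not part of the original header):
 *     ffmpeg -i stereo.wav -af earwax out.wav
 * The filter takes no options; query_formats() below pins it to 44100 Hz,
 * signed 16-bit, stereo input. */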

#include "libavutil/channel_layout.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"

#define NUMTAPS 64

static const int8_t filt[NUMTAPS] = {
/* 30°  330° */
    4,   -6,     /* 32 tap stereo FIR filter. */
    4,  -11,     /* One side filters as if the */
   -1,   -5,     /* signal was from 30 degrees */
    3,    3,     /* from the ear, the other as */
   -2,    5,     /* if 330 degrees. */
   -5,    0,
    9,    1,
    6,    3,     /*                         Input */
   -4,   -1,     /*                   Left        Right */
   -5,   -3,     /*                __________   __________ */
   -2,   -5,     /*               |          | |          | */
   -7,    1,     /*           .---|  Hh,0(f) | |  Hh,0(f) |---. */
    6,   -7,     /*          /    |__________| |__________|    \ */
   30,  -29,     /*         /                \ /                \ */
   12,   -3,     /*        /                  X                  \ */
  -11,    4,     /*       /                  / \                  \ */
   -3,    7,     /*  ____V_____   __________V   V__________   _____V____ */
  -20,   23,     /* |          | |          |   |          | |          | */
    2,    0,     /* | Hh,30(f) | | Hh,330(f)|   | Hh,330(f)| | Hh,30(f) | */
    1,   -6,     /* |__________| |__________|   |__________| |__________| */
  -14,   -5,     /*      \     ___      /           \      ___     / */
   15,  -18,     /*       \   /   \    /    _____    \    /   \   / */
    6,    7,     /*        `->| + |<--'    /     \    `-->| + |<-' */
   15,  -10,     /*           \___/      _/       \_      \___/ */
  -14,   22,     /*            \ /         \     /         \ / */
   -7,   -2,     /*             `--->| |             | |<---' */
   -4,    9,     /*                  \_/             \_/ */
    6,  -12,     /*                                      */
    6,   -6,     /*                      Headphones */
    0,  -11,
    0,   -5,
    4,    0};
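
/* Note on the table layout (explanatory, not in the original source): the 64
 * int8_t values are read against interleaved stereo samples, so each column
 * above is effectively a 32-tap filter, one shaping the signal as if it came
 * from 30 degrees, the other as if from 330 degrees. A dot product over one
 * window therefore mixes 32 taps of one channel with 32 taps of the other,
 * producing the crossfeed sketched in the diagram; the accumulated sum is
 * scaled back with ">> 6" in scalarproduct() to roughly renormalize the
 * fixed-point tap values. */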

typedef struct {
    int16_t taps[NUMTAPS * 2];
} EarwaxContext;

static int query_formats(AVFilterContext *ctx)
{
    static const int sample_rates[] = { 44100, -1 };

    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layout = NULL;

    ff_add_format(&formats, AV_SAMPLE_FMT_S16);
    ff_set_common_formats(ctx, formats);
    ff_add_channel_layout(&layout, AV_CH_LAYOUT_STEREO);
    ff_set_common_channel_layouts(ctx, layout);
    ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates));

    return 0;
}
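
/* Explanatory note: the constraints registered above (signed 16-bit, stereo,
 * 44100 Hz) mirror the libsox effect, whose taps appear to have been tuned
 * for CD-rate audio; format negotiation ensures the filter only ever sees
 * samples in exactly this format. */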

//FIXME: replace with DSPContext.scalarproduct_int16
static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin, int16_t *out)
{
    int32_t sample;
    int16_t j;

    while (in < endin) {
        sample = 0;
        for (j = 0; j < NUMTAPS; j++)
            sample += in[j] * filt[j];
        *out = av_clip_int16(sample >> 6);
        out++;
        in++;
    }

    return out;
}
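
/* Descriptive note: for each output sample, scalarproduct() reads NUMTAPS
 * input samples starting at the current position, so callers must stop the
 * [in, endin) range NUMTAPS samples before the end of valid data.
 * filter_frame() below uses it in an overlap-save fashion: the tail of the
 * previous frame (kept in EarwaxContext.taps) is convolved together with the
 * head of the new one, then the body of the new frame is processed, and
 * finally its last NUMTAPS samples are saved for the next call. */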
|
yading@10
|
111
|
yading@10
|
112 static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
|
yading@10
|
113 {
|
yading@10
|
114 AVFilterLink *outlink = inlink->dst->outputs[0];
|
yading@10
|
115 int16_t *taps, *endin, *in, *out;
|
yading@10
|
116 AVFrame *outsamples = ff_get_audio_buffer(inlink, insamples->nb_samples);
|
yading@10
|
117
|
yading@10
|
118 if (!outsamples) {
|
yading@10
|
119 av_frame_free(&insamples);
|
yading@10
|
120 return AVERROR(ENOMEM);
|
yading@10
|
121 }
|
yading@10
|
122 av_frame_copy_props(outsamples, insamples);
|
yading@10
|
123
|
yading@10
|
124 taps = ((EarwaxContext *)inlink->dst->priv)->taps;
|
yading@10
|
125 out = (int16_t *)outsamples->data[0];
|
yading@10
|
126 in = (int16_t *)insamples ->data[0];
|
yading@10
|
127
|
yading@10
|
128 // copy part of new input and process with saved input
|
yading@10
|
129 memcpy(taps+NUMTAPS, in, NUMTAPS * sizeof(*taps));
|
yading@10
|
130 out = scalarproduct(taps, taps + NUMTAPS, out);
|
yading@10
|
131
|
yading@10
|
132 // process current input
|
yading@10
|
133 endin = in + insamples->nb_samples * 2 - NUMTAPS;
|
yading@10
|
134 scalarproduct(in, endin, out);
|
yading@10
|
135
|
yading@10
|
136 // save part of input for next round
|
yading@10
|
137 memcpy(taps, endin, NUMTAPS * sizeof(*taps));
|
yading@10
|
138
|
yading@10
|
139 av_frame_free(&insamples);
|
yading@10
|
140 return ff_filter_frame(outlink, outsamples);
|
yading@10
|
141 }
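
/* Explanatory note: the private context is allocated zero-initialized by
 * libavfilter, so the first NUMTAPS output samples of the very first frame
 * are computed against partly silent history; from then on taps[] always
 * carries the tail of the previous frame. */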

static const AVFilterPad earwax_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad earwax_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter avfilter_af_earwax = {
    .name           = "earwax",
    .description    = NULL_IF_CONFIG_SMALL("Widen the stereo image."),
    .query_formats  = query_formats,
    .priv_size      = sizeof(EarwaxContext),
    .inputs         = earwax_inputs,
    .outputs        = earwax_outputs,
};