/*
 * Copyright (c) 2007 Bobby Bingham
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * FIFO buffering filter
 */

|
yading@10
|
26 #include "libavutil/avassert.h"
|
yading@10
|
27 #include "libavutil/channel_layout.h"
|
yading@10
|
28 #include "libavutil/common.h"
|
yading@10
|
29 #include "libavutil/mathematics.h"
|
yading@10
|
30 #include "libavutil/samplefmt.h"
|
yading@10
|
31
|
yading@10
|
32 #include "audio.h"
|
yading@10
|
33 #include "avfilter.h"
|
yading@10
|
34 #include "internal.h"
|
yading@10
|
35 #include "video.h"
|
yading@10
|
36
|
yading@10
|
typedef struct Buf {
    AVFrame *frame;      ///< the buffered frame (owned by this node)
    struct Buf *next;    ///< next node in the singly linked queue, NULL at the tail
} Buf;
|
yading@10
|
41
|
yading@10
|
typedef struct {
    Buf root;           ///< dummy list head; root.next is the oldest queued frame
    Buf *last;          ///< last buffered frame (tail of the queue; &root when empty)

    /**
     * When a specific number of output samples is requested, the partial
     * buffer is stored here
     */
    AVFrame *out;
    int allocated_samples; ///< number of samples out was allocated for
} FifoContext;
|
yading@10
|
53
|
yading@10
|
54 static av_cold int init(AVFilterContext *ctx)
|
yading@10
|
55 {
|
yading@10
|
56 FifoContext *fifo = ctx->priv;
|
yading@10
|
57 fifo->last = &fifo->root;
|
yading@10
|
58
|
yading@10
|
59 return 0;
|
yading@10
|
60 }
|
yading@10
|
61
|
yading@10
|
62 static av_cold void uninit(AVFilterContext *ctx)
|
yading@10
|
63 {
|
yading@10
|
64 FifoContext *fifo = ctx->priv;
|
yading@10
|
65 Buf *buf, *tmp;
|
yading@10
|
66
|
yading@10
|
67 for (buf = fifo->root.next; buf; buf = tmp) {
|
yading@10
|
68 tmp = buf->next;
|
yading@10
|
69 av_frame_free(&buf->frame);
|
yading@10
|
70 av_free(buf);
|
yading@10
|
71 }
|
yading@10
|
72
|
yading@10
|
73 av_frame_free(&fifo->out);
|
yading@10
|
74 }
|
yading@10
|
75
|
yading@10
|
76 static int add_to_queue(AVFilterLink *inlink, AVFrame *frame)
|
yading@10
|
77 {
|
yading@10
|
78 FifoContext *fifo = inlink->dst->priv;
|
yading@10
|
79
|
yading@10
|
80 fifo->last->next = av_mallocz(sizeof(Buf));
|
yading@10
|
81 if (!fifo->last->next) {
|
yading@10
|
82 av_frame_free(&frame);
|
yading@10
|
83 return AVERROR(ENOMEM);
|
yading@10
|
84 }
|
yading@10
|
85
|
yading@10
|
86 fifo->last = fifo->last->next;
|
yading@10
|
87 fifo->last->frame = frame;
|
yading@10
|
88
|
yading@10
|
89 return 0;
|
yading@10
|
90 }
|
yading@10
|
91
|
yading@10
|
92 static void queue_pop(FifoContext *s)
|
yading@10
|
93 {
|
yading@10
|
94 Buf *tmp = s->root.next->next;
|
yading@10
|
95 if (s->last == s->root.next)
|
yading@10
|
96 s->last = &s->root;
|
yading@10
|
97 av_freep(&s->root.next);
|
yading@10
|
98 s->root.next = tmp;
|
yading@10
|
99 }
|
yading@10
|
100
|
yading@10
|
/**
 * Move data pointers and pts offset samples forward.
 *
 * Advances every plane pointer of an audio frame past the first
 * @p offset samples (which are assumed already consumed) and rescales
 * the frame's pts accordingly.
 */
static void buffer_offset(AVFilterLink *link, AVFrame *frame,
                          int offset)
{
    int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
    int planar = av_sample_fmt_is_planar(link->format);
    /* planar: one plane per channel; packed: a single interleaved plane */
    int planes = planar ? nb_channels : 1;
    /* bytes occupied by one sample period in each plane */
    int block_align = av_get_bytes_per_sample(link->format) * (planar ? 1 : nb_channels);
    int i;

    /* the frame must keep at least one sample after the offset */
    av_assert0(frame->nb_samples > offset);

    for (i = 0; i < planes; i++)
        frame->extended_data[i] += block_align * offset;
    /* keep the fixed-size data[] array in sync with extended_data when
     * the latter is a separately allocated (high channel count) array */
    if (frame->data != frame->extended_data)
        memcpy(frame->data, frame->extended_data,
               FFMIN(planes, FF_ARRAY_ELEMS(frame->data)) * sizeof(*frame->data));
    /* NOTE(review): only linesize[0] is adjusted — for audio frames all
     * planes share this single linesize value */
    frame->linesize[0] -= block_align*offset;
    frame->nb_samples -= offset;

    if (frame->pts != AV_NOPTS_VALUE) {
        /* convert the consumed sample count into link time_base units */
        frame->pts += av_rescale_q(offset, (AVRational){1, link->sample_rate},
                                   link->time_base);
    }
}
|
yading@10
|
128
|
yading@10
|
129 static int calc_ptr_alignment(AVFrame *frame)
|
yading@10
|
130 {
|
yading@10
|
131 int planes = av_sample_fmt_is_planar(frame->format) ?
|
yading@10
|
132 av_get_channel_layout_nb_channels(frame->channel_layout) : 1;
|
yading@10
|
133 int min_align = 128;
|
yading@10
|
134 int p;
|
yading@10
|
135
|
yading@10
|
136 for (p = 0; p < planes; p++) {
|
yading@10
|
137 int cur_align = 128;
|
yading@10
|
138 while ((intptr_t)frame->extended_data[p] % cur_align)
|
yading@10
|
139 cur_align >>= 1;
|
yading@10
|
140 if (cur_align < min_align)
|
yading@10
|
141 min_align = cur_align;
|
yading@10
|
142 }
|
yading@10
|
143 return min_align;
|
yading@10
|
144 }
|
yading@10
|
145
|
yading@10
|
/**
 * Assemble and send one audio frame of exactly link->request_samples
 * samples on the output link, drawing data from the queued input frames.
 *
 * Fast path: when no partial buffer is pending, the head frame alone can
 * satisfy the request, and its planes are at least 32-byte aligned, the
 * head is passed through (exact fit) or cloned and trimmed (surplus).
 * Slow path: samples are accumulated into s->out across several input
 * frames; on EOF the remainder is padded with silence.
 *
 * @return the return value of ff_filter_frame(), or a negative AVERROR
 */
static int return_audio_frame(AVFilterContext *ctx)
{
    AVFilterLink *link = ctx->outputs[0];
    FifoContext *s = ctx->priv;
    AVFrame *head = s->root.next->frame;
    AVFrame *out;
    int ret;

    if (!s->out &&
        head->nb_samples >= link->request_samples &&
        calc_ptr_alignment(head) >= 32) {
        if (head->nb_samples == link->request_samples) {
            /* exact fit: hand the head frame through untouched */
            out = head;
            queue_pop(s);
        } else {
            /* surplus: send a trimmed clone, then advance the head frame
             * past the consumed samples for the next request */
            out = av_frame_clone(head);
            if (!out)
                return AVERROR(ENOMEM);

            out->nb_samples = link->request_samples;
            buffer_offset(link, head, link->request_samples);
        }
    } else {
        int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);

        if (!s->out) {
            /* start a fresh accumulation buffer; it inherits the pts of
             * the first frame that feeds it */
            s->out = ff_get_audio_buffer(link, link->request_samples);
            if (!s->out)
                return AVERROR(ENOMEM);

            s->out->nb_samples = 0;
            s->out->pts = head->pts;
            s->allocated_samples = link->request_samples;
        } else if (link->request_samples != s->allocated_samples) {
            /* the pending partial buffer was sized for a different
             * request; changing it mid-flight is not supported */
            av_log(ctx, AV_LOG_ERROR, "request_samples changed before the "
                   "buffer was returned.\n");
            return AVERROR(EINVAL);
        }

        /* copy from queued frames until the buffer is full */
        while (s->out->nb_samples < s->allocated_samples) {
            int len = FFMIN(s->allocated_samples - s->out->nb_samples,
                            head->nb_samples);

            av_samples_copy(s->out->extended_data, head->extended_data,
                            s->out->nb_samples, 0, len, nb_channels,
                            link->format);
            s->out->nb_samples += len;

            if (len == head->nb_samples) {
                /* head fully consumed: free it, pop its node, and pull
                 * more input if the queue ran dry */
                av_frame_free(&head);
                queue_pop(s);

                if (!s->root.next &&
                    (ret = ff_request_frame(ctx->inputs[0])) < 0) {
                    if (ret == AVERROR_EOF) {
                        /* end of stream: pad the remainder with silence
                         * and flush what we have */
                        av_samples_set_silence(s->out->extended_data,
                                               s->out->nb_samples,
                                               s->allocated_samples -
                                               s->out->nb_samples,
                                               nb_channels, link->format);
                        s->out->nb_samples = s->allocated_samples;
                        break;
                    }
                    return ret;
                }
                head = s->root.next->frame;
            } else {
                /* head only partially consumed: advance its pointers */
                buffer_offset(link, head, len);
            }
        }
        out = s->out;
        s->out = NULL;
    }
    return ff_filter_frame(link, out);
}
|
yading@10
|
221
|
yading@10
|
222 static int request_frame(AVFilterLink *outlink)
|
yading@10
|
223 {
|
yading@10
|
224 FifoContext *fifo = outlink->src->priv;
|
yading@10
|
225 int ret = 0;
|
yading@10
|
226
|
yading@10
|
227 if (!fifo->root.next) {
|
yading@10
|
228 if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0)
|
yading@10
|
229 return ret;
|
yading@10
|
230 av_assert0(fifo->root.next);
|
yading@10
|
231 }
|
yading@10
|
232
|
yading@10
|
233 if (outlink->request_samples) {
|
yading@10
|
234 return return_audio_frame(outlink->src);
|
yading@10
|
235 } else {
|
yading@10
|
236 ret = ff_filter_frame(outlink, fifo->root.next->frame);
|
yading@10
|
237 queue_pop(fifo);
|
yading@10
|
238 }
|
yading@10
|
239
|
yading@10
|
240 return ret;
|
yading@10
|
241 }
|
yading@10
|
242
|
yading@10
|
/* video input pad: every incoming frame is appended to the queue */
static const AVFilterPad avfilter_vf_fifo_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_VIDEO,
        .get_video_buffer = ff_null_get_video_buffer,
        .filter_frame     = add_to_queue,
    },
    { NULL }
};
|
yading@10
|
252
|
yading@10
|
/* video output pad: frames leave the queue on downstream request */
static const AVFilterPad avfilter_vf_fifo_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
    },
    { NULL }
};
|
yading@10
|
261
|
yading@10
|
/* video FIFO filter definition */
AVFilter avfilter_vf_fifo = {
    .name        = "fifo",
    .description = NULL_IF_CONFIG_SMALL("Buffer input images and send them when they are requested."),

    .init        = init,
    .uninit      = uninit,

    .priv_size   = sizeof(FifoContext),

    .inputs      = avfilter_vf_fifo_inputs,
    .outputs     = avfilter_vf_fifo_outputs,
};
|
yading@10
|
274
|
yading@10
|
/* audio input pad: shares add_to_queue with the video filter */
static const AVFilterPad avfilter_af_afifo_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_AUDIO,
        .get_audio_buffer = ff_null_get_audio_buffer,
        .filter_frame     = add_to_queue,
    },
    { NULL }
};
|
yading@10
|
284
|
yading@10
|
/* audio output pad: request_frame handles sample-exact output sizes */
static const AVFilterPad avfilter_af_afifo_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
    },
    { NULL }
};
|
yading@10
|
293
|
yading@10
|
/* audio FIFO filter definition: same context and callbacks as "fifo" */
AVFilter avfilter_af_afifo = {
    .name        = "afifo",
    .description = NULL_IF_CONFIG_SMALL("Buffer input frames and send them when they are requested."),

    .init        = init,
    .uninit      = uninit,

    .priv_size   = sizeof(FifoContext),

    .inputs      = avfilter_af_afifo_inputs,
    .outputs     = avfilter_af_afifo_outputs,
};
|