/*
 * Copyright (c) 2007 Bobby Bingham
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * FIFO buffering filter
 */

#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/mathematics.h"
#include "libavutil/samplefmt.h"

#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#include "video.h"

typedef struct Buf {
    AVFrame *frame;
    struct Buf *next;
} Buf;

typedef struct {
    Buf  root;
    Buf *last;   ///< last buffered frame

    /**
     * When a specific number of output samples is requested, the partial
     * buffer is stored here
     */
    AVFrame *out;
    int allocated_samples;   ///< number of samples out was allocated for
} FifoContext;

static av_cold int init(AVFilterContext *ctx)
{
    FifoContext *fifo = ctx->priv;
    fifo->last = &fifo->root;

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    FifoContext *fifo = ctx->priv;
    Buf *buf, *tmp;

    for (buf = fifo->root.next; buf; buf = tmp) {
        tmp = buf->next;
        av_frame_free(&buf->frame);
        av_free(buf);
    }

    av_frame_free(&fifo->out);
}

static int add_to_queue(AVFilterLink *inlink, AVFrame *frame)
{
    FifoContext *fifo = inlink->dst->priv;

    fifo->last->next = av_mallocz(sizeof(Buf));
    if (!fifo->last->next) {
        av_frame_free(&frame);
        return AVERROR(ENOMEM);
    }

    fifo->last        = fifo->last->next;
    fifo->last->frame = frame;

    return 0;
}

static void queue_pop(FifoContext *s)
{
    Buf *tmp = s->root.next->next;
    if (s->last == s->root.next)
        s->last = &s->root;
    av_freep(&s->root.next);
    s->root.next = tmp;
}

/**
 * Move data pointers and pts offset samples forward.
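 *
 * Used when only part of a queued audio frame has been consumed: the
 * extended_data pointers, linesize and nb_samples are adjusted in place and
 * the frame's pts is advanced so the remaining samples stay addressable.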
 */
static void buffer_offset(AVFilterLink *link, AVFrame *frame,
                          int offset)
{
    int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);
    int planar = av_sample_fmt_is_planar(link->format);
    int planes = planar ? nb_channels : 1;
    int block_align = av_get_bytes_per_sample(link->format) * (planar ? 1 : nb_channels);
    int i;

    av_assert0(frame->nb_samples > offset);

    for (i = 0; i < planes; i++)
        frame->extended_data[i] += block_align * offset;
    if (frame->data != frame->extended_data)
        memcpy(frame->data, frame->extended_data,
               FFMIN(planes, FF_ARRAY_ELEMS(frame->data)) * sizeof(*frame->data));
    frame->linesize[0] -= block_align * offset;
    frame->nb_samples  -= offset;

    if (frame->pts != AV_NOPTS_VALUE) {
        frame->pts += av_rescale_q(offset, (AVRational){1, link->sample_rate},
                                   link->time_base);
    }
}

static int calc_ptr_alignment(AVFrame *frame)
{
    int planes = av_sample_fmt_is_planar(frame->format) ?
                 av_get_channel_layout_nb_channels(frame->channel_layout) : 1;
    int min_align = 128;
    int p;

    for (p = 0; p < planes; p++) {
        int cur_align = 128;
        while ((intptr_t)frame->extended_data[p] % cur_align)
            cur_align >>= 1;
        if (cur_align < min_align)
            min_align = cur_align;
    }
    return min_align;
}

static int return_audio_frame(AVFilterContext *ctx)
{
    AVFilterLink *link = ctx->outputs[0];
    FifoContext *s = ctx->priv;
    AVFrame *head = s->root.next->frame;
    AVFrame *out;
    int ret;

    if (!s->out &&
        head->nb_samples >= link->request_samples &&
        calc_ptr_alignment(head) >= 32) {
        if (head->nb_samples == link->request_samples) {
            out = head;
            queue_pop(s);
        } else {
            out = av_frame_clone(head);
            if (!out)
                return AVERROR(ENOMEM);

            out->nb_samples = link->request_samples;
            buffer_offset(link, head, link->request_samples);
        }
    } else {
        int nb_channels = av_get_channel_layout_nb_channels(link->channel_layout);

        if (!s->out) {
            s->out = ff_get_audio_buffer(link, link->request_samples);
            if (!s->out)
                return AVERROR(ENOMEM);

            s->out->nb_samples   = 0;
            s->out->pts          = head->pts;
            s->allocated_samples = link->request_samples;
        } else if (link->request_samples != s->allocated_samples) {
            av_log(ctx, AV_LOG_ERROR, "request_samples changed before the "
                   "buffer was returned.\n");
            return AVERROR(EINVAL);
        }

        while (s->out->nb_samples < s->allocated_samples) {
            int len = FFMIN(s->allocated_samples - s->out->nb_samples,
                            head->nb_samples);

            av_samples_copy(s->out->extended_data, head->extended_data,
                            s->out->nb_samples, 0, len, nb_channels,
                            link->format);
            s->out->nb_samples += len;

            if (len == head->nb_samples) {
                av_frame_free(&head);
                queue_pop(s);
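
                /* The head frame has been fully consumed; if the queue is now
                 * empty, pull more data from upstream.  On EOF, pad the rest
                 * of the partial output buffer with silence instead. */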
                if (!s->root.next &&
                    (ret = ff_request_frame(ctx->inputs[0])) < 0) {
                    if (ret == AVERROR_EOF) {
                        av_samples_set_silence(s->out->extended_data,
                                               s->out->nb_samples,
                                               s->allocated_samples -
                                               s->out->nb_samples,
                                               nb_channels, link->format);
                        s->out->nb_samples = s->allocated_samples;
                        break;
                    }
                    return ret;
                }
                head = s->root.next->frame;
            } else {
                buffer_offset(link, head, len);
            }
        }
        out = s->out;
        s->out = NULL;
    }
    return ff_filter_frame(link, out);
}

static int request_frame(AVFilterLink *outlink)
{
    FifoContext *fifo = outlink->src->priv;
    int ret = 0;

    if (!fifo->root.next) {
        if ((ret = ff_request_frame(outlink->src->inputs[0])) < 0)
            return ret;
        av_assert0(fifo->root.next);
    }

    if (outlink->request_samples) {
        return return_audio_frame(outlink->src);
    } else {
        ret = ff_filter_frame(outlink, fifo->root.next->frame);
        queue_pop(fifo);
    }

    return ret;
}

static const AVFilterPad avfilter_vf_fifo_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_VIDEO,
        .get_video_buffer = ff_null_get_video_buffer,
        .filter_frame     = add_to_queue,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_fifo_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter avfilter_vf_fifo = {
    .name        = "fifo",
    .description = NULL_IF_CONFIG_SMALL("Buffer input images and send them when they are requested."),

    .init      = init,
    .uninit    = uninit,

    .priv_size = sizeof(FifoContext),

    .inputs    = avfilter_vf_fifo_inputs,
    .outputs   = avfilter_vf_fifo_outputs,
};

static const AVFilterPad avfilter_af_afifo_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_AUDIO,
        .get_audio_buffer = ff_null_get_audio_buffer,
        .filter_frame     = add_to_queue,
    },
    { NULL }
};

static const AVFilterPad avfilter_af_afifo_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter avfilter_af_afifo = {
    .name        = "afifo",
    .description = NULL_IF_CONFIG_SMALL("Buffer input frames and send them when they are requested."),

    .init      = init,
    .uninit    = uninit,

    .priv_size = sizeof(FifoContext),

    .inputs    = avfilter_af_afifo_inputs,
    .outputs   = avfilter_af_afifo_outputs,
};