/*
 * Copyright (c) 2010 Stefano Sabatini
 * Copyright (c) 2008 Victor Paesa
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * presentation timestamp (PTS) modification filter
 */

#include "libavutil/eval.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"
#include "video.h"

static const char *const var_names[] = {
    "FRAME_RATE",          ///< defined only for constant frame-rate video
    "INTERLACED",          ///< tell if the current frame is interlaced
    "N",                   ///< frame number (starting at zero)
    "NB_CONSUMED_SAMPLES", ///< number of samples consumed by the filter (only audio)
    "NB_SAMPLES",          ///< number of samples in the current frame (only audio)
    "POS",                 ///< original position in the file of the frame
    "PREV_INPTS",          ///< previous  input PTS
    "PREV_INT",            ///< previous  input time in seconds
    "PREV_OUTPTS",         ///< previous output PTS
    "PREV_OUTT",           ///< previous output time in seconds
    "PTS",                 ///< original pts in the file of the frame
    "SAMPLE_RATE",         ///< sample rate (only audio)
    "STARTPTS",            ///< PTS at start of movie
    "STARTT",              ///< time at start of movie
    "T",                   ///< original time in the file of the frame
    "TB",                  ///< timebase
    "RTCTIME",             ///< wallclock (RTC) time in microseconds
    "RTCSTART",            ///< wallclock (RTC) time at the start of the movie in microseconds
    NULL
};

enum var_name {
    VAR_FRAME_RATE,
    VAR_INTERLACED,
    VAR_N,
    VAR_NB_CONSUMED_SAMPLES,
    VAR_NB_SAMPLES,
    VAR_POS,
    VAR_PREV_INPTS,
    VAR_PREV_INT,
    VAR_PREV_OUTPTS,
    VAR_PREV_OUTT,
    VAR_PTS,
    VAR_SAMPLE_RATE,
    VAR_STARTPTS,
    VAR_STARTT,
    VAR_T,
    VAR_TB,
    VAR_RTCTIME,
    VAR_RTCSTART,
    VAR_VARS_NB
};

typedef struct {
    const AVClass *class;
    char *expr_str;
    AVExpr *expr;
    double var_values[VAR_VARS_NB];
    enum AVMediaType type;
} SetPTSContext;

static av_cold int init(AVFilterContext *ctx)
{
    SetPTSContext *setpts = ctx->priv;
    int ret;

    if ((ret = av_expr_parse(&setpts->expr, setpts->expr_str,
                             var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", setpts->expr_str);
        return ret;
    }

    setpts->var_values[VAR_N          ] = 0.0;
    setpts->var_values[VAR_PREV_INPTS ] = setpts->var_values[VAR_PREV_INT ] = NAN;
    setpts->var_values[VAR_PREV_OUTPTS] = setpts->var_values[VAR_PREV_OUTT] = NAN;
    setpts->var_values[VAR_STARTPTS   ] = setpts->var_values[VAR_STARTT   ] = NAN;
    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SetPTSContext *setpts = ctx->priv;

    setpts->type = inlink->type;
    setpts->var_values[VAR_TB] = av_q2d(inlink->time_base);
    setpts->var_values[VAR_RTCSTART] = av_gettime();

    setpts->var_values[VAR_SAMPLE_RATE] =
        setpts->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;

    setpts->var_values[VAR_FRAME_RATE] = inlink->frame_rate.num && inlink->frame_rate.den ?
        av_q2d(inlink->frame_rate) : NAN;

    av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f FRAME_RATE:%f SAMPLE_RATE:%f\n",
           setpts->var_values[VAR_TB],
           setpts->var_values[VAR_FRAME_RATE],
           setpts->var_values[VAR_SAMPLE_RATE]);
    return 0;
}

#define D2TS(d)      (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts)     ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))

#define BUF_SIZE 64

static inline char *double2int64str(char *buf, double v)
{
    if (isnan(v)) snprintf(buf, BUF_SIZE, "nan");
    else          snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)v);
    return buf;
}

#define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    SetPTSContext *setpts = inlink->dst->priv;
    int64_t in_pts = frame->pts;
    double d;

    if (isnan(setpts->var_values[VAR_STARTPTS])) {
        setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts);
        setpts->var_values[VAR_STARTT  ] = TS2T(frame->pts, inlink->time_base);
    }
    setpts->var_values[VAR_PTS       ] = TS2D(frame->pts);
    setpts->var_values[VAR_T         ] = TS2T(frame->pts, inlink->time_base);
    setpts->var_values[VAR_POS       ] = av_frame_get_pkt_pos(frame) == -1 ?
                                         NAN : av_frame_get_pkt_pos(frame);
    setpts->var_values[VAR_RTCTIME   ] = av_gettime();

    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
        break;

    case AVMEDIA_TYPE_AUDIO:
        setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
        break;
    }

    d = av_expr_eval(setpts->expr, setpts->var_values, NULL);

    av_log(inlink->dst, AV_LOG_DEBUG,
           "N:%"PRId64" PTS:%s T:%f POS:%s",
           (int64_t)setpts->var_values[VAR_N],
           d2istr(setpts->var_values[VAR_PTS]),
           setpts->var_values[VAR_T],
           d2istr(setpts->var_values[VAR_POS]));
    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_log(inlink->dst, AV_LOG_DEBUG, " INTERLACED:%"PRId64,
               (int64_t)setpts->var_values[VAR_INTERLACED]);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_log(inlink->dst, AV_LOG_DEBUG, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
               (int64_t)setpts->var_values[VAR_NB_SAMPLES],
               (int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]);
        break;
    }
    av_log(inlink->dst, AV_LOG_DEBUG, " -> PTS:%s T:%f\n", d2istr(d), TS2T(d, inlink->time_base));

    frame->pts = D2TS(d);

    setpts->var_values[VAR_PREV_INPTS ] = TS2D(in_pts);
    setpts->var_values[VAR_PREV_INT   ] = TS2T(in_pts, inlink->time_base);
    setpts->var_values[VAR_PREV_OUTPTS] = TS2D(frame->pts);
    setpts->var_values[VAR_PREV_OUTT  ] = TS2T(frame->pts, inlink->time_base);
    setpts->var_values[VAR_N] += 1.0;
    if (setpts->type == AVMEDIA_TYPE_AUDIO) {
        setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->nb_samples;
    }
    return ff_filter_frame(inlink->dst->outputs[0], frame);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SetPTSContext *setpts = ctx->priv;
    av_expr_free(setpts->expr);
    setpts->expr = NULL;
}

#if CONFIG_ASETPTS_FILTER

#define OFFSET(x) offsetof(SetPTSContext, x)
#define AFLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption aoptions[] = {
    { "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = AFLAGS },
    { NULL },
};

static const AVClass asetpts_class = {
    .class_name = "asetpts",
    .item_name  = av_default_item_name,
    .option     = aoptions,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVFilterPad avfilter_af_asetpts_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_AUDIO,
        .get_audio_buffer = ff_null_get_audio_buffer,
        .config_props     = config_input,
        .filter_frame     = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_af_asetpts_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter avfilter_af_asetpts = {
    .name        = "asetpts",
    .description =
        NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."),
    .init        = init,
    .uninit      = uninit,
    .priv_size   = sizeof(SetPTSContext),
    .priv_class  = &asetpts_class,
    .inputs      = avfilter_af_asetpts_inputs,
    .outputs     = avfilter_af_asetpts_outputs,
};
#endif /* CONFIG_ASETPTS_FILTER */

#if CONFIG_SETPTS_FILTER

#define OFFSET(x) offsetof(SetPTSContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption options[] = {
    { "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = FLAGS },
    { NULL },
};

static const AVClass setpts_class = {
    .class_name = "setpts",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVFilterPad avfilter_vf_setpts_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_VIDEO,
        .get_video_buffer = ff_null_get_video_buffer,
        .config_props     = config_input,
        .filter_frame     = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_setpts_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter avfilter_vf_setpts = {
    .name        = "setpts",
    .description = NULL_IF_CONFIG_SMALL("Set PTS for the output video frame."),
    .init        = init,
    .uninit      = uninit,

    .priv_size   = sizeof(SetPTSContext),
    .priv_class  = &setpts_class,

    .inputs      = avfilter_vf_setpts_inputs,
    .outputs     = avfilter_vf_setpts_outputs,
};
#endif /* CONFIG_SETPTS_FILTER */
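
/*
 * Illustrative usage (sketch, not part of the filter implementation):
 * the "expr" option is evaluated per frame against the variables listed
 * in var_names above. Typical expressions, shown here with placeholder
 * file names, would be:
 *
 *   # shift timestamps so the stream starts at PTS 0
 *   ffmpeg -i INPUT -vf "setpts=PTS-STARTPTS" OUTPUT
 *
 *   # double each timestamp, i.e. play the video at half speed
 *   ffmpeg -i INPUT -vf "setpts=2*PTS" OUTPUT
 *
 *   # same kind of shift on an audio stream via asetpts
 *   ffmpeg -i INPUT -af "asetpts=PTS-STARTPTS" OUTPUT
 */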