/*
 * Copyright (c) 2010 Stefano Sabatini
 * Copyright (c) 2008 Victor Paesa
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * video presentation timestamp (PTS) modification filter
 */

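/*
 * Illustrative expressions (typical usage; not part of the original source
 * comments, shown only as examples of the variables defined below):
 *   setpts=PTS-STARTPTS   shift timestamps so the stream starts at zero
 *   setpts=0.5*PTS        halve every timestamp (plays back twice as fast)
 *   setpts=N/(25*TB)      derive timestamps from the frame count at 25 fps
 */
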
#include "libavutil/eval.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"
#include "video.h"

static const char *const var_names[] = {
    "FRAME_RATE",           ///< defined only for constant frame-rate video
    "INTERLACED",           ///< tell if the current frame is interlaced
    "N",                    ///< frame number (starting at zero)
    "NB_CONSUMED_SAMPLES",  ///< number of samples consumed by the filter (only audio)
    "NB_SAMPLES",           ///< number of samples in the current frame (only audio)
    "POS",                  ///< original position in the file of the frame
    "PREV_INPTS",           ///< previous input PTS
    "PREV_INT",             ///< previous input time in seconds
    "PREV_OUTPTS",          ///< previous output PTS
    "PREV_OUTT",            ///< previous output time in seconds
    "PTS",                  ///< original pts in the file of the frame
    "SAMPLE_RATE",          ///< sample rate (only audio)
    "STARTPTS",             ///< PTS at start of movie
    "STARTT",               ///< time at start of movie
    "T",                    ///< original time in the file of the frame
    "TB",                   ///< timebase
    "RTCTIME",              ///< wallclock (RTC) time in micro seconds
    "RTCSTART",             ///< wallclock (RTC) time at the start of the movie in micro seconds
    NULL
};

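/* Indices into var_values[]; must stay in the same order as var_names[] above. */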
enum var_name {
    VAR_FRAME_RATE,
    VAR_INTERLACED,
    VAR_N,
    VAR_NB_CONSUMED_SAMPLES,
    VAR_NB_SAMPLES,
    VAR_POS,
    VAR_PREV_INPTS,
    VAR_PREV_INT,
    VAR_PREV_OUTPTS,
    VAR_PREV_OUTT,
    VAR_PTS,
    VAR_SAMPLE_RATE,
    VAR_STARTPTS,
    VAR_STARTT,
    VAR_T,
    VAR_TB,
    VAR_RTCTIME,
    VAR_RTCSTART,
    VAR_VARS_NB
};

typedef struct {
    const AVClass *class;
    char *expr_str;
    AVExpr *expr;
    double var_values[VAR_VARS_NB];
    enum AVMediaType type;
} SetPTSContext;

static av_cold int init(AVFilterContext *ctx)
{
    SetPTSContext *setpts = ctx->priv;
    int ret;

    if ((ret = av_expr_parse(&setpts->expr, setpts->expr_str,
                             var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", setpts->expr_str);
        return ret;
    }

    setpts->var_values[VAR_N          ] = 0.0;
    setpts->var_values[VAR_PREV_INPTS ] = setpts->var_values[VAR_PREV_INT ] = NAN;
    setpts->var_values[VAR_PREV_OUTPTS] = setpts->var_values[VAR_PREV_OUTT] = NAN;
    setpts->var_values[VAR_STARTPTS   ] = setpts->var_values[VAR_STARTT   ] = NAN;
    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SetPTSContext *setpts = ctx->priv;

    setpts->type = inlink->type;
    setpts->var_values[VAR_TB] = av_q2d(inlink->time_base);
    setpts->var_values[VAR_RTCSTART] = av_gettime();

    setpts->var_values[VAR_SAMPLE_RATE] =
        setpts->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;

    setpts->var_values[VAR_FRAME_RATE] = inlink->frame_rate.num && inlink->frame_rate.den ?
        av_q2d(inlink->frame_rate) : NAN;

    av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f FRAME_RATE:%f SAMPLE_RATE:%f\n",
           setpts->var_values[VAR_TB],
           setpts->var_values[VAR_FRAME_RATE],
           setpts->var_values[VAR_SAMPLE_RATE]);
    return 0;
}

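/* Conversions between the expression's double values and stream timestamps:
 * D2TS maps NAN to AV_NOPTS_VALUE, TS2D does the reverse, and TS2T also
 * scales by the time base to obtain a time in seconds. */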
#define D2TS(d)  (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))

#define BUF_SIZE 64

static inline char *double2int64str(char *buf, double v)
{
    if (isnan(v)) snprintf(buf, BUF_SIZE, "nan");
    else          snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)v);
    return buf;
}

#define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    SetPTSContext *setpts = inlink->dst->priv;
    int64_t in_pts = frame->pts;
    double d;

    /* On the first frame, record the PTS and time the stream starts at. */
    if (isnan(setpts->var_values[VAR_STARTPTS])) {
        setpts->var_values[VAR_STARTPTS] = TS2D(frame->pts);
        setpts->var_values[VAR_STARTT  ] = TS2T(frame->pts, inlink->time_base);
    }
    setpts->var_values[VAR_PTS     ] = TS2D(frame->pts);
    setpts->var_values[VAR_T       ] = TS2T(frame->pts, inlink->time_base);
    setpts->var_values[VAR_POS     ] = av_frame_get_pkt_pos(frame) == -1 ? NAN : av_frame_get_pkt_pos(frame);
    setpts->var_values[VAR_RTCTIME ] = av_gettime();

    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
        break;

    case AVMEDIA_TYPE_AUDIO:
        setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
        break;
    }

    /* Evaluate the user expression with the per-frame variables filled in. */
    d = av_expr_eval(setpts->expr, setpts->var_values, NULL);

    av_log(inlink->dst, AV_LOG_DEBUG,
           "N:%"PRId64" PTS:%s T:%f POS:%s",
           (int64_t)setpts->var_values[VAR_N],
           d2istr(setpts->var_values[VAR_PTS]),
           setpts->var_values[VAR_T],
           d2istr(setpts->var_values[VAR_POS]));
    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_log(inlink->dst, AV_LOG_DEBUG, " INTERLACED:%"PRId64,
               (int64_t)setpts->var_values[VAR_INTERLACED]);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_log(inlink->dst, AV_LOG_DEBUG, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
               (int64_t)setpts->var_values[VAR_NB_SAMPLES],
               (int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]);
        break;
    }
    av_log(inlink->dst, AV_LOG_DEBUG, " -> PTS:%s T:%f\n", d2istr(d), TS2T(d, inlink->time_base));

    frame->pts = D2TS(d);

    /* Remember input and output timestamps for the PREV_* variables of the next frame. */
    setpts->var_values[VAR_PREV_INPTS ] = TS2D(in_pts);
    setpts->var_values[VAR_PREV_INT   ] = TS2T(in_pts, inlink->time_base);
    setpts->var_values[VAR_PREV_OUTPTS] = TS2D(frame->pts);
    setpts->var_values[VAR_PREV_OUTT  ] = TS2T(frame->pts, inlink->time_base);
    setpts->var_values[VAR_N] += 1.0;
    if (setpts->type == AVMEDIA_TYPE_AUDIO) {
        setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->nb_samples;
    }
    return ff_filter_frame(inlink->dst->outputs[0], frame);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SetPTSContext *setpts = ctx->priv;
    av_expr_free(setpts->expr);
    setpts->expr = NULL;
}

#if CONFIG_ASETPTS_FILTER

#define OFFSET(x) offsetof(SetPTSContext, x)
#define AFLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption aoptions[] = {
    { "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = AFLAGS },
    { NULL },
};

static const AVClass asetpts_class = {
    .class_name = "asetpts",
    .item_name  = av_default_item_name,
    .option     = aoptions,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVFilterPad avfilter_af_asetpts_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_AUDIO,
        .get_audio_buffer = ff_null_get_audio_buffer,
        .config_props     = config_input,
        .filter_frame     = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_af_asetpts_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter avfilter_af_asetpts = {
    .name        = "asetpts",
    .description = NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."),
    .init        = init,
    .uninit      = uninit,
    .priv_size   = sizeof(SetPTSContext),
    .priv_class  = &asetpts_class,
    .inputs      = avfilter_af_asetpts_inputs,
    .outputs     = avfilter_af_asetpts_outputs,
};
#endif /* CONFIG_ASETPTS_FILTER */
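/* The video variant below mirrors the audio filter above: it reuses the same
 * init/uninit/config_input/filter_frame callbacks and differs only in the pad
 * media type and in using AV_OPT_FLAG_VIDEO_PARAM for its option. */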

#if CONFIG_SETPTS_FILTER

#define OFFSET(x) offsetof(SetPTSContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption options[] = {
    { "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = FLAGS },
    { NULL },
};

static const AVClass setpts_class = {
    .class_name = "setpts",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

static const AVFilterPad avfilter_vf_setpts_inputs[] = {
    {
        .name             = "default",
        .type             = AVMEDIA_TYPE_VIDEO,
        .get_video_buffer = ff_null_get_video_buffer,
        .config_props     = config_input,
        .filter_frame     = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_setpts_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter avfilter_vf_setpts = {
    .name        = "setpts",
    .description = NULL_IF_CONFIG_SMALL("Set PTS for the output video frame."),
    .init        = init,
    .uninit      = uninit,

    .priv_size   = sizeof(SetPTSContext),
    .priv_class  = &setpts_class,

    .inputs      = avfilter_vf_setpts_inputs,
    .outputs     = avfilter_vf_setpts_outputs,
};
#endif /* CONFIG_SETPTS_FILTER */