libavcodec/srtdec.c
/*
 * SubRip subtitle decoder
 * Copyright (c) 2010 Aurelien Jacobs <aurel@gnuage.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/parseutils.h"
#include "avcodec.h"
#include "ass.h"

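/* Parse an HTML/CSS color value such as "red" or "#ff0000" (stopping at a
 * quote, space or '>') and return it packed as B<<16 | G<<8 | R, i.e. the
 * &HBBGGRR order used by ASS {\c} tags, or -1 on failure.
 * (Descriptive comment added here; not part of the original source.) */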
static int html_color_parse(AVCodecContext *avctx, const char *str)
{
    uint8_t rgba[4];
    if (av_parse_color(rgba, str, strcspn(str, "\" >"), avctx) < 0)
        return -1;
    return rgba[0] | rgba[1] << 8 | rgba[2] << 16;
}

enum {
    PARAM_SIZE,
    PARAM_COLOR,
    PARAM_FACE,
    PARAM_NUMBER
};

typedef struct {
    char tag[128];
    char param[PARAM_NUMBER][128];
} SrtStack;

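/*
 * Convert the markup of one SRT event into ASS override tags in *out and
 * return a pointer just past the consumed input.  Illustrative examples
 * (added here, not part of the original source):
 *   "Hello <b>world</b>"         ->  "Hello {\b1}world{\b0}\r\n"
 *   "<font size="24">hi</font>"  ->  "{\fs24}hi{\fs}\r\n"
 * A non-negative (x1,y1) additionally emits a leading {\an1}{\pos(x1,y1)}
 * or, if (x2,y2) differs, a {\an1}{\move(x1,y1,x2,y2)} override.
 */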
static const char *srt_to_ass(AVCodecContext *avctx, char *out, char *out_end,
                              const char *in, int x1, int y1, int x2, int y2)
{
    char *param, buffer[128], tmp[128];
    int len, tag_close, sptr = 1, line_start = 1, an = 0, end = 0;
    SrtStack stack[16];

    stack[0].tag[0] = 0;
    strcpy(stack[0].param[PARAM_SIZE],  "{\\fs}");
    strcpy(stack[0].param[PARAM_COLOR], "{\\c}");
    strcpy(stack[0].param[PARAM_FACE],  "{\\fn}");

    if (x1 >= 0 && y1 >= 0) {
        if (x2 >= 0 && y2 >= 0 && (x2 != x1 || y2 != y1))
            snprintf(out, out_end-out,
                     "{\\an1}{\\move(%d,%d,%d,%d)}", x1, y1, x2, y2);
        else
            snprintf(out, out_end-out, "{\\an1}{\\pos(%d,%d)}", x1, y1);
        out += strlen(out);
    }

    for (; out < out_end && !end && *in; in++) {
        switch (*in) {
        case '\r':
            break;
        case '\n':
            if (line_start) {
                end = 1;
                break;
            }
            while (out[-1] == ' ')
                out--;
            snprintf(out, out_end-out, "\\N");
            if(out<out_end) out += strlen(out);
            line_start = 1;
            break;
        case ' ':
            if (!line_start)
                *out++ = *in;
            break;
        case '{':    /* skip all {\xxx} substrings except for {\an%d}
                        and all microdvd like styles such as {Y:xxx} */
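            /* Added illustration (not in the original source) of what this
             * case produces:
             *   "{\pos(10,10)}Hi"  ->  "Hi"        (generic override dropped)
             *   "{y:i}Hi"          ->  "Hi"        (MicroDVD style dropped)
             *   "{\an8}Hi"         ->  "{\an8}Hi"  (alignment override kept) */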
            len = 0;
            an += sscanf(in, "{\\an%*1u}%n", &len) >= 0 && len > 0;
            if ((an != 1 && (len = 0, sscanf(in, "{\\%*[^}]}%n", &len) >= 0 && len > 0)) ||
                (len = 0, sscanf(in, "{%*1[CcFfoPSsYy]:%*[^}]}%n", &len) >= 0 && len > 0)) {
                in += len - 1;
            } else
                *out++ = *in;
            break;
        case '<':
            tag_close = in[1] == '/';
            len = 0;
            if (sscanf(in+tag_close+1, "%127[^>]>%n", buffer, &len) >= 1 && len > 0) {
                if ((param = strchr(buffer, ' ')))
                    *param++ = 0;
                if ((!tag_close && sptr < FF_ARRAY_ELEMS(stack)) ||
                    ( tag_close && sptr > 0 && !strcmp(stack[sptr-1].tag, buffer))) {
                    int i, j, unknown = 0;
                    in += len + tag_close;
                    if (!tag_close)
                        memset(stack+sptr, 0, sizeof(*stack));
                    if (!strcmp(buffer, "font")) {
                        if (tag_close) {
                            for (i=PARAM_NUMBER-1; i>=0; i--)
                                if (stack[sptr-1].param[i][0])
                                    for (j=sptr-2; j>=0; j--)
                                        if (stack[j].param[i][0]) {
                                            snprintf(out, out_end-out,
                                                     "%s", stack[j].param[i]);
                                            if(out<out_end) out += strlen(out);
                                            break;
                                        }
                        } else {
                            while (param) {
                                if (!strncmp(param, "size=", 5)) {
                                    unsigned font_size;
                                    param += 5 + (param[5] == '"');
                                    if (sscanf(param, "%u", &font_size) == 1) {
                                        snprintf(stack[sptr].param[PARAM_SIZE],
                                                 sizeof(stack[0].param[PARAM_SIZE]),
                                                 "{\\fs%u}", font_size);
                                    }
                                } else if (!strncmp(param, "color=", 6)) {
                                    param += 6 + (param[6] == '"');
                                    snprintf(stack[sptr].param[PARAM_COLOR],
                                             sizeof(stack[0].param[PARAM_COLOR]),
                                             "{\\c&H%X&}",
                                             html_color_parse(avctx, param));
                                } else if (!strncmp(param, "face=", 5)) {
                                    param += 5 + (param[5] == '"');
                                    len = strcspn(param,
                                                  param[-1] == '"' ? "\"" : " ");
                                    av_strlcpy(tmp, param,
                                               FFMIN(sizeof(tmp), len+1));
                                    param += len;
                                    snprintf(stack[sptr].param[PARAM_FACE],
                                             sizeof(stack[0].param[PARAM_FACE]),
                                             "{\\fn%s}", tmp);
                                }
                                if ((param = strchr(param, ' ')))
                                    param++;
                            }
                            for (i=0; i<PARAM_NUMBER; i++)
                                if (stack[sptr].param[i][0]) {
                                    snprintf(out, out_end-out,
                                             "%s", stack[sptr].param[i]);
                                    if(out<out_end) out += strlen(out);
                                }
                        }
                    } else if (!buffer[1] && strspn(buffer, "bisu") == 1) {
                        snprintf(out, out_end-out,
                                 "{\\%c%d}", buffer[0], !tag_close);
                        if(out<out_end) out += strlen(out);
                    } else {
                        unknown = 1;
                        snprintf(tmp, sizeof(tmp), "</%s>", buffer);
                    }
                    if (tag_close) {
                        sptr--;
                    } else if (unknown && !strstr(in, tmp)) {
                        in -= len + tag_close;
                        *out++ = *in;
                    } else
                        av_strlcpy(stack[sptr++].tag, buffer,
                                   sizeof(stack[0].tag));
                    break;
                }
            }
        default:
            *out++ = *in;
            break;
        }
        if (*in != ' ' && *in != '\r' && *in != '\n')
            line_start = 0;
    }

    out = FFMIN(out, out_end-3);
    while (!strncmp(out-2, "\\N", 2))
        out -= 2;
    while (out[-1] == ' ')
        out--;
    snprintf(out, out_end-out, "\r\n");
    return in;
}

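/*
 * Parse the timing line of an SRT event, e.g. (added example):
 *     00:02:17,440 --> 00:02:20,375 X1:40 X2:600 Y1:20 Y2:50
 * Timestamps are returned in centiseconds, so 00:02:17,440 becomes
 * 100*(17 + 60*(2 + 60*0)) + 440/10 = 13744.  The optional X1/X2/Y1/Y2
 * fields give the event rectangle in pixels.
 */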
static const char *read_ts(const char *buf, int *ts_start, int *ts_end,
                           int *x1, int *y1, int *x2, int *y2)
{
    int i, hs, ms, ss, he, me, se;

    for (i=0; i<2; i++) {
        /* try to read timestamps in either the first or second line */
        int c = sscanf(buf, "%d:%2d:%2d%*1[,.]%3d --> %d:%2d:%2d%*1[,.]%3d"
                       "%*[ ]X1:%u X2:%u Y1:%u Y2:%u",
                       &hs, &ms, &ss, ts_start, &he, &me, &se, ts_end,
                       x1, x2, y1, y2);
        buf += strcspn(buf, "\n") + 1;
        if (c >= 8) {
            *ts_start = 100*(ss + 60*(ms + 60*hs)) + *ts_start/10;
            *ts_end   = 100*(se + 60*(me + 60*he)) + *ts_end  /10;
            return buf;
        }
    }
    return NULL;
}

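/*
 * Decode one packet holding a single subtitle event.  For the legacy "srt"
 * decoder the timing line is embedded in the packet text and parsed with
 * read_ts(); for "subrip" the timing is taken from the packet's pts and
 * duration rescaled to centiseconds.  Each event is converted to an ASS
 * dialog line with srt_to_ass().  (Summary comment added here; not part of
 * the original source.)
 */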
static int srt_decode_frame(AVCodecContext *avctx,
                            void *data, int *got_sub_ptr, AVPacket *avpkt)
{
    AVSubtitle *sub = data;
    int ts_start, ts_end, x1 = -1, y1 = -1, x2 = -1, y2 = -1;
    char buffer[2048];
    const char *ptr = avpkt->data;
    const char *end = avpkt->data + avpkt->size;
    int size;
    const uint8_t *p = av_packet_get_side_data(avpkt, AV_PKT_DATA_SUBTITLE_POSITION, &size);

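    /* A 16-byte AV_PKT_DATA_SUBTITLE_POSITION side-data blob, when present,
     * carries the event rectangle as four little-endian 32-bit integers
     * x1, y1, x2, y2 (comment added for clarity). */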
    if (p && size == 16) {
        x1 = AV_RL32(p     );
        y1 = AV_RL32(p +  4);
        x2 = AV_RL32(p +  8);
        y2 = AV_RL32(p + 12);
    }

    if (avpkt->size <= 0)
        return avpkt->size;

    while (ptr < end && *ptr) {
        if (avctx->codec->id == AV_CODEC_ID_SRT) {
            ptr = read_ts(ptr, &ts_start, &ts_end, &x1, &y1, &x2, &y2);
            if (!ptr)
                break;
        } else {
            // Do final divide-by-10 outside rescale to force rounding down.
            ts_start = av_rescale_q(avpkt->pts,
                                    avctx->time_base,
                                    (AVRational){1,100});
            ts_end   = av_rescale_q(avpkt->pts + avpkt->duration,
                                    avctx->time_base,
                                    (AVRational){1,100});
        }
        ptr = srt_to_ass(avctx, buffer, buffer+sizeof(buffer), ptr,
                         x1, y1, x2, y2);
        ff_ass_add_rect(sub, buffer, ts_start, ts_end-ts_start, 0);
    }

    *got_sub_ptr = sub->num_rects > 0;
    return avpkt->size;
}

#if CONFIG_SRT_DECODER
/* deprecated decoder */
AVCodec ff_srt_decoder = {
    .name      = "srt",
    .long_name = NULL_IF_CONFIG_SMALL("SubRip subtitle with embedded timing"),
    .type      = AVMEDIA_TYPE_SUBTITLE,
    .id        = AV_CODEC_ID_SRT,
    .init      = ff_ass_subtitle_header_default,
    .decode    = srt_decode_frame,
};
#endif

#if CONFIG_SUBRIP_DECODER
AVCodec ff_subrip_decoder = {
    .name      = "subrip",
    .long_name = NULL_IF_CONFIG_SMALL("SubRip subtitle"),
    .type      = AVMEDIA_TYPE_SUBTITLE,
    .id        = AV_CODEC_ID_SUBRIP,
    .init      = ff_ass_subtitle_header_default,
    .decode    = srt_decode_frame,
};
#endif