libavcodec/rawdec.c
/*
 * Raw Video Decoder
 * Copyright (c) 2001 Fabrice Bellard
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Raw Video Decoder
 */

#include "avcodec.h"
#include "raw.h"
#include "libavutil/avassert.h"
#include "libavutil/buffer.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"

typedef struct RawVideoContext {
    AVClass *av_class;
    AVBufferRef *palette;
    int frame_size;  /* size of the frame in bytes */
    int flip;
    int is_2_4_bpp; // 2 or 4 bpp raw in avi/mov
    int is_yuv2;
    int tff;
} RawVideoContext;

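/* The only user-visible option: force the field order of the output frames.
 * -1 (the default) leaves interlacing to the container flags, 0 marks frames
 * as bottom field first, 1 as top field first. */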
static const AVOption options[]={
{"top", "top field first", offsetof(RawVideoContext, tff), AV_OPT_TYPE_INT, {.i64 = -1}, -1, 1, AV_OPT_FLAG_DECODING_PARAM|AV_OPT_FLAG_VIDEO_PARAM},
{NULL}
};

static const AVClass class = {
    .class_name = "rawdec",
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
};

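/* Fallback tables mapping bits_per_coded_sample to a pixel format, used when
 * the stream carries no usable fourcc: one mapping for AVI-style content and
 * one for the QuickTime 'raw '/'NO16' tags. */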
static const PixelFormatTag pix_fmt_bps_avi[] = {
    { AV_PIX_FMT_MONOWHITE, 1 },
    { AV_PIX_FMT_PAL8,      2 },
    { AV_PIX_FMT_PAL8,      4 },
    { AV_PIX_FMT_PAL8,      8 },
    { AV_PIX_FMT_RGB444LE, 12 },
    { AV_PIX_FMT_RGB555LE, 15 },
    { AV_PIX_FMT_RGB555LE, 16 },
    { AV_PIX_FMT_BGR24,    24 },
    { AV_PIX_FMT_BGRA,     32 },
    { AV_PIX_FMT_NONE,      0 },
};

static const PixelFormatTag pix_fmt_bps_mov[] = {
    { AV_PIX_FMT_MONOWHITE, 1 },
    { AV_PIX_FMT_PAL8,      2 },
    { AV_PIX_FMT_PAL8,      4 },
    { AV_PIX_FMT_PAL8,      8 },
    // FIXME swscale does not support 16 bit in .mov, sample 16bit.mov
    // http://developer.apple.com/documentation/QuickTime/QTFF/QTFFChap3/qtff3.html
    { AV_PIX_FMT_RGB555BE, 16 },
    { AV_PIX_FMT_RGB24,    24 },
    { AV_PIX_FMT_ARGB,     32 },
    { AV_PIX_FMT_MONOWHITE, 33 },
    { AV_PIX_FMT_NONE,      0 },
};

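/* Linear lookup in a PixelFormatTag table; tables are terminated by an
 * AV_PIX_FMT_NONE entry. */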
enum AVPixelFormat avpriv_find_pix_fmt(const PixelFormatTag *tags,
                                       unsigned int fourcc)
{
    while (tags->pix_fmt >= 0) {
        if (tags->fourcc == fourcc)
            return tags->pix_fmt;
        tags++;
    }
    return AV_PIX_FMT_NONE;
}

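/* Old entry point kept for binary compatibility with libavcodec < 55; it
 * simply forwards to avpriv_find_pix_fmt(). */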
#if LIBAVCODEC_VERSION_MAJOR < 55
enum AVPixelFormat ff_find_pix_fmt(const PixelFormatTag *tags, unsigned int fourcc)
{
    return avpriv_find_pix_fmt(tags, fourcc);
}
#endif

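/* Choose the output pixel format, in order of preference:
 *   - 'raw ' / 'NO16' tags: look up bits_per_coded_sample in the QuickTime table
 *   - 'WRAW' tag:           look up bits_per_coded_sample in the AVI table
 *   - any other tag:        look up the fourcc itself in ff_raw_pix_fmt_tags
 *   - no tag:               fall back to bits_per_coded_sample via the AVI table */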
static av_cold int raw_init_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;
    const AVPixFmtDescriptor *desc;

    if (   avctx->codec_tag == MKTAG('r','a','w',' ')
        || avctx->codec_tag == MKTAG('N','O','1','6'))
        avctx->pix_fmt = avpriv_find_pix_fmt(pix_fmt_bps_mov,
                                             avctx->bits_per_coded_sample);
    else if (avctx->codec_tag == MKTAG('W', 'R', 'A', 'W'))
        avctx->pix_fmt = avpriv_find_pix_fmt(pix_fmt_bps_avi,
                                             avctx->bits_per_coded_sample);
    else if (avctx->codec_tag)
        avctx->pix_fmt = avpriv_find_pix_fmt(ff_raw_pix_fmt_tags, avctx->codec_tag);
    else if (avctx->pix_fmt == AV_PIX_FMT_NONE && avctx->bits_per_coded_sample)
        avctx->pix_fmt = avpriv_find_pix_fmt(pix_fmt_bps_avi,
                                             avctx->bits_per_coded_sample);

    desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    if (!desc) {
        av_log(avctx, AV_LOG_ERROR, "Invalid pixel format.\n");
        return AVERROR(EINVAL);
    }

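    /* Paletted and pseudo-paletted formats keep a persistent palette buffer:
     * pseudo-paletted formats (e.g. GRAY8) get the standard systematic
     * palette, real PAL8 starts out zeroed until palette side data arrives. */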
    if (desc->flags & (PIX_FMT_PAL | PIX_FMT_PSEUDOPAL)) {
        context->palette = av_buffer_alloc(AVPALETTE_SIZE);
        if (!context->palette)
            return AVERROR(ENOMEM);
        if (desc->flags & PIX_FMT_PSEUDOPAL)
            avpriv_set_systematic_pal2((uint32_t*)context->palette->data, avctx->pix_fmt);
        else
            memset(context->palette->data, 0, AVPALETTE_SIZE);
    }

    context->frame_size = avpicture_get_size(avctx->pix_fmt, avctx->width,
                                             avctx->height);
    if ((avctx->bits_per_coded_sample == 4 || avctx->bits_per_coded_sample == 2) &&
        avctx->pix_fmt == AV_PIX_FMT_PAL8 &&
       (!avctx->codec_tag || avctx->codec_tag == MKTAG('r','a','w',' ')))
        context->is_2_4_bpp = 1;

    if ((avctx->extradata_size >= 9 &&
         !memcmp(avctx->extradata + avctx->extradata_size - 9, "BottomUp", 9)) ||
        avctx->codec_tag == MKTAG('c','y','u','v') ||
        avctx->codec_tag == MKTAG(3, 0, 0, 0) ||
        avctx->codec_tag == MKTAG('W','R','A','W'))
        context->flip = 1;

    if (avctx->codec_tag == AV_RL32("yuv2") &&
        avctx->pix_fmt   == AV_PIX_FMT_YUYV422)
        context->is_yuv2 = 1;

    return 0;
}

static void flip(AVCodecContext *avctx, AVPicture *picture)
{
    picture->data[0]     += picture->linesize[0] * (avctx->height - 1);
    picture->linesize[0] *= -1;
}

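/* Decode one packet. Raw video is mostly wrapped rather than copied: the
 * frame takes a reference on the packet buffer and avpicture_fill() points
 * the plane pointers into it. A copy is only made when the packet is not
 * refcounted or when the data must be rewritten (2/4 bpp expansion, 'yuv2'
 * chroma conversion). */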
static int raw_decode(AVCodecContext *avctx, void *data, int *got_frame,
                      AVPacket *avpkt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    RawVideoContext *context       = avctx->priv_data;
    const uint8_t *buf             = avpkt->data;
    int buf_size                   = avpkt->size;
    int linesize_align             = 4;
    int res, len;
    int need_copy = !avpkt->buf || context->is_2_4_bpp || context->is_yuv2;

    AVFrame   *frame   = data;
    AVPicture *picture = data;

    frame->pict_type        = AV_PICTURE_TYPE_I;
    frame->key_frame        = 1;
    frame->reordered_opaque = avctx->reordered_opaque;
    frame->pkt_pts          = avctx->pkt->pts;
    av_frame_set_pkt_pos     (frame, avctx->pkt->pos);
    av_frame_set_pkt_duration(frame, avctx->pkt->duration);

    if (context->tff >= 0) {
        frame->interlaced_frame = 1;
        frame->top_field_first  = context->tff;
    }

    if ((res = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0)
        return res;

    if (need_copy)
        frame->buf[0] = av_buffer_alloc(context->frame_size);
    else
        frame->buf[0] = av_buffer_ref(avpkt->buf);
    if (!frame->buf[0])
        return AVERROR(ENOMEM);

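    /* Expand each packed byte into one palette index per pixel (two 4-bit or
     * four 2-bit indices per byte); the larger linesize alignment accounts
     * for the 2x/4x widening of the 4-byte-aligned source rows. */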
    //2bpp and 4bpp raw in avi and mov (yes this is ugly ...)
    if (context->is_2_4_bpp) {
        int i;
        uint8_t *dst = frame->buf[0]->data;
        buf_size = context->frame_size - AVPALETTE_SIZE;
        if (avctx->bits_per_coded_sample == 4) {
            for (i = 0; 2 * i + 1 < buf_size && i < avpkt->size; i++) {
                dst[2 * i + 0] = buf[i] >> 4;
                dst[2 * i + 1] = buf[i] & 15;
            }
            linesize_align = 8;
        } else {
            av_assert0(avctx->bits_per_coded_sample == 2);
            for (i = 0; 4 * i + 3 < buf_size && i < avpkt->size; i++) {
                dst[4 * i + 0] = buf[i] >> 6;
                dst[4 * i + 1] = buf[i] >> 4 & 3;
                dst[4 * i + 2] = buf[i] >> 2 & 3;
                dst[4 * i + 3] = buf[i]      & 3;
            }
            linesize_align = 16;
        }
        buf = dst;
    } else if (need_copy) {
        memcpy(frame->buf[0]->data, buf, FFMIN(buf_size, context->frame_size));
        buf = frame->buf[0]->data;
    }

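    /* For the 'AV1x'/'AVup' tags the picture data sits at the end of the
     * packet, after any leading padding. */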
    if (avctx->codec_tag == MKTAG('A', 'V', '1', 'x') ||
        avctx->codec_tag == MKTAG('A', 'V', 'u', 'p'))
        buf += buf_size - context->frame_size;

    len = context->frame_size - (avctx->pix_fmt == AV_PIX_FMT_PAL8 ? AVPALETTE_SIZE : 0);
    if (buf_size < len) {
        av_log(avctx, AV_LOG_ERROR, "Invalid buffer size, packet size %d < expected frame_size %d\n", buf_size, len);
        av_buffer_unref(&frame->buf[0]);
        return AVERROR(EINVAL);
    }

    if ((res = avpicture_fill(picture, buf, avctx->pix_fmt,
                              avctx->width, avctx->height)) < 0) {
        av_buffer_unref(&frame->buf[0]);
        return res;
    }

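    /* A palette change arrives as AV_PKT_DATA_PALETTE side data; copy it into
     * the persistent palette buffer and flag the change on the frame. */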
    if (avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE,
                                                     NULL);

        if (pal) {
            av_buffer_unref(&context->palette);
            context->palette = av_buffer_alloc(AVPALETTE_SIZE);
            if (!context->palette) {
                av_buffer_unref(&frame->buf[0]);
                return AVERROR(ENOMEM);
            }
            memcpy(context->palette->data, pal, AVPALETTE_SIZE);
            frame->palette_has_changed = 1;
        }
    }

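    /* Many writers pad each row to a multiple of 4 (or 8/16 for expanded
     * 2/4 bpp data). If the packet is big enough to hold padded rows, adopt
     * the padded linesize instead of the tightly packed one. */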
    if ((avctx->pix_fmt == AV_PIX_FMT_BGR24    ||
         avctx->pix_fmt == AV_PIX_FMT_GRAY8    ||
         avctx->pix_fmt == AV_PIX_FMT_RGB555LE ||
         avctx->pix_fmt == AV_PIX_FMT_RGB555BE ||
         avctx->pix_fmt == AV_PIX_FMT_RGB565LE ||
         avctx->pix_fmt == AV_PIX_FMT_MONOWHITE ||
         avctx->pix_fmt == AV_PIX_FMT_PAL8) &&
        FFALIGN(frame->linesize[0], linesize_align) * avctx->height <= buf_size)
        frame->linesize[0] = FFALIGN(frame->linesize[0], linesize_align);

    if (avctx->pix_fmt == AV_PIX_FMT_NV12 && avctx->codec_tag == MKTAG('N', 'V', '1', '2') &&
        FFALIGN(frame->linesize[0], linesize_align) * avctx->height +
        FFALIGN(frame->linesize[1], linesize_align) * ((avctx->height + 1) / 2) <= buf_size) {
        int la0 = FFALIGN(frame->linesize[0], linesize_align);
        frame->data[1] += (la0 - frame->linesize[0]) * avctx->height;
        frame->linesize[0] = la0;
        frame->linesize[1] = FFALIGN(frame->linesize[1], linesize_align);
    }

    if ((avctx->pix_fmt == AV_PIX_FMT_PAL8 && buf_size < context->frame_size) ||
        (desc->flags & PIX_FMT_PSEUDOPAL)) {
        frame->buf[1]  = av_buffer_ref(context->palette);
        if (!frame->buf[1]) {
            av_buffer_unref(&frame->buf[0]);
            return AVERROR(ENOMEM);
        }
        frame->data[1] = frame->buf[1]->data;
    }

    if (avctx->pix_fmt == AV_PIX_FMT_BGR24 &&
        ((frame->linesize[0] + 3) & ~3) * avctx->height <= buf_size)
        frame->linesize[0] = (frame->linesize[0] + 3) & ~3;

    if (context->flip)
        flip(avctx, picture);

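    /* These fourccs are planar YUV with the V plane stored before the U
     * plane, so swap the chroma plane pointers. */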
    if (avctx->codec_tag == MKTAG('Y', 'V', '1', '2') ||
        avctx->codec_tag == MKTAG('Y', 'V', '1', '6') ||
        avctx->codec_tag == MKTAG('Y', 'V', '2', '4') ||
        avctx->codec_tag == MKTAG('Y', 'V', 'U', '9'))
        FFSWAP(uint8_t *, picture->data[1], picture->data[2]);

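    /* Odd-sized I420: if the packet was sized with width and height rounded
     * up to even values, the chroma planes start later in the buffer than
     * avpicture_fill() assumed, so shift the pointers. */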
    if (avctx->codec_tag == AV_RL32("I420") && (avctx->width+1)*(avctx->height+1) * 3/2 == buf_size) {
        picture->data[1] = picture->data[1] + (avctx->width+1)*(avctx->height+1) - avctx->width*avctx->height;
        picture->data[2] = picture->data[2] + ((avctx->width+1)*(avctx->height+1) - avctx->width*avctx->height)*5/4;
    }

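    /* QuickTime 'yuv2' stores chroma as signed values; flipping the top bit
     * of every chroma byte converts it to the unsigned range expected by
     * AV_PIX_FMT_YUYV422. */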
    if (avctx->codec_tag == AV_RL32("yuv2") &&
        avctx->pix_fmt   == AV_PIX_FMT_YUYV422) {
        int x, y;
        uint8_t *line = picture->data[0];
        for (y = 0; y < avctx->height; y++) {
            for (x = 0; x < avctx->width; x++)
                line[2 * x + 1] ^= 0x80;
            line += picture->linesize[0];
        }
    }
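    /* 'YVYU' has the chroma bytes swapped relative to YUYV, so swap U and V
     * within each two-pixel group. */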
    if (avctx->codec_tag == AV_RL32("YVYU") &&
        avctx->pix_fmt   == AV_PIX_FMT_YUYV422) {
        int x, y;
        uint8_t *line = picture->data[0];
        for (y = 0; y < avctx->height; y++) {
            for (x = 0; x < avctx->width - 1; x += 2)
                FFSWAP(uint8_t, line[2*x + 1], line[2*x + 3]);
            line += picture->linesize[0];
        }
    }

    if (avctx->field_order > AV_FIELD_PROGRESSIVE) { /* we have interlaced material flagged in container */
        frame->interlaced_frame = 1;
        if (avctx->field_order == AV_FIELD_TT || avctx->field_order == AV_FIELD_TB)
            frame->top_field_first = 1;
    }

    *got_frame = 1;
    return buf_size;
}

static av_cold int raw_close_decoder(AVCodecContext *avctx)
{
    RawVideoContext *context = avctx->priv_data;

    av_buffer_unref(&context->palette);
    return 0;
}

AVCodec ff_rawvideo_decoder = {
    .name           = "rawvideo",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_RAWVIDEO,
    .priv_data_size = sizeof(RawVideoContext),
    .init           = raw_init_decoder,
    .close          = raw_close_decoder,
    .decode         = raw_decode,
    .long_name      = NULL_IF_CONFIG_SMALL("raw video"),
    .priv_class     = &class,
};