fraps.c
/*
 * Fraps FPS1 decoder
 * Copyright (c) 2005 Roine Gustafsson
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Lossless Fraps 'FPS1' decoder
 * @author Roine Gustafsson (roine at users sf net)
 * @author Konstantin Shishkov
 *
 * Codec algorithm for version 0 is taken from Transcode <www.transcoding.org>
 *
 * Version 2 files support by Konstantin Shishkov
 */

#include "avcodec.h"
#include "get_bits.h"
#include "huffman.h"
#include "bytestream.h"
#include "dsputil.h"
#include "internal.h"
#include "thread.h"

#define FPS_TAG MKTAG('F', 'P', 'S', 'x')

/**
 * local variable storage
 */
typedef struct FrapsContext {
    AVCodecContext *avctx;
    uint8_t *tmpbuf;
    int tmpbuf_size;
    DSPContext dsp;
} FrapsContext;


/**
 * initializes decoder
 * @param avctx codec context
 * @return 0 on success or negative if fails
 */
static av_cold int decode_init(AVCodecContext *avctx)
{
    FrapsContext * const s = avctx->priv_data;

    s->avctx  = avctx;
    s->tmpbuf = NULL;

    ff_dsputil_init(&s->dsp, avctx);

    return 0;
}

/**
 * Comparator - our nodes should ascend by count
 * but with preserved symbol order
 */
static int huff_cmp(const void *va, const void *vb)
{
    const Node *a = va, *b = vb;
    return (a->count - b->count)*256 + a->sym - b->sym;
}
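
/*
 * Illustrative example: with counts {sym 0: 5, sym 1: 5, sym 2: 2} the sort
 * order becomes sym 2, sym 0, sym 1 - the rarer symbol first, ties on count
 * broken by symbol value. The *256 factor makes any count difference
 * outweigh the symbol difference, which can only span +/-255.
 */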

/**
 * decode Fraps v2 packed plane
 */
static int fraps2_decode_plane(FrapsContext *s, uint8_t *dst, int stride, int w,
                               int h, const uint8_t *src, int size, int Uoff,
                               const int step)
{
    int i, j, ret;
    GetBitContext gb;
    VLC vlc;
    Node nodes[512];

    for (i = 0; i < 256; i++)
        nodes[i].count = bytestream_get_le32(&src);
    size -= 1024;
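    /*
     * Each packed plane begins with a table of 256 little-endian 32-bit
     * symbol counts (1024 bytes), consumed above; the remaining 'size'
     * bytes are the Huffman-coded pixel data.
     */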
    if ((ret = ff_huff_build_tree(s->avctx, &vlc, 256, nodes, huff_cmp,
                                  FF_HUFFMAN_FLAG_ZERO_COUNT)) < 0)
        return ret;
    /* we have built Huffman table and are ready to decode plane */

    /* convert bits so they may be used by standard bitreader */
    s->dsp.bswap_buf((uint32_t *)s->tmpbuf, (const uint32_t *)src, size >> 2);
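    /*
     * Note: the byte swap above rearranges the coded data, which is packed
     * in 32-bit words (presumably little-endian in the file), into the byte
     * order expected by the MSB-first bitreader.
     */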

    init_get_bits(&gb, s->tmpbuf, size * 8);
    for (j = 0; j < h; j++) {
        for (i = 0; i < w*step; i += step) {
            dst[i] = get_vlc2(&gb, vlc.table, 9, 3);
            /* lines are stored as deltas between previous lines
             * and we need to add 0x80 to the first lines of chroma planes
             */
            if (j)
                dst[i] += dst[i - stride];
            else if (Uoff)
                dst[i] += 0x80;
            if (get_bits_left(&gb) < 0) {
                ff_free_vlc(&vlc);
                return AVERROR_INVALIDDATA;
            }
        }
        dst += stride;
    }
    ff_free_vlc(&vlc);
    return 0;
}

static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    FrapsContext * const s = avctx->priv_data;
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    ThreadFrame frame = { .f = data };
    AVFrame * const f = data;
    uint32_t header;
    unsigned int version, header_size;
    unsigned int x, y;
    const uint32_t *buf32;
    uint32_t *luma1, *luma2, *cb, *cr;
    uint32_t offs[4];
    int i, j, ret, is_chroma;
    const int planes = 3;
    uint8_t *out;
    header      = AV_RL32(buf);
    version     = header & 0xff;
    header_size = (header & (1<<30)) ? 8 : 4; /* bit 30 means pad to 8 bytes */
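    /*
     * Summary: the 32-bit header carries the codec version in its low 8
     * bits, bit 30 selects an 8-byte instead of 4-byte header, and bit 31
     * (checked below for versions < 2) flags a frame identical to the
     * previous one.
     */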

    if (version > 5) {
        av_log(avctx, AV_LOG_ERROR,
               "This file is encoded with Fraps version %d. "
               "This codec can only decode versions <= 5.\n", version);
        return AVERROR_PATCHWELCOME;
    }

    buf += header_size;

    if (version < 2) {
        unsigned needed_size = avctx->width * avctx->height * 3;
        if (version == 0) needed_size /= 2;
        needed_size += header_size;
        /* bit 31 means same as previous pic */
        if (header & (1U<<31)) {
            *got_frame = 0;
            return buf_size;
        }
        if (buf_size != needed_size) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid frame length %d (should be %d)\n",
                   buf_size, needed_size);
            return AVERROR_INVALIDDATA;
        }
    } else {
        /* skip frame */
        if (buf_size == 8) {
            *got_frame = 0;
            return buf_size;
        }
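        /*
         * For versions >= 2 the payload starts with an 'FPSx' tag followed
         * by three 32-bit plane offsets, taken relative to the end of the
         * main header; offs[3] is set to the end of the packet below so
         * that each plane's size is offs[i + 1] - offs[i].
         */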
        if (AV_RL32(buf) != FPS_TAG || buf_size < planes*1024 + 24) {
            av_log(avctx, AV_LOG_ERROR, "Fraps: error in data stream\n");
            return AVERROR_INVALIDDATA;
        }
        for (i = 0; i < planes; i++) {
            offs[i] = AV_RL32(buf + 4 + i * 4);
            if (offs[i] >= buf_size - header_size || (i && offs[i] <= offs[i - 1] + 1024)) {
                av_log(avctx, AV_LOG_ERROR, "Fraps: plane %i offset is out of bounds\n", i);
                return AVERROR_INVALIDDATA;
            }
        }
        offs[planes] = buf_size - header_size;
        for (i = 0; i < planes; i++) {
            av_fast_padded_malloc(&s->tmpbuf, &s->tmpbuf_size, offs[i + 1] - offs[i] - 1024);
            if (!s->tmpbuf)
                return AVERROR(ENOMEM);
        }
    }

    f->pict_type = AV_PICTURE_TYPE_I;
    f->key_frame = 1;

    avctx->pix_fmt = version & 1 ? AV_PIX_FMT_BGR24 : AV_PIX_FMT_YUVJ420P;

    if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
        return ret;

    switch (version) {
    case 0:
    default:
        /* Fraps v0 is a reordered YUV420 */
        if (((avctx->width % 8) != 0) || ((avctx->height % 2) != 0)) {
            av_log(avctx, AV_LOG_ERROR, "Invalid frame size %dx%d\n",
                   avctx->width, avctx->height);
            return AVERROR_INVALIDDATA;
        }

        buf32 = (const uint32_t*)buf;
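        /*
         * Each loop iteration below consumes six 32-bit words: two holding
         * eight luma samples of row 2*y, two holding eight luma samples of
         * row 2*y + 1, then one word of Cr and one of Cb (four samples
         * each) for the corresponding chroma row.
         */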
        for (y = 0; y < avctx->height / 2; y++) {
            luma1 = (uint32_t*)&f->data[0][  y * 2      * f->linesize[0] ];
            luma2 = (uint32_t*)&f->data[0][ (y * 2 + 1) * f->linesize[0] ];
            cr    = (uint32_t*)&f->data[1][  y          * f->linesize[1] ];
            cb    = (uint32_t*)&f->data[2][  y          * f->linesize[2] ];
            for (x = 0; x < avctx->width; x += 8) {
                *luma1++ = *buf32++;
                *luma1++ = *buf32++;
                *luma2++ = *buf32++;
                *luma2++ = *buf32++;
                *cr++    = *buf32++;
                *cb++    = *buf32++;
            }
        }
        break;

    case 1:
        /* Fraps v1 is an upside-down BGR24 */
        for (y = 0; y < avctx->height; y++)
            memcpy(&f->data[0][(avctx->height - y - 1) * f->linesize[0]],
                   &buf[y * avctx->width * 3],
                   3 * avctx->width);
        break;

    case 2:
    case 4:
        /**
         * Fraps v2 is Huffman-coded YUV420 planes
         * Fraps v4 is virtually the same
         */
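        /*
         * is_chroma halves the decoded width/height for the chroma planes
         * and is reused as the Uoff flag so that the first row of each
         * chroma plane gets the +0x80 bias in fraps2_decode_plane();
         * step is 1 because the samples are planar here.
         */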
        for (i = 0; i < planes; i++) {
            is_chroma = !!i;
            if ((ret = fraps2_decode_plane(s, f->data[i], f->linesize[i],
                                           avctx->width  >> is_chroma,
                                           avctx->height >> is_chroma,
                                           buf + offs[i], offs[i + 1] - offs[i],
                                           is_chroma, 1)) < 0) {
                av_log(avctx, AV_LOG_ERROR, "Error decoding plane %i\n", i);
                return ret;
            }
        }
        break;
    case 3:
    case 5:
        /* Virtually the same as version 4, but is for RGB24 */
        for (i = 0; i < planes; i++) {
            if ((ret = fraps2_decode_plane(s, f->data[0] + i + (f->linesize[0] * (avctx->height - 1)),
                                           -f->linesize[0], avctx->width, avctx->height,
                                           buf + offs[i], offs[i + 1] - offs[i], 0, 3)) < 0) {
                av_log(avctx, AV_LOG_ERROR, "Error decoding plane %i\n", i);
                return ret;
            }
        }
        out = f->data[0];
        // convert pseudo-YUV into real RGB
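        /*
         * In the packed BGR24 output out[0] is B, out[1] is G and out[2]
         * is R; the B and R planes were decoded as differences from the G
         * plane, so adding G back restores the true colours.
         */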
        for (j = 0; j < avctx->height; j++) {
            uint8_t *line_end = out + 3*avctx->width;
            while (out < line_end) {
                out[0] += out[1];
                out[2] += out[1];
                out += 3;
            }
            out += f->linesize[0] - 3*avctx->width;
        }
        break;
    }

    *got_frame = 1;

    return buf_size;
}


/**
 * closes decoder
 * @param avctx codec context
 * @return 0 on success or negative if fails
 */
static av_cold int decode_end(AVCodecContext *avctx)
{
    FrapsContext *s = (FrapsContext*)avctx->priv_data;

    av_freep(&s->tmpbuf);
    return 0;
}


AVCodec ff_fraps_decoder = {
    .name           = "fraps",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_FRAPS,
    .priv_data_size = sizeof(FrapsContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_FRAME_THREADS,
    .long_name      = NULL_IF_CONFIG_SMALL("Fraps"),
};