libavcodec/dxa.c
Go to the documentation of this file.
/*
 * Feeble Files/ScummVM DXA decoder
 * Copyright (c) 2007 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DXA Video decoder
 */
26 
#include <stdio.h>
#include <stdlib.h>

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "bytestream.h"
#include "avcodec.h"
#include "internal.h"

#include <zlib.h>
37 
38 /*
39  * Decoder context
40  */
41 typedef struct DxaDecContext {
43 
44  int dsize;
46  uint32_t pal[256];
48 
49 static const int shift1[6] = { 0, 8, 8, 8, 4, 4 };
50 static const int shift2[6] = { 0, 0, 8, 4, 0, 4 };
51 
53  int stride, uint8_t *src, uint8_t *ref)
54 {
55  uint8_t *code, *data, *mv, *msk, *tmp, *tmp2;
56  int i, j, k;
57  int type, x, y, d, d2;
58  uint32_t mask;
59 
60  code = src + 12;
61  data = code + ((avctx->width * avctx->height) >> 4);
62  mv = data + AV_RB32(src + 0);
63  msk = mv + AV_RB32(src + 4);
64 
65  for(j = 0; j < avctx->height; j += 4){
66  for(i = 0; i < avctx->width; i += 4){
67  tmp = dst + i;
68  tmp2 = ref + i;
69  type = *code++;
70  switch(type){
71  case 4: // motion compensation
72  x = (*mv) >> 4; if(x & 8) x = 8 - x;
73  y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
74  tmp2 += x + y*stride;
75  case 0: // skip
76  case 5: // skip in method 12
77  for(y = 0; y < 4; y++){
78  memcpy(tmp, tmp2, 4);
79  tmp += stride;
80  tmp2 += stride;
81  }
82  break;
83  case 1: // masked change
84  case 10: // masked change with only half of pixels changed
85  case 11: // cases 10-15 are for method 12 only
86  case 12:
87  case 13:
88  case 14:
89  case 15:
90  if(type == 1){
91  mask = AV_RB16(msk);
92  msk += 2;
93  }else{
94  type -= 10;
95  mask = ((msk[0] & 0xF0) << shift1[type]) | ((msk[0] & 0xF) << shift2[type]);
96  msk++;
97  }
98  for(y = 0; y < 4; y++){
99  for(x = 0; x < 4; x++){
100  tmp[x] = (mask & 0x8000) ? *data++ : tmp2[x];
101  mask <<= 1;
102  }
103  tmp += stride;
104  tmp2 += stride;
105  }
106  break;
107  case 2: // fill block
108  for(y = 0; y < 4; y++){
109  memset(tmp, data[0], 4);
110  tmp += stride;
111  }
112  data++;
113  break;
114  case 3: // raw block
115  for(y = 0; y < 4; y++){
116  memcpy(tmp, data, 4);
117  data += 4;
118  tmp += stride;
119  }
120  break;
121  case 8: // subblocks - method 13 only
122  mask = *msk++;
123  for(k = 0; k < 4; k++){
124  d = ((k & 1) << 1) + ((k & 2) * stride);
125  d2 = ((k & 1) << 1) + ((k & 2) * stride);
126  tmp2 = ref + i + d2;
127  switch(mask & 0xC0){
128  case 0x80: // motion compensation
129  x = (*mv) >> 4; if(x & 8) x = 8 - x;
130  y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
131  tmp2 += x + y*stride;
132  case 0x00: // skip
133  tmp[d + 0 ] = tmp2[0];
134  tmp[d + 1 ] = tmp2[1];
135  tmp[d + 0 + stride] = tmp2[0 + stride];
136  tmp[d + 1 + stride] = tmp2[1 + stride];
137  break;
138  case 0x40: // fill
139  tmp[d + 0 ] = data[0];
140  tmp[d + 1 ] = data[0];
141  tmp[d + 0 + stride] = data[0];
142  tmp[d + 1 + stride] = data[0];
143  data++;
144  break;
145  case 0xC0: // raw
146  tmp[d + 0 ] = *data++;
147  tmp[d + 1 ] = *data++;
148  tmp[d + 0 + stride] = *data++;
149  tmp[d + 1 + stride] = *data++;
150  break;
151  }
152  mask <<= 2;
153  }
154  break;
155  case 32: // vector quantization - 2 colors
156  mask = AV_RB16(msk);
157  msk += 2;
158  for(y = 0; y < 4; y++){
159  for(x = 0; x < 4; x++){
160  tmp[x] = data[mask & 1];
161  mask >>= 1;
162  }
163  tmp += stride;
164  tmp2 += stride;
165  }
166  data += 2;
167  break;
168  case 33: // vector quantization - 3 or 4 colors
169  case 34:
170  mask = AV_RB32(msk);
171  msk += 4;
172  for(y = 0; y < 4; y++){
173  for(x = 0; x < 4; x++){
174  tmp[x] = data[mask & 3];
175  mask >>= 2;
176  }
177  tmp += stride;
178  tmp2 += stride;
179  }
180  data += type - 30;
181  break;
182  default:
183  av_log(avctx, AV_LOG_ERROR, "Unknown opcode %d\n", type);
184  return AVERROR_INVALIDDATA;
185  }
186  }
187  dst += stride * 4;
188  ref += stride * 4;
189  }
190  return 0;
191 }
192 
193 static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
194 {
195  AVFrame *frame = data;
196  DxaDecContext * const c = avctx->priv_data;
197  uint8_t *outptr, *srcptr, *tmpptr;
198  unsigned long dsize;
199  int i, j, compr, ret;
200  int stride;
201  int pc = 0;
202  GetByteContext gb;
203 
204  bytestream2_init(&gb, avpkt->data, avpkt->size);
205 
206  /* make the palette available on the way out */
207  if (bytestream2_peek_le32(&gb) == MKTAG('C','M','A','P')) {
208  bytestream2_skip(&gb, 4);
209  for(i = 0; i < 256; i++){
210  c->pal[i] = 0xFFU << 24 | bytestream2_get_be24(&gb);
211  }
212  pc = 1;
213  }
214 
215  if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
216  return ret;
217  memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
218  frame->palette_has_changed = pc;
219 
220  outptr = frame->data[0];
221  srcptr = c->decomp_buf;
222  tmpptr = c->prev.data[0];
223  stride = frame->linesize[0];
224 
225  if (bytestream2_get_le32(&gb) == MKTAG('N','U','L','L'))
226  compr = -1;
227  else
228  compr = bytestream2_get_byte(&gb);
229 
230  dsize = c->dsize;
231  if (compr != 4 && compr != -1) {
232  bytestream2_skip(&gb, 4);
233  if (uncompress(c->decomp_buf, &dsize, avpkt->data + bytestream2_tell(&gb),
234  bytestream2_get_bytes_left(&gb)) != Z_OK) {
235  av_log(avctx, AV_LOG_ERROR, "Uncompress failed!\n");
236  return AVERROR_UNKNOWN;
237  }
238  }
239  switch(compr){
240  case -1:
241  frame->key_frame = 0;
242  frame->pict_type = AV_PICTURE_TYPE_P;
243  if(c->prev.data[0])
244  memcpy(frame->data[0], c->prev.data[0], frame->linesize[0] * avctx->height);
245  else{ // Should happen only when first frame is 'NULL'
246  memset(frame->data[0], 0, frame->linesize[0] * avctx->height);
247  frame->key_frame = 1;
248  frame->pict_type = AV_PICTURE_TYPE_I;
249  }
250  break;
251  case 2:
252  case 3:
253  case 4:
254  case 5:
255  frame->key_frame = !(compr & 1);
256  frame->pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
257  for(j = 0; j < avctx->height; j++){
258  if((compr & 1) && tmpptr){
259  for(i = 0; i < avctx->width; i++)
260  outptr[i] = srcptr[i] ^ tmpptr[i];
261  tmpptr += stride;
262  }else
263  memcpy(outptr, srcptr, avctx->width);
264  outptr += stride;
265  srcptr += avctx->width;
266  }
267  break;
268  case 12: // ScummVM coding
269  case 13:
270  frame->key_frame = 0;
271  frame->pict_type = AV_PICTURE_TYPE_P;
272  if (!c->prev.data[0]) {
273  av_log(avctx, AV_LOG_ERROR, "Missing reference frame\n");
274  return AVERROR_INVALIDDATA;
275  }
276  decode_13(avctx, c, frame->data[0], frame->linesize[0], srcptr, c->prev.data[0]);
277  break;
278  default:
279  av_log(avctx, AV_LOG_ERROR, "Unknown/unsupported compression type %d\n", compr);
280  return AVERROR_INVALIDDATA;
281  }
282 
283  av_frame_unref(&c->prev);
284  if ((ret = av_frame_ref(&c->prev, frame)) < 0)
285  return ret;
286 
287  *got_frame = 1;
288 
289  /* always report that the buffer was completely consumed */
290  return avpkt->size;
291 }
292 
294 {
295  DxaDecContext * const c = avctx->priv_data;
296 
297  avctx->pix_fmt = AV_PIX_FMT_PAL8;
298 
300 
301  c->dsize = avctx->width * avctx->height * 2;
302  c->decomp_buf = av_malloc(c->dsize);
303  if (!c->decomp_buf) {
304  av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
305  return AVERROR(ENOMEM);
306  }
307 
308  return 0;
309 }
310 
312 {
313  DxaDecContext * const c = avctx->priv_data;
314 
315  av_freep(&c->decomp_buf);
316  av_frame_unref(&c->prev);
317 
318  return 0;
319 }
320 
322  .name = "dxa",
323  .type = AVMEDIA_TYPE_VIDEO,
324  .id = AV_CODEC_ID_DXA,
325  .priv_data_size = sizeof(DxaDecContext),
326  .init = decode_init,
327  .close = decode_end,
328  .decode = decode_frame,
329  .capabilities = CODEC_CAP_DR1,
330  .long_name = NULL_IF_CONFIG_SMALL("Feeble Files/ScummVM DXA"),
331 };
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
This structure describes decoded (raw) audio or video data.
Definition: frame.h:76
FIXME Range Coding of cr are ref
Definition: snow.txt:367
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:130
int stride
Definition: mace.c:144
static av_cold int decode_end(AVCodecContext *avctx)
uint8_t * decomp_buf
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
set threshold d
uint8_t
#define av_cold
Definition: attributes.h:78
8 bit with PIX_FMT_RGB32 palette
Definition: pixfmt.h:79
#define AVPALETTE_SIZE
Definition: pixfmt.h:33
#define AV_RB32
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
uint8_t * data
frame
Definition: stft.m:14
Discrete Time axis x
#define U(x)
static const int shift1[6]
static const uint16_t mask[17]
Definition: lzw.c:37
#define AV_RB16
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:159
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Spectrum Plot time data
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:149
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:246
init variable d2
const char * name
Name of the codec implementation.
external API header
static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t *dst, int stride, uint8_t *src, uint8_t *ref)
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:144
ret
Definition: avfilter.c:821
int width
picture width / height.
static const int8_t mv[256][2]
static av_always_inline int bytestream2_tell(GetByteContext *g)
Definition: bytestream.h:183
for k
or the Software in violation of any applicable export control laws in any jurisdiction Except as provided by mandatorily applicable UPF has no obligation to provide you with source code to the Software In the event Software contains any source code
AVS_Value src
Definition: avisynth_c.h:523
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:101
main external API structure.
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:375
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:148
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:73
void avcodec_get_frame_defaults(AVFrame *frame)
Set the fields of the given AVFrame to default values.
synthesis window for stochastic i
AVCodec ff_dxa_decoder
int palette_has_changed
Tell user application that palette has changed from previous frame.
Definition: frame.h:280
uint32_t pal[256]
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:330
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Buffer references ownership and permissions
#define type
int av_frame_ref(AVFrame *dst, AVFrame *src)
Setup a new reference to the data described by an given frame.
Definition: frame.c:228
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
common internal api header.
common internal and external API header
static av_cold int decode_init(AVCodecContext *avctx)
static const int shift2[6]
static double c[64]
function y
Definition: D.m:1
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
struct DxaDecContext DxaDecContext
else dst[i][x+y *dst_stride[i]]
Definition: vf_mcdeint.c:160
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:139
static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: crystalhd.c:868
#define MKTAG(a, b, c, d)
Definition: common.h:282
This structure stores compressed data.
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Predicted.
Definition: avutil.h:217