libavcodec/iff.c
/*
 * IFF ACBM/DEEP/ILBM/PBM bitmap decoder
 * Copyright (c) 2010 Peter Ross <pross@xvid.org>
 * Copyright (c) 2010 Sebastian Vater <cdgs.basty@googlemail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * IFF ACBM/DEEP/ILBM/PBM bitmap decoder
 */

#include "libavutil/imgutils.h"
#include "bytestream.h"
#include "avcodec.h"
#include "get_bits.h"
#include "internal.h"

// TODO: masking bits
typedef enum {
    MASK_NONE,
    MASK_HAS_MASK,
    MASK_HAS_TRANSPARENT_COLOR,
    MASK_LASSO                  // not used by this decoder
} mask_type;

typedef struct {
    AVFrame  *frame;
    int       planesize;
    uint8_t  *planebuf;
    uint8_t  *ham_buf;      ///< temporary buffer for planar to chunky conversion
    uint32_t *ham_palbuf;   ///< HAM decode table
    uint32_t *mask_buf;     ///< temporary buffer for palette indices
    uint32_t *mask_palbuf;  ///< masking palette table
    unsigned  compression;  ///< delta compression method used
    unsigned  bpp;          ///< bits per plane to decode (differs from bits_per_coded_sample if HAM)
    unsigned  ham;          ///< 0 if non-HAM or number of hold bits (6 for bpp > 6, 4 otherwise)
    unsigned  flags;        ///< 1 for EHB (Extra HalfBrite), 0 otherwise
    unsigned  transparency; ///< TODO: transparency color index in palette
    unsigned  masking;      ///< TODO: masking method used
    int       init;         // 1 if buffer and palette data already initialized, 0 otherwise
    int16_t   tvdc[16];     ///< TVDC lookup table
} IffContext;

#define LUT8_PART(plane, v)                             \
    AV_LE2NE64C(UINT64_C(0x0000000)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x1000000)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x0010000)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x1010000)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x0000100)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x1000100)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x0010100)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x1010100)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x0000001)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x1000001)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x0010001)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x1010001)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x0000101)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x1000101)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x0010101)<<32 | v) << plane,  \
    AV_LE2NE64C(UINT64_C(0x1010101)<<32 | v) << plane

#define LUT8(plane) {               \
    LUT8_PART(plane, 0x0000000),    \
    LUT8_PART(plane, 0x1000000),    \
    LUT8_PART(plane, 0x0010000),    \
    LUT8_PART(plane, 0x1010000),    \
    LUT8_PART(plane, 0x0000100),    \
    LUT8_PART(plane, 0x1000100),    \
    LUT8_PART(plane, 0x0010100),    \
    LUT8_PART(plane, 0x1010100),    \
    LUT8_PART(plane, 0x0000001),    \
    LUT8_PART(plane, 0x1000001),    \
    LUT8_PART(plane, 0x0010001),    \
    LUT8_PART(plane, 0x1010001),    \
    LUT8_PART(plane, 0x0000101),    \
    LUT8_PART(plane, 0x1000101),    \
    LUT8_PART(plane, 0x0010101),    \
    LUT8_PART(plane, 0x1010101),    \
}

// 8 planes * 8-bit mask
static const uint64_t plane8_lut[8][256] = {
    LUT8(0), LUT8(1), LUT8(2), LUT8(3),
    LUT8(4), LUT8(5), LUT8(6), LUT8(7),
};
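
/*
 * Note on the plane LUTs: planar IFF data stores one bit per pixel per
 * bitplane, so a single source byte carries bit "plane" of eight consecutive
 * pixels. plane8_lut[plane][byte] expands such a byte into eight chunky bytes
 * at once, with each set bit already shifted to bit position "plane";
 * decodeplane8() below only has to OR the looked-up 64-bit word into the
 * destination row. plane32_lut below does the same for 32-bit chunky output,
 * in groups of four pixels per nibble.
 */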

#define LUT32(plane) {                          \
    0,          0,          0,          0,      \
    0,          0,          0, 1 << plane,      \
    0,          0, 1 << plane,          0,      \
    0,          0, 1 << plane, 1 << plane,      \
    0, 1 << plane,          0,          0,      \
    0, 1 << plane,          0, 1 << plane,      \
    0, 1 << plane, 1 << plane,          0,      \
    0, 1 << plane, 1 << plane, 1 << plane,      \
    1 << plane,          0,          0,          0,      \
    1 << plane,          0,          0, 1 << plane,      \
    1 << plane,          0, 1 << plane,          0,      \
    1 << plane,          0, 1 << plane, 1 << plane,      \
    1 << plane, 1 << plane,          0,          0,      \
    1 << plane, 1 << plane,          0, 1 << plane,      \
    1 << plane, 1 << plane, 1 << plane,          0,      \
    1 << plane, 1 << plane, 1 << plane, 1 << plane,      \
}

// 32 planes * 4-bit mask * 4 lookup tables each
static const uint32_t plane32_lut[32][16*4] = {
    LUT32( 0), LUT32( 1), LUT32( 2), LUT32( 3),
    LUT32( 4), LUT32( 5), LUT32( 6), LUT32( 7),
    LUT32( 8), LUT32( 9), LUT32(10), LUT32(11),
    LUT32(12), LUT32(13), LUT32(14), LUT32(15),
    LUT32(16), LUT32(17), LUT32(18), LUT32(19),
    LUT32(20), LUT32(21), LUT32(22), LUT32(23),
    LUT32(24), LUT32(25), LUT32(26), LUT32(27),
    LUT32(28), LUT32(29), LUT32(30), LUT32(31),
};

// Gray to RGB, required for palette table of grayscale images with bpp < 8
static av_always_inline uint32_t gray2rgb(const uint32_t x) {
    return x << 16 | x << 8 | x;
}

/**
 * Convert CMAP buffer (stored in extradata) to lavc palette format
 */
static int cmap_read_palette(AVCodecContext *avctx, uint32_t *pal)
{
    IffContext *s = avctx->priv_data;
    int count, i;
    const uint8_t *const palette = avctx->extradata + AV_RB16(avctx->extradata);
    int palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);

    if (avctx->bits_per_coded_sample > 8) {
        av_log(avctx, AV_LOG_ERROR, "bits_per_coded_sample > 8 not supported\n");
        return AVERROR_INVALIDDATA;
    }

    count = 1 << avctx->bits_per_coded_sample;
    // If extradata is smaller than actually needed, fill the remaining with black.
    count = FFMIN(palette_size / 3, count);
    if (count) {
        for (i = 0; i < count; i++) {
            pal[i] = 0xFF000000 | AV_RB24(palette + i*3);
        }
        if (s->flags && count >= 32) { // EHB
            for (i = 0; i < 32; i++)
                pal[i + 32] = 0xFF000000 | (AV_RB24(palette + i*3) & 0xFEFEFE) >> 1;
            count = FFMAX(count, 64);
        }
    } else { // Create gray-scale color palette for bps < 8
        count = 1 << avctx->bits_per_coded_sample;

        for (i = 0; i < count; i++) {
            pal[i] = 0xFF000000 | gray2rgb((i * 255) >> avctx->bits_per_coded_sample);
        }
    }
    if (s->masking == MASK_HAS_MASK) {
        memcpy(pal + (1 << avctx->bits_per_coded_sample), pal, count * 4);
        for (i = 0; i < count; i++)
            pal[i] &= 0xFFFFFF;
    } else if (s->masking == MASK_HAS_TRANSPARENT_COLOR &&
               s->transparency < 1 << avctx->bits_per_coded_sample)
        pal[s->transparency] &= 0xFFFFFF;
    return 0;
}
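
/*
 * Note: when s->flags is set (EHB / Extra HalfBrite), entries 32-63 above are
 * derived from entries 0-31 at half brightness. When a mask plane is present,
 * the whole palette is duplicated and the alpha of the first copy is cleared,
 * so indices without the mask bit select a fully transparent variant of each
 * color.
 */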

/**
 * Extracts the IFF extra context and updates internal
 * decoder structures.
 *
 * @param avctx the AVCodecContext to store the extracted extra context in
 * @param avpkt the AVPacket to extract the extra context from, or NULL to use avctx
 * @return 0 in case of success, a negative error code otherwise
 */
static int extract_header(AVCodecContext *const avctx,
                          const AVPacket *const avpkt) {
    const uint8_t *buf;
    unsigned buf_size;
    IffContext *s = avctx->priv_data;
    int i, palette_size;

    if (avctx->extradata_size < 2) {
        av_log(avctx, AV_LOG_ERROR, "not enough extradata\n");
        return AVERROR_INVALIDDATA;
    }
    palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);

    if (avpkt) {
        int image_size;
        if (avpkt->size < 2)
            return AVERROR_INVALIDDATA;
        image_size = avpkt->size - AV_RB16(avpkt->data);
        buf = avpkt->data;
        buf_size = bytestream_get_be16(&buf);
        if (buf_size <= 1 || image_size <= 1) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid image size received: %u -> image data offset: %d\n",
                   buf_size, image_size);
            return AVERROR_INVALIDDATA;
        }
    } else {
        buf = avctx->extradata;
        buf_size = bytestream_get_be16(&buf);
        if (buf_size <= 1 || palette_size < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid palette size received: %u -> palette data offset: %d\n",
                   buf_size, palette_size);
            return AVERROR_INVALIDDATA;
        }
    }

    if (buf_size >= 41) {
        s->compression  = bytestream_get_byte(&buf);
        s->bpp          = bytestream_get_byte(&buf);
        s->ham          = bytestream_get_byte(&buf);
        s->flags        = bytestream_get_byte(&buf);
        s->transparency = bytestream_get_be16(&buf);
        s->masking      = bytestream_get_byte(&buf);
        for (i = 0; i < 16; i++)
            s->tvdc[i] = bytestream_get_be16(&buf);

        if (s->masking == MASK_HAS_MASK) {
            if (s->bpp >= 8 && !s->ham) {
                avctx->pix_fmt = AV_PIX_FMT_RGB32;
                av_freep(&s->mask_buf);
                av_freep(&s->mask_palbuf);
                s->mask_buf = av_malloc((s->planesize * 32) + FF_INPUT_BUFFER_PADDING_SIZE);
                if (!s->mask_buf)
                    return AVERROR(ENOMEM);
                if (s->bpp > 16) {
                    av_log(avctx, AV_LOG_ERROR, "bpp %d too large for palette\n", s->bpp);
                    av_freep(&s->mask_buf);
                    return AVERROR(ENOMEM);
                }
                s->mask_palbuf = av_malloc((2 << s->bpp) * sizeof(uint32_t) + FF_INPUT_BUFFER_PADDING_SIZE);
                if (!s->mask_palbuf) {
                    av_freep(&s->mask_buf);
                    return AVERROR(ENOMEM);
                }
            }
            s->bpp++;
        } else if (s->masking != MASK_NONE && s->masking != MASK_HAS_TRANSPARENT_COLOR) {
            av_log(avctx, AV_LOG_ERROR, "Masking not supported\n");
            return AVERROR_PATCHWELCOME;
        }
        if (!s->bpp || s->bpp > 32) {
            av_log(avctx, AV_LOG_ERROR, "Invalid number of bitplanes: %u\n", s->bpp);
            return AVERROR_INVALIDDATA;
        } else if (s->ham >= 8) {
            av_log(avctx, AV_LOG_ERROR, "Invalid number of hold bits for HAM: %u\n", s->ham);
            return AVERROR_INVALIDDATA;
        }

        av_freep(&s->ham_buf);
        av_freep(&s->ham_palbuf);

        if (s->ham) {
            int i, count = FFMIN(palette_size / 3, 1 << s->ham);
            int ham_count;
            const uint8_t *const palette = avctx->extradata + AV_RB16(avctx->extradata);

            s->ham_buf = av_malloc((s->planesize * 8) + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!s->ham_buf)
                return AVERROR(ENOMEM);

            ham_count = 8 * (1 << s->ham);
            s->ham_palbuf = av_malloc((ham_count << !!(s->masking == MASK_HAS_MASK)) * sizeof(uint32_t) + FF_INPUT_BUFFER_PADDING_SIZE);
            if (!s->ham_palbuf) {
                av_freep(&s->ham_buf);
                return AVERROR(ENOMEM);
            }

            if (count) { // HAM with color palette attached
                // prefill with black and palette and set HAM take direct value mask to zero
                memset(s->ham_palbuf, 0, (1 << s->ham) * 2 * sizeof(uint32_t));
                for (i = 0; i < count; i++) {
                    s->ham_palbuf[i*2+1] = 0xFF000000 | AV_RL24(palette + i*3);
                }
                count = 1 << s->ham;
            } else { // HAM with grayscale color palette
                count = 1 << s->ham;
                for (i = 0; i < count; i++) {
                    s->ham_palbuf[i*2]   = 0xFF000000; // take direct color value from palette
                    s->ham_palbuf[i*2+1] = 0xFF000000 | av_le2ne32(gray2rgb((i * 255) >> s->ham));
                }
            }
            for (i = 0; i < count; i++) {
                uint32_t tmp = i << (8 - s->ham);
                tmp |= tmp >> s->ham;
                s->ham_palbuf[(i+count)*2]     = 0xFF00FFFF; // just modify blue color component
                s->ham_palbuf[(i+count*2)*2]   = 0xFFFFFF00; // just modify red color component
                s->ham_palbuf[(i+count*3)*2]   = 0xFFFF00FF; // just modify green color component
                s->ham_palbuf[(i+count)*2+1]   = 0xFF000000 | tmp << 16;
                s->ham_palbuf[(i+count*2)*2+1] = 0xFF000000 | tmp;
                s->ham_palbuf[(i+count*3)*2+1] = 0xFF000000 | tmp << 8;
            }
            if (s->masking == MASK_HAS_MASK) {
                for (i = 0; i < ham_count; i++)
                    s->ham_palbuf[(1 << s->bpp) + i] = s->ham_palbuf[i] | 0xFF000000;
            }
        }
    }

    return 0;
}
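
/*
 * The private header parsed above sits at the start of the extradata and of
 * each packet: a big-endian 16-bit offset to the palette/image data, then one
 * byte each for compression, bpp, ham and flags, a big-endian 16-bit
 * transparent color index, a masking byte and sixteen big-endian 16-bit TVDC
 * table entries, which is why a complete header is at least 41 bytes.
 */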

static av_cold int decode_init(AVCodecContext *avctx)
{
    IffContext *s = avctx->priv_data;
    int err;

    if (avctx->bits_per_coded_sample <= 8) {
        int palette_size;

        if (avctx->extradata_size >= 2)
            palette_size = avctx->extradata_size - AV_RB16(avctx->extradata);
        else
            palette_size = 0;
        avctx->pix_fmt = (avctx->bits_per_coded_sample < 8) ||
                         (avctx->extradata_size >= 2 && palette_size) ? AV_PIX_FMT_PAL8 : AV_PIX_FMT_GRAY8;
    } else if (avctx->bits_per_coded_sample <= 32) {
        if (avctx->codec_tag == MKTAG('R','G','B','8')) {
            avctx->pix_fmt = AV_PIX_FMT_RGB32;
        } else if (avctx->codec_tag == MKTAG('R','G','B','N')) {
            avctx->pix_fmt = AV_PIX_FMT_RGB444;
        } else if (avctx->codec_tag != MKTAG('D','E','E','P')) {
            if (avctx->bits_per_coded_sample == 24) {
                avctx->pix_fmt = AV_PIX_FMT_0BGR32;
            } else if (avctx->bits_per_coded_sample == 32) {
                avctx->pix_fmt = AV_PIX_FMT_BGR32;
            } else {
                avpriv_request_sample(avctx, "unknown bits_per_coded_sample");
                return AVERROR_PATCHWELCOME;
            }
        }
    } else {
        return AVERROR_INVALIDDATA;
    }

    if ((err = av_image_check_size(avctx->width, avctx->height, 0, avctx)))
        return err;
    s->planesize = FFALIGN(avctx->width, 16) >> 3; // Align plane size in bits to word-boundary
    s->planebuf  = av_malloc(s->planesize + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!s->planebuf)
        return AVERROR(ENOMEM);

    s->bpp = avctx->bits_per_coded_sample;
    s->frame = av_frame_alloc();
    if (!s->frame)
        return AVERROR(ENOMEM);

    if ((err = extract_header(avctx, NULL)) < 0)
        return err;

    return 0;
}
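
/*
 * Note: IFF bitplane rows are padded to a multiple of 16 bits in the file,
 * which is why planesize above is FFALIGN(width, 16) >> 3 bytes per bitplane
 * row; planebuf holds one decompressed plane row plus the usual input padding.
 */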

/**
 * Decode interleaved plane buffer up to 8bpp
 * @param dst Destination buffer
 * @param buf Source buffer
 * @param buf_size size of source buffer in bytes
 * @param plane plane number to decode as
 */
static void decodeplane8(uint8_t *dst, const uint8_t *buf, int buf_size, int plane)
{
    const uint64_t *lut;
    if (plane >= 8) {
        av_log(NULL, AV_LOG_WARNING, "Ignoring extra planes beyond 8\n");
        return;
    }
    lut = plane8_lut[plane];
    do {
        uint64_t v = AV_RN64A(dst) | lut[*buf++];
        AV_WN64A(dst, v);
        dst += 8;
    } while (--buf_size);
}

/**
 * Decode interleaved plane buffer up to 24bpp
 * @param dst Destination buffer
 * @param buf Source buffer
 * @param buf_size size of source buffer in bytes
 * @param plane plane number to decode as
 */
static void decodeplane32(uint32_t *dst, const uint8_t *buf, int buf_size, int plane)
{
    const uint32_t *lut = plane32_lut[plane];
    do {
        unsigned mask = (*buf >> 2) & ~3;
        dst[0] |= lut[mask++];
        dst[1] |= lut[mask++];
        dst[2] |= lut[mask++];
        dst[3] |= lut[mask];
        mask = (*buf++ << 2) & 0x3F;
        dst[4] |= lut[mask++];
        dst[5] |= lut[mask++];
        dst[6] |= lut[mask++];
        dst[7] |= lut[mask];
        dst += 8;
    } while (--buf_size);
}
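
/*
 * In decodeplane32() above, (*buf >> 2) & ~3 is the byte's high nibble times 4
 * and (*buf << 2) & 0x3F is its low nibble times 4; each indexes a group of
 * four 32-bit entries in plane32_lut, so one source byte ORs bit "plane" into
 * eight chunky 32-bit pixels.
 */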

#define DECODE_HAM_PLANE32(x)       \
    first  = buf[x] << 1;           \
    second = buf[(x)+1] << 1;       \
    delta &= pal[first++];          \
    delta |= pal[first];            \
    dst[x] = delta;                 \
    delta &= pal[second++];         \
    delta |= pal[second];           \
    dst[(x)+1] = delta

/**
 * Converts one line of HAM6/8-encoded chunky buffer to 24bpp.
 *
 * @param dst the destination 24bpp buffer
 * @param buf the source 8bpp chunky buffer
 * @param pal the HAM decode table
 * @param buf_size the plane size in bytes
 */
static void decode_ham_plane32(uint32_t *dst, const uint8_t *buf,
                               const uint32_t *const pal, unsigned buf_size)
{
    uint32_t delta = pal[1]; /* first palette entry */
    do {
        uint32_t first, second;
        DECODE_HAM_PLANE32(0);
        DECODE_HAM_PLANE32(2);
        DECODE_HAM_PLANE32(4);
        DECODE_HAM_PLANE32(6);
        buf += 8;
        dst += 8;
    } while (--buf_size);
}
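
/*
 * HAM (Hold-And-Modify) background: each chunky index either selects a palette
 * color directly or keeps ("holds") the previous pixel and replaces only one of
 * its R, G or B components. ham_palbuf, built in extract_header(), encodes this
 * as a pair of 32-bit words per index: an AND mask that keeps the held
 * components and an OR value that supplies the new ones, which is exactly what
 * DECODE_HAM_PLANE32 applies as "delta &= pal[2*i]; delta |= pal[2*i+1]".
 */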

static void lookup_pal_indicies(uint32_t *dst, const uint32_t *buf,
                                const uint32_t *const pal, unsigned width)
{
    do {
        *dst++ = pal[*buf++];
    } while (--width);
}

/**
 * Decode one complete byterun1 encoded line.
 *
 * @param dst the destination buffer where to store decompressed bitstream
 * @param dst_size the destination plane size in bytes
 * @param buf the source byterun1 compressed bitstream
 * @param buf_end the EOF of source byterun1 compressed bitstream
 * @return number of consumed bytes in byterun1 compressed bitstream
 */
static int decode_byterun(uint8_t *dst, int dst_size,
                          const uint8_t *buf, const uint8_t *const buf_end) {
    const uint8_t *const buf_start = buf;
    unsigned x;
    for (x = 0; x < dst_size && buf < buf_end;) {
        unsigned length;
        const int8_t value = *buf++;
        if (value >= 0) {
            length = value + 1;
            memcpy(dst + x, buf, FFMIN3(length, dst_size - x, buf_end - buf));
            buf += length;
        } else if (value > -128) {
            length = -value + 1;
            memset(dst + x, *buf++, FFMIN(length, dst_size - x));
        } else { // noop
            continue;
        }
        x += length;
    }
    return buf - buf_start;
}
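
/*
 * ByteRun1 (the standard ILBM cmpByteRun1 packing) in short: a control byte
 * n >= 0 means "copy the next n+1 bytes literally", n in -127..-1 means
 * "repeat the next byte -n+1 times", and -128 is a no-op. For example, the
 * sequence 0x02 'A' 'B' 'C' 0xFD 'X' decodes to "ABCXXXX".
 */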

#define DECODE_RGBX_COMMON(type)                                    \
    if (!length) {                                                  \
        length = bytestream2_get_byte(gb);                          \
        if (!length) {                                              \
            length = bytestream2_get_be16(gb);                      \
            if (!length)                                            \
                return;                                             \
        }                                                           \
    }                                                               \
    for (i = 0; i < length; i++) {                                  \
        *(type *)(dst + y*linesize + x * sizeof(type)) = pixel;     \
        x += 1;                                                     \
        if (x >= width) {                                           \
            y += 1;                                                 \
            if (y >= height)                                        \
                return;                                             \
            x = 0;                                                  \
        }                                                           \
    }
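
/*
 * Run lengths in RGB8/RGBN use an escape scheme: a length of 0 in the pixel
 * word means the real count follows as an extra byte, a 0 there means it
 * follows as a big-endian 16-bit value, and a 0 again terminates decoding.
 */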

/**
 * Decode RGB8 buffer
 * @param[out] dst Destination buffer
 * @param width Width of destination buffer (pixels)
 * @param height Height of destination buffer (pixels)
 * @param linesize Line size of destination buffer (bytes)
 */
static void decode_rgb8(GetByteContext *gb, uint8_t *dst, int width, int height, int linesize)
{
    int x = 0, y = 0, i, length;
    while (bytestream2_get_bytes_left(gb) >= 4) {
        uint32_t pixel = 0xFF000000 | bytestream2_get_be24(gb);
        length = bytestream2_get_byte(gb) & 0x7F;
        DECODE_RGBX_COMMON(uint32_t)
    }
}

/**
 * Decode RGBN buffer
 * @param[out] dst Destination buffer
 * @param width Width of destination buffer (pixels)
 * @param height Height of destination buffer (pixels)
 * @param linesize Line size of destination buffer (bytes)
 */
static void decode_rgbn(GetByteContext *gb, uint8_t *dst, int width, int height, int linesize)
{
    int x = 0, y = 0, i, length;
    while (bytestream2_get_bytes_left(gb) >= 2) {
        uint32_t pixel = bytestream2_get_be16u(gb);
        length = pixel & 0x7;
        pixel >>= 4;
        DECODE_RGBX_COMMON(uint16_t)
    }
}

/**
 * Decode DEEP RLE 32-bit buffer
 * @param[out] dst Destination buffer
 * @param[in] src Source buffer
 * @param src_size Source buffer size (bytes)
 * @param width Width of destination buffer (pixels)
 * @param height Height of destination buffer (pixels)
 * @param linesize Line size of destination buffer (bytes)
 */
static void decode_deep_rle32(uint8_t *dst, const uint8_t *src, int src_size, int width, int height, int linesize)
{
    const uint8_t *src_end = src + src_size;
    int x = 0, y = 0, i;
    while (src + 5 <= src_end) {
        int opcode;
        opcode = *(int8_t *)src++;
        if (opcode >= 0) {
            int size = opcode + 1;
            for (i = 0; i < size; i++) {
                int length = FFMIN(size - i, width);
                memcpy(dst + y*linesize + x * 4, src, length * 4);
                src += length * 4;
                x += length;
                i += length;
                if (x >= width) {
                    x = 0;
                    y += 1;
                    if (y >= height)
                        return;
                }
            }
        } else {
            int size = -opcode + 1;
            uint32_t pixel = AV_RN32(src);
            for (i = 0; i < size; i++) {
                *(uint32_t *)(dst + y*linesize + x * 4) = pixel;
                x += 1;
                if (x >= width) {
                    x = 0;
                    y += 1;
                    if (y >= height)
                        return;
                }
            }
            src += 4;
        }
    }
}
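
/*
 * DEEP RLE, as handled above: a signed opcode byte n >= 0 copies n+1 literal
 * 32-bit pixels from the source, while n < 0 repeats the following 32-bit
 * pixel -n+1 times.
 */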

/**
 * Decode DEEP TVDC 32-bit buffer
 * @param[out] dst Destination buffer
 * @param[in] src Source buffer
 * @param src_size Source buffer size (bytes)
 * @param width Width of destination buffer (pixels)
 * @param height Height of destination buffer (pixels)
 * @param linesize Line size of destination buffer (bytes)
 * @param[in] tvdc TVDC lookup table
 */
static void decode_deep_tvdc32(uint8_t *dst, const uint8_t *src, int src_size, int width, int height, int linesize, const int16_t *tvdc)
{
    int x = 0, y = 0, plane = 0;
    int8_t pixel = 0;
    int i, j;

    for (i = 0; i < src_size * 2;) {
#define GETNIBBLE ((i & 1) ? (src[i>>1] & 0xF) : (src[i>>1] >> 4))
        int d = tvdc[GETNIBBLE];
        i++;
        if (d) {
            pixel += d;
            dst[y * linesize + x*4 + plane] = pixel;
            x++;
        } else {
            if (i >= src_size * 2)
                return;
            d = GETNIBBLE + 1;
            i++;
            d = FFMIN(d, width - x);
            for (j = 0; j < d; j++) {
                dst[y * linesize + x*4 + plane] = pixel;
                x++;
            }
        }
        if (x >= width) {
            plane++;
            if (plane >= 4) {
                y++;
                if (y >= height)
                    return;
                plane = 0;
            }
            x = 0;
            pixel = 0;
            i = (i + 1) & ~1;
        }
    }
}
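
/*
 * TVDC coding, as decoded above: the source is read as a stream of nibbles,
 * each indexing the 16-entry tvdc[] delta table from the file header. A
 * non-zero delta is added to the running value and emits one byte; a zero
 * delta means the next nibble + 1 gives a run length of the current value.
 * Each of the four byte planes of a row restarts at zero on a byte-aligned
 * nibble boundary.
 */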

static int unsupported(AVCodecContext *avctx)
{
    IffContext *s = avctx->priv_data;
    avpriv_request_sample(avctx, "bitmap (compression %i, bpp %i, ham %i)", s->compression, s->bpp, s->ham);
    return AVERROR_INVALIDDATA;
}

static int decode_frame(AVCodecContext *avctx,
                        void *data, int *got_frame,
                        AVPacket *avpkt)
{
    IffContext *s = avctx->priv_data;
    const uint8_t *buf = avpkt->size >= 2 ? avpkt->data + AV_RB16(avpkt->data) : NULL;
    const int buf_size = avpkt->size >= 2 ? avpkt->size - AV_RB16(avpkt->data) : 0;
    const uint8_t *buf_end = buf + buf_size;
    int y, plane, res;
    GetByteContext gb;

    if ((res = extract_header(avctx, avpkt)) < 0)
        return res;
    if ((res = ff_reget_buffer(avctx, s->frame)) < 0)
        return res;
    if (!s->init && avctx->bits_per_coded_sample <= 8 &&
        avctx->pix_fmt == AV_PIX_FMT_PAL8) {
        if ((res = cmap_read_palette(avctx, (uint32_t *)s->frame->data[1])) < 0)
            return res;
    } else if (!s->init && avctx->bits_per_coded_sample <= 8 &&
               avctx->pix_fmt == AV_PIX_FMT_RGB32) {
        if ((res = cmap_read_palette(avctx, s->mask_palbuf)) < 0)
            return res;
    }
    s->init = 1;

    switch (s->compression) {
    case 0:
        if (avctx->codec_tag == MKTAG('A','C','B','M')) {
            if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                memset(s->frame->data[0], 0, avctx->height * s->frame->linesize[0]);
                for (plane = 0; plane < s->bpp; plane++) {
                    for (y = 0; y < avctx->height && buf < buf_end; y++) {
                        uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                        decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                        buf += s->planesize;
                    }
                }
            } else if (s->ham) { // HAM to AV_PIX_FMT_BGR32
                memset(s->frame->data[0], 0, avctx->height * s->frame->linesize[0]);
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memset(s->ham_buf, 0, s->planesize * 8);
                    for (plane = 0; plane < s->bpp; plane++) {
                        const uint8_t *start = buf + (plane * avctx->height + y) * s->planesize;
                        if (start >= buf_end)
                            break;
                        decodeplane8(s->ham_buf, start, FFMIN(s->planesize, buf_end - start), plane);
                    }
                    decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
                }
            } else
                return unsupported(avctx);
        } else if (avctx->codec_tag == MKTAG('D','E','E','P')) {
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
            int raw_width = avctx->width * (av_get_bits_per_pixel(desc) >> 3);
            int x;
            for (y = 0; y < avctx->height && buf < buf_end; y++) {
                uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                memcpy(row, buf, FFMIN(raw_width, buf_end - buf));
                buf += raw_width;
                if (avctx->pix_fmt == AV_PIX_FMT_BGR32) {
                    for (x = 0; x < avctx->width; x++)
                        row[4 * x + 3] = row[4 * x + 3] & 0xF0 | (row[4 * x + 3] >> 4);
                }
            }
        } else if (avctx->codec_tag == MKTAG('I','L','B','M')) { // interleaved
            if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memset(row, 0, avctx->width);
                    for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                        decodeplane8(row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                        buf += s->planesize;
                    }
                }
            } else if (s->ham) { // HAM to AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memset(s->ham_buf, 0, s->planesize * 8);
                    for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                        decodeplane8(s->ham_buf, buf, FFMIN(s->planesize, buf_end - buf), plane);
                        buf += s->planesize;
                    }
                    decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
                }
            } else { // AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memset(row, 0, avctx->width << 2);
                    for (plane = 0; plane < s->bpp && buf < buf_end; plane++) {
                        decodeplane32((uint32_t *)row, buf, FFMIN(s->planesize, buf_end - buf), plane);
                        buf += s->planesize;
                    }
                }
            }
        } else if (avctx->codec_tag == MKTAG('P','B','M',' ')) { // IFF-PBM
            if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                for (y = 0; y < avctx->height && buf_end > buf; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memcpy(row, buf, FFMIN(avctx->width, buf_end - buf));
                    buf += avctx->width + (avctx->width % 2); // padding if odd
                }
            } else if (s->ham) { // IFF-PBM: HAM to AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height && buf_end > buf; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memcpy(s->ham_buf, buf, FFMIN(avctx->width, buf_end - buf));
                    buf += avctx->width + (avctx->width & 1); // padding if odd
                    decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
                }
            } else
                return unsupported(avctx);
        }
        break;
    case 1:
        if (avctx->codec_tag == MKTAG('I','L','B','M')) { // interleaved
            if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memset(row, 0, avctx->width);
                    for (plane = 0; plane < s->bpp; plane++) {
                        buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                        decodeplane8(row, s->planebuf, s->planesize, plane);
                    }
                }
            } else if (avctx->bits_per_coded_sample <= 8) { // 8-bit (+ mask) to AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memset(s->mask_buf, 0, avctx->width * sizeof(uint32_t));
                    for (plane = 0; plane < s->bpp; plane++) {
                        buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                        decodeplane32(s->mask_buf, s->planebuf, s->planesize, plane);
                    }
                    lookup_pal_indicies((uint32_t *)row, s->mask_buf, s->mask_palbuf, avctx->width);
                }
            } else if (s->ham) { // HAM to AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memset(s->ham_buf, 0, s->planesize * 8);
                    for (plane = 0; plane < s->bpp; plane++) {
                        buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                        decodeplane8(s->ham_buf, s->planebuf, s->planesize, plane);
                    }
                    decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
                }
            } else { // AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    memset(row, 0, avctx->width << 2);
                    for (plane = 0; plane < s->bpp; plane++) {
                        buf += decode_byterun(s->planebuf, s->planesize, buf, buf_end);
                        decodeplane32((uint32_t *)row, s->planebuf, s->planesize, plane);
                    }
                }
            }
        } else if (avctx->codec_tag == MKTAG('P','B','M',' ')) { // IFF-PBM
            if (avctx->pix_fmt == AV_PIX_FMT_PAL8 || avctx->pix_fmt == AV_PIX_FMT_GRAY8) {
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    buf += decode_byterun(row, avctx->width, buf, buf_end);
                }
            } else if (s->ham) { // IFF-PBM: HAM to AV_PIX_FMT_BGR32
                for (y = 0; y < avctx->height; y++) {
                    uint8_t *row = &s->frame->data[0][y * s->frame->linesize[0]];
                    buf += decode_byterun(s->ham_buf, avctx->width, buf, buf_end);
                    decode_ham_plane32((uint32_t *)row, s->ham_buf, s->ham_palbuf, s->planesize);
                }
            } else
                return unsupported(avctx);
        } else if (avctx->codec_tag == MKTAG('D','E','E','P')) { // IFF-DEEP
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
            if (av_get_bits_per_pixel(desc) == 32)
                decode_deep_rle32(s->frame->data[0], buf, buf_size, avctx->width, avctx->height, s->frame->linesize[0]);
            else
                return unsupported(avctx);
        }
        break;
    case 4:
        bytestream2_init(&gb, buf, buf_size);
        if (avctx->codec_tag == MKTAG('R','G','B','8'))
            decode_rgb8(&gb, s->frame->data[0], avctx->width, avctx->height, s->frame->linesize[0]);
        else if (avctx->codec_tag == MKTAG('R','G','B','N'))
            decode_rgbn(&gb, s->frame->data[0], avctx->width, avctx->height, s->frame->linesize[0]);
        else
            return unsupported(avctx);
        break;
    case 5:
        if (avctx->codec_tag == MKTAG('D','E','E','P')) {
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
            if (av_get_bits_per_pixel(desc) == 32)
                decode_deep_tvdc32(s->frame->data[0], buf, buf_size, avctx->width, avctx->height, s->frame->linesize[0], s->tvdc);
            else
                return unsupported(avctx);
        } else
            return unsupported(avctx);
        break;
    default:
        return unsupported(avctx);
    }

    if ((res = av_frame_ref(data, s->frame)) < 0)
        return res;

    *got_frame = 1;

    return buf_size;
}

static av_cold int decode_end(AVCodecContext *avctx)
{
    IffContext *s = avctx->priv_data;
    av_frame_free(&s->frame);
    av_freep(&s->planebuf);
    av_freep(&s->ham_buf);
    av_freep(&s->ham_palbuf);
    // also release the mask buffers allocated in extract_header()
    av_freep(&s->mask_buf);
    av_freep(&s->mask_palbuf);
    return 0;
}

#if CONFIG_IFF_ILBM_DECODER
AVCodec ff_iff_ilbm_decoder = {
    .name           = "iff",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_IFF_ILBM,
    .priv_data_size = sizeof(IffContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("IFF"),
};
#endif
#if CONFIG_IFF_BYTERUN1_DECODER
AVCodec ff_iff_byterun1_decoder = {
    .name           = "iff",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_IFF_BYTERUN1,
    .priv_data_size = sizeof(IffContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("IFF"),
};
#endif