/*
 * Feeble Files/ScummVM DXA decoder
 * Copyright (c) 2007 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * DXA Video decoder
 */

#include <stdio.h>
#include <string.h>

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "bytestream.h"
#include "avcodec.h"
#include "internal.h"

#include <zlib.h>

/*
 * Decoder context
 */
typedef struct DxaDecContext {
    AVFrame prev;

    int dsize;
    uint8_t *decomp_buf;
    uint32_t pal[256];
} DxaDecContext;

static const int shift1[6] = { 0, 8, 8, 8, 4, 4 };
static const int shift2[6] = { 0, 0, 8, 4, 0, 4 };

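/*
 * Method 12/13 payload layout, as read by the pointer setup below:
 * a 12-byte header whose first two big-endian words hold the sizes of the
 * pixel-data and motion-vector streams (the third word is not used by this
 * decoder), followed by one opcode byte per 4x4 block, then the pixel data,
 * the motion vectors and the change masks. For opcodes 10-15 only half of a
 * block's pixels change, so the change mask fits in one byte; shift1/shift2
 * above spread its two nibbles into the 16-bit mask used by the inner loop.
 */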
static int decode_13(AVCodecContext *avctx, DxaDecContext *c, uint8_t* dst,
                     int stride, uint8_t *src, uint8_t *ref)
{
    uint8_t *code, *data, *mv, *msk, *tmp, *tmp2;
    int i, j, k;
    int type, x, y, d, d2;
    uint32_t mask;

    code = src  + 12;
    data = code + ((avctx->width * avctx->height) >> 4);
    mv   = data + AV_RB32(src + 0);
    msk  = mv   + AV_RB32(src + 4);

    for(j = 0; j < avctx->height; j += 4){
        for(i = 0; i < avctx->width; i += 4){
            tmp  = dst + i;
            tmp2 = ref + i;
            type = *code++;
            switch(type){
            case 4: // motion compensation
                x = (*mv) >> 4;    if(x & 8) x = 8 - x;
                y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
                tmp2 += x + y*stride;
            case 0: // skip
            case 5: // skip in method 12
                for(y = 0; y < 4; y++){
                    memcpy(tmp, tmp2, 4);
                    tmp  += stride;
                    tmp2 += stride;
                }
                break;
            case 1:  // masked change
            case 10: // masked change with only half of pixels changed
            case 11: // cases 10-15 are for method 12 only
            case 12:
            case 13:
            case 14:
            case 15:
                if(type == 1){
                    mask = AV_RB16(msk);
                    msk += 2;
                }else{
                    type -= 10;
                    mask = ((msk[0] & 0xF0) << shift1[type]) | ((msk[0] & 0xF) << shift2[type]);
                    msk++;
                }
                for(y = 0; y < 4; y++){
                    for(x = 0; x < 4; x++){
                        tmp[x] = (mask & 0x8000) ? *data++ : tmp2[x];
                        mask <<= 1;
                    }
                    tmp  += stride;
                    tmp2 += stride;
                }
                break;
            case 2: // fill block
                for(y = 0; y < 4; y++){
                    memset(tmp, data[0], 4);
                    tmp += stride;
                }
                data++;
                break;
            case 3: // raw block
                for(y = 0; y < 4; y++){
                    memcpy(tmp, data, 4);
                    data += 4;
                    tmp  += stride;
                }
                break;
            case 8: // subblocks - method 13 only
                mask = *msk++;
                for(k = 0; k < 4; k++){
                    d  = ((k & 1) << 1) + ((k & 2) * stride);
                    d2 = ((k & 1) << 1) + ((k & 2) * stride);
                    tmp2 = ref + i + d2;
                    switch(mask & 0xC0){
                    case 0x80: // motion compensation
                        x = (*mv) >> 4;    if(x & 8) x = 8 - x;
                        y = (*mv++) & 0xF; if(y & 8) y = 8 - y;
                        tmp2 += x + y*stride;
                    case 0x00: // skip
                        tmp[d + 0         ] = tmp2[0];
                        tmp[d + 1         ] = tmp2[1];
                        tmp[d + 0 + stride] = tmp2[0 + stride];
                        tmp[d + 1 + stride] = tmp2[1 + stride];
                        break;
                    case 0x40: // fill
                        tmp[d + 0         ] = data[0];
                        tmp[d + 1         ] = data[0];
                        tmp[d + 0 + stride] = data[0];
                        tmp[d + 1 + stride] = data[0];
                        data++;
                        break;
                    case 0xC0: // raw
                        tmp[d + 0         ] = *data++;
                        tmp[d + 1         ] = *data++;
                        tmp[d + 0 + stride] = *data++;
                        tmp[d + 1 + stride] = *data++;
                        break;
                    }
                    mask <<= 2;
                }
                break;
            case 32: // vector quantization - 2 colors
                mask = AV_RB16(msk);
                msk += 2;
                for(y = 0; y < 4; y++){
                    for(x = 0; x < 4; x++){
                        tmp[x] = data[mask & 1];
                        mask >>= 1;
                    }
                    tmp  += stride;
                    tmp2 += stride;
                }
                data += 2;
                break;
            case 33: // vector quantization - 3 or 4 colors
            case 34:
                mask = AV_RB32(msk);
                msk += 4;
                for(y = 0; y < 4; y++){
                    for(x = 0; x < 4; x++){
                        tmp[x] = data[mask & 3];
                        mask >>= 2;
                    }
                    tmp  += stride;
                    tmp2 += stride;
                }
                data += type - 30;
                break;
            default:
                av_log(avctx, AV_LOG_ERROR, "Unknown opcode %d\n", type);
                return AVERROR_INVALIDDATA;
            }
        }
        dst += stride * 4;
        ref += stride * 4;
    }
    return 0;
}

static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
    AVFrame *frame = data;
    DxaDecContext * const c = avctx->priv_data;
    uint8_t *outptr, *srcptr, *tmpptr;
    unsigned long dsize;
    int i, j, compr, ret;
    int stride;
    int pc = 0;
    GetByteContext gb;

    bytestream2_init(&gb, avpkt->data, avpkt->size);

    /* make the palette available on the way out */
    if (bytestream2_peek_le32(&gb) == MKTAG('C','M','A','P')) {
        bytestream2_skip(&gb, 4);
        for(i = 0; i < 256; i++){
            c->pal[i] = 0xFFU << 24 | bytestream2_get_be24(&gb);
        }
        pc = 1;
    }

    if ((ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF)) < 0)
        return ret;
    memcpy(frame->data[1], c->pal, AVPALETTE_SIZE);
    frame->palette_has_changed = pc;

    outptr = frame->data[0];
    srcptr = c->decomp_buf;
    tmpptr = c->prev.data[0];
    stride = frame->linesize[0];

    if (bytestream2_get_le32(&gb) == MKTAG('N','U','L','L'))
        compr = -1;
    else
        compr = bytestream2_get_byte(&gb);

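    /* Everything except method 4 and NULL (repeat) frames carries a
     * zlib-compressed payload; inflate it into the scratch buffer first. */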
    dsize = c->dsize;
    if (compr != 4 && compr != -1) {
        bytestream2_skip(&gb, 4);
        if (uncompress(c->decomp_buf, &dsize, avpkt->data + bytestream2_tell(&gb),
                       bytestream2_get_bytes_left(&gb)) != Z_OK) {
            av_log(avctx, AV_LOG_ERROR, "Uncompress failed!\n");
            return AVERROR_UNKNOWN;
        }
    }
    switch(compr){
    case -1:
        frame->key_frame = 0;
        frame->pict_type = AV_PICTURE_TYPE_P;
        if(c->prev.data[0])
            memcpy(frame->data[0], c->prev.data[0], frame->linesize[0] * avctx->height);
        else{ // Should happen only when first frame is 'NULL'
            memset(frame->data[0], 0, frame->linesize[0] * avctx->height);
            frame->key_frame = 1;
            frame->pict_type = AV_PICTURE_TYPE_I;
        }
        break;
    case 2:
    case 3:
    case 4:
    case 5:
        frame->key_frame = !(compr & 1);
        frame->pict_type = (compr & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
        for(j = 0; j < avctx->height; j++){
            if((compr & 1) && tmpptr){
                for(i = 0; i < avctx->width; i++)
                    outptr[i] = srcptr[i] ^ tmpptr[i];
                tmpptr += stride;
            }else
                memcpy(outptr, srcptr, avctx->width);
            outptr += stride;
            srcptr += avctx->width;
        }
        break;
    case 12: // ScummVM coding
    case 13:
        frame->key_frame = 0;
        frame->pict_type = AV_PICTURE_TYPE_P;
        if (!c->prev.data[0]) {
            av_log(avctx, AV_LOG_ERROR, "Missing reference frame\n");
            return AVERROR_INVALIDDATA;
        }
        decode_13(avctx, c, frame->data[0], frame->linesize[0], srcptr, c->prev.data[0]);
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown/unsupported compression type %d\n", compr);
        return AVERROR_INVALIDDATA;
    }

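    /* keep a reference to the decoded frame; it becomes the prediction
     * source for the next packet */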
    av_frame_unref(&c->prev);
    if ((ret = av_frame_ref(&c->prev, frame)) < 0)
        return ret;

    *got_frame = 1;

    /* always report that the buffer was completely consumed */
    return avpkt->size;
}

static av_cold int decode_init(AVCodecContext *avctx)
{
    DxaDecContext * const c = avctx->priv_data;

    avctx->pix_fmt = AV_PIX_FMT_PAL8;

    avcodec_get_frame_defaults(&c->prev);

    c->dsize = avctx->width * avctx->height * 2;
    c->decomp_buf = av_malloc(c->dsize);
    if (!c->decomp_buf) {
        av_log(avctx, AV_LOG_ERROR, "Can't allocate decompression buffer.\n");
        return AVERROR(ENOMEM);
    }

    return 0;
}

static av_cold int decode_end(AVCodecContext *avctx)
{
    DxaDecContext * const c = avctx->priv_data;

    av_freep(&c->decomp_buf);
    av_frame_unref(&c->prev);

    return 0;
}

AVCodec ff_dxa_decoder = {
    .name           = "dxa",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_DXA,
    .priv_data_size = sizeof(DxaDecContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Feeble Files/ScummVM DXA"),
};