libavcodec/c93.c
/*
 * Interplay C93 video decoder
 * Copyright (c) 2007 Anssi Hannula <anssi.hannula@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"

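/* Decoder context: two pictures are kept so that predictive blocks can copy
 * from the previously decoded frame; currentpic selects the one being
 * written. */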
26 typedef struct {
27  AVFrame pictures[2];
30 
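/* 4-bit block types; two are packed per byte in the bitstream and consumed
 * low nibble first (see the bt handling in decode_frame()). */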
typedef enum {
    C93_8X8_FROM_PREV  = 0x02,
    C93_4X4_FROM_PREV  = 0x06,
    C93_4X4_FROM_CURR  = 0x07,
    C93_8X8_2COLOR     = 0x08,
    C93_4X4_2COLOR     = 0x0A,
    C93_4X4_4COLOR_GRP = 0x0B,
    C93_4X4_4COLOR     = 0x0D,
    C93_NOOP           = 0x0E,
    C93_8X8_INTRA      = 0x0F,
} C93BlockType;

#define WIDTH   320
#define HEIGHT  192

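/* Flags carried in the first byte of every frame. */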
#define C93_HAS_PALETTE 0x01
#define C93_FIRST_FRAME 0x02

static av_cold int decode_init(AVCodecContext *avctx)
{
    C93DecoderContext *s = avctx->priv_data;
    avctx->pix_fmt = AV_PIX_FMT_PAL8;

    avcodec_get_frame_defaults(&s->pictures[0]);
    avcodec_get_frame_defaults(&s->pictures[1]);
    return 0;
}

static av_cold int decode_end(AVCodecContext *avctx)
{
    C93DecoderContext * const c93 = avctx->priv_data;

    av_frame_unref(&c93->pictures[0]);
    av_frame_unref(&c93->pictures[1]);

    return 0;
}

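/* Copy a height x height block from "from" into "to". "offset" is a linear
 * pixel offset into the source frame; pixels that would fall past the right
 * edge are taken from the start of the same source rows instead. */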
static inline int copy_block(AVCodecContext *avctx, uint8_t *to,
        uint8_t *from, int offset, int height, int stride)
{
    int i;
    int width = height;
    int from_x = offset % WIDTH;
    int from_y = offset / WIDTH;
    int overflow = from_x + width - WIDTH;

    if (!from) {
        /* silently ignoring predictive blocks in first frame */
        return 0;
    }

    if (from_y + height > HEIGHT) {
        av_log(avctx, AV_LOG_ERROR, "invalid offset %d during C93 decoding\n",
               offset);
        return AVERROR_INVALIDDATA;
    }

    if (overflow > 0) {
        width -= overflow;
        for (i = 0; i < height; i++) {
            memcpy(&to[i*stride+width], &from[(from_y+i)*stride], overflow);
        }
    }

    for (i = 0; i < height; i++) {
        memcpy(&to[i*stride], &from[(from_y+i)*stride+from_x], width);
    }

    return 0;
}

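/* Fill a width x height area with palette indices taken from cols[]; "col"
 * supplies bpp bits per pixel selecting which entry to use. If grps is
 * non-NULL, the two candidate colours are re-selected for every 2x2 group
 * from the grps[] table (used by C93_4X4_4COLOR_GRP blocks). */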
static inline void draw_n_color(uint8_t *out, int stride, int width,
        int height, int bpp, uint8_t cols[4], uint8_t grps[4], uint32_t col)
{
    int x, y;
    for (y = 0; y < height; y++) {
        if (grps)
            cols[0] = grps[3 * (y >> 1)];
        for (x = 0; x < width; x++) {
            if (grps)
                cols[1] = grps[(x >> 1) + 1];
            out[x + y*stride] = cols[col & ((1 << bpp) - 1)];
            col >>= bpp;
        }
    }
}

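/* Each packet starts with a flag byte (C93_FIRST_FRAME / C93_HAS_PALETTE),
 * followed by block data for the 40x24 grid of 8x8 blocks; two 4-bit block
 * types are packed per byte. When C93_HAS_PALETTE is set, a 256-entry
 * 3-byte-per-colour palette follows the block data. */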
static int decode_frame(AVCodecContext *avctx, void *data,
                        int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size = avpkt->size;
    C93DecoderContext * const c93 = avctx->priv_data;
    AVFrame * const newpic = &c93->pictures[c93->currentpic];
    AVFrame * const oldpic = &c93->pictures[c93->currentpic^1];
    GetByteContext gb;
    uint8_t *out;
    int stride, ret, i, x, y, b, bt = 0;

    c93->currentpic ^= 1;

    if ((ret = ff_reget_buffer(avctx, newpic)) < 0)
        return ret;

    stride = newpic->linesize[0];

    bytestream2_init(&gb, buf, buf_size);
    b = bytestream2_get_byte(&gb);
    if (b & C93_FIRST_FRAME) {
        newpic->pict_type = AV_PICTURE_TYPE_I;
        newpic->key_frame = 1;
    } else {
        newpic->pict_type = AV_PICTURE_TYPE_P;
        newpic->key_frame = 0;
    }

    for (y = 0; y < HEIGHT; y += 8) {
        out = newpic->data[0] + y * stride;
        for (x = 0; x < WIDTH; x += 8) {
            uint8_t *copy_from = oldpic->data[0];
            unsigned int offset, j;
            uint8_t cols[4], grps[4];
            C93BlockType block_type;

            if (!bt)
                bt = bytestream2_get_byte(&gb);

            block_type = bt & 0x0F;
            switch (block_type) {
            case C93_8X8_FROM_PREV:
                offset = bytestream2_get_le16(&gb);
                if ((ret = copy_block(avctx, out, copy_from, offset, 8, stride)) < 0)
                    return ret;
                break;

            case C93_4X4_FROM_CURR:
                copy_from = newpic->data[0];
                /* fall through */
            case C93_4X4_FROM_PREV:
                for (j = 0; j < 8; j += 4) {
                    for (i = 0; i < 8; i += 4) {
                        int offset = bytestream2_get_le16(&gb);
                        int from_x = offset % WIDTH;
                        int from_y = offset / WIDTH;
                        if (block_type == C93_4X4_FROM_CURR && from_y == y+j &&
                            (FFABS(from_x - x-i) < 4 || FFABS(from_x - x-i) > WIDTH-4)) {
                            avpriv_request_sample(avctx, "block overlap %d %d %d %d\n", from_x, x+i, from_y, y+j);
                            return AVERROR_INVALIDDATA;
                        }
                        if ((ret = copy_block(avctx, &out[j*stride+i],
                                              copy_from, offset, 4, stride)) < 0)
                            return ret;
                    }
                }
                break;

            case C93_8X8_2COLOR:
                bytestream2_get_buffer(&gb, cols, 2);
                for (i = 0; i < 8; i++) {
                    draw_n_color(out + i*stride, stride, 8, 1, 1, cols,
                                 NULL, bytestream2_get_byte(&gb));
                }

                break;

            case C93_4X4_2COLOR:
            case C93_4X4_4COLOR:
            case C93_4X4_4COLOR_GRP:
                for (j = 0; j < 8; j += 4) {
                    for (i = 0; i < 8; i += 4) {
                        if (block_type == C93_4X4_2COLOR) {
                            bytestream2_get_buffer(&gb, cols, 2);
                            draw_n_color(out + i + j*stride, stride, 4, 4,
                                         1, cols, NULL, bytestream2_get_le16(&gb));
                        } else if (block_type == C93_4X4_4COLOR) {
                            bytestream2_get_buffer(&gb, cols, 4);
                            draw_n_color(out + i + j*stride, stride, 4, 4,
                                         2, cols, NULL, bytestream2_get_le32(&gb));
                        } else {
                            bytestream2_get_buffer(&gb, grps, 4);
                            draw_n_color(out + i + j*stride, stride, 4, 4,
                                         1, cols, grps, bytestream2_get_le16(&gb));
                        }
                    }
                }
                break;

            case C93_NOOP:
                break;

            case C93_8X8_INTRA:
                for (j = 0; j < 8; j++)
                    bytestream2_get_buffer(&gb, out + j*stride, 8);
                break;

            default:
                av_log(avctx, AV_LOG_ERROR, "unexpected type %x at %dx%d\n",
                       block_type, x, y);
                return AVERROR_INVALIDDATA;
            }
            bt >>= 4;
            out += 8;
        }
    }

    if (b & C93_HAS_PALETTE) {
        uint32_t *palette = (uint32_t *) newpic->data[1];
        for (i = 0; i < 256; i++) {
            palette[i] = 0xFFU << 24 | bytestream2_get_be24(&gb);
        }
        newpic->palette_has_changed = 1;
    } else {
        if (oldpic->data[1])
            memcpy(newpic->data[1], oldpic->data[1], 256 * 4);
    }

    if ((ret = av_frame_ref(data, newpic)) < 0)
        return ret;
    *got_frame = 1;

    return buf_size;
}

AVCodec ff_c93_decoder = {
    .name           = "c93",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_C93,
    .priv_data_size = sizeof(C93DecoderContext),
    .init           = decode_init,
    .close          = decode_end,
    .decode         = decode_frame,
    .capabilities   = CODEC_CAP_DR1,
    .long_name      = NULL_IF_CONFIG_SMALL("Interplay C93"),
};