FFmpeg: eatgq.c (Electronic Arts TGQ Video Decoder)
Symbols referenced in this file:
static void tgq_idct_put_mb(TgqContext *s, int16_t(*block)[64], AVFrame *frame, int mb_x, int mb_y)
Definition: eatgq.c:107
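In a YUV 4:2:0 macroblock, four 8x8 luma blocks plus one Cb and one Cr block cover a 16x16 pixel area, so a put_mb routine writes the six IDCT results into the three frame planes with ff_ea_idct_put_c(). The sketch below only illustrates that layout; the block ordering and plane offsets are assumptions, not necessarily what tgq_idct_put_mb() does.

/* Sketch only: block ordering and plane offsets are assumptions. */
#include "libavutil/frame.h"
#include "eaidct.h"   /* ff_ea_idct_put_c(); path relative to libavcodec */

static void put_mb_sketch(AVFrame *frame, int16_t (*block)[64], int mb_x, int mb_y)
{
    int ls_y  = frame->linesize[0];
    int ls_cb = frame->linesize[1];
    int ls_cr = frame->linesize[2];
    uint8_t *dest_y  = frame->data[0] + mb_y * 16 * ls_y  + mb_x * 16;
    uint8_t *dest_cb = frame->data[1] + mb_y *  8 * ls_cb + mb_x * 8;
    uint8_t *dest_cr = frame->data[2] + mb_y *  8 * ls_cr + mb_x * 8;

    ff_ea_idct_put_c(dest_y,                ls_y,  block[0]); /* top-left luma     */
    ff_ea_idct_put_c(dest_y + 8,            ls_y,  block[1]); /* top-right luma    */
    ff_ea_idct_put_c(dest_y + 8 * ls_y,     ls_y,  block[2]); /* bottom-left luma  */
    ff_ea_idct_put_c(dest_y + 8 * ls_y + 8, ls_y,  block[3]); /* bottom-right luma */
    ff_ea_idct_put_c(dest_cb,               ls_cb, block[4]); /* Cb                */
    ff_ea_idct_put_c(dest_cr,               ls_cr, block[5]); /* Cr                */
}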
void avcodec_set_dimensions(AVCodecContext *s, int width, int height)
Definition: libavcodec/utils.c:177
static void tgq_idct_put_mb_dconly(TgqContext *s, AVFrame *frame, int mb_x, int mb_y, const int8_t *dc)
Definition: eatgq.c:134
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
Definition: bytestream.h:130
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: libavcodec/avcodec.h:1253
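Frame timestamps are expressed in multiples of this unit, so converting a pts to seconds means multiplying by the rational value. A minimal illustration, not code from eatgq.c:

#include <stdint.h>
#include "libavutil/rational.h"

/* With time_base = 1/25 each tick lasts 40 ms, so a pts of 75 in
 * time_base units corresponds to 3.0 seconds. */
AVRational time_base = { 1, 25 };
int64_t    pts       = 75;
double     seconds   = pts * av_q2d(time_base);   /* 75 * (1/25) = 3.0 */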
DECLARE_ALIGNED(16, int16_t, block)[6][64]
static void tgq_decode_block(TgqContext *s, int16_t block[64], GetBitContext *gb)
Definition: eatgq.c:61
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: libavcodec/avcodec.h:743
bitstream reader API header.
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
Definition: bytestream.h:159
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: libavutil/internal.h:123
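CODEC_CAP_DR1 and NULL_IF_CONFIG_SMALL above usually appear together in a decoder's AVCodec declaration: the first advertises get_buffer()-based frame allocation, the second drops the human-readable name in size-constrained builds. The sketch below shows that shape for this decoder; only tgq_decode_frame and the two macros come from the list above, while tgq_decode_init and the exact field values are assumptions.

#include "avcodec.h"
#include "libavutil/internal.h"   /* NULL_IF_CONFIG_SMALL() */

AVCodec ff_eatgq_decoder = {
    .name           = "eatgq",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_TGQ,
    .priv_data_size = sizeof(TgqContext),
    .init           = tgq_decode_init,           /* assumed init function         */
    .decode         = tgq_decode_frame,          /* listed above, eatgq.c:197     */
    .capabilities   = CODEC_CAP_DR1,             /* buffers come via get_buffer() */
    .long_name      = NULL_IF_CONFIG_SMALL("Electronic Arts TGQ video"),
};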
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, uint8_t *dst, unsigned int size)
Definition: bytestream.h:258
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
Definition: bytestream.h:149
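The bytestream2_* helpers listed above form a bounds-checked byte reader: bytestream2_init() attaches a GetByteContext to a buffer, bytestream2_get_bytes_left() reports the remaining payload, and the skip/get_buffer calls consume it without running past the end. The sketch below parses a made-up header; the field layout is invented for illustration, and bytestream2_get_le16()/bytestream2_tell() are further calls from the same API that happen not to appear in the list above.

#include "bytestream.h"   /* path relative to libavcodec */

static int parse_header_sketch(const uint8_t *buf, int buf_size,
                               int *width, int *height,
                               uint8_t *palette, int pal_size)
{
    GetByteContext gb;

    bytestream2_init(&gb, buf, buf_size);
    if (bytestream2_get_bytes_left(&gb) < 6 + pal_size)
        return -1;                              /* truncated input             */

    *width  = bytestream2_get_le16(&gb);        /* hypothetical 16-bit fields  */
    *height = bytestream2_get_le16(&gb);
    bytestream2_skip(&gb, 2);                   /* hypothetical reserved bytes */
    bytestream2_get_buffer(&gb, palette, pal_size);

    return bytestream2_tell(&gb);               /* bytes consumed so far       */
}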
external API header
static void tgq_dconly(TgqContext *s, unsigned char *dst, int dst_stride, int dc)
Definition: eatgq.c:125
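A block whose AC coefficients are all zero needs no IDCT: every pixel of the 8x8 area equals the clamped DC level, which is what a dconly routine exploits. The sketch below fills such a block; treating dc as an already-dequantized level is an assumption, since the real scaling in eatgq.c is not reproduced here.

#include <string.h>
#include "libavutil/common.h"   /* av_clip_uint8() */

static void dconly_sketch(uint8_t *dst, int dst_stride, int dc)
{
    int level = av_clip_uint8(dc);   /* clamp to the 0..255 pixel range */
    int j;

    for (j = 0; j < 8; j++)          /* one memset per row of the 8x8 block */
        memset(dst + j * dst_stride, level, 8);
}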
struct TgqContext TgqContext
Definition: eatgq.c:40
static int tgq_decode_mb(TgqContext *s, AVFrame *frame, int mb_y, int mb_x)
Definition: eatgq.c:151
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:101
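Because linesize (the stride) can exceed the visible width due to alignment padding, pixels must be addressed through the stride rather than the width. A minimal illustration:

#include "libavutil/frame.h"

/* Returns the luma sample at (x, y); the stride, not the width, scales y. */
static uint8_t get_luma_sketch(const AVFrame *frame, int x, int y)
{
    return frame->data[0][y * frame->linesize[0] + x];
}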
dc[1]
Intra DC Prediction.
Definition: snow.txt:392
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: libavcodec/utils.c:823
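A decoder normally fixes the output format on the context and then asks for a writable frame with ff_get_buffer() before writing pixels into it. A minimal sketch of that pattern, assuming a YUV 4:2:0 decoder and the avcodec_set_dimensions() helper listed above:

#include "avcodec.h"
#include "internal.h"   /* ff_get_buffer(); paths relative to libavcodec */

static int prepare_frame_sketch(AVCodecContext *avctx, AVFrame *frame, int w, int h)
{
    int ret;

    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);
    avctx->pix_fmt = AV_PIX_FMT_YUV420P;

    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0) {
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
        return ret;
    }
    return 0;
}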
static int init_get_bits(GetBitContext *s, const uint8_t *buffer, int bit_size)
Initialize GetBitContext.
Definition: get_bits.h:379
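init_get_bits() takes the buffer length in bits, hence the usual size * 8 at the call site; get_bits() then pulls fixed-width fields out of the stream. The field widths in the sketch below are arbitrary, and get_bits() belongs to the same get_bits.h API even though it is not in the list above.

#include "get_bits.h"   /* path relative to libavcodec */

static void read_bits_sketch(const uint8_t *buf, int buf_size)
{
    GetBitContext gb;
    int mode, value;

    init_get_bits(&gb, buf, buf_size * 8);   /* size is given in bits */
    mode  = get_bits(&gb, 4);                /* hypothetical 4-bit mode field */
    value = get_bits(&gb, 8);                /* hypothetical 8-bit payload    */
    (void)mode;
    (void)value;
}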
void ff_init_scantable_permutation(uint8_t *idct_permutation, int idct_permutation_type)
Definition: dsputil.c:131
AAN (Arai Agui Nakajima) (I)DCT tables.
void ff_ea_idct_put_c(uint8_t *dest, int linesize, int16_t *block)
Definition: eaidct.c:80
AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:68
common internal API header.
DSP utils.
void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
Definition: dsputil.c:110
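The two scantable helpers are typically called once at decoder init: ff_init_scantable_permutation() fills the IDCT permutation for the DSP context's permutation type, and ff_init_scantable() then builds a ScanTable from the canonical zigzag order (ff_zigzag_direct). A sketch under those assumptions; whether eatgq.c uses exactly this sequence is not shown by the list above.

#include "dsputil.h"   /* DSPContext, ScanTable, ff_zigzag_direct */

static void init_scan_sketch(DSPContext *dsp, ScanTable *scantable)
{
    /* Permute the canonical zigzag order to match the IDCT in use. */
    ff_init_scantable_permutation(dsp->idct_permutation, dsp->idct_permutation_type);
    ff_init_scantable(dsp->idct_permutation, scantable, ff_zigzag_direct);
}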
static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: crystalhd.c:868
static void tgq_calculate_qtable(TgqContext *s, int quant)
Definition: eatgq.c:186
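Calculating a quantization table generally means scaling a 64-entry base table (presumably the AAN tables listed above, e.g. ff_aanscales) by the frame quantizer. The arithmetic below is a placeholder; the real scaling formula inside tgq_calculate_qtable() is not reproduced here.

#include <stdint.h>

static void calc_qtable_sketch(int qtable[64], const uint16_t base[64], int quant)
{
    int i;

    for (i = 0; i < 64; i++)
        qtable[i] = (base[i] * quant) >> 3;   /* hypothetical scale factor */
}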
static int tgq_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
Definition: eatgq.c:197
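Putting the pieces together, a decode callback of this shape parses a small frame header from the packet, updates the context dimensions, requests an output frame, and decodes macroblocks row by row before reporting the frame. The skeleton below only illustrates that flow: the header fields, the GetByteContext member named gb inside TgqContext, and the error handling are assumptions, not the actual TGQ bitstream layout.

#include "avcodec.h"
#include "bytestream.h"
#include "internal.h"   /* ff_get_buffer(); paths relative to libavcodec */

static int decode_frame_sketch(AVCodecContext *avctx, void *data,
                               int *got_frame, AVPacket *avpkt)
{
    TgqContext *s  = avctx->priv_data;
    AVFrame *frame = data;
    int x, y, w, h, ret;

    bytestream2_init(&s->gb, avpkt->data, avpkt->size);

    w = bytestream2_get_le16(&s->gb);   /* hypothetical header fields */
    h = bytestream2_get_le16(&s->gb);
    if (w != avctx->width || h != avctx->height)
        avcodec_set_dimensions(avctx, w, h);

    if ((ret = ff_get_buffer(avctx, frame, 0)) < 0)
        return ret;

    for (y = 0; y < (avctx->height + 15) / 16; y++)
        for (x = 0; x < (avctx->width + 15) / 16; x++)
            if (tgq_decode_mb(s, frame, y, x) < 0)
                return AVERROR_INVALIDDATA;

    *got_frame = 1;
    return avpkt->size;
}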