42 int16_t delta_pal[768];
50 uint16_t *frm0, *frm1, *frm2;
62 uint16_t codebook[256];
63 uint16_t small_codebook[4];
70 int seq_num, codec, rotate_code, rle_output_size;
101 const int edge_max = edge_size - 1;
105 } else if (y == edge_max) {
109 } else if (x == edge_max) {
143 int pos, int npoints)
146 points[0] = (x0 * pos + x1 * (npoints - pos) + (npoints >> 1)) / npoints;
147 points[1] = (y0 * pos + y1 * (npoints - pos) + (npoints >> 1)) / npoints;
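The arithmetic above is an integer lerp with round-to-nearest. A standalone sketch (hypothetical names, no FFmpeg dependencies) of the same computation, with a small usage example:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the rounded integer interpolation above: pos = 0 yields (x1, y1),
 * pos = npoints yields (x0, y0), and adding npoints/2 before the division
 * rounds to the nearest integer instead of truncating. */
static void interp_point_sketch(int8_t *points, int x0, int y0, int x1, int y1,
                                int pos, int npoints)
{
    points[0] = (x0 * pos + x1 * (npoints - pos) + (npoints >> 1)) / npoints;
    points[1] = (y0 * pos + y1 * (npoints - pos) + (npoints >> 1)) / npoints;
}

int main(void)
{
    int8_t p[2];
    interp_point_sketch(p, 0, 0, 6, 3, 2, 4); /* halfway between (6,3) and (0,0) */
    printf("(%d, %d)\n", p[0], p[1]);         /* prints (3, 2): 1.5 rounds up */
    return 0;
}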
162 static void make_glyphs(int8_t *pglyphs, const int8_t *xvec, const int8_t *yvec,
163 const int side_length)
165 const int glyph_size = side_length * side_length;
166 int8_t *pglyph = pglyphs;
182 for (ipoint = 0; ipoint <= npoints; ipoint++) {
190 for (irow = point[1]; irow >= 0; irow--)
191 pglyph[point[0] + irow * side_length] = 1;
195 for (irow = point[1]; irow < side_length; irow++)
196 pglyph[point[0] + irow * side_length] = 1;
200 for (icol = point[0]; icol >= 0; icol--)
201 pglyph[icol + point[1] * side_length] = 1;
205 for (icol = point[0]; icol < side_length; icol++)
206 pglyph[icol + point[1] * side_length] = 1;
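Each of the four loops above floods the glyph from an interpolated edge point toward one border of the cell. A condensed sketch of that fill step follows; the direction names are illustrative stand-ins for the decoder's GlyphDir values.

#include <stdint.h>

/* Illustrative fill step: starting from point (x, y) on a
 * side_length x side_length glyph, mark every cell from the point to the
 * chosen border, producing the half-plane fills used above. */
enum fill_dir_sketch { FILL_LEFT, FILL_UP, FILL_RIGHT, FILL_DOWN };

static void fill_toward_edge_sketch(int8_t *glyph, int side_length,
                                    int x, int y, enum fill_dir_sketch dir)
{
    int irow, icol;

    switch (dir) {
    case FILL_UP:                           /* point up to row 0          */
        for (irow = y; irow >= 0; irow--)
            glyph[x + irow * side_length] = 1;
        break;
    case FILL_DOWN:                         /* point down to the last row */
        for (irow = y; irow < side_length; irow++)
            glyph[x + irow * side_length] = 1;
        break;
    case FILL_LEFT:                         /* point left to column 0     */
        for (icol = x; icol >= 0; icol--)
            glyph[icol + y * side_length] = 1;
        break;
    case FILL_RIGHT:                        /* point right to last column */
        for (icol = x; icol < side_length; icol++)
            glyph[icol + y * side_length] = 1;
        break;
    }
}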
258 if (rotate_code == 2)
290 for (i = 0; i < 256; i++)
311 opcode = bytestream2_get_byte(&ctx->gb);
312 run_len = (opcode >> 1) + 1;
317 color = bytestream2_get_byte(&ctx->gb);
318 memset(dst, color, run_len);
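The opcode handling above is byte-oriented RLE: the low bit of each opcode selects a single-byte fill or a literal copy, and the remaining bits give the run length minus one. A standalone sketch using plain pointers instead of the GetByteContext reader:

#include <stdint.h>
#include <stddef.h>
#include <string.h>

/* Sketch of the RLE scheme read above. Returns 0 on success, -1 if the
 * input runs short or a run would overflow the output. */
static int rle_decode_sketch(const uint8_t *src, size_t src_size,
                             uint8_t *dst, size_t out_size)
{
    const uint8_t *end = src + src_size;

    while (out_size > 0) {
        int opcode, run_len;

        if (src >= end)
            return -1;
        opcode  = *src++;
        run_len = (opcode >> 1) + 1;
        if ((size_t)run_len > out_size)
            return -1;

        if (opcode & 1) {               /* fill: one color byte repeated */
            if (src >= end)
                return -1;
            memset(dst, *src++, run_len);
        } else {                        /* copy: run_len literal bytes   */
            if (end - src < run_len)
                return -1;
            memcpy(dst, src, run_len);
            src += run_len;
        }
        dst      += run_len;
        out_size -= run_len;
    }
    return 0;
}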
338 for (i = 0; i < height; i++) {
344 len = bytestream2_get_le16u(&ctx->gb);
351 code = bytestream2_get_byteu(&ctx->gb);
353 code = (code >> 1) + 1;
354 if (pos + code > width)
357 val = bytestream2_get_byteu(&ctx->gb);
359 memset(dst + pos, val, code);
364 for (j = 0; j < code; j++) {
365 val = bytestream2_get_byteu(&ctx->gb);
385 for (j = 0; j < 4; j++) {
386 for (i = 0; i < 4; i++) {
387 if ((pos + i) < 0 || (pos + i) >= height * stride)
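The test above guards a 4x4 motion-compensated copy against source positions outside the frame. A self-contained sketch of such a guarded copy, assuming out-of-frame samples are simply zeroed:

#include <stdint.h>

/* Sketch of a bounds-guarded 4x4 motion copy: pos tracks the absolute
 * source offset of each sample so that reads outside the height * stride
 * frame buffer can be replaced (here with zero) instead of overflowing. */
static void codec37_mv_sketch(uint8_t *dst, const uint8_t *src,
                              int height, int stride, int x, int y)
{
    int pos = x + y * stride;
    int i, j;

    for (j = 0; j < 4; j++) {
        for (i = 0; i < 4; i++)
            dst[i] = (pos + i < 0 || pos + i >= height * stride) ? 0 : src[i];
        dst += stride;
        src += stride;
        pos += stride;
    }
}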
404 int compr, mvoff, seq, flags;
405 uint32_t decoded_size;
408 compr = bytestream2_get_byte(&ctx->gb);
409 mvoff = bytestream2_get_byte(&ctx->gb);
410 seq = bytestream2_get_le16(&ctx->gb);
411 decoded_size = bytestream2_get_le32(&ctx->gb);
413 flags = bytestream2_get_byte(&ctx->gb);
416 if (decoded_size > ctx->height * stride - left - top * stride) {
417 decoded_size = ctx->height * stride - left - top * stride;
423 if (((seq & 1) || !(flags & 1)) && (compr && compr != 2))
436 for (i = 0; i < height; i++) {
452 for (j = 0; j < height; j += 4) {
453 for (i = 0; i < width; i += 4) {
462 code = bytestream2_get_byteu(&ctx->gb);
467 for (k = 0; k < 4; k++)
473 for (k = 0; k < 4; k++)
474 memset(dst + i + k * stride, bytestream2_get_byteu(&ctx->gb), 4);
479 t = bytestream2_get_byteu(&ctx->gb);
480 for (k = 0; k < 4; k++)
481 memset(dst + i + k * stride, t, 4);
484 if (compr == 4 && !code) {
487 skip_run = bytestream2_get_byteu(&ctx->gb) + 1;
494 codec37_mv(dst + i, prev + i + mx + my * stride,
495 ctx->height, stride, i + mx, j + my);
503 for (j = 0; j < height; j += 4) {
504 for (i = 0; i < width; i += 4) {
511 code = bytestream2_get_byte(&ctx->gb);
515 for (k = 0; k < 4; k++)
517 } else if (compr == 4 && !code) {
520 skip_run = bytestream2_get_byteu(&ctx->gb) + 1;
527 codec37_mv(dst + i, prev + i + mx + my * stride,
528 ctx->height, stride, i + mx, j + my);
538 "subcodec 37 compression %d not implemented\n", compr);
555 code = bytestream2_get_byteu(&ctx->gb);
562 dst[0] = bytestream2_get_byteu(&ctx->gb);
563 dst[1] = bytestream2_get_byteu(&ctx->gb);
564 dst[0 + stride] = bytestream2_get_byteu(&ctx->gb);
565 dst[1 + stride] = bytestream2_get_byteu(&ctx->gb);
568 if (process_block(ctx, dst, prev1, prev2, stride, tbl, size))
570 if (process_block(ctx, dst + size, prev1 + size, prev2 + size,
576 if (process_block(ctx, dst, prev1, prev2, stride, tbl, size))
578 if (process_block(ctx, dst + size, prev1 + size, prev2 + size,
587 t = bytestream2_get_byteu(&ctx->gb);
588 for (k = 0; k < size; k++)
589 memset(dst + k * stride, t, size);
595 code = bytestream2_get_byteu(&ctx->gb);
599 for (k = 0; k < size; k++)
600 for (t = 0; t < size; t++)
601 dst[t + k * stride] = colors[!*pglyph++];
604 for (k = 0; k < size; k++)
605 memcpy(dst + k * stride, prev1 + k * stride, size);
610 t = bytestream2_get_byte(&ctx->gb);
612 for (k = 0; k < size; k++)
613 memset(dst + k * stride, t, size);
622 if (index < -mx - my * stride ||
623 (ctx->buf_size >> 1) - index < mx + size + (my + size - 1) * stride) {
628 for (k = 0; k < size; k++)
629 memcpy(dst + k * stride, prev2 + mx + (my + k) * stride, size);
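The excerpts above come from the recursive half of codec 47: a single opcode byte decides how each block is produced, and opcode 0xFF subdivides the block into four quadrants until 2x2 blocks are reached. A deliberately reduced sketch of that control flow, modelling only the split, solid-fill and copy-from-previous cases:

#include <stdint.h>
#include <string.h>

/* Reduced sketch of the recursive block decoder: quadrants are visited
 * top-left, top-right, bottom-left, bottom-right, exactly as the split
 * calls above do. */
static const uint8_t *sketch_block(const uint8_t *src, uint8_t *dst,
                                   const uint8_t *prev1, int stride, int size)
{
    int k;
    uint8_t code = *src++;

    if (code == 0xFF && size > 2) {          /* split into four quadrants */
        size >>= 1;
        src = sketch_block(src, dst, prev1, stride, size);
        src = sketch_block(src, dst + size, prev1 + size, stride, size);
        src = sketch_block(src, dst + size * stride,
                           prev1 + size * stride, stride, size);
        src = sketch_block(src, dst + size * (stride + 1),
                           prev1 + size * (stride + 1), stride, size);
    } else if (code == 0xFE) {               /* solid fill with one byte  */
        uint8_t t = *src++;
        for (k = 0; k < size; k++)
            memset(dst + k * stride, t, size);
    } else {
        /* Every other opcode (literal 2x2 pixels, glyphs, codebook fills,
         * table-driven motion vectors, ...) is reduced to "keep the block
         * from the previous frame" here; their payload is not consumed. */
        for (k = 0; k < size; k++)
            memcpy(dst + k * stride, prev1 + k * stride, size);
    }
    return src;
}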
638 int i, j, seq, compr, new_rot, tbl_pos, skip;
643 uint32_t decoded_size;
646 seq = bytestream2_get_le16(&ctx->gb);
647 compr = bytestream2_get_byte(&ctx->gb);
648 new_rot = bytestream2_get_byte(&ctx->gb);
649 skip = bytestream2_get_byte(&ctx->gb);
651 decoded_size = bytestream2_get_le32(&ctx->gb);
654 if (decoded_size > ctx->height * stride - left - top * stride) {
655 decoded_size = ctx->height * stride - left - top * stride;
663 memset(prev1, 0, ctx->height * stride);
664 memset(prev2, 0, ctx->height * stride);
671 for (j = 0; j < height; j++) {
679 for (j = 0; j < height; j += 2) {
680 for (i = 0; i < width; i += 2) {
681 dst[i] = dst[i + 1] =
682 dst[stride + i] = dst[stride + i + 1] = bytestream2_get_byteu(&ctx->gb);
689 for (j = 0; j < height; j += 8) {
690 for (i = 0; i < width; i += 8) {
691 if (process_block(ctx, dst + i, prev1 + i, prev2 + i, stride,
713 "subcodec 47 compression %d not implemented\n", compr);
727 uint16_t codec, top, left, w, h;
729 codec = bytestream2_get_le16u(&ctx->gb);
730 left = bytestream2_get_le16u(&ctx->gb);
731 top = bytestream2_get_le16u(&ctx->gb);
732 w = bytestream2_get_le16u(&ctx->gb);
733 h = bytestream2_get_le16u(&ctx->gb);
735 if (ctx->width < left + w || ctx->height < top + h) {
770 uint16_t *frm = ctx->frm0;
777 for (y = 0; y < ctx->height; y++) {
778 for (x = 0; x < ctx->width; x++)
779 frm[x] = bytestream2_get_le16u(&ctx->gb);
791 static void copy_block(uint16_t *pdest, uint16_t *psrc, int block_size, int pitch)
797 switch (block_size) {
815 for (y = 0; y < block_size; y++, pdest += pitch)
816 for (x = 0; x < block_size; x++)
821 uint16_t bg_color, int block_size, int pitch)
824 uint16_t colors[2] = { fg_color, bg_color };
835 for (y = 0; y < block_size; y++, dst += pitch)
836 for (x = 0; x < block_size; x++)
837 *dst++ = colors[*pglyph++];
845 if (block_size == 2) {
851 indices = bytestream2_get_le32u(&ctx->gb);
852 dst[0] = ctx->codebook[indices & 0xFF]; indices >>= 8;
853 dst[1] = ctx->codebook[indices & 0xFF]; indices >>= 8;
854 dst[pitch] = ctx->codebook[indices & 0xFF]; indices >>= 8;
855 dst[pitch + 1] = ctx->codebook[indices & 0xFF];
857 uint16_t fgcolor, bgcolor;
863 glyph = bytestream2_get_byteu(&ctx->gb);
864 bgcolor = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];
865 fgcolor = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];
867 draw_glyph(ctx, dst, glyph, fgcolor, bgcolor, block_size, pitch);
876 if (block_size == 2) {
880 dst[0] = bytestream2_get_le16u(&ctx->gb);
881 dst[1] = bytestream2_get_le16u(&ctx->gb);
882 dst[pitch] = bytestream2_get_le16u(&ctx->gb);
883 dst[pitch + 1] = bytestream2_get_le16u(&ctx->gb);
885 uint16_t fgcolor, bgcolor;
891 glyph = bytestream2_get_byteu(&ctx->gb);
892 bgcolor = bytestream2_get_le16u(&ctx->gb);
893 fgcolor = bytestream2_get_le16u(&ctx->gb);
895 draw_glyph(ctx, dst, glyph, fgcolor, bgcolor, block_size, pitch);
903 int start_pos = cx + mx + (cy + my) * ctx->pitch;
904 int end_pos = start_pos + (block_size - 1) * (ctx->pitch + 1);
906 int good = start_pos >= 0 && end_pos < (ctx->buf_size >> 1);
910 cx + mx, cy + my, cx, cy, block_size);
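good_mvec() validates a candidate motion vector before any copy: both the first and the last source sample of the block must fall inside the 16-bit frame buffer. Reduced to plain integers (buffer size in bytes, 16-bit pixels, hence the shift), the test looks like:

/* Sketch of the motion-vector sanity check above: accept (mx, my) only if
 * both the first and the last source sample of the block lie inside the
 * frame buffer. */
static int good_mvec_sketch(int cx, int cy, int mx, int my,
                            int block_size, int pitch, int buf_size_bytes)
{
    int start_pos = cx + mx + (cy + my) * pitch;
    int end_pos   = start_pos + (block_size - 1) * (pitch + 1);

    return start_pos >= 0 && end_pos < (buf_size_bytes >> 1);
}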
918 int16_t mx, my, index;
924 opcode = bytestream2_get_byteu(&ctx->gb);
926 av_dlog(ctx->avctx, "opcode 0x%0X cx %d cy %d blk %d\n", opcode, cx, cy, blk_size);
932 if (good_mvec(ctx, cx, cy, mx, my, blk_size)) {
934 ctx->frm2 + cx + mx + ctx->pitch * (cy + my),
935 blk_size, ctx->pitch);
941 index = bytestream2_get_le16u(&ctx->gb);
943 mx = index % ctx->width;
944 my = index / ctx->width;
946 if (good_mvec(ctx, cx, cy, mx, my, blk_size)) {
948 ctx->frm2 + cx + mx + ctx->pitch * (cy + my),
949 blk_size, ctx->pitch);
955 blk_size, ctx->pitch);
981 bytestream2_get_le16u(&ctx->gb), blk_size, ctx->pitch);
1052 uint16_t *frm = ctx->frm0;
1059 *frm++ = ctx->codebook[bytestream2_get_byteu(&ctx->gb)];
1066 uint16_t *pdest = ctx->frm0;
1104 hdr->width = bytestream2_get_le32u(&ctx->gb);
1105 hdr->height = bytestream2_get_le32u(&ctx->gb);
1112 hdr->seq_num = bytestream2_get_le16u(&ctx->gb);
1113 hdr->codec = bytestream2_get_byteu(&ctx->gb);
1118 for (i = 0; i < 4; i++)
1120 hdr->bg_color = bytestream2_get_le16u(&ctx->gb);
1125 for (i = 0; i < 256; i++)
1126 ctx->codebook[i] = bytestream2_get_le16u(&ctx->gb);
1145 int srcpitch = ctx->pitch * (hdr ? sizeof(ctx->frm0[0]) : 1);
1154 memcpy(dst, src, srcpitch);
1178 sig = bytestream2_get_be32u(&ctx->gb);
1179 size = bytestream2_get_be32u(&ctx->gb);
1187 case MKBETAG('N', 'P', 'A', 'L'):
1188 if (size != 256 * 3) {
1193 for (i = 0; i < 256; i++)
1194 ctx->pal[i] = 0xFFU << 24 | bytestream2_get_be24u(&ctx->gb);
1196 case MKBETAG('F', 'O', 'B', 'J'):
1202 case MKBETAG('X', 'P', 'A', 'L'):
1203 if (size == 6 || size == 4) {
1207 for (i = 0; i < 256; i++) {
1208 for (j = 0; j < 3; j++) {
1209 int t = (ctx->pal[i] >> (16 - j * 8)) & 0xFF;
1210 tmp[j] = av_clip_uint8((t * 129 + ctx->delta_pal[i * 3 + j]) >> 7);
1215 if (size < 768 * 2 + 4) {
1221 for (i = 0; i < 768; i++)
1222 ctx->delta_pal[i] = bytestream2_get_le16u(&ctx->gb);
1223 if (size >= 768 * 5 + 4) {
1224 for (i = 0; i < 256; i++)
1225 ctx->pal[i] = 0xFFU << 24 | bytestream2_get_be24u(&ctx->gb);
1227 memset(ctx->pal, 0, sizeof(ctx->pal));
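The XPAL path above nudges each 8-bit palette component by a signed delta in 1/128 steps: new = clip((old * 129 + delta) >> 7). A standalone sketch of that update over a 256-entry ARGB palette (helper names are illustrative):

#include <stdint.h>

/* Clamp an intermediate value back into the 0..255 component range. */
static uint8_t clip_uint8_sketch(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
}

/* Sketch of the delta-palette step: every component is scaled by 129/128
 * and offset by a signed 16-bit delta in 1/128 steps, then re-packed into
 * the 0xAARRGGBB palette entry with opaque alpha. */
static void xpal_apply_deltas_sketch(uint32_t pal[256], const int16_t delta[768])
{
    int i, j;

    for (i = 0; i < 256; i++) {
        uint32_t out = 0xFFU << 24;                    /* keep alpha opaque */

        for (j = 0; j < 3; j++) {                      /* R, G, B in turn   */
            int t = (pal[i] >> (16 - j * 8)) & 0xFF;
            out |= (uint32_t)clip_uint8_sketch((t * 129 + delta[i * 3 + j]) >> 7)
                   << (16 - j * 8);
        }
        pal[i] = out;
    }
}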
1231 case MKBETAG('S', 'T', 'O', 'R'):
1234 case MKBETAG('F', 'T', 'C', 'H'):
1270 "subcodec %d: error decoding frame\n", header.
codec);
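For the palette-based codecs, the frame payload walked above is a sequence of tagged sub-chunks (NPAL, FOBJ, XPAL, STOR, FTCH), each introduced by a big-endian FourCC and a big-endian payload size. A reduced sketch of such a chunk walk, with all payload handling omitted:

#include <stdint.h>
#include <stddef.h>

#define MKBETAG(a, b, c, d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))

/* Reduced sketch of the chunk walk: read a tag and a size, then skip over
 * the payload; every chunk type is treated alike here. */
static int walk_chunks_sketch(const uint8_t *buf, size_t buf_size)
{
    size_t pos = 0;

    while (pos + 8 <= buf_size) {
        uint32_t sig  = (uint32_t)buf[pos] << 24 | buf[pos + 1] << 16 |
                        buf[pos + 2] << 8 | buf[pos + 3];
        uint32_t size = (uint32_t)buf[pos + 4] << 24 | buf[pos + 5] << 16 |
                        buf[pos + 6] << 8 | buf[pos + 7];
        pos += 8;

        if (size > buf_size - pos)
            return -1;                       /* truncated chunk */

        switch (sig) {
        case MKBETAG('N', 'P', 'A', 'L'):    /* new 256-entry palette     */
        case MKBETAG('F', 'O', 'B', 'J'):    /* frame object (pixel data) */
        case MKBETAG('X', 'P', 'A', 'L'):    /* palette deltas            */
        case MKBETAG('S', 'T', 'O', 'R'):    /* store frame for later     */
        case MKBETAG('F', 'T', 'C', 'H'):    /* fetch stored frame        */
        default:
            pos += size;                     /* payload handling omitted  */
            break;
        }
    }
    return 0;
}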
static const int8_t c37_mv[]
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static int process_frame_obj(SANMVideoContext *ctx)
struct AVFrame
This structure describes decoded (raw) audio or video data.
static int read_frame_header(SANMVideoContext *ctx, SANMFrameHeader *hdr)
int8_t p4x4glyphs[NGLYPHS][16]
static void fill_frame(uint16_t *pbuf, int buf_size, uint16_t color)
static av_cold int init(AVCodecContext *avctx)
static const int8_t glyph4_x[GLYPH_COORD_VECT_SIZE]
#define AV_LOG_WARNING
Something somehow does not look correct.
static void copy_block16(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
void avcodec_set_dimensions(AVCodecContext *s, int width, int height)
static int opcode_0xf8(SANMVideoContext *ctx, int cx, int cy, int block_size, int pitch)
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame_ptr, AVPacket *pkt)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional FF_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0.
static enum GlyphEdge which_edge(int x, int y, int edge_size)
Return enum GlyphEdge of box where point (x, y) lies.
#define FF_ARRAY_ELEMS(a)
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g, uint8_t *dst, unsigned int size)
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer pointing to it to NULL.
static const uint8_t run_len[7][16]
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
void avpriv_request_sample(void *avc, const char *msg, ...)
Log a generic warning message about a missing feature.
static int draw_glyph(SANMVideoContext *ctx, uint16_t *dst, int index, uint16_t fg_color, uint16_t bg_color, int block_size, int pitch)
static av_cold int decode_init(AVCodecContext *avctx)
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
AV_PIX_FMT_PAL8
8 bit with PIX_FMT_RGB32 palette.
static int good_mvec(SANMVideoContext *ctx, int cx, int cy, int mx, int my, int block_size)
static void codec37_mv(uint8_t *dst, const uint8_t *src, int height, int stride, int x, int y)
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
#define CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
static const int8_t glyph4_y[GLYPH_COORD_VECT_SIZE]
int(* frm_decoder)(SANMVideoContext *ctx)
static av_always_inline void bytestream2_skipu(GetByteContext *g, unsigned int size)
static int decode_5(SANMVideoContext *ctx)
uint16_t small_codebook[4]
static av_cold int decode_end(AVCodecContext *avctx)
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
static int rle_decode(SANMVideoContext *ctx, uint8_t *dst, const int out_size)
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g, uint8_t *dst, unsigned int size)
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
void av_log(void *avcl, int level, const char *fmt,...)
static int decode_3(SANMVideoContext *ctx)
const char * name
Name of the codec implementation.
static enum GlyphDir which_direction(enum GlyphEdge edge0, enum GlyphEdge edge1)
static void make_glyphs(int8_t *pglyphs, const int8_t *xvec, const int8_t *yvec, const int side_length)
Construct glyphs by iterating through vector coordinates.
uint32_t stored_frame_size
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be addressed with a signed int.
enum AVPictureType pict_type
Picture type of the frame.
int width
picture width / height.
static void destroy_buffers(SANMVideoContext *ctx)
static void interp_point(int8_t *points, int x0, int y0, int x1, int y1, int pos, int npoints)
Interpolate two points.
static void copy_block(uint16_t *pdest, uint16_t *psrc, int block_size, int pitch)
static int old_codec47(SANMVideoContext *ctx, int top, int left, int width, int height)
static int copy_output(SANMVideoContext *ctx, SANMFrameHeader *hdr)
#define GLYPH_COORD_VECT_SIZE
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
static av_cold int init_buffers(SANMVideoContext *ctx)
static av_always_inline int bytestream2_tell(GetByteContext *g)
static int decode_nop(SANMVideoContext *ctx)
static int decode_6(SANMVideoContext *ctx)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
static const int8_t glyph8_x[GLYPH_COORD_VECT_SIZE]
struct AVCodecContext
main external API structure.
static int codec2subblock(SANMVideoContext *ctx, int cx, int cy, int blk_size)
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static int process_block(SANMVideoContext *ctx, uint8_t *dst, uint8_t *prev1, uint8_t *prev2, int stride, int tbl, int size)
static int opcode_0xf7(SANMVideoContext *ctx, int cx, int cy, int block_size, int pitch)
static int decode_2(SANMVideoContext *ctx)
static const int8_t motion_vectors[256][2]
static int decode_8(SANMVideoContext *ctx)
static int decode_0(SANMVideoContext *ctx)
static void rotate_bufs(SANMVideoContext *ctx, int rotate_code)
static const frm_decoder v1_decoders[]
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static void fill_block(uint16_t *pdest, uint16_t color, int block_size, int pitch)
unsigned int rle_buf_size
static const int8_t glyph8_y[GLYPH_COORD_VECT_SIZE]
static int old_codec37(SANMVideoContext *ctx, int top, int left, int width, int height)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static void init_sizes(SANMVideoContext *ctx, int width, int height)
#define MKBETAG(a, b, c, d)
static int decode_4(SANMVideoContext *ctx)
static void copy_block4(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
int key_frame
1 -> keyframe, 0-> not
static av_always_inline int bytestream2_seek(GetByteContext *g, int offset, int whence)
static void copy_block8(uint8_t *dst, const uint8_t *src, int dstStride, int srcStride, int h)
#define AV_PIX_FMT_RGB565
static int old_codec1(SANMVideoContext *ctx, int top, int left, int width, int height)
int8_t p8x8glyphs[NGLYPHS][64]
static int decode(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
#define FFSWAP(type, a, b)
struct AVPacket
This structure stores compressed data.