/*
 * SVQ1 Encoder
 * Copyright (C) 2004 Mike Melanson
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Sorenson Vector Quantizer #1 (SVQ1) video codec.
 * For more information on the SVQ1 algorithm, visit:
 *   http://www.pcisys.net/~melanson/codecs/
 */

#include "avcodec.h"
#include "dsputil.h"
#include "hpeldsp.h"
#include "mpegvideo.h"
#include "h263.h"
#include "internal.h"
#include "libavutil/avassert.h"
#include "svq1.h"
#include "svq1enc_cb.h"


typedef struct SVQ1Context {
    /* FIXME: Needed for motion estimation, should not be used for anything
     * else, the idea is to make the motion estimation eventually independent
     * of MpegEncContext, so this will be removed then. */
    MpegEncContext m;
    AVCodecContext *avctx;
    DSPContext dsp;
    HpelDSPContext hdsp;
    AVFrame picture;
    AVFrame current_picture;
    AVFrame last_picture;
    PutBitContext pb;
    GetBitContext gb;

    /* why ooh why this sick breadth first order,
     * everything is slower and more complex */
    PutBitContext reorder_pb[6];

    int frame_width;
    int frame_height;

    /* Y plane block dimensions */
    int y_block_width;
    int y_block_height;

    /* U & V plane (C planes) block dimensions */
    int c_block_width;
    int c_block_height;

    uint16_t *mb_type;
    uint32_t *dummy;
    int16_t (*motion_val8[3])[2];
    int16_t (*motion_val16[3])[2];

    int64_t rd_total;

    uint8_t *scratchbuf;
} SVQ1Context;

static void svq1_write_header(SVQ1Context *s, int frame_type)
{
    int i;

    /* frame code */
    put_bits(&s->pb, 22, 0x20);

    /* temporal reference (sure hope this is a "don't care") */
    put_bits(&s->pb, 8, 0x00);

    /* frame type */
    put_bits(&s->pb, 2, frame_type - 1);

    if (frame_type == AV_PICTURE_TYPE_I) {
        /* no checksum since frame code is 0x20 */
        /* no embedded string either */
        /* output 5 unknown bits (2 + 2 + 1) */
        put_bits(&s->pb, 5, 2); /* 2 needed by quicktime decoder */

        i = ff_match_2uint16((void*)ff_svq1_frame_size_table,
                             FF_ARRAY_ELEMS(ff_svq1_frame_size_table),
                             s->frame_width, s->frame_height);
        put_bits(&s->pb, 3, i);

        if (i == 7) {
            put_bits(&s->pb, 12, s->frame_width);
            put_bits(&s->pb, 12, s->frame_height);
        }
    }

    /* no checksum or extra data (next 2 bits get 0) */
    put_bits(&s->pb, 2, 0);
}

#define QUALITY_THRESHOLD    100
#define THRESHOLD_MULTIPLIER 0.6

static int encode_block(SVQ1Context *s, uint8_t *src, uint8_t *ref,
                        uint8_t *decoded, int stride, int level,
                        int threshold, int lambda, int intra)
{
    int count, y, x, i, j, split, best_mean, best_score, best_count;
    int best_vector[6];
    int block_sum[7] = { 0, 0, 0, 0, 0, 0 };
    int w    = 2 << (level + 2 >> 1);
    int h    = 2 << (level + 1 >> 1);
    int size = w * h;
    int16_t block[7][256];
    const int8_t *codebook_sum, *codebook;
    const uint16_t(*mean_vlc)[2];
    const uint8_t(*multistage_vlc)[2];

    best_score = 0;
    // FIXME: Optimize, this does not need to be done multiple times.
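    /* Gather the source (intra) or difference (inter) samples of the block and
     * their sum. The best coding found so far starts as "mean only" (zero
     * codebook stages); up to six multi-stage codebook vectors are tried
     * further below. */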
    if (intra) {
        codebook_sum   = svq1_intra_codebook_sum[level];
        codebook       = ff_svq1_intra_codebooks[level];
        mean_vlc       = ff_svq1_intra_mean_vlc;
        multistage_vlc = ff_svq1_intra_multistage_vlc[level];
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
                int v = src[x + y * stride];
                block[0][x + w * y] = v;
                best_score   += v * v;
                block_sum[0] += v;
            }
        }
    } else {
        codebook_sum   = svq1_inter_codebook_sum[level];
        codebook       = ff_svq1_inter_codebooks[level];
        mean_vlc       = ff_svq1_inter_mean_vlc + 256;
        multistage_vlc = ff_svq1_inter_multistage_vlc[level];
        for (y = 0; y < h; y++) {
            for (x = 0; x < w; x++) {
                int v = src[x + y * stride] - ref[x + y * stride];
                block[0][x + w * y] = v;
                best_score   += v * v;
                block_sum[0] += v;
            }
        }
    }

    best_count  = 0;
    best_score -= (int)((unsigned)block_sum[0] * block_sum[0] >> (level + 3));
    best_mean   = block_sum[0] + (size >> 1) >> (level + 3);

    if (level < 4) {
        for (count = 1; count < 7; count++) {
            int best_vector_score = INT_MAX;
            int best_vector_sum   = -999, best_vector_mean = -999;
            const int stage       = count - 1;
            const int8_t *vector;

            for (i = 0; i < 16; i++) {
                int sum = codebook_sum[stage * 16 + i];
                int sqr, diff, score;

                vector = codebook + stage * size * 16 + i * size;
                sqr    = s->dsp.ssd_int8_vs_int16(vector, block[stage], size);
                diff   = block_sum[stage] - sum;
                score  = sqr - (diff * (int64_t)diff >> (level + 3)); // FIXME: 64bit slooow
                if (score < best_vector_score) {
                    int mean = diff + (size >> 1) >> (level + 3);
                    av_assert2(mean > -300 && mean < 300);
                    mean = av_clip(mean, intra ? 0 : -256, 255);
                    best_vector_score  = score;
                    best_vector[stage] = i;
                    best_vector_sum    = sum;
                    best_vector_mean   = mean;
                }
            }
            av_assert0(best_vector_mean != -999);
            vector = codebook + stage * size * 16 + best_vector[stage] * size;
            for (j = 0; j < size; j++)
                block[stage + 1][j] = block[stage][j] - vector[j];
            block_sum[stage + 1] = block_sum[stage] - best_vector_sum;
            best_vector_score   += lambda *
                                   (+1 + 4 * count +
                                    multistage_vlc[1 + count][1]
                                    + mean_vlc[best_vector_mean][1]);

            if (best_vector_score < best_score) {
                best_score = best_vector_score;
                best_count = count;
                best_mean  = best_vector_mean;
            }
        }
    }

    split = 0;
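    /* If the best non-split coding is still above this level's threshold,
     * tentatively code the two halves of the block at the next smaller level
     * and keep whichever variant is cheaper, restoring the backed-up reorder
     * bitstreams if the split loses. */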
    if (best_score > threshold && level) {
        int score  = 0;
        int offset = level & 1 ? stride * h / 2 : w / 2;
        PutBitContext backup[6];

        for (i = level - 1; i >= 0; i--)
            backup[i] = s->reorder_pb[i];
        score += encode_block(s, src, ref, decoded, stride, level - 1,
                              threshold >> 1, lambda, intra);
        score += encode_block(s, src + offset, ref + offset, decoded + offset,
                              stride, level - 1, threshold >> 1, lambda, intra);
        score += lambda;

        if (score < best_score) {
            best_score = score;
            split      = 1;
        } else {
            for (i = level - 1; i >= 0; i--)
                s->reorder_pb[i] = backup[i];
        }
    }
    if (level > 0)
        put_bits(&s->reorder_pb[level], 1, split);

    if (!split) {
        av_assert1(best_mean >= 0 && best_mean < 256 || !intra);
        av_assert1(best_mean >= -256 && best_mean < 256);
        av_assert1(best_count >= 0 && best_count < 7);
        av_assert1(level < 4 || best_count == 0);

        /* output the encoding */
        put_bits(&s->reorder_pb[level],
                 multistage_vlc[1 + best_count][1],
                 multistage_vlc[1 + best_count][0]);
        put_bits(&s->reorder_pb[level], mean_vlc[best_mean][1],
                 mean_vlc[best_mean][0]);

        for (i = 0; i < best_count; i++) {
            av_assert2(best_vector[i] >= 0 && best_vector[i] < 16);
            put_bits(&s->reorder_pb[level], 4, best_vector[i]);
        }

        for (y = 0; y < h; y++)
            for (x = 0; x < w; x++)
                decoded[x + y * stride] = src[x + y * stride] -
                                          block[best_count][x + w * y] +
                                          best_mean;
    }

    return best_score;
}

static int svq1_encode_plane(SVQ1Context *s, int plane,
                             unsigned char *src_plane,
                             unsigned char *ref_plane,
                             unsigned char *decoded_plane,
                             int width, int height, int src_stride, int stride)
{
    int x, y;
    int i;
    int block_width, block_height;
    int level;
    int threshold[6];
    uint8_t *src     = s->scratchbuf + stride * 16;
    const int lambda = (s->picture.quality * s->picture.quality) >>
                       (2 * FF_LAMBDA_SHIFT);

    /* figure out the acceptable level thresholds in advance */
    threshold[5] = QUALITY_THRESHOLD;
    for (level = 4; level >= 0; level--)
        threshold[level] = threshold[level + 1] * THRESHOLD_MULTIPLIER;

    block_width  = (width  + 15) / 16;
    block_height = (height + 15) / 16;

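    /* For P-frames, run the generic MpegEncContext motion estimation over a
     * padded copy of the plane first, so that one half-pel 16x16 motion vector
     * per macroblock is available to the encoding loop below. */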
    if (s->picture.pict_type == AV_PICTURE_TYPE_P) {
        s->m.avctx                         = s->avctx;
        s->m.current_picture_ptr           = &s->m.current_picture;
        s->m.last_picture_ptr              = &s->m.last_picture;
        s->m.last_picture.f.data[0]        = ref_plane;
        s->m.linesize                      =
        s->m.last_picture.f.linesize[0]    =
        s->m.new_picture.f.linesize[0]     =
        s->m.current_picture.f.linesize[0] = stride;
        s->m.width                         = width;
        s->m.height                        = height;
        s->m.mb_width                      = block_width;
        s->m.mb_height                     = block_height;
        s->m.mb_stride                     = s->m.mb_width + 1;
        s->m.b8_stride                     = 2 * s->m.mb_width + 1;
        s->m.f_code                        = 1;
        s->m.pict_type                     = s->picture.pict_type;
        s->m.me_method                     = s->avctx->me_method;
        s->m.me.scene_change_score         = 0;
        s->m.flags                         = s->avctx->flags;
        // s->m.out_format                 = FMT_H263;
        // s->m.unrestricted_mv            = 1;
        s->m.lambda                        = s->picture.quality;
        s->m.qscale                        = s->m.lambda * 139 +
                                             FF_LAMBDA_SCALE * 64 >>
                                             FF_LAMBDA_SHIFT + 7;
        s->m.lambda2                       = s->m.lambda * s->m.lambda +
                                             FF_LAMBDA_SCALE / 2 >>
                                             FF_LAMBDA_SHIFT;

        if (!s->motion_val8[plane]) {
            s->motion_val8[plane]  = av_mallocz((s->m.b8_stride *
                                                 block_height * 2 + 2) *
                                                2 * sizeof(int16_t));
            s->motion_val16[plane] = av_mallocz((s->m.mb_stride *
                                                 (block_height + 2) + 1) *
                                                2 * sizeof(int16_t));
        }

        s->m.mb_type = s->mb_type;

        // dummies, to avoid segfaults
        s->m.current_picture.mb_mean   = (uint8_t *)s->dummy;
        s->m.current_picture.mb_var    = (uint16_t *)s->dummy;
        s->m.current_picture.mc_mb_var = (uint16_t *)s->dummy;
        s->m.current_picture.mb_type   = s->dummy;

        s->m.current_picture.motion_val[0] = s->motion_val8[plane] + 2;
        s->m.p_mv_table                    = s->motion_val16[plane] +
                                             s->m.mb_stride + 1;
        s->m.dsp                           = s->dsp; // move
        ff_init_me(&s->m);

        s->m.me.dia_size      = s->avctx->dia_size;
        s->m.first_slice_line = 1;
        for (y = 0; y < block_height; y++) {
            s->m.new_picture.f.data[0] = src - y * 16 * stride; // ugly
            s->m.mb_y                  = y;

            for (i = 0; i < 16 && i + 16 * y < height; i++) {
                memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
                       width);
                for (x = width; x < 16 * block_width; x++)
                    src[i * stride + x] = src[i * stride + x - 1];
            }
            for (; i < 16 && i + 16 * y < 16 * block_height; i++)
                memcpy(&src[i * stride], &src[(i - 1) * stride],
                       16 * block_width);

            for (x = 0; x < block_width; x++) {
                s->m.mb_x = x;
                ff_init_block_index(&s->m);
                ff_update_block_index(&s->m);

                ff_estimate_p_frame_motion(&s->m, x, y);
            }
            s->m.first_slice_line = 0;
        }

        ff_fix_long_p_mvs(&s->m);
        ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code,
                        CANDIDATE_MB_TYPE_INTER, 0);
    }

    s->m.first_slice_line = 1;
    for (y = 0; y < block_height; y++) {
        for (i = 0; i < 16 && i + 16 * y < height; i++) {
            memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
                   width);
            for (x = width; x < 16 * block_width; x++)
                src[i * stride + x] = src[i * stride + x - 1];
        }
        for (; i < 16 && i + 16 * y < 16 * block_height; i++)
            memcpy(&src[i * stride], &src[(i - 1) * stride], 16 * block_width);

        s->m.mb_y = y;
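        /* Each 16x16 macroblock is coded as the cheapest of up to three
         * candidates: intra, inter with a motion vector, or skip. Every
         * candidate is scored as distortion plus lambda times its bit cost,
         * and the winner's reordered bitstream is copied to the output. */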
        for (x = 0; x < block_width; x++) {
            uint8_t reorder_buffer[3][6][7 * 32];
            int count[3][6];
            int offset       = y * 16 * stride + x * 16;
            uint8_t *decoded = decoded_plane + offset;
            uint8_t *ref     = ref_plane + offset;
            int score[4]     = { 0, 0, 0, 0 }, best;
            uint8_t *temp    = s->scratchbuf;

            if (s->pb.buf_end - s->pb.buf -
                (put_bits_count(&s->pb) >> 3) < 3000) { // FIXME: check size
                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
                return -1;
            }

            s->m.mb_x = x;
            ff_init_block_index(&s->m);
            ff_update_block_index(&s->m);

            if (s->picture.pict_type == AV_PICTURE_TYPE_I ||
                (s->m.mb_type[x + y * s->m.mb_stride] &
                 CANDIDATE_MB_TYPE_INTRA)) {
                for (i = 0; i < 6; i++)
                    init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i],
                                  7 * 32);
                if (s->picture.pict_type == AV_PICTURE_TYPE_P) {
                    const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTRA];
                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);
                    score[0] = vlc[1] * lambda;
                }
                score[0] += encode_block(s, src + 16 * x, NULL, temp, stride,
                                         5, 64, lambda, 1);
                for (i = 0; i < 6; i++) {
                    count[0][i] = put_bits_count(&s->reorder_pb[i]);
                    flush_put_bits(&s->reorder_pb[i]);
                }
            } else
                score[0] = INT_MAX;

            best = 0;

            if (s->picture.pict_type == AV_PICTURE_TYPE_P) {
                const uint8_t *vlc = ff_svq1_block_type_vlc[SVQ1_BLOCK_INTER];
                int mx, my, pred_x, pred_y, dxy;
                int16_t *motion_ptr;

                motion_ptr = ff_h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y);
                if (s->m.mb_type[x + y * s->m.mb_stride] &
                    CANDIDATE_MB_TYPE_INTER) {
                    for (i = 0; i < 6; i++)
                        init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i],
                                      7 * 32);

                    put_bits(&s->reorder_pb[5], vlc[1], vlc[0]);

                    s->m.pb = s->reorder_pb[5];
                    mx      = motion_ptr[0];
                    my      = motion_ptr[1];
                    av_assert1(mx     >= -32 && mx     <= 31);
                    av_assert1(my     >= -32 && my     <= 31);
                    av_assert1(pred_x >= -32 && pred_x <= 31);
                    av_assert1(pred_y >= -32 && pred_y <= 31);
                    ff_h263_encode_motion(&s->m, mx - pred_x, 1);
                    ff_h263_encode_motion(&s->m, my - pred_y, 1);
                    s->reorder_pb[5] = s->m.pb;
                    score[1]        += lambda * put_bits_count(&s->reorder_pb[5]);

                    dxy = (mx & 1) + 2 * (my & 1);

                    s->hdsp.put_pixels_tab[0][dxy](temp + 16,
                                                   ref + (mx >> 1) +
                                                   stride * (my >> 1),
                                                   stride, 16);

                    score[1] += encode_block(s, src + 16 * x, temp + 16,
                                             decoded, stride, 5, 64, lambda, 0);
                    best      = score[1] <= score[0];

                    vlc       = ff_svq1_block_type_vlc[SVQ1_BLOCK_SKIP];
                    score[2]  = s->dsp.sse[0](NULL, src + 16 * x, ref,
                                              stride, 16);
                    score[2] += vlc[1] * lambda;
                    if (score[2] < score[best] && mx == 0 && my == 0) {
                        best = 2;
                        s->hdsp.put_pixels_tab[0][0](decoded, ref, stride, 16);
                        for (i = 0; i < 6; i++)
                            count[2][i] = 0;
                        put_bits(&s->pb, vlc[1], vlc[0]);
                    }
                }

                if (best == 1) {
                    for (i = 0; i < 6; i++) {
                        count[1][i] = put_bits_count(&s->reorder_pb[i]);
                        flush_put_bits(&s->reorder_pb[i]);
                    }
                } else {
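                    /* The inter candidate was not chosen: zero this
                     * macroblock's motion vectors so that they do not skew
                     * the MV prediction of neighbouring macroblocks. */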
                    motion_ptr[0]                      =
                    motion_ptr[1]                      =
                    motion_ptr[2]                      =
                    motion_ptr[3]                      =
                    motion_ptr[0 + 2 * s->m.b8_stride] =
                    motion_ptr[1 + 2 * s->m.b8_stride] =
                    motion_ptr[2 + 2 * s->m.b8_stride] =
                    motion_ptr[3 + 2 * s->m.b8_stride] = 0;
                }
            }

            s->rd_total += score[best];

            for (i = 5; i >= 0; i--)
                avpriv_copy_bits(&s->pb, reorder_buffer[best][i],
                                 count[best][i]);
            if (best == 0)
                s->hdsp.put_pixels_tab[0][0](decoded, temp, stride, 16);
        }
        s->m.first_slice_line = 0;
    }
    return 0;
}

static av_cold int svq1_encode_init(AVCodecContext *avctx)
{
    SVQ1Context *const s = avctx->priv_data;

    ff_dsputil_init(&s->dsp, avctx);
    ff_hpeldsp_init(&s->hdsp, avctx->flags);
    avctx->coded_frame = &s->picture;

    s->frame_width  = avctx->width;
    s->frame_height = avctx->height;

    s->y_block_width  = (s->frame_width  + 15) / 16;
    s->y_block_height = (s->frame_height + 15) / 16;

    s->c_block_width  = (s->frame_width  / 4 + 15) / 16;
    s->c_block_height = (s->frame_height / 4 + 15) / 16;

    s->avctx               = avctx;
    s->m.avctx             = avctx;
    s->m.picture_structure = PICT_FRAME;
    s->m.me.temp           =
    s->m.me.scratchpad     = av_mallocz((avctx->width + 64) *
                                        2 * 16 * 2 * sizeof(uint8_t));
    s->m.me.map            = av_mallocz(ME_MAP_SIZE * sizeof(uint32_t));
    s->m.me.score_map      = av_mallocz(ME_MAP_SIZE * sizeof(uint32_t));
    s->mb_type             = av_mallocz((s->y_block_width + 1) *
                                        s->y_block_height * sizeof(int16_t));
    s->dummy               = av_mallocz((s->y_block_width + 1) *
                                        s->y_block_height * sizeof(int32_t));
    ff_h263_encode_init(&s->m); // mv_penalty

    return 0;
}

static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                             const AVFrame *pict, int *got_packet)
{
    SVQ1Context *const s = avctx->priv_data;
    AVFrame *const p     = &s->picture;
    AVFrame temp;
    int i, ret;

    if ((ret = ff_alloc_packet2(avctx, pkt, s->y_block_width * s->y_block_height *
                                MAX_MB_BYTES * 3 + FF_MIN_BUFFER_SIZE)) < 0)
        return ret;

    if (avctx->pix_fmt != AV_PIX_FMT_YUV410P) {
        av_log(avctx, AV_LOG_ERROR, "unsupported pixel format\n");
        return -1;
    }

    if (!s->current_picture.data[0]) {
        if ((ret = ff_get_buffer(avctx, &s->current_picture, 0)) < 0 ||
            (ret = ff_get_buffer(avctx, &s->last_picture, 0))    < 0) {
            return ret;
        }
        s->scratchbuf = av_malloc(s->current_picture.linesize[0] * 16 * 2);
    }

    temp               = s->current_picture;
    s->current_picture = s->last_picture;
    s->last_picture    = temp;

    init_put_bits(&s->pb, pkt->data, pkt->size);

    *p           = *pict;
    p->pict_type = avctx->gop_size && avctx->frame_number % avctx->gop_size ?
                   AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
    p->key_frame = p->pict_type == AV_PICTURE_TYPE_I;

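    /* Write the SVQ1 frame header, then encode the Y, U and V planes; with
     * AV_PIX_FMT_YUV410P the chroma planes are a quarter of the luma size in
     * each dimension, hence the division by 4 below. */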
    svq1_write_header(s, p->pict_type);
    for (i = 0; i < 3; i++)
        if (svq1_encode_plane(s, i,
                              s->picture.data[i],
                              s->last_picture.data[i],
                              s->current_picture.data[i],
                              s->frame_width  / (i ? 4 : 1),
                              s->frame_height / (i ? 4 : 1),
                              s->picture.linesize[i],
                              s->current_picture.linesize[i]) < 0)
            return -1;

    // avpriv_align_put_bits(&s->pb);
    while (put_bits_count(&s->pb) & 31)
        put_bits(&s->pb, 1, 0);

    flush_put_bits(&s->pb);

    pkt->size = put_bits_count(&s->pb) / 8;
    if (p->pict_type == AV_PICTURE_TYPE_I)
        pkt->flags |= AV_PKT_FLAG_KEY;
    *got_packet = 1;

    return 0;
}

static av_cold int svq1_encode_end(AVCodecContext *avctx)
{
    SVQ1Context *const s = avctx->priv_data;
    int i;

    av_log(avctx, AV_LOG_DEBUG, "RD: %f\n",
           s->rd_total / (double)(avctx->width * avctx->height *
                                  avctx->frame_number));

    av_freep(&s->m.me.scratchpad);
    av_freep(&s->m.me.map);
    av_freep(&s->m.me.score_map);
    av_freep(&s->mb_type);
    av_freep(&s->dummy);
    av_freep(&s->scratchbuf);

    for (i = 0; i < 3; i++) {
        av_freep(&s->motion_val8[i]);
        av_freep(&s->motion_val16[i]);
    }

    av_frame_unref(&s->current_picture);
    av_frame_unref(&s->last_picture);

    return 0;
}

AVCodec ff_svq1_encoder = {
    .name           = "svq1",
    .type           = AVMEDIA_TYPE_VIDEO,
    .id             = AV_CODEC_ID_SVQ1,
    .priv_data_size = sizeof(SVQ1Context),
    .init           = svq1_encode_init,
    .encode2        = svq1_encode_frame,
    .close          = svq1_encode_end,
    .pix_fmts       = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV410P,
                                                     AV_PIX_FMT_NONE },
    .long_name      = NULL_IF_CONFIG_SMALL("Sorenson Vector Quantizer 1 / Sorenson Video 1 / SVQ1"),
};
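
/*
 * Illustrative sketch only, not part of this file: how a caller might drive
 * this encoder through the public libavcodec API of the same era, assuming an
 * already filled AV_PIX_FMT_YUV410P frame `frame` of size w x h. Error
 * checking is omitted for brevity.
 *
 *     AVCodec *codec    = avcodec_find_encoder(AV_CODEC_ID_SVQ1);
 *     AVCodecContext *c = avcodec_alloc_context3(codec);
 *     c->width          = w;
 *     c->height         = h;
 *     c->pix_fmt        = AV_PIX_FMT_YUV410P;
 *     c->time_base      = (AVRational){ 1, 25 };
 *     c->gop_size       = 12;               // one I-frame every 12 frames
 *     avcodec_open2(c, codec, NULL);
 *
 *     AVPacket pkt;
 *     int got_packet;
 *     av_init_packet(&pkt);
 *     pkt.data = NULL;                      // let the encoder allocate
 *     pkt.size = 0;
 *     avcodec_encode_video2(c, &pkt, frame, &got_packet);
 *     if (got_packet) {
 *         // write pkt.data / pkt.size to the output here
 *         av_free_packet(&pkt);
 *     }
 */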