utvideoenc.c
Go to the documentation of this file.
1 /*
2  * Ut Video encoder
3  * Copyright (c) 2012 Jan Ekström
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 /**
23  * @file
24  * Ut Video encoder
25  */
26 
27 #include "libavutil/intreadwrite.h"
28 #include "avcodec.h"
29 #include "internal.h"
30 #include "bytestream.h"
31 #include "put_bits.h"
32 #include "dsputil.h"
33 #include "mathops.h"
34 #include "utvideo.h"
35 #include "huffman.h"
36 
37 /* Compare huffentry symbols */
38 static int huff_cmp_sym(const void *a, const void *b)
39 {
40  const HuffEntry *aa = a, *bb = b;
41  return aa->sym - bb->sym;
42 }
43 
45 {
46  UtvideoContext *c = avctx->priv_data;
47  int i;
48 
49  av_freep(&avctx->coded_frame);
50  av_freep(&c->slice_bits);
51  for (i = 0; i < 4; i++)
52  av_freep(&c->slice_buffer[i]);
53 
54  return 0;
55 }
56 
58 {
59  UtvideoContext *c = avctx->priv_data;
60  int i;
61  uint32_t original_format;
62 
63  c->avctx = avctx;
64  c->frame_info_size = 4;
65  c->slice_stride = FFALIGN(avctx->width, 32);
66 
67  switch (avctx->pix_fmt) {
68  case AV_PIX_FMT_RGB24:
69  c->planes = 3;
70  avctx->codec_tag = MKTAG('U', 'L', 'R', 'G');
71  original_format = UTVIDEO_RGB;
72  break;
73  case AV_PIX_FMT_RGBA:
74  c->planes = 4;
75  avctx->codec_tag = MKTAG('U', 'L', 'R', 'A');
76  original_format = UTVIDEO_RGBA;
77  break;
78  case AV_PIX_FMT_YUV420P:
79  if (avctx->width & 1 || avctx->height & 1) {
80  av_log(avctx, AV_LOG_ERROR,
81  "4:2:0 video requires even width and height.\n");
82  return AVERROR_INVALIDDATA;
83  }
84  c->planes = 3;
85  avctx->codec_tag = MKTAG('U', 'L', 'Y', '0');
86  original_format = UTVIDEO_420;
87  break;
88  case AV_PIX_FMT_YUV422P:
89  if (avctx->width & 1) {
90  av_log(avctx, AV_LOG_ERROR,
91  "4:2:2 video requires even width.\n");
92  return AVERROR_INVALIDDATA;
93  }
94  c->planes = 3;
95  avctx->codec_tag = MKTAG('U', 'L', 'Y', '2');
96  original_format = UTVIDEO_422;
97  break;
98  default:
99  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
100  avctx->pix_fmt);
101  return AVERROR_INVALIDDATA;
102  }
103 
104  ff_dsputil_init(&c->dsp, avctx);
105 
106  /* Check the prediction method, and error out if unsupported */
107  if (avctx->prediction_method < 0 || avctx->prediction_method > 4) {
108  av_log(avctx, AV_LOG_WARNING,
109  "Prediction method %d is not supported in Ut Video.\n",
110  avctx->prediction_method);
112  }
113 
114  if (avctx->prediction_method == FF_PRED_PLANE) {
115  av_log(avctx, AV_LOG_ERROR,
116  "Plane prediction is not supported in Ut Video.\n");
118  }
119 
120  /* Convert from libavcodec prediction type to Ut Video's */
122 
123  if (c->frame_pred == PRED_GRADIENT) {
124  av_log(avctx, AV_LOG_ERROR, "Gradient prediction is not supported.\n");
126  }
127 
128  avctx->coded_frame = avcodec_alloc_frame();
129 
130  if (!avctx->coded_frame) {
131  av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
132  utvideo_encode_close(avctx);
133  return AVERROR(ENOMEM);
134  }
135 
136  /* extradata size is 4 * 32bit */
137  avctx->extradata_size = 16;
138 
139  avctx->extradata = av_mallocz(avctx->extradata_size +
141 
142  if (!avctx->extradata) {
143  av_log(avctx, AV_LOG_ERROR, "Could not allocate extradata.\n");
144  utvideo_encode_close(avctx);
145  return AVERROR(ENOMEM);
146  }
147 
148  for (i = 0; i < c->planes; i++) {
149  c->slice_buffer[i] = av_malloc(c->slice_stride * (avctx->height + 2) +
151  if (!c->slice_buffer[i]) {
152  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 1.\n");
153  utvideo_encode_close(avctx);
154  return AVERROR(ENOMEM);
155  }
156  }
157 
158  /*
159  * Set the version of the encoder.
160  * Last byte is "implementation ID", which is
161  * obtained from the creator of the format.
162  * Libavcodec has been assigned with the ID 0xF0.
163  */
164  AV_WB32(avctx->extradata, MKTAG(1, 0, 0, 0xF0));
165 
166  /*
167  * Set the "original format"
168  * Not used for anything during decoding.
169  */
170  AV_WL32(avctx->extradata + 4, original_format);
171 
172  /* Write 4 as the 'frame info size' */
173  AV_WL32(avctx->extradata + 8, c->frame_info_size);
174 
175  /*
176  * Set how many slices are going to be used.
177  * Set one slice for now.
178  */
179  c->slices = 1;
180 
181  /* Set compression mode */
182  c->compression = COMP_HUFF;
183 
184  /*
185  * Set the encoding flags:
186  * - Slice count minus 1
187  * - Interlaced encoding mode flag, set to zero for now.
188  * - Compression mode (none/huff)
189  * And write the flags.
190  */
191  c->flags = (c->slices - 1) << 24;
192  c->flags |= 0 << 11; // bit field to signal interlaced encoding mode
193  c->flags |= c->compression;
194 
195  AV_WL32(avctx->extradata + 12, c->flags);
196 
197  return 0;
198 }
199 
/*
 * Split packed RGB(A) input into Ut Video's per-plane layout.
 * Output order is G, B-G, R-G (and raw A for 4-component input),
 * with 0x80 added as a bias to the subtracted channels.
 * Writing starts two output lines into each plane buffer.
 */
static void mangle_rgb_planes(uint8_t *dst[4], int dst_stride, uint8_t *src,
                              int step, int stride, int width, int height)
{
    int row, col;
    int out = 2 * dst_stride;
    unsigned int g;

    for (row = 0; row < height; row++) {
        const uint8_t *in = src;

        for (col = 0; col < width; col++) {
            g = in[1];
            dst[0][out] = g;
            g += 0x80;
            dst[1][out] = in[2] - g;
            dst[2][out] = in[0] - g;
            if (step != 3)
                dst[3][out] = in[3];
            out++;
            in += step;
        }

        out += dst_stride - width;
        src += stride;
    }
}
232 
/* Copy a strided plane into a tightly packed destination, no prediction */
static void write_plane(uint8_t *src, uint8_t *dst, int stride,
                        int width, int height)
{
    int x, y;

    for (y = 0; y < height; y++) {
        const uint8_t *line = src + y * stride;

        for (x = 0; x < width; x++)
            dst[x] = line[x];
        dst += width;
    }
}
246 
/*
 * Pack a strided plane into dst as left-predicted residuals.
 * The predictor starts at 0x80 and carries across row boundaries.
 */
static void left_predict(uint8_t *src, uint8_t *dst, int stride,
                         int width, int height)
{
    int x, y;
    uint8_t last = 0x80; /* initial predictor */

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            uint8_t cur = src[x];

            *dst++ = cur - last;
            last   = cur;
        }
        src += stride;
    }
}
263 
264 /* Write data to a plane with median prediction */
266  int width, int height)
267 {
268  int i, j;
269  int A, B;
270  uint8_t prev;
271 
272  /* First line uses left neighbour prediction */
273  prev = 0x80; /* Set the initial value */
274  for (i = 0; i < width; i++) {
275  *dst++ = src[i] - prev;
276  prev = src[i];
277  }
278 
279  if (height == 1)
280  return;
281 
282  src += stride;
283 
284  /*
285  * Second line uses top prediction for the first sample,
286  * and median for the rest.
287  */
288  A = B = 0;
289 
290  /* Rest of the coded part uses median prediction */
291  for (j = 1; j < height; j++) {
292  c->dsp.sub_hfyu_median_prediction(dst, src - stride, src, width, &A, &B);
293  dst += width;
294  src += stride;
295  }
296 }
297 
/*
 * Build a byte-value histogram of a plane.
 * Rows are packed back to back, so the whole plane is a single
 * contiguous run of width * height samples.
 */
static void count_usage(uint8_t *src, int width,
                        int height, uint64_t *counts)
{
    int total = width * height;
    int i;

    for (i = 0; i < total; i++)
        counts[src[i]]++;
}
311 
/* Calculate the actual huffman codes from the code lengths */
static void calculate_codes(HuffEntry *he)
{
    int last, i;
    uint32_t code;

    /* Sort entries by ascending code length, symbol as tie-break. */
    qsort(he, 256, sizeof(*he), ff_ut_huff_cmp_len);

    /* Skip trailing entries with length 255 — presumably symbols that
     * never occur in the plane; verify against ff_huff_gen_len_table. */
    last = 255;
    while (he[last].len == 255 && last)
        last--;

    /* Assign canonical codes from the longest code upwards: the 32-bit
     * accumulator is advanced by one code-space step per entry, and the
     * top len bits form the code. */
    code = 1;
    for (i = last; i >= 0; i--) {
        he[i].code = code >> (32 - he[i].len);
        code += 0x80000000u >> (he[i].len - 1);
    }

    /* Re-sort by symbol so the table can be indexed by sample value. */
    qsort(he, 256, sizeof(*he), huff_cmp_sym);
}
332 
333 /* Write huffman bit codes to a memory block */
334 static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size,
335  int width, int height, HuffEntry *he)
336 {
337  PutBitContext pb;
338  int i, j;
339  int count;
340 
341  init_put_bits(&pb, dst, dst_size);
342 
343  /* Write the codes */
344  for (j = 0; j < height; j++) {
345  for (i = 0; i < width; i++)
346  put_bits(&pb, he[src[i]].len, he[src[i]].code);
347 
348  src += width;
349  }
350 
351  /* Pad output to a 32bit boundary */
352  count = put_bits_count(&pb) & 0x1F;
353 
354  if (count)
355  put_bits(&pb, 32 - count, 0);
356 
357  /* Get the amount of bits written */
358  count = put_bits_count(&pb);
359 
360  /* Flush the rest with zeroes */
361  flush_put_bits(&pb);
362 
363  return count;
364 }
365 
367  uint8_t *dst, int stride,
368  int width, int height, PutByteContext *pb)
369 {
370  UtvideoContext *c = avctx->priv_data;
371  uint8_t lengths[256];
372  uint64_t counts[256] = { 0 };
373 
374  HuffEntry he[256];
375 
376  uint32_t offset = 0, slice_len = 0;
377  int i, sstart, send = 0;
378  int symbol;
379 
380  /* Do prediction / make planes */
381  switch (c->frame_pred) {
382  case PRED_NONE:
383  for (i = 0; i < c->slices; i++) {
384  sstart = send;
385  send = height * (i + 1) / c->slices;
386  write_plane(src + sstart * stride, dst + sstart * width,
387  stride, width, send - sstart);
388  }
389  break;
390  case PRED_LEFT:
391  for (i = 0; i < c->slices; i++) {
392  sstart = send;
393  send = height * (i + 1) / c->slices;
394  left_predict(src + sstart * stride, dst + sstart * width,
395  stride, width, send - sstart);
396  }
397  break;
398  case PRED_MEDIAN:
399  for (i = 0; i < c->slices; i++) {
400  sstart = send;
401  send = height * (i + 1) / c->slices;
402  median_predict(c, src + sstart * stride, dst + sstart * width,
403  stride, width, send - sstart);
404  }
405  break;
406  default:
407  av_log(avctx, AV_LOG_ERROR, "Unknown prediction mode: %d\n",
408  c->frame_pred);
410  }
411 
412  /* Count the usage of values */
413  count_usage(dst, width, height, counts);
414 
415  /* Check for a special case where only one symbol was used */
416  for (symbol = 0; symbol < 256; symbol++) {
417  /* If non-zero count is found, see if it matches width * height */
418  if (counts[symbol]) {
419  /* Special case if only one symbol was used */
420  if (counts[symbol] == width * (int64_t)height) {
421  /*
422  * Write a zero for the single symbol
423  * used in the plane, else 0xFF.
424  */
425  for (i = 0; i < 256; i++) {
426  if (i == symbol)
427  bytestream2_put_byte(pb, 0);
428  else
429  bytestream2_put_byte(pb, 0xFF);
430  }
431 
432  /* Write zeroes for lengths */
433  for (i = 0; i < c->slices; i++)
434  bytestream2_put_le32(pb, 0);
435 
436  /* And that's all for that plane folks */
437  return 0;
438  }
439  break;
440  }
441  }
442 
443  /* Calculate huffman lengths */
444  ff_huff_gen_len_table(lengths, counts);
445 
446  /*
447  * Write the plane's header into the output packet:
448  * - huffman code lengths (256 bytes)
449  * - slice end offsets (gotten from the slice lengths)
450  */
451  for (i = 0; i < 256; i++) {
452  bytestream2_put_byte(pb, lengths[i]);
453 
454  he[i].len = lengths[i];
455  he[i].sym = i;
456  }
457 
458  /* Calculate the huffman codes themselves */
459  calculate_codes(he);
460 
461  send = 0;
462  for (i = 0; i < c->slices; i++) {
463  sstart = send;
464  send = height * (i + 1) / c->slices;
465 
466  /*
467  * Write the huffman codes to a buffer,
468  * get the offset in bits and convert to bytes.
469  */
470  offset += write_huff_codes(dst + sstart * width, c->slice_bits,
471  width * (send - sstart), width,
472  send - sstart, he) >> 3;
473 
474  slice_len = offset - slice_len;
475 
476  /* Byteswap the written huffman codes */
477  c->dsp.bswap_buf((uint32_t *) c->slice_bits,
478  (uint32_t *) c->slice_bits,
479  slice_len >> 2);
480 
481  /* Write the offset to the stream */
482  bytestream2_put_le32(pb, offset);
483 
484  /* Seek to the data part of the packet */
485  bytestream2_seek_p(pb, 4 * (c->slices - i - 1) +
486  offset - slice_len, SEEK_CUR);
487 
488  /* Write the slices' data into the output packet */
489  bytestream2_put_buffer(pb, c->slice_bits, slice_len);
490 
491  /* Seek back to the slice offsets */
492  bytestream2_seek_p(pb, -4 * (c->slices - i - 1) - offset,
493  SEEK_CUR);
494 
495  slice_len = offset;
496  }
497 
498  /* And at the end seek to the end of written slice(s) */
499  bytestream2_seek_p(pb, offset, SEEK_CUR);
500 
501  return 0;
502 }
503 
505  const AVFrame *pic, int *got_packet)
506 {
507  UtvideoContext *c = avctx->priv_data;
508  PutByteContext pb;
509 
510  uint32_t frame_info;
511 
512  uint8_t *dst;
513 
514  int width = avctx->width, height = avctx->height;
515  int i, ret = 0;
516 
517  /* Allocate a new packet if needed, and set it to the pointer dst */
518  ret = ff_alloc_packet2(avctx, pkt, (256 + 4 * c->slices + width * height) *
519  c->planes + 4);
520 
521  if (ret < 0)
522  return ret;
523 
524  dst = pkt->data;
525 
526  bytestream2_init_writer(&pb, dst, pkt->size);
527 
529  width * height + FF_INPUT_BUFFER_PADDING_SIZE);
530 
531  if (!c->slice_bits) {
532  av_log(avctx, AV_LOG_ERROR, "Cannot allocate temporary buffer 2.\n");
533  return AVERROR(ENOMEM);
534  }
535 
536  /* In case of RGB, mangle the planes to Ut Video's format */
537  if (avctx->pix_fmt == AV_PIX_FMT_RGBA || avctx->pix_fmt == AV_PIX_FMT_RGB24)
539  c->planes, pic->linesize[0], width, height);
540 
541  /* Deal with the planes */
542  switch (avctx->pix_fmt) {
543  case AV_PIX_FMT_RGB24:
544  case AV_PIX_FMT_RGBA:
545  for (i = 0; i < c->planes; i++) {
546  ret = encode_plane(avctx, c->slice_buffer[i] + 2 * c->slice_stride,
547  c->slice_buffer[i], c->slice_stride,
548  width, height, &pb);
549 
550  if (ret) {
551  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
552  return ret;
553  }
554  }
555  break;
556  case AV_PIX_FMT_YUV422P:
557  for (i = 0; i < c->planes; i++) {
558  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
559  pic->linesize[i], width >> !!i, height, &pb);
560 
561  if (ret) {
562  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
563  return ret;
564  }
565  }
566  break;
567  case AV_PIX_FMT_YUV420P:
568  for (i = 0; i < c->planes; i++) {
569  ret = encode_plane(avctx, pic->data[i], c->slice_buffer[0],
570  pic->linesize[i], width >> !!i, height >> !!i,
571  &pb);
572 
573  if (ret) {
574  av_log(avctx, AV_LOG_ERROR, "Error encoding plane %d.\n", i);
575  return ret;
576  }
577  }
578  break;
579  default:
580  av_log(avctx, AV_LOG_ERROR, "Unknown pixel format: %d\n",
581  avctx->pix_fmt);
582  return AVERROR_INVALIDDATA;
583  }
584 
585  /*
586  * Write frame information (LE 32bit unsigned)
587  * into the output packet.
588  * Contains the prediction method.
589  */
590  frame_info = c->frame_pred << 8;
591  bytestream2_put_le32(&pb, frame_info);
592 
593  /*
594  * At least currently Ut Video is IDR only.
595  * Set flags accordingly.
596  */
597  avctx->coded_frame->key_frame = 1;
599 
600  pkt->size = bytestream2_tell_p(&pb);
601  pkt->flags |= AV_PKT_FLAG_KEY;
602 
603  /* Packet should be done */
604  *got_packet = 1;
605 
606  return 0;
607 }
608 
610  .name = "utvideo",
611  .type = AVMEDIA_TYPE_VIDEO,
612  .id = AV_CODEC_ID_UTVIDEO,
613  .priv_data_size = sizeof(UtvideoContext),
615  .encode2 = utvideo_encode_frame,
617  .pix_fmts = (const enum AVPixelFormat[]) {
620  },
621  .long_name = NULL_IF_CONFIG_SMALL("Ut Video"),
622 };
int slice_stride
Definition: utvideo.h:77
void * av_mallocz(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:205
av_cold void ff_dsputil_init(DSPContext *c, AVCodecContext *avctx)
Definition: dsputil.c:2675
void(* sub_hfyu_median_prediction)(uint8_t *dst, const uint8_t *src1, const uint8_t *src2, int w, int *left, int *left_top)
subtract huffyuv's variant of median prediction note, this might read from src1[-1], src2[-1]
Definition: dsputil.h:203
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
This structure describes decoded (raw) audio or video data.
Definition: frame.h:76
static void write_plane(uint8_t *src, uint8_t *dst, int stride, int width, int height)
Definition: utvideoenc.c:234
static void mangle_rgb_planes(uint8_t *dst[4], int dst_stride, uint8_t *src, int step, int stride, int width, int height)
Definition: utvideoenc.c:200
#define B
Definition: dsputil.c:2025
uint32_t flags
Definition: utvideo.h:70
static int huff_cmp_sym(const void *a, const void *b)
Definition: utvideoenc.c:38
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:70
AVFrame * coded_frame
the picture in the bitstream
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:154
int slice_bits_size
Definition: utvideo.h:79
static av_always_inline void bytestream2_init_writer(PutByteContext *p, uint8_t *buf, int buf_size)
Definition: bytestream.h:139
void ff_huff_gen_len_table(uint8_t *dst, const uint64_t *stats)
Definition: huffman.c:53
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static av_cold int utvideo_encode_init(AVCodecContext *avctx)
Definition: utvideoenc.c:57
int stride
Definition: mace.c:144
#define FFALIGN(x, a)
Definition: common.h:63
void av_freep(void *arg)
Free a memory block which has been allocated with av_malloc(z)() or av_realloc() and set the pointer ...
Definition: mem.c:198
#define AV_WB32(p, darg)
Definition: intreadwrite.h:265
#define AV_WL32(p, darg)
Definition: intreadwrite.h:282
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
uint8_t
#define av_cold
Definition: attributes.h:78
AVCodec ff_utvideo_encoder
Definition: utvideoenc.c:609
static AVPacket pkt
Definition: demuxing.c:56
#define b
Definition: input.c:42
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
#define FF_PRED_PLANE
uint8_t * data
static int utvideo_encode_frame(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic, int *got_packet)
Definition: utvideoenc.c:504
uint32_t code
Definition: utvideo.h:85
#define A(x)
static void left_predict(uint8_t *src, uint8_t *dst, int stride, int width, int height)
Definition: utvideoenc.c:248
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
const int ff_ut_pred_order[5]
Definition: utvideo.c:29
uint8_t sym
Definition: utvideo.h:83
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
AVCodecContext * avctx
Definition: utvideo.h:67
void av_log(void *avcl, int level, const char *fmt,...)
Definition: log.c:246
const char * name
Name of the codec implementation.
uint32_t frame_info_size
Definition: utvideo.h:70
static const uint8_t offset[127][2]
Definition: vf_spp.c:70
static void put_bits(J2kEncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:160
static int write_huff_codes(uint8_t *src, uint8_t *dst, int dst_size, int width, int height, HuffEntry *he)
Definition: utvideoenc.c:334
static av_always_inline int bytestream2_tell_p(PutByteContext *p)
Definition: bytestream.h:188
external API header
packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
Definition: pixfmt.h:97
int flags
A combination of AV_PKT_FLAG values.
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:73
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:72
static void median_predict(UtvideoContext *c, uint8_t *src, uint8_t *dst, int stride, int width, int height)
Definition: utvideoenc.c:265
#define FF_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
AVFrame * avcodec_alloc_frame(void)
Allocate an AVFrame and set its fields to default values.
int compression
Definition: utvideo.h:73
DSPContext dsp
Definition: utvideo.h:68
FFT buffer for g
Definition: stft_peak.m:17
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:144
ret
Definition: avfilter.c:821
int width
picture width / height.
static int encode_plane(AVCodecContext *avctx, uint8_t *src, uint8_t *dst, int stride, int width, int height, PutByteContext *pb)
Definition: utvideoenc.c:366
float u
static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p, const uint8_t *src, unsigned int size)
Definition: bytestream.h:277
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int size)
Check AVPacket size and/or allocate data.
static av_always_inline int bytestream2_seek_p(PutByteContext *p, int offset, int whence)
Definition: bytestream.h:227
for k
or the Software in violation of any applicable export control laws in any jurisdiction Except as provided by mandatorily applicable UPF has no obligation to provide you with source code to the Software In the event Software contains any source code
Common Ut Video header.
static int width
Definition: tests/utils.c:158
int frame_pred
Definition: utvideo.h:75
uint8_t len
Definition: utvideo.h:84
AVS_Value src
Definition: avisynth_c.h:523
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:101
main external API structure.
static void close(AVCodecParserContext *s)
Definition: h264_parser.c:375
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:148
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
BYTE int const BYTE int int int height
Definition: avisynth_c.h:713
void * av_malloc(size_t size)
Allocate a block of size bytes with alignment suitable for all memory accesses (including vectors if ...
Definition: mem.c:73
synthesis window for stochastic i
void(* bswap_buf)(uint32_t *dst, const uint32_t *src, int w)
Definition: dsputil.h:208
huffman tree builder and VLC generator
Filter the word “frame” indicates either a video frame or a group of audio as stored in an AVFilterBuffer structure Format for each input and each output the list of supported formats For video that means pixel format For audio that means channel sample they are references to shared objects When the negotiation mechanism computes the intersection of the formats supported at each end of a all references to both lists are replaced with a reference to the intersection And when a single format is eventually chosen for a link amongst the remaining all references to the list are updated That means that if a filter requires that its input and output have the same format amongst a supported all it has to do is use a reference to the same list of formats query_formats can leave some formats unset and return AVERROR(EAGAIN) to cause the negotiation mechanism toagain later.That can be used by filters with complex requirements to use the format negotiated on one link to set the formats supported on another.Buffer references ownership and permissions
uint8_t * slice_bits
Definition: utvideo.h:78
int ff_ut_huff_cmp_len(const void *a, const void *b)
Definition: utvideo.c:35
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:87
static void calculate_codes(HuffEntry *he)
Definition: utvideoenc.c:313
static void count_usage(uint8_t *src, int width, int height, uint64_t *counts)
Definition: utvideoenc.c:299
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:68
common internal api header.
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:81
struct UtvideoContext UtvideoContext
static double c[64]
int prediction_method
prediction method (needed for huffyuv)
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:54
DSP utils.
uint8_t * slice_buffer[4]
Definition: utvideo.h:78
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:61
int len
else dst[i][x+y *dst_stride[i]]
Definition: vf_mcdeint.c:160
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:139
void INT64 INT64 count
Definition: avisynth_c.h:594
#define MKTAG(a, b, c, d)
Definition: common.h:282
AVPixelFormat
Pixel format.
Definition: pixfmt.h:66
static av_cold int utvideo_encode_close(AVCodecContext *avctx)
Definition: utvideoenc.c:44
This structure stores compressed data.
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step
bitstream writer API