nutenc.c
1 /*
2  * nut muxer
3  * Copyright (c) 2004-2007 Michael Niedermayer
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "libavutil/intreadwrite.h"
23 #include "libavutil/mathematics.h"
24 #include "libavutil/tree.h"
25 #include "libavutil/dict.h"
26 #include "libavutil/avassert.h"
27 #include "libavcodec/mpegaudiodata.h"
28 #include "nut.h"
29 #include "internal.h"
30 #include "avio_internal.h"
31 #include "riff.h"
32 
33 static int find_expected_header(AVCodecContext *c, int size, int key_frame,
34  uint8_t out[64])
35 {
36  int sample_rate = c->sample_rate;
37 
38  if (size > 4096)
39  return 0;
40 
41  AV_WB24(out, 1);
42 
43  if (c->codec_id == AV_CODEC_ID_MPEG4) {
44  if (key_frame) {
45  return 3;
46  } else {
47  out[3] = 0xB6;
48  return 4;
49  }
50  } else if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
51  c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
52  return 3;
53  } else if (c->codec_id == AV_CODEC_ID_H264) {
54  return 3;
55  } else if (c->codec_id == AV_CODEC_ID_MP3 ||
56  c->codec_id == AV_CODEC_ID_MP2) {
57  int lsf, mpeg25, sample_rate_index, bitrate_index, frame_size;
58  int layer = c->codec_id == AV_CODEC_ID_MP3 ? 3 : 2;
59  unsigned int header = 0xFFF00000;
60 
61  lsf = sample_rate < (24000 + 32000) / 2;
62  mpeg25 = sample_rate < (12000 + 16000) / 2;
63  sample_rate <<= lsf + mpeg25;
64  if (sample_rate < (32000 + 44100) / 2) sample_rate_index = 2;
65  else if (sample_rate < (44100 + 48000) / 2) sample_rate_index = 0;
66  else sample_rate_index = 1;
67 
68  sample_rate = avpriv_mpa_freq_tab[sample_rate_index] >> (lsf + mpeg25);
69 
70  for (bitrate_index = 2; bitrate_index < 30; bitrate_index++) {
71  frame_size =
72  avpriv_mpa_bitrate_tab[lsf][layer - 1][bitrate_index >> 1];
73  frame_size = (frame_size * 144000) / (sample_rate << lsf) +
74  (bitrate_index & 1);
75 
76  if (frame_size == size)
77  break;
78  }
79 
80  header |= (!lsf) << 19;
81  header |= (4 - layer) << 17;
82  header |= 1 << 16; //no crc
83  AV_WB32(out, header);
84  if (size <= 0)
85  return 2; //we guess there is no crc, if there is one the user clearly does not care about overhead
86  if (bitrate_index == 30)
87  return -1; //something is wrong ...
88 
89  header |= (bitrate_index >> 1) << 12;
90  header |= sample_rate_index << 10;
91  header |= (bitrate_index & 1) << 9;
92 
93  return 2; //FIXME actually put the needed ones in build_elision_headers()
94  return 3; //we guess that the private bit is not set
95 //FIXME the above assumptions should be checked, if these turn out false too often something should be done
96  }
97  return 0;
98 }
99 
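/* Elision headers: find_expected_header() predicts the first few bytes a frame
 * of the given codec should start with (the 00 00 01 startcode prefix for
 * MPEG-1/2/4 and H.264, or a reconstructed MPEG audio frame header for
 * MP2/MP3), e.g. 00 00 01 B6 for a non-key MPEG-4 frame. find_header_idx()
 * below matches that prediction against the headers advertised in the main
 * header; when a frame really begins with such a byte string, the muxer can
 * omit it from the stored payload and signal header_idx instead, and the
 * demuxer restores it on reading.
 */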
100 static int find_header_idx(AVFormatContext *s, AVCodecContext *c, int size, int frame_type)
101 {
102  NUTContext *nut = s->priv_data;
103  uint8_t out[64];
104  int i;
105  int len = find_expected_header(c, size, frame_type, out);
106 
107  for (i = 1; i < nut->header_count; i++) {
108  if (len == nut->header_len[i] && !memcmp(out, nut->header[i], len)) {
109  return i;
110  }
111  }
112 
113  return 0;
114 }
115 
116 static void build_elision_headers(AVFormatContext *s)
117 {
118  NUTContext *nut = s->priv_data;
119  int i;
120  //FIXME this is lame
121  //FIXME write a 2pass mode to find the maximal headers
122  static const uint8_t headers[][5] = {
123  { 3, 0x00, 0x00, 0x01 },
124  { 4, 0x00, 0x00, 0x01, 0xB6},
125  { 2, 0xFF, 0xFA }, //mp3+crc
126  { 2, 0xFF, 0xFB }, //mp3
127  { 2, 0xFF, 0xFC }, //mp2+crc
128  { 2, 0xFF, 0xFD }, //mp2
129  };
130 
131  nut->header_count = 7;
132  for (i = 1; i < nut->header_count; i++) {
133  nut->header_len[i] = headers[i - 1][0];
134  nut->header[i] = &headers[i - 1][1];
135  }
136 }
137 
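/* build_frame_code() fills the 256-entry frame_code[] table: each possible
 * leading byte of a frame carries default flags, stream_id, size_mul/size_lsb,
 * pts_delta and header_idx, so common frames need only this single byte of
 * framing overhead. Code 1 is a fully coded escape, audio streams get codes
 * for their exact frame sizes, other streams get FLAG_SIZE_MSB codes spread
 * over a few pts predictors, and 0, 0xFF and 'N' (the first byte of every
 * startcode) remain FLAG_INVALID.
 */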
138 static void build_frame_code(AVFormatContext *s)
139 {
140  NUTContext *nut = s->priv_data;
141  int key_frame, index, pred, stream_id;
142  int start = 1;
143  int end = 254;
144  int keyframe_0_esc = s->nb_streams > 2;
145  int pred_table[10];
146  FrameCode *ft;
147 
148  ft = &nut->frame_code[start];
149  ft->flags = FLAG_CODED;
150  ft->size_mul = 1;
151  ft->pts_delta = 1;
152  start++;
153 
154  if (keyframe_0_esc) {
155  /* keyframe = 0 escape */
156  FrameCode *ft = &nut->frame_code[start];
157  ft->flags = FLAG_STREAM_ID | FLAG_SIZE_MSB | FLAG_CODED_PTS;
158  ft->size_mul = 1;
159  start++;
160  }
161 
162  for (stream_id = 0; stream_id < s->nb_streams; stream_id++) {
163  int start2 = start + (end - start) * stream_id / s->nb_streams;
164  int end2 = start + (end - start) * (stream_id + 1) / s->nb_streams;
165  AVCodecContext *codec = s->streams[stream_id]->codec;
166  int is_audio = codec->codec_type == AVMEDIA_TYPE_AUDIO;
167  int intra_only = /*codec->intra_only || */ is_audio;
168  int pred_count;
169  int frame_size = 0;
170 
171  if (codec->codec_type == AVMEDIA_TYPE_AUDIO) {
172  frame_size = av_get_audio_frame_duration(codec, 0);
173  if (codec->codec_id == AV_CODEC_ID_VORBIS && !frame_size)
174  frame_size = 64;
175  } else {
176  AVRational f = av_div_q(codec->time_base, *nut->stream[stream_id].time_base);
177  if (f.den == 1 && f.num>0)
178  frame_size = f.num;
179  }
180  if (!frame_size)
181  frame_size = 1;
182 
183  for (key_frame = 0; key_frame < 2; key_frame++) {
184  if (!intra_only || !keyframe_0_esc || key_frame != 0) {
185  FrameCode *ft = &nut->frame_code[start2];
186  ft->flags = FLAG_KEY * key_frame;
187  ft->flags |= FLAG_CODED_PTS | FLAG_STREAM_ID | FLAG_SIZE_MSB;
188  ft->stream_id = stream_id;
189  ft->size_mul = 1;
190  if (is_audio)
191  ft->header_idx = find_header_idx(s, codec, -1, key_frame);
192  start2++;
193  }
194  }
195 
196  key_frame = intra_only;
197 #if 1
198  if (is_audio) {
199  int frame_bytes = codec->frame_size * (int64_t)codec->bit_rate /
200  (8 * codec->sample_rate);
201  int pts;
202  for (pts = 0; pts < 2; pts++) {
203  for (pred = 0; pred < 2; pred++) {
204  FrameCode *ft = &nut->frame_code[start2];
205  ft->flags = FLAG_KEY * key_frame;
206  ft->stream_id = stream_id;
207  ft->size_mul = frame_bytes + 2;
208  ft->size_lsb = frame_bytes + pred;
209  ft->pts_delta = pts * frame_size;
210  ft->header_idx = find_header_idx(s, codec, frame_bytes + pred, key_frame);
211  start2++;
212  }
213  }
214  } else {
215  FrameCode *ft = &nut->frame_code[start2];
216  ft->flags = FLAG_KEY | FLAG_SIZE_MSB;
217  ft->stream_id = stream_id;
218  ft->size_mul = 1;
219  ft->pts_delta = frame_size;
220  start2++;
221  }
222 #endif
223 
224  if (codec->has_b_frames) {
225  pred_count = 5;
226  pred_table[0] = -2;
227  pred_table[1] = -1;
228  pred_table[2] = 1;
229  pred_table[3] = 3;
230  pred_table[4] = 4;
231  } else if (codec->codec_id == AV_CODEC_ID_VORBIS) {
232  pred_count = 3;
233  pred_table[0] = 2;
234  pred_table[1] = 9;
235  pred_table[2] = 16;
236  } else {
237  pred_count = 1;
238  pred_table[0] = 1;
239  }
240 
241  for (pred = 0; pred < pred_count; pred++) {
242  int start3 = start2 + (end2 - start2) * pred / pred_count;
243  int end3 = start2 + (end2 - start2) * (pred + 1) / pred_count;
244 
245  pred_table[pred] *= frame_size;
246 
247  for (index = start3; index < end3; index++) {
248  FrameCode *ft = &nut->frame_code[index];
249  ft->flags = FLAG_KEY * key_frame;
250  ft->flags |= FLAG_SIZE_MSB;
251  ft->stream_id = stream_id;
252 //FIXME use single byte size and pred from last
253  ft->size_mul = end3 - start3;
254  ft->size_lsb = index - start3;
255  ft->pts_delta = pred_table[pred];
256  if (is_audio)
257  ft->header_idx = find_header_idx(s, codec, -1, key_frame);
258  }
259  }
260  }
261  memmove(&nut->frame_code['N' + 1], &nut->frame_code['N'], sizeof(FrameCode) * (255 - 'N'));
262  nut->frame_code[0].flags =
263  nut->frame_code[255].flags =
264  nut->frame_code['N'].flags = FLAG_INVALID;
265 }
266 
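/* put_tt() writes a multiplexed timestamp: the value is scaled by the number
 * of time bases and the index of the stream's time base is added, so a single
 * v-coded integer (val * time_base_count + time_base_id) carries both the
 * timestamp and the time base it is expressed in.
 */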
267 static void put_tt(NUTContext *nut, AVRational *time_base, AVIOContext *bc, uint64_t val)
268 {
269  val *= nut->time_base_count;
270  val += time_base - nut->time_base;
271  ff_put_v(bc, val);
272 }
273 /**
274  * Store a string as vb.
275  */
276 static void put_str(AVIOContext *bc, const char *string)
277 {
278  int len = strlen(string);
279 
280  ff_put_v(bc, len);
281  avio_write(bc, string, len);
282 }
283 
284 static void put_s(AVIOContext *bc, int64_t val)
285 {
286  ff_put_v(bc, 2 * FFABS(val) - (val > 0));
287 }
288 
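/* put_s() maps signed values onto the unsigned v coding with a zigzag-style
 * scheme, 2*|val| - (val > 0): 0 -> 0, 1 -> 1, -1 -> 2, 2 -> 3, -2 -> 4, ...
 */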
289 #ifdef TRACE
290 static inline void ff_put_v_trace(AVIOContext *bc, uint64_t v, const char *file,
291  const char *func, int line)
292 {
293  av_log(NULL, AV_LOG_DEBUG, "ff_put_v %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
294 
295  ff_put_v(bc, v);
296 }
297 
298 static inline void put_s_trace(AVIOContext *bc, int64_t v, const char *file, const char *func, int line)
299 {
300  av_log(NULL, AV_LOG_DEBUG, "put_s %5"PRId64" / %"PRIX64" in %s %s:%d\n", v, v, file, func, line);
301 
302  put_s(bc, v);
303 }
304 #define ff_put_v(bc, v) ff_put_v_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
305 #define put_s(bc, v) put_s_trace(bc, v, __FILE__, __PRETTY_FUNCTION__, __LINE__)
306 #endif
307 
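/* put_packet() wraps the contents of a dynamic buffer into a NUT packet: the
 * 64-bit startcode, the v-coded forward_ptr (payload size, plus 4 when a
 * trailing checksum is added), a checksum of the packet header when
 * forward_ptr exceeds 4096, the payload itself, and finally a checksum of the
 * payload when calculate_checksum is set.
 */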
308 //FIXME remove calculate_checksum
309 static void put_packet(NUTContext *nut, AVIOContext *bc, AVIOContext *dyn_bc,
310  int calculate_checksum, uint64_t startcode)
311 {
312  uint8_t *dyn_buf = NULL;
313  int dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
314  int forw_ptr = dyn_size + 4 * calculate_checksum;
315 
316  if (forw_ptr > 4096)
317  ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
318  avio_wb64(bc, startcode);
319  ff_put_v(bc, forw_ptr);
320  if (forw_ptr > 4096)
321  avio_wl32(bc, ffio_get_checksum(bc));
322 
323  if (calculate_checksum)
324  ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
325  avio_write(bc, dyn_buf, dyn_size);
326  if (calculate_checksum)
327  avio_wl32(bc, ffio_get_checksum(bc));
328 
329  av_free(dyn_buf);
330 }
331 
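/* write_mainheader() serializes the global parameters and the frame_code[]
 * table. The table is stored as runs of consecutive codes whose size_lsb
 * increases by one per code; for each run only the starting values and the
 * run length are written, and tmp_fields tells the demuxer how many of the
 * optional fields (pts_delta, size_mul, stream_id, size_lsb, ..., header_idx)
 * are coded explicitly.
 */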
332 static void write_mainheader(NUTContext *nut, AVIOContext *bc)
333 {
334  int i, j, tmp_pts, tmp_flags, tmp_stream, tmp_mul, tmp_size, tmp_fields,
335  tmp_head_idx;
336  int64_t tmp_match;
337 
338  ff_put_v(bc, 3); /* version */
339  ff_put_v(bc, nut->avf->nb_streams);
340  ff_put_v(bc, nut->max_distance);
341  ff_put_v(bc, nut->time_base_count);
342 
343  for (i = 0; i < nut->time_base_count; i++) {
344  ff_put_v(bc, nut->time_base[i].num);
345  ff_put_v(bc, nut->time_base[i].den);
346  }
347 
348  tmp_pts = 0;
349  tmp_mul = 1;
350  tmp_stream = 0;
351  tmp_match = 1 - (1LL << 62);
352  tmp_head_idx = 0;
353  for (i = 0; i < 256; ) {
354  tmp_fields = 0;
355  tmp_size = 0;
356 // tmp_res=0;
357  if (tmp_pts != nut->frame_code[i].pts_delta ) tmp_fields = 1;
358  if (tmp_mul != nut->frame_code[i].size_mul ) tmp_fields = 2;
359  if (tmp_stream != nut->frame_code[i].stream_id ) tmp_fields = 3;
360  if (tmp_size != nut->frame_code[i].size_lsb ) tmp_fields = 4;
361 // if (tmp_res != nut->frame_code[i].res ) tmp_fields=5;
362  if (tmp_head_idx != nut->frame_code[i].header_idx) tmp_fields = 8;
363 
364  tmp_pts = nut->frame_code[i].pts_delta;
365  tmp_flags = nut->frame_code[i].flags;
366  tmp_stream = nut->frame_code[i].stream_id;
367  tmp_mul = nut->frame_code[i].size_mul;
368  tmp_size = nut->frame_code[i].size_lsb;
369 // tmp_res = nut->frame_code[i].res;
370  tmp_head_idx = nut->frame_code[i].header_idx;
371 
372  for (j = 0; i < 256; j++, i++) {
373  if (i == 'N') {
374  j--;
375  continue;
376  }
377  if (nut->frame_code[i].pts_delta != tmp_pts ||
378  nut->frame_code[i].flags != tmp_flags ||
379  nut->frame_code[i].stream_id != tmp_stream ||
380  nut->frame_code[i].size_mul != tmp_mul ||
381  nut->frame_code[i].size_lsb != tmp_size + j ||
382 // nut->frame_code[i].res != tmp_res ||
383  nut->frame_code[i].header_idx != tmp_head_idx)
384  break;
385  }
386  if (j != tmp_mul - tmp_size)
387  tmp_fields = 6;
388 
389  ff_put_v(bc, tmp_flags);
390  ff_put_v(bc, tmp_fields);
391  if (tmp_fields > 0) put_s(bc, tmp_pts);
392  if (tmp_fields > 1) ff_put_v(bc, tmp_mul);
393  if (tmp_fields > 2) ff_put_v(bc, tmp_stream);
394  if (tmp_fields > 3) ff_put_v(bc, tmp_size);
395  if (tmp_fields > 4) ff_put_v(bc, 0 /*tmp_res*/);
396  if (tmp_fields > 5) ff_put_v(bc, j);
397  if (tmp_fields > 6) ff_put_v(bc, tmp_match);
398  if (tmp_fields > 7) ff_put_v(bc, tmp_head_idx);
399  }
400  ff_put_v(bc, nut->header_count - 1);
401  for (i = 1; i < nut->header_count; i++) {
402  ff_put_v(bc, nut->header_len[i]);
403  avio_write(bc, nut->header[i], nut->header_len[i]);
404  }
405 }
406 
407 static int write_streamheader(AVFormatContext *avctx, AVIOContext *bc,
408  AVStream *st, int i)
409 {
410  NUTContext *nut = avctx->priv_data;
411  AVCodecContext *codec = st->codec;
412 
413  ff_put_v(bc, i);
414  switch (codec->codec_type) {
415  case AVMEDIA_TYPE_VIDEO: ff_put_v(bc, 0); break;
416  case AVMEDIA_TYPE_AUDIO: ff_put_v(bc, 1); break;
417  case AVMEDIA_TYPE_SUBTITLE: ff_put_v(bc, 2); break;
418  default: ff_put_v(bc, 3); break;
419  }
420  ff_put_v(bc, 4);
421  if (codec->codec_tag) {
422  avio_wl32(bc, codec->codec_tag);
423  } else {
424  av_log(avctx, AV_LOG_ERROR, "No codec tag defined for stream %d\n", i);
425  return AVERROR(EINVAL);
426  }
427 
428  ff_put_v(bc, nut->stream[i].time_base - nut->time_base);
429  ff_put_v(bc, nut->stream[i].msb_pts_shift);
430  ff_put_v(bc, nut->stream[i].max_pts_distance);
431  ff_put_v(bc, codec->has_b_frames);
432  avio_w8(bc, 0); /* flags: 0x1 - fixed_fps, 0x2 - index_present */
433 
434  ff_put_v(bc, codec->extradata_size);
435  avio_write(bc, codec->extradata, codec->extradata_size);
436 
437  switch (codec->codec_type) {
438  case AVMEDIA_TYPE_AUDIO:
439  ff_put_v(bc, codec->sample_rate);
440  ff_put_v(bc, 1);
441  ff_put_v(bc, codec->channels);
442  break;
443  case AVMEDIA_TYPE_VIDEO:
444  ff_put_v(bc, codec->width);
445  ff_put_v(bc, codec->height);
446 
447  if (st->sample_aspect_ratio.num <= 0 ||
448  st->sample_aspect_ratio.den <= 0) {
449  ff_put_v(bc, 0);
450  ff_put_v(bc, 0);
451  } else {
452  ff_put_v(bc, st->sample_aspect_ratio.num);
453  ff_put_v(bc, st->sample_aspect_ratio.den);
454  }
455  ff_put_v(bc, 0); /* csp type -- unknown */
456  break;
457  default:
458  break;
459  }
460  return 0;
461 }
462 
463 static int add_info(AVIOContext *bc, const char *type, const char *value)
464 {
465  put_str(bc, type);
466  put_s(bc, -1);
467  put_str(bc, value);
468  return 1;
469 }
470 
471 static int write_globalinfo(NUTContext *nut, AVIOContext *bc)
472 {
473  AVFormatContext *s = nut->avf;
474  AVDictionaryEntry *t = NULL;
475  AVIOContext *dyn_bc;
476  uint8_t *dyn_buf = NULL;
477  int count = 0, dyn_size;
478  int ret = avio_open_dyn_buf(&dyn_bc);
479  if (ret < 0)
480  return ret;
481 
482  while ((t = av_dict_get(s->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
483  count += add_info(dyn_bc, t->key, t->value);
484 
485  ff_put_v(bc, 0); //stream_id_plus1
486  ff_put_v(bc, 0); //chapter_id
487  ff_put_v(bc, 0); //timestamp_start
488  ff_put_v(bc, 0); //length
489 
490  ff_put_v(bc, count);
491 
492  dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
493  avio_write(bc, dyn_buf, dyn_size);
494  av_free(dyn_buf);
495  return 0;
496 }
497 
498 static int write_streaminfo(NUTContext *nut, AVIOContext *bc, int stream_id) {
499  AVFormatContext *s= nut->avf;
500  AVStream* st = s->streams[stream_id];
501  AVDictionaryEntry *t = NULL;
502  AVIOContext *dyn_bc;
503  uint8_t *dyn_buf=NULL;
504  int count=0, dyn_size, i;
505  int ret = avio_open_dyn_buf(&dyn_bc);
506  if (ret < 0)
507  return ret;
508 
509  while ((t = av_dict_get(st->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
510  count += add_info(dyn_bc, t->key, t->value);
511  for (i=0; ff_nut_dispositions[i].flag; ++i) {
512  if (st->disposition & ff_nut_dispositions[i].flag)
513  count += add_info(dyn_bc, "Disposition", ff_nut_dispositions[i].str);
514  }
515  if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
516  uint8_t buf[256];
517  snprintf(buf, sizeof(buf), "%d/%d", st->codec->time_base.den, st->codec->time_base.num);
518  count += add_info(dyn_bc, "r_frame_rate", buf);
519  }
520  dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
521 
522  if (count) {
523  ff_put_v(bc, stream_id + 1); //stream_id_plus1
524  ff_put_v(bc, 0); //chapter_id
525  ff_put_v(bc, 0); //timestamp_start
526  ff_put_v(bc, 0); //length
527 
528  ff_put_v(bc, count);
529 
530  avio_write(bc, dyn_buf, dyn_size);
531  }
532 
533  av_free(dyn_buf);
534  return count;
535 }
536 
537 static int write_chapter(NUTContext *nut, AVIOContext *bc, int id)
538 {
539  AVIOContext *dyn_bc;
540  uint8_t *dyn_buf = NULL;
541  AVDictionaryEntry *t = NULL;
542  AVChapter *ch = nut->avf->chapters[id];
543  int ret, dyn_size, count = 0;
544 
545  ret = avio_open_dyn_buf(&dyn_bc);
546  if (ret < 0)
547  return ret;
548 
549  ff_put_v(bc, 0); // stream_id_plus1
550  put_s(bc, id + 1); // chapter_id
551  put_tt(nut, nut->chapter[id].time_base, bc, ch->start); // chapter_start
552  ff_put_v(bc, ch->end - ch->start); // chapter_len
553 
554  while ((t = av_dict_get(ch->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
555  count += add_info(dyn_bc, t->key, t->value);
556 
557  ff_put_v(bc, count);
558 
559  dyn_size = avio_close_dyn_buf(dyn_bc, &dyn_buf);
560  avio_write(bc, dyn_buf, dyn_size);
561  av_freep(&dyn_buf);
562  return 0;
563 }
564 
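/* write_index() emits the optional index packet: the highest pts seen, the
 * syncpoint count, the syncpoint positions as deltas (in units of 16 bytes),
 * and, per stream, the keyframe pts values run-length coded over ranges of
 * syncpoints that do or do not contain a keyframe for that stream. The final
 * 64-bit value records the total size of the index so a demuxer can locate it
 * from the end of the file.
 */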
565 static int write_index(NUTContext *nut, AVIOContext *bc) {
566  int i;
567  Syncpoint dummy= { .pos= 0 };
568  Syncpoint *next_node[2] = { NULL };
569  int64_t startpos = avio_tell(bc);
570  int64_t payload_size;
571 
572  put_tt(nut, nut->max_pts_tb, bc, nut->max_pts);
573 
574  ff_put_v(bc, nut->sp_count);
575 
576  for (i=0; i<nut->sp_count; i++) {
577  av_tree_find(nut->syncpoints, &dummy, (void *) ff_nut_sp_pos_cmp, (void**)next_node);
578  ff_put_v(bc, (next_node[1]->pos >> 4) - (dummy.pos>>4));
579  dummy.pos = next_node[1]->pos;
580  }
581 
582  for (i=0; i<nut->avf->nb_streams; i++) {
583  StreamContext *nus= &nut->stream[i];
584  int64_t last_pts= -1;
585  int j, k;
586  for (j=0; j<nut->sp_count; j++) {
587  int flag = (nus->keyframe_pts[j] != AV_NOPTS_VALUE) ^ (j+1 == nut->sp_count);
588  int n = 0;
589  for (; j<nut->sp_count && (nus->keyframe_pts[j] != AV_NOPTS_VALUE) == flag; j++)
590  n++;
591 
592  ff_put_v(bc, 1 + 2*flag + 4*n);
593  for (k= j - n; k<=j && k<nut->sp_count; k++) {
594  if (nus->keyframe_pts[k] == AV_NOPTS_VALUE)
595  continue;
596  av_assert0(nus->keyframe_pts[k] > last_pts);
597  ff_put_v(bc, nus->keyframe_pts[k] - last_pts);
598  last_pts = nus->keyframe_pts[k];
599  }
600  }
601  }
602 
603  payload_size = avio_tell(bc) - startpos + 8 + 4;
604 
605  avio_wb64(bc, 8 + payload_size + av_log2(payload_size) / 7 + 1 + 4*(payload_size > 4096));
606 
607  return 0;
608 }
609 
610 static int write_headers(AVFormatContext *avctx, AVIOContext *bc)
611 {
612  NUTContext *nut = avctx->priv_data;
613  AVIOContext *dyn_bc;
614  int i, ret;
615 
615 
616  ff_metadata_conv_ctx(avctx, ff_nut_metadata_conv, NULL);
617 
618  ret = avio_open_dyn_buf(&dyn_bc);
619  if (ret < 0)
620  return ret;
621  write_mainheader(nut, dyn_bc);
622  put_packet(nut, bc, dyn_bc, 1, MAIN_STARTCODE);
623 
624  for (i = 0; i < nut->avf->nb_streams; i++) {
625  ret = avio_open_dyn_buf(&dyn_bc);
626  if (ret < 0)
627  return ret;
628  ret = write_streamheader(avctx, dyn_bc, nut->avf->streams[i], i);
629  if (ret < 0)
630  return ret;
631  put_packet(nut, bc, dyn_bc, 1, STREAM_STARTCODE);
632  }
633 
634  ret = avio_open_dyn_buf(&dyn_bc);
635  if (ret < 0)
636  return ret;
637  write_globalinfo(nut, dyn_bc);
638  put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
639 
640  for (i = 0; i < nut->avf->nb_streams; i++) {
641  ret = avio_open_dyn_buf(&dyn_bc);
642  if (ret < 0)
643  return ret;
644  ret = write_streaminfo(nut, dyn_bc, i);
645  if (ret < 0)
646  return ret;
647  if (ret > 0)
648  put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
649  else {
650  uint8_t *buf;
651  avio_close_dyn_buf(dyn_bc, &buf);
652  av_free(buf);
653  }
654  }
655 
656  for (i = 0; i < nut->avf->nb_chapters; i++) {
657  ret = avio_open_dyn_buf(&dyn_bc);
658  if (ret < 0)
659  return ret;
660  ret = write_chapter(nut, dyn_bc, i);
661  if (ret < 0) {
662  uint8_t *buf;
663  avio_close_dyn_buf(dyn_bc, &buf);
664  av_freep(&buf);
665  return ret;
666  }
667  put_packet(nut, bc, dyn_bc, 1, INFO_STARTCODE);
668  }
669 
670  nut->last_syncpoint_pos = INT_MIN;
671  nut->header_count++;
672  return 0;
673 }
674 
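/* nut_write_header() picks one time base per stream and chapter (audio uses
 * 1/sample_rate), deduplicates them into nut->time_base[], derives
 * msb_pts_shift and max_pts_distance for each stream, builds the elision
 * header and frame code tables, and then writes the ID string followed by the
 * main, stream, info and chapter header packets.
 */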
675 static int nut_write_header(AVFormatContext *s)
676 {
677  NUTContext *nut = s->priv_data;
678  AVIOContext *bc = s->pb;
679  int i, j, ret;
680 
681  nut->avf = s;
682 
683  nut->stream = av_mallocz(sizeof(StreamContext ) * s->nb_streams);
684  nut->chapter = av_mallocz(sizeof(ChapterContext) * s->nb_chapters);
685  nut->time_base= av_mallocz(sizeof(AVRational ) *(s->nb_streams +
686  s->nb_chapters));
687  if (!nut->stream || !nut->chapter || !nut->time_base) {
688  av_freep(&nut->stream);
689  av_freep(&nut->chapter);
690  av_freep(&nut->time_base);
691  return AVERROR(ENOMEM);
692  }
693 
694  for (i = 0; i < s->nb_streams; i++) {
695  AVStream *st = s->streams[i];
696  int ssize;
697  AVRational time_base;
698  ff_parse_specific_params(st->codec, &time_base.den, &ssize, &time_base.num);
699 
700  if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO && st->codec->sample_rate) {
701  time_base = (AVRational) {1, st->codec->sample_rate};
702  } else {
703  time_base = ff_choose_timebase(s, st, 48000);
704  }
705 
706  avpriv_set_pts_info(st, 64, time_base.num, time_base.den);
707 
708  for (j = 0; j < nut->time_base_count; j++)
709  if (!memcmp(&time_base, &nut->time_base[j], sizeof(AVRational))) {
710  break;
711  }
712  nut->time_base[j] = time_base;
713  nut->stream[i].time_base = &nut->time_base[j];
714  if (j == nut->time_base_count)
715  nut->time_base_count++;
716 
717  if (INT64_C(1000) * time_base.num >= time_base.den)
718  nut->stream[i].msb_pts_shift = 7;
719  else
720  nut->stream[i].msb_pts_shift = 14;
721  nut->stream[i].max_pts_distance =
722  FFMAX(time_base.den, time_base.num) / time_base.num;
723  }
724 
725  for (i = 0; i < s->nb_chapters; i++) {
726  AVChapter *ch = s->chapters[i];
727 
728  for (j = 0; j < nut->time_base_count; j++)
729  if (!memcmp(&ch->time_base, &nut->time_base[j], sizeof(AVRational)))
730  break;
731 
732  nut->time_base[j] = ch->time_base;
733  nut->chapter[i].time_base = &nut->time_base[j];
734  if (j == nut->time_base_count)
735  nut->time_base_count++;
736  }
737 
738  nut->max_distance = MAX_DISTANCE;
739  build_elision_headers(s);
740  build_frame_code(s);
741  av_assert0(nut->frame_code['N'].flags == FLAG_INVALID);
742 
743  avio_write(bc, ID_STRING, strlen(ID_STRING));
744  avio_w8(bc, 0);
745 
746  if ((ret = write_headers(s, bc)) < 0)
747  return ret;
748 
749  if (s->avoid_negative_ts < 0)
750  s->avoid_negative_ts = 1;
751 
752  avio_flush(bc);
753 
754  return 0;
755 }
756 
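/* get_needed_flags() computes which optional fields a given frame_code would
 * have to code explicitly for this packet: a differing stream_id or pts
 * delta, a size not matching size_lsb, a checksum for oversized frames or
 * large pts jumps, and a header_idx whose elision header does not match the
 * start of the packet data.
 */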
757 static int get_needed_flags(NUTContext *nut, StreamContext *nus, FrameCode *fc,
758  AVPacket *pkt)
759 {
760  int flags = 0;
761 
762  if (pkt->flags & AV_PKT_FLAG_KEY)
763  flags |= FLAG_KEY;
764  if (pkt->stream_index != fc->stream_id)
765  flags |= FLAG_STREAM_ID;
766  if (pkt->size / fc->size_mul)
767  flags |= FLAG_SIZE_MSB;
768  if (pkt->pts - nus->last_pts != fc->pts_delta)
769  flags |= FLAG_CODED_PTS;
770  if (pkt->size > 2 * nut->max_distance)
771  flags |= FLAG_CHECKSUM;
772  if (FFABS(pkt->pts - nus->last_pts) > nus->max_pts_distance)
773  flags |= FLAG_CHECKSUM;
774  if (pkt->size < nut->header_len[fc->header_idx] ||
775  (pkt->size > 4096 && fc->header_idx) ||
776  memcmp(pkt->data, nut->header[fc->header_idx],
777  nut->header_len[fc->header_idx]))
778  flags |= FLAG_HEADER_IDX;
779 
780  return flags | (fc->flags & FLAG_CODED);
781 }
782 
783 static int find_best_header_idx(NUTContext *nut, AVPacket *pkt)
784 {
785  int i;
786  int best_i = 0;
787  int best_len = 0;
788 
789  if (pkt->size > 4096)
790  return 0;
791 
792  for (i = 1; i < nut->header_count; i++)
793  if (pkt->size >= nut->header_len[i]
794  && nut->header_len[i] > best_len
795  && !memcmp(pkt->data, nut->header[i], nut->header_len[i])) {
796  best_i = i;
797  best_len = nut->header_len[i];
798  }
799  return best_i;
800 }
801 
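/* nut_write_packet() first decides whether to emit a syncpoint (on the first
 * keyframe after non-key frames, or once max_distance bytes have been written
 * since the last syncpoint), then picks the cheapest frame_code for this
 * packet, writes the one-byte code plus whatever coded fields it requires,
 * and stores the payload with any matching elision header stripped.
 */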
802 static int nut_write_packet(AVFormatContext *s, AVPacket *pkt)
803 {
804  NUTContext *nut = s->priv_data;
805  StreamContext *nus = &nut->stream[pkt->stream_index];
806  AVIOContext *bc = s->pb, *dyn_bc;
807  FrameCode *fc;
808  int64_t coded_pts;
809  int best_length, frame_code, flags, needed_flags, i, header_idx;
810  int best_header_idx;
811  int key_frame = !!(pkt->flags & AV_PKT_FLAG_KEY);
812  int store_sp = 0;
813  int ret;
814 
815  if (pkt->pts < 0) {
816  av_log(s, AV_LOG_ERROR,
817  "Negative pts not supported stream %d, pts %"PRId64"\n",
818  pkt->stream_index, pkt->pts);
819  return AVERROR(EINVAL);
820  }
821 
822  if (1LL << (20 + 3 * nut->header_count) <= avio_tell(bc))
823  write_headers(s, bc);
824 
825  if (key_frame && !(nus->last_flags & FLAG_KEY))
826  store_sp = 1;
827 
828  if (pkt->size + 30 /*FIXME check*/ + avio_tell(bc) >= nut->last_syncpoint_pos + nut->max_distance)
829  store_sp = 1;
830 
831 //FIXME: Ensure store_sp is 1 in the first place.
832 
833  if (store_sp) {
834  Syncpoint *sp, dummy = { .pos = INT64_MAX };
835 
836  ff_nut_reset_ts(nut, *nus->time_base, pkt->dts);
837  for (i = 0; i < s->nb_streams; i++) {
838  AVStream *st = s->streams[i];
839  int64_t dts_tb = av_rescale_rnd(pkt->dts,
840  nus->time_base->num * (int64_t)nut->stream[i].time_base->den,
841  nus->time_base->den * (int64_t)nut->stream[i].time_base->num,
842  AV_ROUND_DOWN);
843  int index = av_index_search_timestamp(st, dts_tb,
844  AVSEEK_FLAG_BACKWARD);
845  if (index >= 0)
846  dummy.pos = FFMIN(dummy.pos, st->index_entries[index].pos);
847  }
848  if (dummy.pos == INT64_MAX)
849  dummy.pos = 0;
850  sp = av_tree_find(nut->syncpoints, &dummy, (void *)ff_nut_sp_pos_cmp,
851  NULL);
852 
853  nut->last_syncpoint_pos = avio_tell(bc);
854  ret = avio_open_dyn_buf(&dyn_bc);
855  if (ret < 0)
856  return ret;
857  put_tt(nut, nus->time_base, dyn_bc, pkt->dts);
858  ff_put_v(dyn_bc, sp ? (nut->last_syncpoint_pos - sp->pos) >> 4 : 0);
859  put_packet(nut, bc, dyn_bc, 1, SYNCPOINT_STARTCODE);
860 
861  ff_nut_add_sp(nut, nut->last_syncpoint_pos, 0 /*unused*/, pkt->dts);
862 
863  if ((1ll<<60) % nut->sp_count == 0)
864  for (i=0; i<s->nb_streams; i++) {
865  int j;
866  StreamContext *nus = &nut->stream[i];
867  nus->keyframe_pts = av_realloc(nus->keyframe_pts, 2*nut->sp_count*sizeof(*nus->keyframe_pts));
868  if (!nus->keyframe_pts)
869  return AVERROR(ENOMEM);
870  for (j=nut->sp_count == 1 ? 0 : nut->sp_count; j<2*nut->sp_count; j++)
871  nus->keyframe_pts[j] = AV_NOPTS_VALUE;
872  }
873  }
875 
876  coded_pts = pkt->pts & ((1 << nus->msb_pts_shift) - 1);
877  if (ff_lsb2full(nus, coded_pts) != pkt->pts)
878  coded_pts = pkt->pts + (1 << nus->msb_pts_shift);
879 
880  best_header_idx = find_best_header_idx(nut, pkt);
881 
882  best_length = INT_MAX;
883  frame_code = -1;
884  for (i = 0; i < 256; i++) {
885  int length = 0;
886  FrameCode *fc = &nut->frame_code[i];
887  int flags = fc->flags;
888 
889  if (flags & FLAG_INVALID)
890  continue;
891  needed_flags = get_needed_flags(nut, nus, fc, pkt);
892 
893  if (flags & FLAG_CODED) {
894  length++;
895  flags = needed_flags;
896  }
897 
898  if ((flags & needed_flags) != needed_flags)
899  continue;
900 
901  if ((flags ^ needed_flags) & FLAG_KEY)
902  continue;
903 
904  if (flags & FLAG_STREAM_ID)
905  length += ff_get_v_length(pkt->stream_index);
906 
907  if (pkt->size % fc->size_mul != fc->size_lsb)
908  continue;
909  if (flags & FLAG_SIZE_MSB)
910  length += ff_get_v_length(pkt->size / fc->size_mul);
911 
912  if (flags & FLAG_CHECKSUM)
913  length += 4;
914 
915  if (flags & FLAG_CODED_PTS)
916  length += ff_get_v_length(coded_pts);
917 
918  if ( (flags & FLAG_CODED)
919  && nut->header_len[best_header_idx] > nut->header_len[fc->header_idx] + 1) {
920  flags |= FLAG_HEADER_IDX;
921  }
922 
923  if (flags & FLAG_HEADER_IDX) {
924  length += 1 - nut->header_len[best_header_idx];
925  } else {
926  length -= nut->header_len[fc->header_idx];
927  }
928 
929  length *= 4;
930  length += !(flags & FLAG_CODED_PTS);
931  length += !(flags & FLAG_CHECKSUM);
932 
933  if (length < best_length) {
934  best_length = length;
935  frame_code = i;
936  }
937  }
938  av_assert0(frame_code != -1);
939  fc = &nut->frame_code[frame_code];
940  flags = fc->flags;
941  needed_flags = get_needed_flags(nut, nus, fc, pkt);
942  header_idx = fc->header_idx;
943 
944  ffio_init_checksum(bc, ff_crc04C11DB7_update, 0);
945  avio_w8(bc, frame_code);
946  if (flags & FLAG_CODED) {
947  ff_put_v(bc, (flags ^ needed_flags) & ~(FLAG_CODED));
948  flags = needed_flags;
949  }
950  if (flags & FLAG_STREAM_ID) ff_put_v(bc, pkt->stream_index);
951  if (flags & FLAG_CODED_PTS) ff_put_v(bc, coded_pts);
952  if (flags & FLAG_SIZE_MSB ) ff_put_v(bc, pkt->size / fc->size_mul);
953  if (flags & FLAG_HEADER_IDX) ff_put_v(bc, header_idx = best_header_idx);
954 
955  if (flags & FLAG_CHECKSUM) avio_wl32(bc, ffio_get_checksum(bc));
956  else ffio_get_checksum(bc);
957 
958  avio_write(bc, pkt->data + nut->header_len[header_idx], pkt->size - nut->header_len[header_idx]);
959  nus->last_flags = flags;
960  nus->last_pts = pkt->pts;
961 
962  //FIXME just store one per syncpoint
963  if (flags & FLAG_KEY) {
964  av_add_index_entry(
965  s->streams[pkt->stream_index],
966  nut->last_syncpoint_pos,
967  pkt->pts,
968  0,
969  0,
970  AVINDEX_KEYFRAME);
971  if (nus->keyframe_pts && nus->keyframe_pts[nut->sp_count] == AV_NOPTS_VALUE)
972  nus->keyframe_pts[nut->sp_count] = pkt->pts;
973  }
974 
975  if (!nut->max_pts_tb || av_compare_ts(nut->max_pts, *nut->max_pts_tb, pkt->pts, *nus->time_base) < 0) {
976  nut->max_pts = pkt->pts;
977  nut->max_pts_tb = nus->time_base;
978  }
979 
980  return 0;
981 }
982 
983 static int nut_write_trailer(AVFormatContext *s)
984 {
985  NUTContext *nut = s->priv_data;
986  AVIOContext *bc = s->pb, *dyn_bc;
987  int i, ret;
988 
989  while (nut->header_count < 3)
990  write_headers(s, bc);
991 
992  ret = avio_open_dyn_buf(&dyn_bc);
993  if (ret >= 0 && nut->sp_count) {
994  write_index(nut, dyn_bc);
995  put_packet(nut, bc, dyn_bc, 1, INDEX_STARTCODE);
996  }
997 
998  ff_nut_free_sp(nut);
999  for (i=0; i<s->nb_streams; i++)
1000  av_freep(&nut->stream[i].keyframe_pts);
1001 
1002  av_freep(&nut->stream);
1003  av_freep(&nut->chapter);
1004  av_freep(&nut->time_base);
1005 
1006  return 0;
1007 }
1008 
1009 AVOutputFormat ff_nut_muxer = {
1010  .name = "nut",
1011  .long_name = NULL_IF_CONFIG_SMALL("NUT"),
1012  .mime_type = "video/x-nut",
1013  .extensions = "nut",
1014  .priv_data_size = sizeof(NUTContext),
1015  .audio_codec = CONFIG_LIBVORBIS ? AV_CODEC_ID_VORBIS :
1016  CONFIG_LIBMP3LAME ? AV_CODEC_ID_MP3 : AV_CODEC_ID_MP2,
1017  .video_codec = AV_CODEC_ID_MPEG4,
1018  .write_header = nut_write_header,
1019  .write_packet = nut_write_packet,
1020  .write_trailer = nut_write_trailer,
1021  .flags = AVFMT_GLOBALHEADER | AVFMT_VARIABLE_FPS,
1022  .codec_tag = ff_nut_codec_tags,
1023 };
Definition: nut.h:99