/*
 * utils for libavcodec
 * Copyright (c) 2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * utils.
 */

#include "config.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/crc.h"
#include "libavutil/frame.h"
#include "libavutil/mathematics.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavutil/samplefmt.h"
#include "libavutil/dict.h"
#include "libavutil/avassert.h"
#include "avcodec.h"
#include "dsputil.h"
#include "libavutil/opt.h"
#include "thread.h"
#include "frame_thread_encoder.h"
#include "internal.h"
#include "bytestream.h"
#include "version.h"
#include <stdlib.h>
#include <stdarg.h>
#include <limits.h>
#include <float.h>
#if CONFIG_ICONV
# include <iconv.h>
#endif

volatile int ff_avcodec_locked;
static int volatile entangled_thread_counter = 0;
static int (*ff_lockmgr_cb)(void **mutex, enum AVLockOp op);
static void *codec_mutex;
static void *avformat_mutex;

void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size)
{
    if (min_size < *size)
        return ptr;

    min_size = FFMAX(17 * min_size / 16 + 32, min_size);

    ptr = av_realloc(ptr, min_size);
    /* we could set this to the unmodified min_size but this is safer
     * if the user lost the ptr and uses NULL now
     */
    if (!ptr)
        min_size = 0;

    *size = min_size;

    return ptr;
}
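/* Editorial note (not part of the original source): av_fast_realloc() grows a
 * buffer geometrically (17/16 of the requested size plus 32 bytes) so that
 * repeated calls with slowly increasing sizes do not reallocate every time.
 * A minimal, hypothetical caller might look like this ('needed_size' is
 * illustrative only):
 *
 *     uint8_t *buf = NULL;
 *     unsigned buf_size = 0;
 *     buf = av_fast_realloc(buf, &buf_size, needed_size);
 *     if (!buf)
 *         return AVERROR(ENOMEM);
 *     // buf_size now holds the (possibly larger) allocated size
 */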
static inline int ff_fast_malloc(void *ptr, unsigned int *size, size_t min_size, int zero_realloc)
{
    void **p = ptr;
    if (min_size < *size)
        return 0;
    min_size = FFMAX(17 * min_size / 16 + 32, min_size);
    av_free(*p);
    *p = zero_realloc ? av_mallocz(min_size) : av_malloc(min_size);
    if (!*p)
        min_size = 0;
    *size = min_size;
    return 1;
}

void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
{
    ff_fast_malloc(ptr, size, min_size, 0);
}

void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
{
    uint8_t **p = ptr;
    if (min_size > SIZE_MAX - FF_INPUT_BUFFER_PADDING_SIZE) {
        av_freep(p);
        *size = 0;
        return;
    }
    if (!ff_fast_malloc(p, size, min_size + FF_INPUT_BUFFER_PADDING_SIZE, 1))
        memset(*p + min_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
}

void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size)
{
    uint8_t **p = ptr;
    if (min_size > SIZE_MAX - FF_INPUT_BUFFER_PADDING_SIZE) {
        av_freep(p);
        *size = 0;
        return;
    }
    if (!ff_fast_malloc(p, size, min_size + FF_INPUT_BUFFER_PADDING_SIZE, 1))
        memset(*p, 0, min_size + FF_INPUT_BUFFER_PADDING_SIZE);
}

/* encoder management */
static AVCodec *first_avcodec = NULL;

AVCodec *av_codec_next(const AVCodec *c)
{
    if (c)
        return c->next;
    else
        return first_avcodec;
}

static void avcodec_init(void)
{
    static int initialized = 0;

    if (initialized != 0)
        return;
    initialized = 1;

    if (CONFIG_DSPUTIL)
        ff_dsputil_static_init();
}

int av_codec_is_encoder(const AVCodec *codec)
{
    return codec && (codec->encode_sub || codec->encode2);
}

int av_codec_is_decoder(const AVCodec *codec)
{
    return codec && codec->decode;
}

void avcodec_register(AVCodec *codec)
{
    AVCodec **p;
    avcodec_init();
    p = &first_avcodec;
    while (*p != NULL)
        p = &(*p)->next;
    *p = codec;
    codec->next = NULL;

    if (codec->init_static_data)
        codec->init_static_data(codec);
}

unsigned avcodec_get_edge_width(void)
{
    return EDGE_WIDTH;
}

void avcodec_set_dimensions(AVCodecContext *s, int width, int height)
{
    s->coded_width  = width;
    s->coded_height = height;
    s->width        = -((-width ) >> s->lowres);
    s->height       = -((-height) >> s->lowres);
}

#if (ARCH_ARM && HAVE_NEON) || ARCH_PPC || HAVE_MMX
# define STRIDE_ALIGN 16
#else
# define STRIDE_ALIGN 8
#endif
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
                               int linesize_align[AV_NUM_DATA_POINTERS])
{
    int i;
    int w_align = 1;
    int h_align = 1;

    switch (s->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUYV422:
    case AV_PIX_FMT_UYVY422:
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV440P:
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_GBRP:
    case AV_PIX_FMT_GRAY8:
    case AV_PIX_FMT_GRAY16BE:
    case AV_PIX_FMT_GRAY16LE:
    case AV_PIX_FMT_YUVJ420P:
    case AV_PIX_FMT_YUVJ422P:
    case AV_PIX_FMT_YUVJ440P:
    case AV_PIX_FMT_YUVJ444P:
    case AV_PIX_FMT_YUVA420P:
    case AV_PIX_FMT_YUVA422P:
    case AV_PIX_FMT_YUVA444P:
    case AV_PIX_FMT_YUV420P9LE:
    case AV_PIX_FMT_YUV420P9BE:
    case AV_PIX_FMT_YUV420P10LE:
    case AV_PIX_FMT_YUV420P10BE:
    case AV_PIX_FMT_YUV420P12LE:
    case AV_PIX_FMT_YUV420P12BE:
    case AV_PIX_FMT_YUV420P14LE:
    case AV_PIX_FMT_YUV420P14BE:
    case AV_PIX_FMT_YUV422P9LE:
    case AV_PIX_FMT_YUV422P9BE:
    case AV_PIX_FMT_YUV422P10LE:
    case AV_PIX_FMT_YUV422P10BE:
    case AV_PIX_FMT_YUV422P12LE:
    case AV_PIX_FMT_YUV422P12BE:
    case AV_PIX_FMT_YUV422P14LE:
    case AV_PIX_FMT_YUV422P14BE:
    case AV_PIX_FMT_YUV444P9LE:
    case AV_PIX_FMT_YUV444P9BE:
    case AV_PIX_FMT_YUV444P10LE:
    case AV_PIX_FMT_YUV444P10BE:
    case AV_PIX_FMT_YUV444P12LE:
    case AV_PIX_FMT_YUV444P12BE:
    case AV_PIX_FMT_YUV444P14LE:
    case AV_PIX_FMT_YUV444P14BE:
    case AV_PIX_FMT_GBRP9LE:
    case AV_PIX_FMT_GBRP9BE:
    case AV_PIX_FMT_GBRP10LE:
    case AV_PIX_FMT_GBRP10BE:
    case AV_PIX_FMT_GBRP12LE:
    case AV_PIX_FMT_GBRP12BE:
    case AV_PIX_FMT_GBRP14LE:
    case AV_PIX_FMT_GBRP14BE:
        w_align = 16; //FIXME assume 16 pixel per macroblock
        h_align = 16 * 2; // interlaced needs 2 macroblocks height
        break;
    case AV_PIX_FMT_YUV411P:
    case AV_PIX_FMT_UYYVYY411:
        w_align = 32;
        h_align = 8;
        break;
    case AV_PIX_FMT_YUV410P:
        if (s->codec_id == AV_CODEC_ID_SVQ1) {
            w_align = 64;
            h_align = 64;
        }
        break;
    case AV_PIX_FMT_RGB555:
        if (s->codec_id == AV_CODEC_ID_RPZA) {
            w_align = 4;
            h_align = 4;
        }
        break;
    case AV_PIX_FMT_PAL8:
    case AV_PIX_FMT_BGR8:
    case AV_PIX_FMT_RGB8:
        if (s->codec_id == AV_CODEC_ID_SMC ||
            s->codec_id == AV_CODEC_ID_CINEPAK) {
            w_align = 4;
            h_align = 4;
        }
        break;
    case AV_PIX_FMT_BGR24:
        if ((s->codec_id == AV_CODEC_ID_MSZH) ||
            (s->codec_id == AV_CODEC_ID_ZLIB)) {
            w_align = 4;
            h_align = 4;
        }
        break;
    case AV_PIX_FMT_RGB24:
        if (s->codec_id == AV_CODEC_ID_CINEPAK) {
            w_align = 4;
            h_align = 4;
        }
        break;
    default:
        w_align = 1;
        h_align = 1;
        break;
    }

    if (s->codec_id == AV_CODEC_ID_IFF_ILBM || s->codec_id == AV_CODEC_ID_IFF_BYTERUN1) {
        w_align = FFMAX(w_align, 8);
    }

    *width  = FFALIGN(*width,  w_align);
    *height = FFALIGN(*height, h_align);
    if (s->codec_id == AV_CODEC_ID_H264 || s->lowres)
        // some of the optimized chroma MC reads one line too much
        // which is also done in mpeg decoders with lowres > 0
        *height += 2;

    for (i = 0; i < 4; i++)
        linesize_align[i] = STRIDE_ALIGN;
}
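/* Editorial note (not part of the original source): a custom buffer allocator
 * would typically query the padded dimensions and stride requirements like
 * this before allocating its own planes (sketch only):
 *
 *     int w = avctx->width, h = avctx->height;
 *     int linesize_align[AV_NUM_DATA_POINTERS];
 *     avcodec_align_dimensions2(avctx, &w, &h, linesize_align);
 *     // w and h are now rounded up; linesize_align[i] gives the required
 *     // alignment for each plane's linesize
 *
 * update_frame_pool() below follows the same pattern.
 */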
void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->pix_fmt);
    int chroma_shift = desc->log2_chroma_w;
    int linesize_align[AV_NUM_DATA_POINTERS];
    int align;

    avcodec_align_dimensions2(s, width, height, linesize_align);
    align               = FFMAX(linesize_align[0], linesize_align[3]);
    linesize_align[1] <<= chroma_shift;
    linesize_align[2] <<= chroma_shift;
    align               = FFMAX3(align, linesize_align[1], linesize_align[2]);
    *width              = FFALIGN(*width, align);
}

int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
                             enum AVSampleFormat sample_fmt, const uint8_t *buf,
                             int buf_size, int align)
{
    int ch, planar, needed_size, ret = 0;

    needed_size = av_samples_get_buffer_size(NULL, nb_channels,
                                             frame->nb_samples, sample_fmt,
                                             align);
    if (buf_size < needed_size)
        return AVERROR(EINVAL);

    planar = av_sample_fmt_is_planar(sample_fmt);
    if (planar && nb_channels > AV_NUM_DATA_POINTERS) {
        if (!(frame->extended_data = av_mallocz(nb_channels *
                                                sizeof(*frame->extended_data))))
            return AVERROR(ENOMEM);
    } else {
        frame->extended_data = frame->data;
    }

    if ((ret = av_samples_fill_arrays(frame->extended_data, &frame->linesize[0],
                                      (uint8_t *)(intptr_t)buf, nb_channels, frame->nb_samples,
                                      sample_fmt, align)) < 0) {
        if (frame->extended_data != frame->data)
            av_freep(&frame->extended_data);
        return ret;
    }
    if (frame->extended_data != frame->data) {
        for (ch = 0; ch < AV_NUM_DATA_POINTERS; ch++)
            frame->data[ch] = frame->extended_data[ch];
    }

    return ret;
}
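/* Editorial note (not part of the original source): avcodec_fill_audio_frame()
 * only sets up the frame's data/linesize pointers into a caller-owned buffer;
 * it does not allocate samples. A minimal, hypothetical caller:
 *
 *     frame->nb_samples = avctx->frame_size;
 *     int size = av_samples_get_buffer_size(NULL, avctx->channels,
 *                                           frame->nb_samples,
 *                                           avctx->sample_fmt, 0);
 *     uint8_t *samples = av_malloc(size);
 *     if (!samples)
 *         return AVERROR(ENOMEM);
 *     ret = avcodec_fill_audio_frame(frame, avctx->channels, avctx->sample_fmt,
 *                                    samples, size, 0);
 *
 * The caller remains responsible for freeing 'samples'.
 */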
static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
{
    FramePool *pool = avctx->internal->pool;
    int i, ret;

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO: {
        AVPicture picture;
        int size[4] = { 0 };
        int w = frame->width;
        int h = frame->height;
        int tmpsize, unaligned;

        if (pool->format == frame->format &&
            pool->width == frame->width && pool->height == frame->height)
            return 0;

        avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);

        if (!(avctx->flags & CODEC_FLAG_EMU_EDGE)) {
            w += EDGE_WIDTH * 2;
            h += EDGE_WIDTH * 2;
        }

        do {
            // NOTE: do not align linesizes individually, this breaks e.g. assumptions
            // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
            av_image_fill_linesizes(picture.linesize, avctx->pix_fmt, w);
            // increase alignment of w for next try (rhs gives the lowest bit set in w)
            w += w & ~(w - 1);

            unaligned = 0;
            for (i = 0; i < 4; i++)
                unaligned |= picture.linesize[i] % pool->stride_align[i];
        } while (unaligned);

        tmpsize = av_image_fill_pointers(picture.data, avctx->pix_fmt, h,
                                         NULL, picture.linesize);
        if (tmpsize < 0)
            return -1;

        for (i = 0; i < 3 && picture.data[i + 1]; i++)
            size[i] = picture.data[i + 1] - picture.data[i];
        size[i] = tmpsize - (picture.data[i] - picture.data[0]);

        for (i = 0; i < 4; i++) {
            av_buffer_pool_uninit(&pool->pools[i]);
            pool->linesize[i] = picture.linesize[i];
            if (size[i]) {
                pool->pools[i] = av_buffer_pool_init(size[i] + 16,
                                                     CONFIG_MEMORY_POISONING ?
                                                        NULL :
                                                        av_buffer_allocz);
                if (!pool->pools[i]) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }
        }
        pool->format = frame->format;
        pool->width  = frame->width;
        pool->height = frame->height;

        break;
    }
    case AVMEDIA_TYPE_AUDIO: {
        int ch     = av_frame_get_channels(frame); //av_get_channel_layout_nb_channels(frame->channel_layout);
        int planar = av_sample_fmt_is_planar(frame->format);
        int planes = planar ? ch : 1;

        if (pool->format == frame->format && pool->planes == planes &&
            pool->channels == ch && frame->nb_samples == pool->samples)
            return 0;

        av_buffer_pool_uninit(&pool->pools[0]);
        ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
                                         frame->nb_samples, frame->format, 0);
        if (ret < 0)
            goto fail;

        pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
        if (!pool->pools[0]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        pool->format   = frame->format;
        pool->planes   = planes;
        pool->channels = ch;
        pool->samples  = frame->nb_samples;
        break;
    }
    default: av_assert0(0);
    }
    return 0;
fail:
    for (i = 0; i < 4; i++)
        av_buffer_pool_uninit(&pool->pools[i]);
    pool->format = -1;
    pool->planes = pool->channels = pool->samples = 0;
    pool->width  = pool->height = 0;
    return ret;
}

static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
{
    FramePool *pool = avctx->internal->pool;
    int planes = pool->planes;
    int i;

    frame->linesize[0] = pool->linesize[0];

    if (planes > AV_NUM_DATA_POINTERS) {
        frame->extended_data = av_mallocz(planes * sizeof(*frame->extended_data));
        frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
        frame->extended_buf  = av_mallocz(frame->nb_extended_buf *
                                          sizeof(*frame->extended_buf));
        if (!frame->extended_data || !frame->extended_buf) {
            av_freep(&frame->extended_data);
            av_freep(&frame->extended_buf);
            return AVERROR(ENOMEM);
        }
    } else {
        frame->extended_data = frame->data;
        av_assert0(frame->nb_extended_buf == 0);
    }

    for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
        frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
        if (!frame->buf[i])
            goto fail;
        frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
    }
    for (i = 0; i < frame->nb_extended_buf; i++) {
        frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
        if (!frame->extended_buf[i])
            goto fail;
        frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
    }

    if (avctx->debug & FF_DEBUG_BUFFERS)
        av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);

    return 0;
fail:
    av_frame_unref(frame);
    return AVERROR(ENOMEM);
}

static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
{
    FramePool *pool = s->internal->pool;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
    int pixel_size = desc->comp[0].step_minus1 + 1;
    int h_chroma_shift, v_chroma_shift;
    int i;

    if (pic->data[0] != NULL) {
        av_log(s, AV_LOG_ERROR, "pic->data[0]!=NULL in avcodec_default_get_buffer\n");
        return -1;
    }

    memset(pic->data, 0, sizeof(pic->data));
    pic->extended_data = pic->data;

    av_pix_fmt_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift);

    for (i = 0; i < 4 && pool->pools[i]; i++) {
        const int h_shift = i == 0 ? 0 : h_chroma_shift;
        const int v_shift = i == 0 ? 0 : v_chroma_shift;

        pic->linesize[i] = pool->linesize[i];

        pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
        if (!pic->buf[i])
            goto fail;

        // no edge if EDGE EMU or not planar YUV
        if ((s->flags & CODEC_FLAG_EMU_EDGE) || !pool->pools[2])
            pic->data[i] = pic->buf[i]->data;
        else {
            pic->data[i] = pic->buf[i]->data +
                FFALIGN((pic->linesize[i] * EDGE_WIDTH >> v_shift) +
                        (pixel_size * EDGE_WIDTH >> h_shift), pool->stride_align[i]);
        }
    }
    for (; i < AV_NUM_DATA_POINTERS; i++) {
        pic->data[i]     = NULL;
        pic->linesize[i] = 0;
    }
    if (pic->data[1] && !pic->data[2])
        avpriv_set_systematic_pal2((uint32_t *)pic->data[1], s->pix_fmt);

    if (s->debug & FF_DEBUG_BUFFERS)
        av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);

    return 0;
fail:
    av_frame_unref(pic);
    return AVERROR(ENOMEM);
}
void avpriv_color_frame(AVFrame *frame, const int c[4])
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
    int p, y, x;

    av_assert0(desc->flags & PIX_FMT_PLANAR);

    for (p = 0; p < desc->nb_components; p++) {
        uint8_t *dst = frame->data[p];
        int is_chroma = p == 1 || p == 2;
        int bytes = -((-frame->width) >> (is_chroma ? desc->log2_chroma_w : 0));
        for (y = 0; y < -((-frame->height) >> (is_chroma ? desc->log2_chroma_h : 0)); y++) {
            if (desc->comp[0].depth_minus1 >= 8) {
                for (x = 0; x < bytes; x++)
                    ((uint16_t *)dst)[x] = c[p];
            } else
                memset(dst, c[p], bytes);
            dst += frame->linesize[p];
        }
    }
}

int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    int ret;

    if ((ret = update_frame_pool(avctx, frame)) < 0)
        return ret;

#if FF_API_GET_BUFFER
    frame->type = FF_BUFFER_TYPE_INTERNAL;
#endif

    switch (avctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        return video_get_buffer(avctx, frame);
    case AVMEDIA_TYPE_AUDIO:
        return audio_get_buffer(avctx, frame);
    default:
        return -1;
    }
}
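/* Editorial note (not part of the original source): applications that install
 * their own AVCodecContext.get_buffer2 callback commonly fall back to the
 * default allocator above for cases they do not handle themselves. A
 * hypothetical sketch ('my_get_buffer2' is illustrative only):
 *
 *     static int my_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
 *     {
 *         if (avctx->codec_type != AVMEDIA_TYPE_VIDEO)
 *             return avcodec_default_get_buffer2(avctx, frame, flags);
 *         // ... wrap application-owned memory in AVBufferRefs here ...
 *         return avcodec_default_get_buffer2(avctx, frame, flags);
 *     }
 */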
int ff_init_buffer_info(AVCodecContext *avctx, AVFrame *frame)
{
    if (avctx->pkt) {
        frame->pkt_pts = avctx->pkt->pts;
        av_frame_set_pkt_pos     (frame, avctx->pkt->pos);
        av_frame_set_pkt_duration(frame, avctx->pkt->duration);
        av_frame_set_pkt_size    (frame, avctx->pkt->size);
    } else {
        frame->pkt_pts = AV_NOPTS_VALUE;
        av_frame_set_pkt_pos     (frame, -1);
        av_frame_set_pkt_duration(frame, 0);
        av_frame_set_pkt_size    (frame, -1);
    }
    frame->reordered_opaque = avctx->reordered_opaque;

    switch (avctx->codec->type) {
    case AVMEDIA_TYPE_VIDEO:
        frame->width  = FFMAX(avctx->width,  -((-avctx->coded_width ) >> avctx->lowres));
        frame->height = FFMAX(avctx->height, -((-avctx->coded_height) >> avctx->lowres));
        if (frame->format < 0)
            frame->format = avctx->pix_fmt;
        if (!frame->sample_aspect_ratio.num)
            frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
        break;
    case AVMEDIA_TYPE_AUDIO:
        if (!frame->sample_rate)
            frame->sample_rate = avctx->sample_rate;
        if (frame->format < 0)
            frame->format = avctx->sample_fmt;
        if (!frame->channel_layout) {
            if (avctx->channel_layout) {
                if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
                    avctx->channels) {
                    av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
                                                "configuration.\n");
                    return AVERROR(EINVAL);
                }

                frame->channel_layout = avctx->channel_layout;
            } else {
                if (avctx->channels > FF_SANE_NB_CHANNELS) {
                    av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
                           avctx->channels);
                    return AVERROR(ENOSYS);
                }
            }
        }
        av_frame_set_channels(frame, avctx->channels);
        break;
    }
    return 0;
}

#if FF_API_GET_BUFFER
int avcodec_default_get_buffer(AVCodecContext *avctx, AVFrame *frame)
{
    return avcodec_default_get_buffer2(avctx, frame, 0);
}

typedef struct CompatReleaseBufPriv {
    AVCodecContext avctx;
    AVFrame frame;
} CompatReleaseBufPriv;

static void compat_free_buffer(void *opaque, uint8_t *data)
{
    CompatReleaseBufPriv *priv = opaque;
    if (priv->avctx.release_buffer)
        priv->avctx.release_buffer(&priv->avctx, &priv->frame);
    av_freep(&priv);
}

static void compat_release_buffer(void *opaque, uint8_t *data)
{
    AVBufferRef *buf = opaque;
    av_buffer_unref(&buf);
}
#endif

static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    int ret;

    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        if ((ret = av_image_check_size(avctx->width, avctx->height, 0, avctx)) < 0 || avctx->pix_fmt<0) {
            av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
            return AVERROR(EINVAL);
        }
    }
    if ((ret = ff_init_buffer_info(avctx, frame)) < 0)
        return ret;

#if FF_API_GET_BUFFER
    /*
     * Wrap an old get_buffer()-allocated buffer in a bunch of AVBuffers.
     * We wrap each plane in its own AVBuffer. Each of those has a reference to
     * a dummy AVBuffer as its private data, unreffing it on free.
     * When all the planes are freed, the dummy buffer's free callback calls
     * release_buffer().
     */
    if (avctx->get_buffer) {
        CompatReleaseBufPriv *priv = NULL;
        AVBufferRef *dummy_buf = NULL;
        int planes, i, ret;

        if (flags & AV_GET_BUFFER_FLAG_REF)
            frame->reference = 1;

        ret = avctx->get_buffer(avctx, frame);
        if (ret < 0)
            return ret;

        /* return if the buffers are already set up
         * this would happen e.g. when a custom get_buffer() calls
         * avcodec_default_get_buffer
         */
        if (frame->buf[0])
            return 0;

        priv = av_mallocz(sizeof(*priv));
        if (!priv) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        priv->avctx = *avctx;
        priv->frame = *frame;

        dummy_buf = av_buffer_create(NULL, 0, compat_free_buffer, priv, 0);
        if (!dummy_buf) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

#define WRAP_PLANE(ref_out, data, data_size)                            \
do {                                                                    \
    AVBufferRef *dummy_ref = av_buffer_ref(dummy_buf);                  \
    if (!dummy_ref) {                                                   \
        ret = AVERROR(ENOMEM);                                          \
        goto fail;                                                      \
    }                                                                   \
    ref_out = av_buffer_create(data, data_size, compat_release_buffer,  \
                               dummy_ref, 0);                           \
    if (!ref_out) {                                                     \
        av_frame_unref(frame);                                          \
        ret = AVERROR(ENOMEM);                                          \
        goto fail;                                                      \
    }                                                                   \
} while (0)

        if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
            const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);

            planes = av_pix_fmt_count_planes(frame->format);
            /* workaround for AVHWAccel plane count of 0, buf[0] is used as
               check for allocated buffers: make libavcodec happy */
            if (desc && desc->flags & PIX_FMT_HWACCEL)
                planes = 1;
            if (!desc || planes <= 0) {
                ret = AVERROR(EINVAL);
                goto fail;
            }
            for (i = 0; i < planes; i++) {
                int v_shift    = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
                int plane_size = (frame->height >> v_shift) * frame->linesize[i];

                WRAP_PLANE(frame->buf[i], frame->data[i], plane_size);
            }
        } else {
            int planar = av_sample_fmt_is_planar(frame->format);
            planes = planar ? avctx->channels : 1;

            if (planes > FF_ARRAY_ELEMS(frame->buf)) {
                frame->nb_extended_buf = planes - FF_ARRAY_ELEMS(frame->buf);
                frame->extended_buf = av_malloc(sizeof(*frame->extended_buf) *
                                                frame->nb_extended_buf);
                if (!frame->extended_buf) {
                    ret = AVERROR(ENOMEM);
                    goto fail;
                }
            }

            for (i = 0; i < FFMIN(planes, FF_ARRAY_ELEMS(frame->buf)); i++)
                WRAP_PLANE(frame->buf[i], frame->extended_data[i], frame->linesize[0]);

            for (i = 0; i < frame->nb_extended_buf; i++)
                WRAP_PLANE(frame->extended_buf[i],
                           frame->extended_data[i + FF_ARRAY_ELEMS(frame->buf)],
                           frame->linesize[0]);
        }

        av_buffer_unref(&dummy_buf);

        frame->width  = avctx->width;
        frame->height = avctx->height;

        return 0;

fail:
        avctx->release_buffer(avctx, frame);
        av_freep(&priv);
        av_buffer_unref(&dummy_buf);
        return ret;
    }
#endif

    ret = avctx->get_buffer2(avctx, frame, flags);

    if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame->width  = avctx->width;
        frame->height = avctx->height;
    }

    return ret;
}

int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    int ret = get_buffer_internal(avctx, frame, flags);
    if (ret < 0)
        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
    return ret;
}

static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame)
{
    AVFrame tmp;
    int ret;

    av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);

    if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
        av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
               frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
        av_frame_unref(frame);
    }

    ff_init_buffer_info(avctx, frame);

    if (!frame->data[0])
        return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);

    if (av_frame_is_writable(frame))
        return 0;

    av_frame_move_ref(&tmp, frame);

    ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
    if (ret < 0) {
        av_frame_unref(&tmp);
        return ret;
    }

    av_image_copy(frame->data, frame->linesize, tmp.data, tmp.linesize,
                  frame->format, frame->width, frame->height);

    av_frame_unref(&tmp);

    return 0;
}
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
{
    int ret = reget_buffer_internal(avctx, frame);
    if (ret < 0)
        av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
    return ret;
}

#if FF_API_GET_BUFFER
void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic)
{
    av_assert0(s->codec_type == AVMEDIA_TYPE_VIDEO);

    av_frame_unref(pic);
}

int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic)
{
    av_assert0(0);
}
#endif

int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2), void *arg, int *ret, int count, int size)
{
    int i;

    for (i = 0; i < count; i++) {
        int r = func(c, (char *)arg + i * size);
        if (ret)
            ret[i] = r;
    }
    return 0;
}

int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int jobnr, int threadnr), void *arg, int *ret, int count)
{
    int i;

    for (i = 0; i < count; i++) {
        int r = func(c, arg, i, 0);
        if (ret)
            ret[i] = r;
    }
    return 0;
}

static int is_hwaccel_pix_fmt(enum AVPixelFormat pix_fmt)
{
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
    return desc->flags & PIX_FMT_HWACCEL;
}

enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
{
    while (*fmt != AV_PIX_FMT_NONE && is_hwaccel_pix_fmt(*fmt))
        ++fmt;
    return fmt[0];
}

void avcodec_get_frame_defaults(AVFrame *frame)
{
#if LIBAVCODEC_VERSION_MAJOR >= 55
    // extended_data should explicitly be freed when needed, this code is unsafe currently
    // also this is not compatible to the <55 ABI/API
    if (frame->extended_data != frame->data && 0)
        av_freep(&frame->extended_data);
#endif

    memset(frame, 0, sizeof(AVFrame));

    frame->pts     =
    frame->pkt_dts =
    frame->pkt_pts = AV_NOPTS_VALUE;
    av_frame_set_best_effort_timestamp(frame, AV_NOPTS_VALUE);
    av_frame_set_pkt_duration         (frame, 0);
    av_frame_set_pkt_pos              (frame, -1);
    av_frame_set_pkt_size             (frame, -1);
    frame->key_frame           = 1;
    frame->sample_aspect_ratio = (AVRational) { 0, 1 };
    frame->format              = -1; /* unknown */
    frame->extended_data       = frame->data;
}

AVFrame *avcodec_alloc_frame(void)
{
    AVFrame *frame = av_malloc(sizeof(AVFrame));

    if (frame == NULL)
        return NULL;

    frame->extended_data = NULL;
    avcodec_get_frame_defaults(frame);

    return frame;
}

void avcodec_free_frame(AVFrame **frame)
{
    AVFrame *f;

    if (!frame || !*frame)
        return;

    f = *frame;

    if (f->extended_data != f->data)
        av_freep(&f->extended_data);

    av_freep(frame);
}

#define MAKE_ACCESSORS(str, name, type, field) \
    type av_##name##_get_##field(const str *s) { return s->field; } \
    void av_##name##_set_##field(str *s, type v) { s->field = v; }

MAKE_ACCESSORS(AVCodecContext, codec, AVRational, pkt_timebase)
MAKE_ACCESSORS(AVCodecContext, codec, const AVCodecDescriptor *, codec_descriptor)
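/* Editorial note (not part of the original source): for
 * (AVCodecContext, codec, AVRational, pkt_timebase) the MAKE_ACCESSORS macro
 * above expands to exactly this pair of functions:
 *
 *     AVRational av_codec_get_pkt_timebase(const AVCodecContext *s) { return s->pkt_timebase; }
 *     void       av_codec_set_pkt_timebase(AVCodecContext *s, AVRational v) { s->pkt_timebase = v; }
 */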
static void avcodec_get_subtitle_defaults(AVSubtitle *sub)
{
    memset(sub, 0, sizeof(*sub));
    sub->pts = AV_NOPTS_VALUE;
}

static int get_bit_rate(AVCodecContext *ctx)
{
    int bit_rate;
    int bits_per_sample;

    switch (ctx->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
    case AVMEDIA_TYPE_DATA:
    case AVMEDIA_TYPE_SUBTITLE:
    case AVMEDIA_TYPE_ATTACHMENT:
        bit_rate = ctx->bit_rate;
        break;
    case AVMEDIA_TYPE_AUDIO:
        bits_per_sample = av_get_bits_per_sample(ctx->codec_id);
        bit_rate        = bits_per_sample ? ctx->sample_rate * ctx->channels * bits_per_sample : ctx->bit_rate;
        break;
    default:
        bit_rate = 0;
        break;
    }
    return bit_rate;
}

#if FF_API_AVCODEC_OPEN
int attribute_align_arg avcodec_open(AVCodecContext *avctx, AVCodec *codec)
{
    return avcodec_open2(avctx, codec, NULL);
}
#endif

int attribute_align_arg ff_codec_open2_recursive(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
{
    int ret = 0;

    ff_unlock_avcodec();

    ret = avcodec_open2(avctx, codec, options);

    ff_lock_avcodec(avctx);
    return ret;
}

int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
{
    int ret = 0;
    AVDictionary *tmp = NULL;

    if (avcodec_is_open(avctx))
        return 0;

    if ((!codec && !avctx->codec)) {
        av_log(avctx, AV_LOG_ERROR, "No codec provided to avcodec_open2()\n");
        return AVERROR(EINVAL);
    }
    if ((codec && avctx->codec && codec != avctx->codec)) {
        av_log(avctx, AV_LOG_ERROR, "This AVCodecContext was allocated for %s, "
                                    "but %s passed to avcodec_open2()\n", avctx->codec->name, codec->name);
        return AVERROR(EINVAL);
    }
    if (!codec)
        codec = avctx->codec;

    if (avctx->extradata_size < 0 || avctx->extradata_size >= FF_MAX_EXTRADATA_SIZE)
        return AVERROR(EINVAL);

    if (options)
        av_dict_copy(&tmp, *options, 0);

    ret = ff_lock_avcodec(avctx);
    if (ret < 0)
        return ret;

    avctx->internal = av_mallocz(sizeof(AVCodecInternal));
    if (!avctx->internal) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    avctx->internal->pool = av_mallocz(sizeof(*avctx->internal->pool));
    if (!avctx->internal->pool) {
        ret = AVERROR(ENOMEM);
        goto free_and_end;
    }

    if (codec->priv_data_size > 0) {
        if (!avctx->priv_data) {
            avctx->priv_data = av_mallocz(codec->priv_data_size);
            if (!avctx->priv_data) {
                ret = AVERROR(ENOMEM);
                goto end;
            }
            if (codec->priv_class) {
                *(const AVClass **)avctx->priv_data = codec->priv_class;
                av_opt_set_defaults(avctx->priv_data);
            }
        }
        if (codec->priv_class && (ret = av_opt_set_dict(avctx->priv_data, &tmp)) < 0)
            goto free_and_end;
    } else {
        avctx->priv_data = NULL;
    }
    if ((ret = av_opt_set_dict(avctx, &tmp)) < 0)
        goto free_and_end;

    // only call avcodec_set_dimensions() for non H.264/VP6F codecs so as not to overwrite previously setup dimensions
    if (!(avctx->coded_width && avctx->coded_height && avctx->width && avctx->height &&
          (avctx->codec_id == AV_CODEC_ID_H264 || avctx->codec_id == AV_CODEC_ID_VP6F))) {
        if (avctx->coded_width && avctx->coded_height)
            avcodec_set_dimensions(avctx, avctx->coded_width, avctx->coded_height);
        else if (avctx->width && avctx->height)
            avcodec_set_dimensions(avctx, avctx->width, avctx->height);
    }

    if ((avctx->coded_width || avctx->coded_height || avctx->width || avctx->height)
        && (  av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx) < 0
           || av_image_check_size(avctx->width,       avctx->height,       0, avctx) < 0)) {
        av_log(avctx, AV_LOG_WARNING, "Ignoring invalid width/height values\n");
        avcodec_set_dimensions(avctx, 0, 0);
    }

    /* if the decoder init function was already called previously,
     * free the already allocated subtitle_header before overwriting it */
    if (av_codec_is_decoder(codec))
        av_freep(&avctx->subtitle_header);

    if (avctx->channels > FF_SANE_NB_CHANNELS) {
        ret = AVERROR(EINVAL);
        goto free_and_end;
    }

    avctx->codec = codec;
    if ((avctx->codec_type == AVMEDIA_TYPE_UNKNOWN || avctx->codec_type == codec->type) &&
        avctx->codec_id == AV_CODEC_ID_NONE) {
        avctx->codec_type = codec->type;
        avctx->codec_id   = codec->id;
    }
    if (avctx->codec_id != codec->id || (avctx->codec_type != codec->type
                                         && avctx->codec_type != AVMEDIA_TYPE_ATTACHMENT)) {
        av_log(avctx, AV_LOG_ERROR, "Codec type or id mismatches\n");
        ret = AVERROR(EINVAL);
        goto free_and_end;
    }
    avctx->frame_number = 0;
    avctx->codec_descriptor = avcodec_descriptor_get(avctx->codec_id);

    if (avctx->codec->capabilities & CODEC_CAP_EXPERIMENTAL &&
        avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
        const char *codec_string = av_codec_is_encoder(codec) ? "encoder" : "decoder";
        AVCodec *codec2;
        av_log(avctx, AV_LOG_ERROR,
               "The %s '%s' is experimental but experimental codecs are not enabled, "
               "add '-strict %d' if you want to use it.\n",
               codec_string, codec->name, FF_COMPLIANCE_EXPERIMENTAL);
        codec2 = av_codec_is_encoder(codec) ? avcodec_find_encoder(codec->id) : avcodec_find_decoder(codec->id);
        if (!(codec2->capabilities & CODEC_CAP_EXPERIMENTAL))
            av_log(avctx, AV_LOG_ERROR, "Alternatively use the non experimental %s '%s'.\n",
                   codec_string, codec2->name);
        ret = AVERROR_EXPERIMENTAL;
        goto free_and_end;
    }

    if (avctx->codec_type == AVMEDIA_TYPE_AUDIO &&
        (!avctx->time_base.num || !avctx->time_base.den)) {
        avctx->time_base.num = 1;
        avctx->time_base.den = avctx->sample_rate;
    }

    if (!HAVE_THREADS)
        av_log(avctx, AV_LOG_WARNING, "Warning: not compiled with thread support, using thread emulation\n");

    if (CONFIG_FRAME_THREAD_ENCODER) {
        ff_unlock_avcodec(); //we will instantiate a few encoders thus kick the counter to prevent false detection of a problem
        ret = ff_frame_thread_encoder_init(avctx, options ? *options : NULL);
        ff_lock_avcodec(avctx);
        if (ret < 0)
            goto free_and_end;
    }

    if (HAVE_THREADS && !avctx->thread_opaque
        && !(avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME))) {
        ret = ff_thread_init(avctx);
        if (ret < 0) {
            goto free_and_end;
        }
    }
    if (!HAVE_THREADS && !(codec->capabilities & CODEC_CAP_AUTO_THREADS))
        avctx->thread_count = 1;

    if (avctx->codec->max_lowres < avctx->lowres || avctx->lowres < 0) {
        av_log(avctx, AV_LOG_ERROR, "The maximum value for lowres supported by the decoder is %d\n",
               avctx->codec->max_lowres);
        ret = AVERROR(EINVAL);
        goto free_and_end;
    }

    if (av_codec_is_encoder(avctx->codec)) {
        int i;
        if (avctx->codec->sample_fmts) {
            for (i = 0; avctx->codec->sample_fmts[i] != AV_SAMPLE_FMT_NONE; i++) {
                if (avctx->sample_fmt == avctx->codec->sample_fmts[i])
                    break;
                if (avctx->channels == 1 &&
                    av_get_planar_sample_fmt(avctx->sample_fmt) ==
                    av_get_planar_sample_fmt(avctx->codec->sample_fmts[i])) {
                    avctx->sample_fmt = avctx->codec->sample_fmts[i];
                    break;
                }
            }
            if (avctx->codec->sample_fmts[i] == AV_SAMPLE_FMT_NONE) {
                char buf[128];
                snprintf(buf, sizeof(buf), "%d", avctx->sample_fmt);
                av_log(avctx, AV_LOG_ERROR, "Specified sample format %s is invalid or not supported\n",
                       (char *)av_x_if_null(av_get_sample_fmt_name(avctx->sample_fmt), buf));
                ret = AVERROR(EINVAL);
                goto free_and_end;
            }
        }
        if (avctx->codec->pix_fmts) {
            for (i = 0; avctx->codec->pix_fmts[i] != AV_PIX_FMT_NONE; i++)
                if (avctx->pix_fmt == avctx->codec->pix_fmts[i])
                    break;
            if (avctx->codec->pix_fmts[i] == AV_PIX_FMT_NONE
                && !((avctx->codec_id == AV_CODEC_ID_MJPEG || avctx->codec_id == AV_CODEC_ID_LJPEG)
                     && avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL)) {
                char buf[128];
                snprintf(buf, sizeof(buf), "%d", avctx->pix_fmt);
                av_log(avctx, AV_LOG_ERROR, "Specified pixel format %s is invalid or not supported\n",
                       (char *)av_x_if_null(av_get_pix_fmt_name(avctx->pix_fmt), buf));
                ret = AVERROR(EINVAL);
                goto free_and_end;
            }
        }
        if (avctx->codec->supported_samplerates) {
            for (i = 0; avctx->codec->supported_samplerates[i] != 0; i++)
                if (avctx->sample_rate == avctx->codec->supported_samplerates[i])
                    break;
            if (avctx->codec->supported_samplerates[i] == 0) {
                av_log(avctx, AV_LOG_ERROR, "Specified sample rate %d is not supported\n",
                       avctx->sample_rate);
                ret = AVERROR(EINVAL);
                goto free_and_end;
            }
        }
        if (avctx->codec->channel_layouts) {
            if (!avctx->channel_layout) {
                av_log(avctx, AV_LOG_WARNING, "Channel layout not specified\n");
            } else {
                for (i = 0; avctx->codec->channel_layouts[i] != 0; i++)
                    if (avctx->channel_layout == avctx->codec->channel_layouts[i])
                        break;
                if (avctx->codec->channel_layouts[i] == 0) {
                    char buf[512];
                    av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
                    av_log(avctx, AV_LOG_ERROR, "Specified channel layout '%s' is not supported\n", buf);
                    ret = AVERROR(EINVAL);
                    goto free_and_end;
                }
            }
        }
        if (avctx->channel_layout && avctx->channels) {
            int channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
            if (channels != avctx->channels) {
                char buf[512];
                av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
                av_log(avctx, AV_LOG_ERROR,
                       "Channel layout '%s' with %d channels does not match number of specified channels %d\n",
                       buf, channels, avctx->channels);
                ret = AVERROR(EINVAL);
                goto free_and_end;
            }
        } else if (avctx->channel_layout) {
            avctx->channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
        }
        if (avctx->codec_type == AVMEDIA_TYPE_VIDEO &&
            avctx->codec_id != AV_CODEC_ID_PNG // For mplayer
           ) {
            if (avctx->width <= 0 || avctx->height <= 0) {
                av_log(avctx, AV_LOG_ERROR, "dimensions not set\n");
                ret = AVERROR(EINVAL);
                goto free_and_end;
            }
        }
        if (   (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
            && avctx->bit_rate > 0 && avctx->bit_rate < 1000) {
            av_log(avctx, AV_LOG_WARNING, "Bitrate %d is extremely low, maybe you mean %dk\n", avctx->bit_rate, avctx->bit_rate);
        }

        if (!avctx->rc_initial_buffer_occupancy)
            avctx->rc_initial_buffer_occupancy = avctx->rc_buffer_size * 3 / 4;
    }

    avctx->pts_correction_num_faulty_pts =
    avctx->pts_correction_num_faulty_dts = 0;
    avctx->pts_correction_last_pts =
    avctx->pts_correction_last_dts = INT64_MIN;

    if (   avctx->codec->init && (!(avctx->active_thread_type&FF_THREAD_FRAME)
        || avctx->internal->frame_thread_encoder)) {
        ret = avctx->codec->init(avctx);
        if (ret < 0) {
            goto free_and_end;
        }
    }

    ret=0;

    if (av_codec_is_decoder(avctx->codec)) {
        if (!avctx->bit_rate)
            avctx->bit_rate = get_bit_rate(avctx);
        /* validate channel layout from the decoder */
        if (avctx->channel_layout) {
            int channels = av_get_channel_layout_nb_channels(avctx->channel_layout);
            if (!avctx->channels)
                avctx->channels = channels;
            else if (channels != avctx->channels) {
                char buf[512];
                av_get_channel_layout_string(buf, sizeof(buf), -1, avctx->channel_layout);
                av_log(avctx, AV_LOG_WARNING,
                       "Channel layout '%s' with %d channels does not match specified number of channels %d: "
                       "ignoring specified channel layout\n",
                       buf, channels, avctx->channels);
                avctx->channel_layout = 0;
            }
        }
        if (avctx->channels && avctx->channels < 0 ||
            avctx->channels > FF_SANE_NB_CHANNELS) {
            ret = AVERROR(EINVAL);
            goto free_and_end;
        }
        if (avctx->sub_charenc) {
            if (avctx->codec_type != AVMEDIA_TYPE_SUBTITLE) {
                av_log(avctx, AV_LOG_ERROR, "Character encoding is only "
                       "supported with subtitles codecs\n");
                ret = AVERROR(EINVAL);
                goto free_and_end;
            } else if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB) {
                av_log(avctx, AV_LOG_WARNING, "Codec '%s' is bitmap-based, "
                       "subtitles character encoding will be ignored\n",
                       avctx->codec_descriptor->name);
                avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_DO_NOTHING;
            } else {
                /* input character encoding is set for a text based subtitle
                 * codec at this point */
                if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_AUTOMATIC)
                    avctx->sub_charenc_mode = FF_SUB_CHARENC_MODE_PRE_DECODER;

                if (avctx->sub_charenc_mode == FF_SUB_CHARENC_MODE_PRE_DECODER) {
#if CONFIG_ICONV
                    iconv_t cd = iconv_open("UTF-8", avctx->sub_charenc);
                    if (cd == (iconv_t)-1) {
                        av_log(avctx, AV_LOG_ERROR, "Unable to open iconv context "
                               "with input character encoding \"%s\"\n", avctx->sub_charenc);
                        ret = AVERROR(errno);
                        goto free_and_end;
                    }
                    iconv_close(cd);
#else
                    av_log(avctx, AV_LOG_ERROR, "Character encoding subtitles "
                           "conversion needs a libavcodec built with iconv support "
                           "for this codec\n");
                    ret = AVERROR(ENOSYS);
                    goto free_and_end;
#endif
                }
            }
        }
    }
end:
    ff_unlock_avcodec();
    if (options) {
        av_dict_free(options);
        *options = tmp;
    }

    return ret;
free_and_end:
    av_dict_free(&tmp);
    av_freep(&avctx->priv_data);
    if (avctx->internal)
        av_freep(&avctx->internal->pool);
    av_freep(&avctx->internal);
    avctx->codec = NULL;
    goto end;
}
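/* Editorial note (not part of the original source): the usual calling sequence
 * for avcodec_open2() from an application is roughly (sketch only, error
 * handling abbreviated):
 *
 *     AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_H264);
 *     AVCodecContext *avctx = avcodec_alloc_context3(codec);
 *     AVDictionary *opts = NULL;
 *     av_dict_set(&opts, "threads", "auto", 0);
 *     if (avcodec_open2(avctx, codec, &opts) < 0)
 *         abort();            // open failed
 *     av_dict_free(&opts);    // entries not consumed by the codec remain here
 */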
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int size)
{
    if (size < 0 || avpkt->size < 0 || size > INT_MAX - FF_INPUT_BUFFER_PADDING_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "Size %d invalid\n", size);
        return AVERROR(EINVAL);
    }

    if (avctx) {
        av_assert0(!avpkt->data || avpkt->data != avctx->internal->byte_buffer);
        if (!avpkt->data || avpkt->size < size) {
            av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size);
            avpkt->data = avctx->internal->byte_buffer;
            avpkt->size = avctx->internal->byte_buffer_size;
            avpkt->destruct = NULL;
        }
    }

    if (avpkt->data) {
        AVBufferRef *buf = avpkt->buf;
#if FF_API_DESTRUCT_PACKET
        void *destruct = avpkt->destruct;
#endif

        if (avpkt->size < size) {
            av_log(avctx, AV_LOG_ERROR, "User packet is too small (%d < %d)\n", avpkt->size, size);
            return AVERROR(EINVAL);
        }

        av_init_packet(avpkt);
#if FF_API_DESTRUCT_PACKET
        avpkt->destruct = destruct;
#endif
        avpkt->buf  = buf;
        avpkt->size = size;
        return 0;
    } else {
        int ret = av_new_packet(avpkt, size);
        if (ret < 0)
            av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %d\n", size);
        return ret;
    }
}

int ff_alloc_packet(AVPacket *avpkt, int size)
{
    return ff_alloc_packet2(NULL, avpkt, size);
}

/**
 * Pad last frame with silence.
 */
static int pad_last_frame(AVCodecContext *s, AVFrame **dst, const AVFrame *src)
{
    AVFrame *frame = NULL;
    uint8_t *buf   = NULL;
    int ret;

    if (!(frame = avcodec_alloc_frame()))
        return AVERROR(ENOMEM);
    *frame = *src;

    if ((ret = av_samples_get_buffer_size(&frame->linesize[0], s->channels,
                                          s->frame_size, s->sample_fmt, 0)) < 0)
        goto fail;

    if (!(buf = av_malloc(ret))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    frame->nb_samples = s->frame_size;
    if ((ret = avcodec_fill_audio_frame(frame, s->channels, s->sample_fmt,
                                        buf, ret, 0)) < 0)
        goto fail;
    if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
                               src->nb_samples, s->channels, s->sample_fmt)) < 0)
        goto fail;
    if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
                                      frame->nb_samples - src->nb_samples,
                                      s->channels, s->sample_fmt)) < 0)
        goto fail;

    *dst = frame;

    return 0;

fail:
    if (frame->extended_data != frame->data)
        av_freep(&frame->extended_data);
    av_freep(&buf);
    av_freep(&frame);
    return ret;
}
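/* Editorial note (not part of the original source): a typical encoding loop
 * around avcodec_encode_audio2() looks roughly like this (sketch only):
 *
 *     AVPacket pkt;
 *     int got_packet;
 *     av_init_packet(&pkt);
 *     pkt.data = NULL;       // let the encoder allocate the packet
 *     pkt.size = 0;
 *     if (avcodec_encode_audio2(avctx, &pkt, frame, &got_packet) < 0)
 *         abort();           // encoding error
 *     if (got_packet) {
 *         // ... write pkt ...
 *         av_free_packet(&pkt);
 *     }
 *
 * Passing frame == NULL at the end of the stream flushes encoders that have
 * the CODEC_CAP_DELAY capability, as handled below.
 */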
int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
                                              AVPacket *avpkt,
                                              const AVFrame *frame,
                                              int *got_packet_ptr)
{
    AVFrame tmp;
    AVFrame *padded_frame = NULL;
    int ret;
    AVPacket user_pkt = *avpkt;
    int needs_realloc = !user_pkt.data;

    *got_packet_ptr = 0;

    if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) {
        av_free_packet(avpkt);
        av_init_packet(avpkt);
        return 0;
    }

    /* ensure that extended_data is properly set */
    if (frame && !frame->extended_data) {
        if (av_sample_fmt_is_planar(avctx->sample_fmt) &&
            avctx->channels > AV_NUM_DATA_POINTERS) {
            av_log(avctx, AV_LOG_ERROR, "Encoding to a planar sample format, "
                                        "with more than %d channels, but extended_data is not set.\n",
                   AV_NUM_DATA_POINTERS);
            return AVERROR(EINVAL);
        }
        av_log(avctx, AV_LOG_WARNING, "extended_data is not set.\n");

        tmp = *frame;
        tmp.extended_data = tmp.data;
        frame = &tmp;
    }

    /* check for valid frame size */
    if (frame) {
        if (avctx->codec->capabilities & CODEC_CAP_SMALL_LAST_FRAME) {
            if (frame->nb_samples > avctx->frame_size) {
                av_log(avctx, AV_LOG_ERROR, "more samples than frame size (avcodec_encode_audio2)\n");
                return AVERROR(EINVAL);
            }
        } else if (!(avctx->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)) {
            if (frame->nb_samples < avctx->frame_size &&
                !avctx->internal->last_audio_frame) {
                ret = pad_last_frame(avctx, &padded_frame, frame);
                if (ret < 0)
                    return ret;

                frame = padded_frame;
                avctx->internal->last_audio_frame = 1;
            }

            if (frame->nb_samples != avctx->frame_size) {
                av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d) (avcodec_encode_audio2)\n", frame->nb_samples, avctx->frame_size);
                ret = AVERROR(EINVAL);
                goto end;
            }
        }
    }

    ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
    if (!ret) {
        if (*got_packet_ptr) {
            if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) {
                if (avpkt->pts == AV_NOPTS_VALUE)
                    avpkt->pts = frame->pts;
                if (!avpkt->duration)
                    avpkt->duration = ff_samples_to_time_base(avctx,
                                                              frame->nb_samples);
            }
            avpkt->dts = avpkt->pts;
        } else {
            avpkt->size = 0;
        }
    }
    if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
        needs_realloc = 0;
        if (user_pkt.data) {
            if (user_pkt.size >= avpkt->size) {
                memcpy(user_pkt.data, avpkt->data, avpkt->size);
            } else {
                av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
                avpkt->size = user_pkt.size;
                ret = -1;
            }
            avpkt->buf      = user_pkt.buf;
            avpkt->data     = user_pkt.data;
            avpkt->destruct = user_pkt.destruct;
        } else {
            if (av_dup_packet(avpkt) < 0) {
                ret = AVERROR(ENOMEM);
            }
        }
    }

    if (!ret) {
        if (needs_realloc && avpkt->data) {
            ret = av_buffer_realloc(&avpkt->buf, avpkt->size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (ret >= 0)
                avpkt->data = avpkt->buf->data;
        }

        avctx->frame_number++;
    }

    if (ret < 0 || !*got_packet_ptr) {
        av_free_packet(avpkt);
        av_init_packet(avpkt);
        goto end;
    }

    /* NOTE: if we add any audio encoders which output non-keyframe packets,
     * this needs to be moved to the encoders, but for now we can do it
     * here to simplify things */
    avpkt->flags |= AV_PKT_FLAG_KEY;

end:
    if (padded_frame) {
        av_freep(&padded_frame->data[0]);
av_freep(&padded_frame->extended_data); yading@10: av_freep(&padded_frame); yading@10: } yading@10: yading@10: return ret; yading@10: } yading@10: yading@10: #if FF_API_OLD_ENCODE_AUDIO yading@10: int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx, yading@10: uint8_t *buf, int buf_size, yading@10: const short *samples) yading@10: { yading@10: AVPacket pkt; yading@10: AVFrame frame0 = { { 0 } }; yading@10: AVFrame *frame; yading@10: int ret, samples_size, got_packet; yading@10: yading@10: av_init_packet(&pkt); yading@10: pkt.data = buf; yading@10: pkt.size = buf_size; yading@10: yading@10: if (samples) { yading@10: frame = &frame0; yading@10: avcodec_get_frame_defaults(frame); yading@10: yading@10: if (avctx->frame_size) { yading@10: frame->nb_samples = avctx->frame_size; yading@10: } else { yading@10: /* if frame_size is not set, the number of samples must be yading@10: * calculated from the buffer size */ yading@10: int64_t nb_samples; yading@10: if (!av_get_bits_per_sample(avctx->codec_id)) { yading@10: av_log(avctx, AV_LOG_ERROR, "avcodec_encode_audio() does not " yading@10: "support this codec\n"); yading@10: return AVERROR(EINVAL); yading@10: } yading@10: nb_samples = (int64_t)buf_size * 8 / yading@10: (av_get_bits_per_sample(avctx->codec_id) * yading@10: avctx->channels); yading@10: if (nb_samples >= INT_MAX) yading@10: return AVERROR(EINVAL); yading@10: frame->nb_samples = nb_samples; yading@10: } yading@10: yading@10: /* it is assumed that the samples buffer is large enough based on the yading@10: * relevant parameters */ yading@10: samples_size = av_samples_get_buffer_size(NULL, avctx->channels, yading@10: frame->nb_samples, yading@10: avctx->sample_fmt, 1); yading@10: if ((ret = avcodec_fill_audio_frame(frame, avctx->channels, yading@10: avctx->sample_fmt, yading@10: (const uint8_t *)samples, yading@10: samples_size, 1)) < 0) yading@10: return ret; yading@10: yading@10: /* fabricate frame pts from sample count. yading@10: * this is needed because the avcodec_encode_audio() API does not have yading@10: * a way for the user to provide pts */ yading@10: if (avctx->sample_rate && avctx->time_base.num) yading@10: frame->pts = ff_samples_to_time_base(avctx, yading@10: avctx->internal->sample_count); yading@10: else yading@10: frame->pts = AV_NOPTS_VALUE; yading@10: avctx->internal->sample_count += frame->nb_samples; yading@10: } else { yading@10: frame = NULL; yading@10: } yading@10: yading@10: got_packet = 0; yading@10: ret = avcodec_encode_audio2(avctx, &pkt, frame, &got_packet); yading@10: if (!ret && got_packet && avctx->coded_frame) { yading@10: avctx->coded_frame->pts = pkt.pts; yading@10: avctx->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY); yading@10: } yading@10: /* free any side data since we cannot return it */ yading@10: ff_packet_free_side_data(&pkt); yading@10: yading@10: if (frame && frame->extended_data != frame->data) yading@10: av_freep(&frame->extended_data); yading@10: yading@10: return ret ? 
ret : pkt.size; yading@10: } yading@10: yading@10: #endif yading@10: yading@10: #if FF_API_OLD_ENCODE_VIDEO yading@10: int attribute_align_arg avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size, yading@10: const AVFrame *pict) yading@10: { yading@10: AVPacket pkt; yading@10: int ret, got_packet = 0; yading@10: yading@10: if (buf_size < FF_MIN_BUFFER_SIZE) { yading@10: av_log(avctx, AV_LOG_ERROR, "buffer smaller than minimum size\n"); yading@10: return -1; yading@10: } yading@10: yading@10: av_init_packet(&pkt); yading@10: pkt.data = buf; yading@10: pkt.size = buf_size; yading@10: yading@10: ret = avcodec_encode_video2(avctx, &pkt, pict, &got_packet); yading@10: if (!ret && got_packet && avctx->coded_frame) { yading@10: avctx->coded_frame->pts = pkt.pts; yading@10: avctx->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY); yading@10: } yading@10: yading@10: /* free any side data since we cannot return it */ yading@10: if (pkt.side_data_elems > 0) { yading@10: int i; yading@10: for (i = 0; i < pkt.side_data_elems; i++) yading@10: av_free(pkt.side_data[i].data); yading@10: av_freep(&pkt.side_data); yading@10: pkt.side_data_elems = 0; yading@10: } yading@10: yading@10: return ret ? ret : pkt.size; yading@10: } yading@10: yading@10: #endif yading@10: yading@10: int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx, yading@10: AVPacket *avpkt, yading@10: const AVFrame *frame, yading@10: int *got_packet_ptr) yading@10: { yading@10: int ret; yading@10: AVPacket user_pkt = *avpkt; yading@10: int needs_realloc = !user_pkt.data; yading@10: yading@10: *got_packet_ptr = 0; yading@10: yading@10: if(CONFIG_FRAME_THREAD_ENCODER && yading@10: avctx->internal->frame_thread_encoder && (avctx->active_thread_type&FF_THREAD_FRAME)) yading@10: return ff_thread_video_encode_frame(avctx, avpkt, frame, got_packet_ptr); yading@10: yading@10: if ((avctx->flags&CODEC_FLAG_PASS1) && avctx->stats_out) yading@10: avctx->stats_out[0] = '\0'; yading@10: yading@10: if (!(avctx->codec->capabilities & CODEC_CAP_DELAY) && !frame) { yading@10: av_free_packet(avpkt); yading@10: av_init_packet(avpkt); yading@10: avpkt->size = 0; yading@10: return 0; yading@10: } yading@10: yading@10: if (av_image_check_size(avctx->width, avctx->height, 0, avctx)) yading@10: return AVERROR(EINVAL); yading@10: yading@10: av_assert0(avctx->codec->encode2); yading@10: yading@10: ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr); yading@10: av_assert0(ret <= 0); yading@10: yading@10: if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) { yading@10: needs_realloc = 0; yading@10: if (user_pkt.data) { yading@10: if (user_pkt.size >= avpkt->size) { yading@10: memcpy(user_pkt.data, avpkt->data, avpkt->size); yading@10: } else { yading@10: av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size); yading@10: avpkt->size = user_pkt.size; yading@10: ret = -1; yading@10: } yading@10: avpkt->buf = user_pkt.buf; yading@10: avpkt->data = user_pkt.data; yading@10: avpkt->destruct = user_pkt.destruct; yading@10: } else { yading@10: if (av_dup_packet(avpkt) < 0) { yading@10: ret = AVERROR(ENOMEM); yading@10: } yading@10: } yading@10: } yading@10: yading@10: if (!ret) { yading@10: if (!*got_packet_ptr) yading@10: avpkt->size = 0; yading@10: else if (!(avctx->codec->capabilities & CODEC_CAP_DELAY)) yading@10: avpkt->pts = avpkt->dts = frame->pts; yading@10: yading@10: if (needs_realloc && avpkt->data) { yading@10: ret = av_buffer_realloc(&avpkt->buf, avpkt->size + 
FF_INPUT_BUFFER_PADDING_SIZE); yading@10: if (ret >= 0) yading@10: avpkt->data = avpkt->buf->data; yading@10: } yading@10: yading@10: avctx->frame_number++; yading@10: } yading@10: yading@10: if (ret < 0 || !*got_packet_ptr) yading@10: av_free_packet(avpkt); yading@10: else yading@10: av_packet_merge_side_data(avpkt); yading@10: yading@10: emms_c(); yading@10: return ret; yading@10: } yading@10: yading@10: int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, yading@10: const AVSubtitle *sub) yading@10: { yading@10: int ret; yading@10: if (sub->start_display_time) { yading@10: av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n"); yading@10: return -1; yading@10: } yading@10: yading@10: ret = avctx->codec->encode_sub(avctx, buf, buf_size, sub); yading@10: avctx->frame_number++; yading@10: return ret; yading@10: } yading@10: yading@10: /** yading@10: * Attempt to guess proper monotonic timestamps for decoded video frames yading@10: * which might have incorrect times. Input timestamps may wrap around, in yading@10: * which case the output will as well. yading@10: * yading@10: * @param pts the pts field of the decoded AVPacket, as passed through yading@10: * AVFrame.pkt_pts yading@10: * @param dts the dts field of the decoded AVPacket yading@10: * @return one of the input values, may be AV_NOPTS_VALUE yading@10: */ yading@10: static int64_t guess_correct_pts(AVCodecContext *ctx, yading@10: int64_t reordered_pts, int64_t dts) yading@10: { yading@10: int64_t pts = AV_NOPTS_VALUE; yading@10: yading@10: if (dts != AV_NOPTS_VALUE) { yading@10: ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts; yading@10: ctx->pts_correction_last_dts = dts; yading@10: } yading@10: if (reordered_pts != AV_NOPTS_VALUE) { yading@10: ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts; yading@10: ctx->pts_correction_last_pts = reordered_pts; yading@10: } yading@10: if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE) yading@10: && reordered_pts != AV_NOPTS_VALUE) yading@10: pts = reordered_pts; yading@10: else yading@10: pts = dts; yading@10: yading@10: return pts; yading@10: } yading@10: yading@10: static void apply_param_change(AVCodecContext *avctx, AVPacket *avpkt) yading@10: { yading@10: int size = 0; yading@10: const uint8_t *data; yading@10: uint32_t flags; yading@10: yading@10: if (!(avctx->codec->capabilities & CODEC_CAP_PARAM_CHANGE)) yading@10: return; yading@10: yading@10: data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size); yading@10: if (!data || size < 4) yading@10: return; yading@10: flags = bytestream_get_le32(&data); yading@10: size -= 4; yading@10: if (size < 4) /* Required for any of the changes */ yading@10: return; yading@10: if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) { yading@10: avctx->channels = bytestream_get_le32(&data); yading@10: size -= 4; yading@10: } yading@10: if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) { yading@10: if (size < 8) yading@10: return; yading@10: avctx->channel_layout = bytestream_get_le64(&data); yading@10: size -= 8; yading@10: } yading@10: if (size < 4) yading@10: return; yading@10: if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) { yading@10: avctx->sample_rate = bytestream_get_le32(&data); yading@10: size -= 4; yading@10: } yading@10: if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) { yading@10: if (size < 8) yading@10: return; yading@10: avctx->width = bytestream_get_le32(&data); yading@10: 
avctx->height = bytestream_get_le32(&data); yading@10: avcodec_set_dimensions(avctx, avctx->width, avctx->height); yading@10: size -= 8; yading@10: } yading@10: } yading@10: yading@10: static int add_metadata_from_side_data(AVCodecContext *avctx, AVFrame *frame) yading@10: { yading@10: int size, ret = 0; yading@10: const uint8_t *side_metadata; yading@10: const uint8_t *end; yading@10: yading@10: side_metadata = av_packet_get_side_data(avctx->pkt, yading@10: AV_PKT_DATA_STRINGS_METADATA, &size); yading@10: if (!side_metadata) yading@10: goto end; yading@10: end = side_metadata + size; yading@10: while (side_metadata < end) { yading@10: const uint8_t *key = side_metadata; yading@10: const uint8_t *val = side_metadata + strlen(key) + 1; yading@10: int ret = av_dict_set(avpriv_frame_get_metadatap(frame), key, val, 0); yading@10: if (ret < 0) yading@10: break; yading@10: side_metadata = val + strlen(val) + 1; yading@10: } yading@10: end: yading@10: return ret; yading@10: } yading@10: yading@10: int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, yading@10: int *got_picture_ptr, yading@10: const AVPacket *avpkt) yading@10: { yading@10: AVCodecInternal *avci = avctx->internal; yading@10: int ret; yading@10: // copy to ensure we do not change avpkt yading@10: AVPacket tmp = *avpkt; yading@10: yading@10: if (avctx->codec->type != AVMEDIA_TYPE_VIDEO) { yading@10: av_log(avctx, AV_LOG_ERROR, "Invalid media type for video\n"); yading@10: return AVERROR(EINVAL); yading@10: } yading@10: yading@10: *got_picture_ptr = 0; yading@10: if ((avctx->coded_width || avctx->coded_height) && av_image_check_size(avctx->coded_width, avctx->coded_height, 0, avctx)) yading@10: return AVERROR(EINVAL); yading@10: yading@10: avcodec_get_frame_defaults(picture); yading@10: yading@10: if (!avctx->refcounted_frames) yading@10: av_frame_unref(&avci->to_free); yading@10: yading@10: if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size || (avctx->active_thread_type & FF_THREAD_FRAME)) { yading@10: int did_split = av_packet_split_side_data(&tmp); yading@10: apply_param_change(avctx, &tmp); yading@10: avctx->pkt = &tmp; yading@10: if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) yading@10: ret = ff_thread_decode_frame(avctx, picture, got_picture_ptr, yading@10: &tmp); yading@10: else { yading@10: ret = avctx->codec->decode(avctx, picture, got_picture_ptr, yading@10: &tmp); yading@10: picture->pkt_dts = avpkt->dts; yading@10: yading@10: if(!avctx->has_b_frames){ yading@10: av_frame_set_pkt_pos(picture, avpkt->pos); yading@10: } yading@10: //FIXME these should be under if(!avctx->has_b_frames) yading@10: /* get_buffer is supposed to set frame parameters */ yading@10: if (!(avctx->codec->capabilities & CODEC_CAP_DR1)) { yading@10: if (!picture->sample_aspect_ratio.num) picture->sample_aspect_ratio = avctx->sample_aspect_ratio; yading@10: if (!picture->width) picture->width = avctx->width; yading@10: if (!picture->height) picture->height = avctx->height; yading@10: if (picture->format == AV_PIX_FMT_NONE) picture->format = avctx->pix_fmt; yading@10: } yading@10: } yading@10: add_metadata_from_side_data(avctx, picture); yading@10: yading@10: emms_c(); //needed to avoid an emms_c() call before every return; yading@10: yading@10: avctx->pkt = NULL; yading@10: if (did_split) { yading@10: ff_packet_free_side_data(&tmp); yading@10: if(ret == tmp.size) yading@10: ret = avpkt->size; yading@10: } yading@10: yading@10: if (ret < 0 && picture->data[0]) yading@10: 
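yading@10:             /* decoding failed after the frame was (partially) set up; release it
yading@10:              * here so the caller never sees a half-initialized picture */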
av_frame_unref(picture); yading@10: yading@10: if (*got_picture_ptr) { yading@10: if (!avctx->refcounted_frames) { yading@10: avci->to_free = *picture; yading@10: avci->to_free.extended_data = avci->to_free.data; yading@10: } yading@10: yading@10: avctx->frame_number++; yading@10: av_frame_set_best_effort_timestamp(picture, yading@10: guess_correct_pts(avctx, yading@10: picture->pkt_pts, yading@10: picture->pkt_dts)); yading@10: } yading@10: } else yading@10: ret = 0; yading@10: yading@10: /* many decoders assign whole AVFrames, thus overwriting extended_data; yading@10: * make sure it's set correctly */ yading@10: picture->extended_data = picture->data; yading@10: yading@10: return ret; yading@10: } yading@10: yading@10: #if FF_API_OLD_DECODE_AUDIO yading@10: int attribute_align_arg avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples, yading@10: int *frame_size_ptr, yading@10: AVPacket *avpkt) yading@10: { yading@10: AVFrame frame = { { 0 } }; yading@10: int ret, got_frame = 0; yading@10: yading@10: if (avctx->get_buffer != avcodec_default_get_buffer) { yading@10: av_log(avctx, AV_LOG_ERROR, "Custom get_buffer() for use with" yading@10: "avcodec_decode_audio3() detected. Overriding with avcodec_default_get_buffer\n"); yading@10: av_log(avctx, AV_LOG_ERROR, "Please port your application to " yading@10: "avcodec_decode_audio4()\n"); yading@10: avctx->get_buffer = avcodec_default_get_buffer; yading@10: avctx->release_buffer = avcodec_default_release_buffer; yading@10: } yading@10: yading@10: ret = avcodec_decode_audio4(avctx, &frame, &got_frame, avpkt); yading@10: yading@10: if (ret >= 0 && got_frame) { yading@10: int ch, plane_size; yading@10: int planar = av_sample_fmt_is_planar(avctx->sample_fmt); yading@10: int data_size = av_samples_get_buffer_size(&plane_size, avctx->channels, yading@10: frame.nb_samples, yading@10: avctx->sample_fmt, 1); yading@10: if (*frame_size_ptr < data_size) { yading@10: av_log(avctx, AV_LOG_ERROR, "output buffer size is too small for " yading@10: "the current frame (%d < %d)\n", *frame_size_ptr, data_size); yading@10: return AVERROR(EINVAL); yading@10: } yading@10: yading@10: memcpy(samples, frame.extended_data[0], plane_size); yading@10: yading@10: if (planar && avctx->channels > 1) { yading@10: uint8_t *out = ((uint8_t *)samples) + plane_size; yading@10: for (ch = 1; ch < avctx->channels; ch++) { yading@10: memcpy(out, frame.extended_data[ch], plane_size); yading@10: out += plane_size; yading@10: } yading@10: } yading@10: *frame_size_ptr = data_size; yading@10: } else { yading@10: *frame_size_ptr = 0; yading@10: } yading@10: return ret; yading@10: } yading@10: yading@10: #endif yading@10: yading@10: int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, yading@10: AVFrame *frame, yading@10: int *got_frame_ptr, yading@10: const AVPacket *avpkt) yading@10: { yading@10: AVCodecInternal *avci = avctx->internal; yading@10: int planar, channels; yading@10: int ret = 0; yading@10: yading@10: *got_frame_ptr = 0; yading@10: yading@10: if (!avpkt->data && avpkt->size) { yading@10: av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n"); yading@10: return AVERROR(EINVAL); yading@10: } yading@10: if (avctx->codec->type != AVMEDIA_TYPE_AUDIO) { yading@10: av_log(avctx, AV_LOG_ERROR, "Invalid media type for audio\n"); yading@10: return AVERROR(EINVAL); yading@10: } yading@10: yading@10: avcodec_get_frame_defaults(frame); yading@10: yading@10: if (!avctx->refcounted_frames) yading@10: av_frame_unref(&avci->to_free); yading@10: 
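yading@10:     /* The block below does the real work: packet side data is split off and
yading@10:      * applied (parameter changes), the codec's decode() callback is invoked,
yading@10:      * missing frame parameters are filled in from the codec context, and any
yading@10:      * AV_PKT_DATA_SKIP_SAMPLES side data is honoured by trimming the start of
yading@10:      * the returned frame.
yading@10:      *
yading@10:      * A minimal caller-side sketch (illustrative only; "dec_ctx" and "pkt" are
yading@10:      * assumed to be an already opened audio decoder context and a demuxed
yading@10:      * packet, not names defined in this file):
yading@10:      *
yading@10:      *     AVFrame *frm = avcodec_alloc_frame();
yading@10:      *     int got_frame = 0;
yading@10:      *     int used = avcodec_decode_audio4(dec_ctx, frm, &got_frame, &pkt);
yading@10:      *     if (used >= 0 && got_frame) {
yading@10:      *         // frm->nb_samples samples per channel are now available in
yading@10:      *         // frm->extended_data[]
yading@10:      *     }
yading@10:      */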
yading@10: if ((avctx->codec->capabilities & CODEC_CAP_DELAY) || avpkt->size) { yading@10: uint8_t *side; yading@10: int side_size; yading@10: // copy to ensure we do not change avpkt yading@10: AVPacket tmp = *avpkt; yading@10: int did_split = av_packet_split_side_data(&tmp); yading@10: apply_param_change(avctx, &tmp); yading@10: yading@10: avctx->pkt = &tmp; yading@10: ret = avctx->codec->decode(avctx, frame, got_frame_ptr, &tmp); yading@10: if (ret >= 0 && *got_frame_ptr) { yading@10: add_metadata_from_side_data(avctx, frame); yading@10: avctx->frame_number++; yading@10: frame->pkt_dts = avpkt->dts; yading@10: av_frame_set_best_effort_timestamp(frame, yading@10: guess_correct_pts(avctx, yading@10: frame->pkt_pts, yading@10: frame->pkt_dts)); yading@10: if (frame->format == AV_SAMPLE_FMT_NONE) yading@10: frame->format = avctx->sample_fmt; yading@10: if (!frame->channel_layout) yading@10: frame->channel_layout = avctx->channel_layout; yading@10: if (!av_frame_get_channels(frame)) yading@10: av_frame_set_channels(frame, avctx->channels); yading@10: if (!frame->sample_rate) yading@10: frame->sample_rate = avctx->sample_rate; yading@10: if (!avctx->refcounted_frames) { yading@10: avci->to_free = *frame; yading@10: avci->to_free.extended_data = avci->to_free.data; yading@10: } yading@10: } yading@10: yading@10: side= av_packet_get_side_data(avctx->pkt, AV_PKT_DATA_SKIP_SAMPLES, &side_size); yading@10: if(side && side_size>=10) { yading@10: avctx->internal->skip_samples = AV_RL32(side); yading@10: av_log(avctx, AV_LOG_DEBUG, "skip %d samples due to side data\n", yading@10: avctx->internal->skip_samples); yading@10: } yading@10: if (avctx->internal->skip_samples && *got_frame_ptr) { yading@10: if(frame->nb_samples <= avctx->internal->skip_samples){ yading@10: *got_frame_ptr = 0; yading@10: avctx->internal->skip_samples -= frame->nb_samples; yading@10: av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n", yading@10: avctx->internal->skip_samples); yading@10: } else { yading@10: av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples, yading@10: frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format); yading@10: if(avctx->pkt_timebase.num && avctx->sample_rate) { yading@10: int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples, yading@10: (AVRational){1, avctx->sample_rate}, yading@10: avctx->pkt_timebase); yading@10: if(frame->pkt_pts!=AV_NOPTS_VALUE) yading@10: frame->pkt_pts += diff_ts; yading@10: if(frame->pkt_dts!=AV_NOPTS_VALUE) yading@10: frame->pkt_dts += diff_ts; yading@10: if (av_frame_get_pkt_duration(frame) >= diff_ts) yading@10: av_frame_set_pkt_duration(frame, av_frame_get_pkt_duration(frame) - diff_ts); yading@10: } else { yading@10: av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n"); yading@10: } yading@10: av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n", yading@10: avctx->internal->skip_samples, frame->nb_samples); yading@10: frame->nb_samples -= avctx->internal->skip_samples; yading@10: avctx->internal->skip_samples = 0; yading@10: } yading@10: } yading@10: yading@10: avctx->pkt = NULL; yading@10: if (did_split) { yading@10: ff_packet_free_side_data(&tmp); yading@10: if(ret == tmp.size) yading@10: ret = avpkt->size; yading@10: } yading@10: yading@10: if (ret < 0 && frame->data[0]) yading@10: av_frame_unref(frame); yading@10: } yading@10: yading@10: /* many decoders assign whole AVFrames, thus overwriting extended_data; yading@10: * make sure it's set correctly; 
assume decoders that actually use yading@10: * extended_data are doing it correctly */ yading@10: if (*got_frame_ptr) { yading@10: planar = av_sample_fmt_is_planar(frame->format); yading@10: channels = av_frame_get_channels(frame); yading@10: if (!(planar && channels > AV_NUM_DATA_POINTERS)) yading@10: frame->extended_data = frame->data; yading@10: } else { yading@10: frame->extended_data = NULL; yading@10: } yading@10: yading@10: return ret; yading@10: } yading@10: yading@10: #define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */ yading@10: static int recode_subtitle(AVCodecContext *avctx, yading@10: AVPacket *outpkt, const AVPacket *inpkt) yading@10: { yading@10: #if CONFIG_ICONV yading@10: iconv_t cd = (iconv_t)-1; yading@10: int ret = 0; yading@10: char *inb, *outb; yading@10: size_t inl, outl; yading@10: AVPacket tmp; yading@10: #endif yading@10: yading@10: if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER) yading@10: return 0; yading@10: yading@10: #if CONFIG_ICONV yading@10: cd = iconv_open("UTF-8", avctx->sub_charenc); yading@10: av_assert0(cd != (iconv_t)-1); yading@10: yading@10: inb = inpkt->data; yading@10: inl = inpkt->size; yading@10: yading@10: if (inl >= INT_MAX / UTF8_MAX_BYTES - FF_INPUT_BUFFER_PADDING_SIZE) { yading@10: av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n"); yading@10: ret = AVERROR(ENOMEM); yading@10: goto end; yading@10: } yading@10: yading@10: ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES); yading@10: if (ret < 0) yading@10: goto end; yading@10: outpkt->buf = tmp.buf; yading@10: outpkt->data = tmp.data; yading@10: outpkt->size = tmp.size; yading@10: outb = outpkt->data; yading@10: outl = outpkt->size; yading@10: yading@10: if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 || yading@10: iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 || yading@10: outl >= outpkt->size || inl != 0) { yading@10: av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" " yading@10: "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc); yading@10: av_free_packet(&tmp); yading@10: ret = AVERROR(errno); yading@10: goto end; yading@10: } yading@10: outpkt->size -= outl; yading@10: memset(outpkt->data + outpkt->size, 0, outl); yading@10: yading@10: end: yading@10: if (cd != (iconv_t)-1) yading@10: iconv_close(cd); yading@10: return ret; yading@10: #else yading@10: av_assert0(!"requesting subtitles recoding without iconv"); yading@10: #endif yading@10: } yading@10: yading@10: int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, yading@10: int *got_sub_ptr, yading@10: AVPacket *avpkt) yading@10: { yading@10: int ret = 0; yading@10: yading@10: if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) { yading@10: av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n"); yading@10: return AVERROR(EINVAL); yading@10: } yading@10: yading@10: *got_sub_ptr = 0; yading@10: avcodec_get_subtitle_defaults(sub); yading@10: yading@10: if (avpkt->size) { yading@10: AVPacket pkt_recoded; yading@10: AVPacket tmp = *avpkt; yading@10: int did_split = av_packet_split_side_data(&tmp); yading@10: //apply_param_change(avctx, &tmp); yading@10: yading@10: pkt_recoded = tmp; yading@10: ret = recode_subtitle(avctx, &pkt_recoded, &tmp); yading@10: if (ret < 0) { yading@10: *got_sub_ptr = 0; yading@10: } else { yading@10: avctx->pkt = &pkt_recoded; yading@10: yading@10: if (avctx->pkt_timebase.den && avpkt->pts != AV_NOPTS_VALUE) yading@10: sub->pts = av_rescale_q(avpkt->pts, yading@10: avctx->pkt_timebase, 
AV_TIME_BASE_Q); yading@10: ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded); yading@10: av_assert1((ret >= 0) >= !!*got_sub_ptr && yading@10: !!*got_sub_ptr >= !!sub->num_rects); yading@10: yading@10: if (sub->num_rects && !sub->end_display_time && avpkt->duration && yading@10: avctx->pkt_timebase.num) { yading@10: AVRational ms = { 1, 1000 }; yading@10: sub->end_display_time = av_rescale_q(avpkt->duration, yading@10: avctx->pkt_timebase, ms); yading@10: } yading@10: yading@10: if (tmp.data != pkt_recoded.data) { // did we recode? yading@10: /* prevent from destroying side data from original packet */ yading@10: pkt_recoded.side_data = NULL; yading@10: pkt_recoded.side_data_elems = 0; yading@10: yading@10: av_free_packet(&pkt_recoded); yading@10: } yading@10: sub->format = !(avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB); yading@10: avctx->pkt = NULL; yading@10: } yading@10: yading@10: if (did_split) { yading@10: ff_packet_free_side_data(&tmp); yading@10: if(ret == tmp.size) yading@10: ret = avpkt->size; yading@10: } yading@10: yading@10: if (*got_sub_ptr) yading@10: avctx->frame_number++; yading@10: } yading@10: yading@10: return ret; yading@10: } yading@10: yading@10: void avsubtitle_free(AVSubtitle *sub) yading@10: { yading@10: int i; yading@10: yading@10: for (i = 0; i < sub->num_rects; i++) { yading@10: av_freep(&sub->rects[i]->pict.data[0]); yading@10: av_freep(&sub->rects[i]->pict.data[1]); yading@10: av_freep(&sub->rects[i]->pict.data[2]); yading@10: av_freep(&sub->rects[i]->pict.data[3]); yading@10: av_freep(&sub->rects[i]->text); yading@10: av_freep(&sub->rects[i]->ass); yading@10: av_freep(&sub->rects[i]); yading@10: } yading@10: yading@10: av_freep(&sub->rects); yading@10: yading@10: memset(sub, 0, sizeof(AVSubtitle)); yading@10: } yading@10: yading@10: av_cold int ff_codec_close_recursive(AVCodecContext *avctx) yading@10: { yading@10: int ret = 0; yading@10: yading@10: ff_unlock_avcodec(); yading@10: yading@10: ret = avcodec_close(avctx); yading@10: yading@10: ff_lock_avcodec(NULL); yading@10: return ret; yading@10: } yading@10: yading@10: av_cold int avcodec_close(AVCodecContext *avctx) yading@10: { yading@10: int ret = ff_lock_avcodec(avctx); yading@10: if (ret < 0) yading@10: return ret; yading@10: yading@10: if (avcodec_is_open(avctx)) { yading@10: FramePool *pool = avctx->internal->pool; yading@10: int i; yading@10: if (CONFIG_FRAME_THREAD_ENCODER && yading@10: avctx->internal->frame_thread_encoder && avctx->thread_count > 1) { yading@10: ff_unlock_avcodec(); yading@10: ff_frame_thread_encoder_free(avctx); yading@10: ff_lock_avcodec(avctx); yading@10: } yading@10: if (HAVE_THREADS && avctx->thread_opaque) yading@10: ff_thread_free(avctx); yading@10: if (avctx->codec && avctx->codec->close) yading@10: avctx->codec->close(avctx); yading@10: avctx->coded_frame = NULL; yading@10: avctx->internal->byte_buffer_size = 0; yading@10: av_freep(&avctx->internal->byte_buffer); yading@10: if (!avctx->refcounted_frames) yading@10: av_frame_unref(&avctx->internal->to_free); yading@10: for (i = 0; i < FF_ARRAY_ELEMS(pool->pools); i++) yading@10: av_buffer_pool_uninit(&pool->pools[i]); yading@10: av_freep(&avctx->internal->pool); yading@10: av_freep(&avctx->internal); yading@10: } yading@10: yading@10: if (avctx->priv_data && avctx->codec && avctx->codec->priv_class) yading@10: av_opt_free(avctx->priv_data); yading@10: av_opt_free(avctx); yading@10: av_freep(&avctx->priv_data); yading@10: if (av_codec_is_encoder(avctx->codec)) yading@10: 
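yading@10:         /* extradata is allocated by the encoder itself; for decoders it is owned
yading@10:          * by whoever opened the context (typically the demuxer or the user), so
yading@10:          * it is only freed here for encoders */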
av_freep(&avctx->extradata); yading@10: avctx->codec = NULL; yading@10: avctx->active_thread_type = 0; yading@10: yading@10: ff_unlock_avcodec(); yading@10: return 0; yading@10: } yading@10: yading@10: static enum AVCodecID remap_deprecated_codec_id(enum AVCodecID id) yading@10: { yading@10: switch(id){ yading@10: //This is for future deprecatec codec ids, its empty since yading@10: //last major bump but will fill up again over time, please don't remove it yading@10: // case AV_CODEC_ID_UTVIDEO_DEPRECATED: return AV_CODEC_ID_UTVIDEO; yading@10: case AV_CODEC_ID_OPUS_DEPRECATED: return AV_CODEC_ID_OPUS; yading@10: case AV_CODEC_ID_TAK_DEPRECATED : return AV_CODEC_ID_TAK; yading@10: default : return id; yading@10: } yading@10: } yading@10: yading@10: static AVCodec *find_encdec(enum AVCodecID id, int encoder) yading@10: { yading@10: AVCodec *p, *experimental = NULL; yading@10: p = first_avcodec; yading@10: id= remap_deprecated_codec_id(id); yading@10: while (p) { yading@10: if ((encoder ? av_codec_is_encoder(p) : av_codec_is_decoder(p)) && yading@10: p->id == id) { yading@10: if (p->capabilities & CODEC_CAP_EXPERIMENTAL && !experimental) { yading@10: experimental = p; yading@10: } else yading@10: return p; yading@10: } yading@10: p = p->next; yading@10: } yading@10: return experimental; yading@10: } yading@10: yading@10: AVCodec *avcodec_find_encoder(enum AVCodecID id) yading@10: { yading@10: return find_encdec(id, 1); yading@10: } yading@10: yading@10: AVCodec *avcodec_find_encoder_by_name(const char *name) yading@10: { yading@10: AVCodec *p; yading@10: if (!name) yading@10: return NULL; yading@10: p = first_avcodec; yading@10: while (p) { yading@10: if (av_codec_is_encoder(p) && strcmp(name, p->name) == 0) yading@10: return p; yading@10: p = p->next; yading@10: } yading@10: return NULL; yading@10: } yading@10: yading@10: AVCodec *avcodec_find_decoder(enum AVCodecID id) yading@10: { yading@10: return find_encdec(id, 0); yading@10: } yading@10: yading@10: AVCodec *avcodec_find_decoder_by_name(const char *name) yading@10: { yading@10: AVCodec *p; yading@10: if (!name) yading@10: return NULL; yading@10: p = first_avcodec; yading@10: while (p) { yading@10: if (av_codec_is_decoder(p) && strcmp(name, p->name) == 0) yading@10: return p; yading@10: p = p->next; yading@10: } yading@10: return NULL; yading@10: } yading@10: yading@10: const char *avcodec_get_name(enum AVCodecID id) yading@10: { yading@10: const AVCodecDescriptor *cd; yading@10: AVCodec *codec; yading@10: yading@10: if (id == AV_CODEC_ID_NONE) yading@10: return "none"; yading@10: cd = avcodec_descriptor_get(id); yading@10: if (cd) yading@10: return cd->name; yading@10: av_log(NULL, AV_LOG_WARNING, "Codec 0x%x is not in the full list.\n", id); yading@10: codec = avcodec_find_decoder(id); yading@10: if (codec) yading@10: return codec->name; yading@10: codec = avcodec_find_encoder(id); yading@10: if (codec) yading@10: return codec->name; yading@10: return "unknown_codec"; yading@10: } yading@10: yading@10: size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag) yading@10: { yading@10: int i, len, ret = 0; yading@10: yading@10: #define TAG_PRINT(x) \ yading@10: (((x) >= '0' && (x) <= '9') || \ yading@10: ((x) >= 'a' && (x) <= 'z') || ((x) >= 'A' && (x) <= 'Z') || \ yading@10: ((x) == '.' || (x) == ' ' || (x) == '-' || (x) == '_')) yading@10: yading@10: for (i = 0; i < 4; i++) { yading@10: len = snprintf(buf, buf_size, yading@10: TAG_PRINT(codec_tag & 0xFF) ? 
"%c" : "[%d]", codec_tag & 0xFF); yading@10: buf += len; yading@10: buf_size = buf_size > len ? buf_size - len : 0; yading@10: ret += len; yading@10: codec_tag >>= 8; yading@10: } yading@10: return ret; yading@10: } yading@10: yading@10: void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode) yading@10: { yading@10: const char *codec_type; yading@10: const char *codec_name; yading@10: const char *profile = NULL; yading@10: const AVCodec *p; yading@10: int bitrate; yading@10: AVRational display_aspect_ratio; yading@10: yading@10: if (!buf || buf_size <= 0) yading@10: return; yading@10: codec_type = av_get_media_type_string(enc->codec_type); yading@10: codec_name = avcodec_get_name(enc->codec_id); yading@10: if (enc->profile != FF_PROFILE_UNKNOWN) { yading@10: if (enc->codec) yading@10: p = enc->codec; yading@10: else yading@10: p = encode ? avcodec_find_encoder(enc->codec_id) : yading@10: avcodec_find_decoder(enc->codec_id); yading@10: if (p) yading@10: profile = av_get_profile_name(p, enc->profile); yading@10: } yading@10: yading@10: snprintf(buf, buf_size, "%s: %s%s", codec_type ? codec_type : "unknown", yading@10: codec_name, enc->mb_decision ? " (hq)" : ""); yading@10: buf[0] ^= 'a' ^ 'A'; /* first letter in uppercase */ yading@10: if (profile) yading@10: snprintf(buf + strlen(buf), buf_size - strlen(buf), " (%s)", profile); yading@10: if (enc->codec_tag) { yading@10: char tag_buf[32]; yading@10: av_get_codec_tag_string(tag_buf, sizeof(tag_buf), enc->codec_tag); yading@10: snprintf(buf + strlen(buf), buf_size - strlen(buf), yading@10: " (%s / 0x%04X)", tag_buf, enc->codec_tag); yading@10: } yading@10: yading@10: switch (enc->codec_type) { yading@10: case AVMEDIA_TYPE_VIDEO: yading@10: if (enc->pix_fmt != AV_PIX_FMT_NONE) { yading@10: snprintf(buf + strlen(buf), buf_size - strlen(buf), yading@10: ", %s", yading@10: av_get_pix_fmt_name(enc->pix_fmt)); yading@10: if (enc->bits_per_raw_sample && yading@10: enc->bits_per_raw_sample <= av_pix_fmt_desc_get(enc->pix_fmt)->comp[0].depth_minus1) yading@10: snprintf(buf + strlen(buf), buf_size - strlen(buf), yading@10: " (%d bpc)", enc->bits_per_raw_sample); yading@10: } yading@10: if (enc->width) { yading@10: snprintf(buf + strlen(buf), buf_size - strlen(buf), yading@10: ", %dx%d", yading@10: enc->width, enc->height); yading@10: if (enc->sample_aspect_ratio.num) { yading@10: av_reduce(&display_aspect_ratio.num, &display_aspect_ratio.den, yading@10: enc->width * enc->sample_aspect_ratio.num, yading@10: enc->height * enc->sample_aspect_ratio.den, yading@10: 1024 * 1024); yading@10: snprintf(buf + strlen(buf), buf_size - strlen(buf), yading@10: " [SAR %d:%d DAR %d:%d]", yading@10: enc->sample_aspect_ratio.num, enc->sample_aspect_ratio.den, yading@10: display_aspect_ratio.num, display_aspect_ratio.den); yading@10: } yading@10: if (av_log_get_level() >= AV_LOG_DEBUG) { yading@10: int g = av_gcd(enc->time_base.num, enc->time_base.den); yading@10: snprintf(buf + strlen(buf), buf_size - strlen(buf), yading@10: ", %d/%d", yading@10: enc->time_base.num / g, enc->time_base.den / g); yading@10: } yading@10: } yading@10: if (encode) { yading@10: snprintf(buf + strlen(buf), buf_size - strlen(buf), yading@10: ", q=%d-%d", enc->qmin, enc->qmax); yading@10: } yading@10: break; yading@10: case AVMEDIA_TYPE_AUDIO: yading@10: if (enc->sample_rate) { yading@10: snprintf(buf + strlen(buf), buf_size - strlen(buf), yading@10: ", %d Hz", enc->sample_rate); yading@10: } yading@10: av_strlcat(buf, ", ", buf_size); yading@10: 
av_get_channel_layout_string(buf + strlen(buf), buf_size - strlen(buf), enc->channels, enc->channel_layout); yading@10: if (enc->sample_fmt != AV_SAMPLE_FMT_NONE) { yading@10: snprintf(buf + strlen(buf), buf_size - strlen(buf), yading@10: ", %s", av_get_sample_fmt_name(enc->sample_fmt)); yading@10: } yading@10: break; yading@10: case AVMEDIA_TYPE_DATA: yading@10: if (av_log_get_level() >= AV_LOG_DEBUG) { yading@10: int g = av_gcd(enc->time_base.num, enc->time_base.den); yading@10: if (g) yading@10: snprintf(buf + strlen(buf), buf_size - strlen(buf), yading@10: ", %d/%d", yading@10: enc->time_base.num / g, enc->time_base.den / g); yading@10: } yading@10: break; yading@10: default: yading@10: return; yading@10: } yading@10: if (encode) { yading@10: if (enc->flags & CODEC_FLAG_PASS1) yading@10: snprintf(buf + strlen(buf), buf_size - strlen(buf), yading@10: ", pass 1"); yading@10: if (enc->flags & CODEC_FLAG_PASS2) yading@10: snprintf(buf + strlen(buf), buf_size - strlen(buf), yading@10: ", pass 2"); yading@10: } yading@10: bitrate = get_bit_rate(enc); yading@10: if (bitrate != 0) { yading@10: snprintf(buf + strlen(buf), buf_size - strlen(buf), yading@10: ", %d kb/s", bitrate / 1000); yading@10: } yading@10: } yading@10: yading@10: const char *av_get_profile_name(const AVCodec *codec, int profile) yading@10: { yading@10: const AVProfile *p; yading@10: if (profile == FF_PROFILE_UNKNOWN || !codec->profiles) yading@10: return NULL; yading@10: yading@10: for (p = codec->profiles; p->profile != FF_PROFILE_UNKNOWN; p++) yading@10: if (p->profile == profile) yading@10: return p->name; yading@10: yading@10: return NULL; yading@10: } yading@10: yading@10: unsigned avcodec_version(void) yading@10: { yading@10: // av_assert0(AV_CODEC_ID_V410==164); yading@10: av_assert0(AV_CODEC_ID_PCM_S8_PLANAR==65563); yading@10: av_assert0(AV_CODEC_ID_ADPCM_G722==69660); yading@10: // av_assert0(AV_CODEC_ID_BMV_AUDIO==86071); yading@10: av_assert0(AV_CODEC_ID_SRT==94216); yading@10: av_assert0(LIBAVCODEC_VERSION_MICRO >= 100); yading@10: yading@10: av_assert0(CODEC_ID_CLLC == AV_CODEC_ID_CLLC); yading@10: av_assert0(CODEC_ID_PCM_S8_PLANAR == AV_CODEC_ID_PCM_S8_PLANAR); yading@10: av_assert0(CODEC_ID_ADPCM_IMA_APC == AV_CODEC_ID_ADPCM_IMA_APC); yading@10: av_assert0(CODEC_ID_ILBC == AV_CODEC_ID_ILBC); yading@10: av_assert0(CODEC_ID_SRT == AV_CODEC_ID_SRT); yading@10: return LIBAVCODEC_VERSION_INT; yading@10: } yading@10: yading@10: const char *avcodec_configuration(void) yading@10: { yading@10: return FFMPEG_CONFIGURATION; yading@10: } yading@10: yading@10: const char *avcodec_license(void) yading@10: { yading@10: #define LICENSE_PREFIX "libavcodec license: " yading@10: return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1; yading@10: } yading@10: yading@10: void avcodec_flush_buffers(AVCodecContext *avctx) yading@10: { yading@10: if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) yading@10: ff_thread_flush(avctx); yading@10: else if (avctx->codec->flush) yading@10: avctx->codec->flush(avctx); yading@10: yading@10: avctx->pts_correction_last_pts = yading@10: avctx->pts_correction_last_dts = INT64_MIN; yading@10: } yading@10: yading@10: int av_get_exact_bits_per_sample(enum AVCodecID codec_id) yading@10: { yading@10: switch (codec_id) { yading@10: case AV_CODEC_ID_8SVX_EXP: yading@10: case AV_CODEC_ID_8SVX_FIB: yading@10: case AV_CODEC_ID_ADPCM_CT: yading@10: case AV_CODEC_ID_ADPCM_IMA_APC: yading@10: case AV_CODEC_ID_ADPCM_IMA_EA_SEAD: yading@10: case AV_CODEC_ID_ADPCM_IMA_OKI: yading@10: case 
AV_CODEC_ID_ADPCM_IMA_WS: yading@10: case AV_CODEC_ID_ADPCM_G722: yading@10: case AV_CODEC_ID_ADPCM_YAMAHA: yading@10: return 4; yading@10: case AV_CODEC_ID_PCM_ALAW: yading@10: case AV_CODEC_ID_PCM_MULAW: yading@10: case AV_CODEC_ID_PCM_S8: yading@10: case AV_CODEC_ID_PCM_S8_PLANAR: yading@10: case AV_CODEC_ID_PCM_U8: yading@10: case AV_CODEC_ID_PCM_ZORK: yading@10: return 8; yading@10: case AV_CODEC_ID_PCM_S16BE: yading@10: case AV_CODEC_ID_PCM_S16BE_PLANAR: yading@10: case AV_CODEC_ID_PCM_S16LE: yading@10: case AV_CODEC_ID_PCM_S16LE_PLANAR: yading@10: case AV_CODEC_ID_PCM_U16BE: yading@10: case AV_CODEC_ID_PCM_U16LE: yading@10: return 16; yading@10: case AV_CODEC_ID_PCM_S24DAUD: yading@10: case AV_CODEC_ID_PCM_S24BE: yading@10: case AV_CODEC_ID_PCM_S24LE: yading@10: case AV_CODEC_ID_PCM_S24LE_PLANAR: yading@10: case AV_CODEC_ID_PCM_U24BE: yading@10: case AV_CODEC_ID_PCM_U24LE: yading@10: return 24; yading@10: case AV_CODEC_ID_PCM_S32BE: yading@10: case AV_CODEC_ID_PCM_S32LE: yading@10: case AV_CODEC_ID_PCM_S32LE_PLANAR: yading@10: case AV_CODEC_ID_PCM_U32BE: yading@10: case AV_CODEC_ID_PCM_U32LE: yading@10: case AV_CODEC_ID_PCM_F32BE: yading@10: case AV_CODEC_ID_PCM_F32LE: yading@10: return 32; yading@10: case AV_CODEC_ID_PCM_F64BE: yading@10: case AV_CODEC_ID_PCM_F64LE: yading@10: return 64; yading@10: default: yading@10: return 0; yading@10: } yading@10: } yading@10: yading@10: enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be) yading@10: { yading@10: static const enum AVCodecID map[AV_SAMPLE_FMT_NB][2] = { yading@10: [AV_SAMPLE_FMT_U8 ] = { AV_CODEC_ID_PCM_U8, AV_CODEC_ID_PCM_U8 }, yading@10: [AV_SAMPLE_FMT_S16 ] = { AV_CODEC_ID_PCM_S16LE, AV_CODEC_ID_PCM_S16BE }, yading@10: [AV_SAMPLE_FMT_S32 ] = { AV_CODEC_ID_PCM_S32LE, AV_CODEC_ID_PCM_S32BE }, yading@10: [AV_SAMPLE_FMT_FLT ] = { AV_CODEC_ID_PCM_F32LE, AV_CODEC_ID_PCM_F32BE }, yading@10: [AV_SAMPLE_FMT_DBL ] = { AV_CODEC_ID_PCM_F64LE, AV_CODEC_ID_PCM_F64BE }, yading@10: [AV_SAMPLE_FMT_U8P ] = { AV_CODEC_ID_PCM_U8, AV_CODEC_ID_PCM_U8 }, yading@10: [AV_SAMPLE_FMT_S16P] = { AV_CODEC_ID_PCM_S16LE, AV_CODEC_ID_PCM_S16BE }, yading@10: [AV_SAMPLE_FMT_S32P] = { AV_CODEC_ID_PCM_S32LE, AV_CODEC_ID_PCM_S32BE }, yading@10: [AV_SAMPLE_FMT_FLTP] = { AV_CODEC_ID_PCM_F32LE, AV_CODEC_ID_PCM_F32BE }, yading@10: [AV_SAMPLE_FMT_DBLP] = { AV_CODEC_ID_PCM_F64LE, AV_CODEC_ID_PCM_F64BE }, yading@10: }; yading@10: if (fmt < 0 || fmt >= AV_SAMPLE_FMT_NB) yading@10: return AV_CODEC_ID_NONE; yading@10: if (be < 0 || be > 1) yading@10: be = AV_NE(1, 0); yading@10: return map[fmt][be]; yading@10: } yading@10: yading@10: int av_get_bits_per_sample(enum AVCodecID codec_id) yading@10: { yading@10: switch (codec_id) { yading@10: case AV_CODEC_ID_ADPCM_SBPRO_2: yading@10: return 2; yading@10: case AV_CODEC_ID_ADPCM_SBPRO_3: yading@10: return 3; yading@10: case AV_CODEC_ID_ADPCM_SBPRO_4: yading@10: case AV_CODEC_ID_ADPCM_IMA_WAV: yading@10: case AV_CODEC_ID_ADPCM_IMA_QT: yading@10: case AV_CODEC_ID_ADPCM_SWF: yading@10: case AV_CODEC_ID_ADPCM_MS: yading@10: return 4; yading@10: default: yading@10: return av_get_exact_bits_per_sample(codec_id); yading@10: } yading@10: } yading@10: yading@10: int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes) yading@10: { yading@10: int id, sr, ch, ba, tag, bps; yading@10: yading@10: id = avctx->codec_id; yading@10: sr = avctx->sample_rate; yading@10: ch = avctx->channels; yading@10: ba = avctx->block_align; yading@10: tag = avctx->codec_tag; yading@10: bps = 
av_get_exact_bits_per_sample(avctx->codec_id); yading@10: yading@10: /* codecs with an exact constant bits per sample */ yading@10: if (bps > 0 && ch > 0 && frame_bytes > 0 && ch < 32768 && bps < 32768) yading@10: return (frame_bytes * 8LL) / (bps * ch); yading@10: bps = avctx->bits_per_coded_sample; yading@10: yading@10: /* codecs with a fixed packet duration */ yading@10: switch (id) { yading@10: case AV_CODEC_ID_ADPCM_ADX: return 32; yading@10: case AV_CODEC_ID_ADPCM_IMA_QT: return 64; yading@10: case AV_CODEC_ID_ADPCM_EA_XAS: return 128; yading@10: case AV_CODEC_ID_AMR_NB: yading@10: case AV_CODEC_ID_EVRC: yading@10: case AV_CODEC_ID_GSM: yading@10: case AV_CODEC_ID_QCELP: yading@10: case AV_CODEC_ID_RA_288: return 160; yading@10: case AV_CODEC_ID_AMR_WB: yading@10: case AV_CODEC_ID_GSM_MS: return 320; yading@10: case AV_CODEC_ID_MP1: return 384; yading@10: case AV_CODEC_ID_ATRAC1: return 512; yading@10: case AV_CODEC_ID_ATRAC3: return 1024; yading@10: case AV_CODEC_ID_MP2: yading@10: case AV_CODEC_ID_MUSEPACK7: return 1152; yading@10: case AV_CODEC_ID_AC3: return 1536; yading@10: } yading@10: yading@10: if (sr > 0) { yading@10: /* calc from sample rate */ yading@10: if (id == AV_CODEC_ID_TTA) yading@10: return 256 * sr / 245; yading@10: yading@10: if (ch > 0) { yading@10: /* calc from sample rate and channels */ yading@10: if (id == AV_CODEC_ID_BINKAUDIO_DCT) yading@10: return (480 << (sr / 22050)) / ch; yading@10: } yading@10: } yading@10: yading@10: if (ba > 0) { yading@10: /* calc from block_align */ yading@10: if (id == AV_CODEC_ID_SIPR) { yading@10: switch (ba) { yading@10: case 20: return 160; yading@10: case 19: return 144; yading@10: case 29: return 288; yading@10: case 37: return 480; yading@10: } yading@10: } else if (id == AV_CODEC_ID_ILBC) { yading@10: switch (ba) { yading@10: case 38: return 160; yading@10: case 50: return 240; yading@10: } yading@10: } yading@10: } yading@10: yading@10: if (frame_bytes > 0) { yading@10: /* calc from frame_bytes only */ yading@10: if (id == AV_CODEC_ID_TRUESPEECH) yading@10: return 240 * (frame_bytes / 32); yading@10: if (id == AV_CODEC_ID_NELLYMOSER) yading@10: return 256 * (frame_bytes / 64); yading@10: if (id == AV_CODEC_ID_RA_144) yading@10: return 160 * (frame_bytes / 20); yading@10: if (id == AV_CODEC_ID_G723_1) yading@10: return 240 * (frame_bytes / 24); yading@10: yading@10: if (bps > 0) { yading@10: /* calc from frame_bytes and bits_per_coded_sample */ yading@10: if (id == AV_CODEC_ID_ADPCM_G726) yading@10: return frame_bytes * 8 / bps; yading@10: } yading@10: yading@10: if (ch > 0) { yading@10: /* calc from frame_bytes and channels */ yading@10: switch (id) { yading@10: case AV_CODEC_ID_ADPCM_AFC: yading@10: return frame_bytes / (9 * ch) * 16; yading@10: case AV_CODEC_ID_ADPCM_4XM: yading@10: case AV_CODEC_ID_ADPCM_IMA_ISS: yading@10: return (frame_bytes - 4 * ch) * 2 / ch; yading@10: case AV_CODEC_ID_ADPCM_IMA_SMJPEG: yading@10: return (frame_bytes - 4) * 2 / ch; yading@10: case AV_CODEC_ID_ADPCM_IMA_AMV: yading@10: return (frame_bytes - 8) * 2 / ch; yading@10: case AV_CODEC_ID_ADPCM_XA: yading@10: return (frame_bytes / 128) * 224 / ch; yading@10: case AV_CODEC_ID_INTERPLAY_DPCM: yading@10: return (frame_bytes - 6 - ch) / ch; yading@10: case AV_CODEC_ID_ROQ_DPCM: yading@10: return (frame_bytes - 8) / ch; yading@10: case AV_CODEC_ID_XAN_DPCM: yading@10: return (frame_bytes - 2 * ch) / ch; yading@10: case AV_CODEC_ID_MACE3: yading@10: return 3 * frame_bytes / ch; yading@10: case AV_CODEC_ID_MACE6: yading@10: return 6 * 
frame_bytes / ch; yading@10: case AV_CODEC_ID_PCM_LXF: yading@10: return 2 * (frame_bytes / (5 * ch)); yading@10: case AV_CODEC_ID_IAC: yading@10: case AV_CODEC_ID_IMC: yading@10: return 4 * frame_bytes / ch; yading@10: } yading@10: yading@10: if (tag) { yading@10: /* calc from frame_bytes, channels, and codec_tag */ yading@10: if (id == AV_CODEC_ID_SOL_DPCM) { yading@10: if (tag == 3) yading@10: return frame_bytes / ch; yading@10: else yading@10: return frame_bytes * 2 / ch; yading@10: } yading@10: } yading@10: yading@10: if (ba > 0) { yading@10: /* calc from frame_bytes, channels, and block_align */ yading@10: int blocks = frame_bytes / ba; yading@10: switch (avctx->codec_id) { yading@10: case AV_CODEC_ID_ADPCM_IMA_WAV: yading@10: return blocks * (1 + (ba - 4 * ch) / (4 * ch) * 8); yading@10: case AV_CODEC_ID_ADPCM_IMA_DK3: yading@10: return blocks * (((ba - 16) * 2 / 3 * 4) / ch); yading@10: case AV_CODEC_ID_ADPCM_IMA_DK4: yading@10: return blocks * (1 + (ba - 4 * ch) * 2 / ch); yading@10: case AV_CODEC_ID_ADPCM_MS: yading@10: return blocks * (2 + (ba - 7 * ch) * 2 / ch); yading@10: } yading@10: } yading@10: yading@10: if (bps > 0) { yading@10: /* calc from frame_bytes, channels, and bits_per_coded_sample */ yading@10: switch (avctx->codec_id) { yading@10: case AV_CODEC_ID_PCM_DVD: yading@10: if(bps<4) yading@10: return 0; yading@10: return 2 * (frame_bytes / ((bps * 2 / 8) * ch)); yading@10: case AV_CODEC_ID_PCM_BLURAY: yading@10: if(bps<4) yading@10: return 0; yading@10: return frame_bytes / ((FFALIGN(ch, 2) * bps) / 8); yading@10: case AV_CODEC_ID_S302M: yading@10: return 2 * (frame_bytes / ((bps + 4) / 4)) / ch; yading@10: } yading@10: } yading@10: } yading@10: } yading@10: yading@10: return 0; yading@10: } yading@10: yading@10: #if !HAVE_THREADS yading@10: int ff_thread_init(AVCodecContext *s) yading@10: { yading@10: return -1; yading@10: } yading@10: yading@10: #endif yading@10: yading@10: unsigned int av_xiphlacing(unsigned char *s, unsigned int v) yading@10: { yading@10: unsigned int n = 0; yading@10: yading@10: while (v >= 0xff) { yading@10: *s++ = 0xff; yading@10: v -= 0xff; yading@10: n++; yading@10: } yading@10: *s = v; yading@10: n++; yading@10: return n; yading@10: } yading@10: yading@10: int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b) yading@10: { yading@10: int i; yading@10: for (i = 0; i < size && !(tab[i][0] == a && tab[i][1] == b); i++) ; yading@10: return i; yading@10: } yading@10: yading@10: #if FF_API_MISSING_SAMPLE yading@10: void av_log_missing_feature(void *avc, const char *feature, int want_sample) yading@10: { yading@10: av_log(avc, AV_LOG_WARNING, "%s is not implemented. Update your FFmpeg " yading@10: "version to the newest one from Git. If the problem still " yading@10: "occurs, it means that your file has a feature which has not " yading@10: "been implemented.\n", feature); yading@10: if(want_sample) yading@10: av_log_ask_for_sample(avc, NULL); yading@10: } yading@10: yading@10: void av_log_ask_for_sample(void *avc, const char *msg, ...) 
yading@10: { yading@10: va_list argument_list; yading@10: yading@10: va_start(argument_list, msg); yading@10: yading@10: if (msg) yading@10: av_vlog(avc, AV_LOG_WARNING, msg, argument_list); yading@10: av_log(avc, AV_LOG_WARNING, "If you want to help, upload a sample " yading@10: "of this file to ftp://upload.ffmpeg.org/MPlayer/incoming/ " yading@10: "and contact the ffmpeg-devel mailing list.\n"); yading@10: yading@10: va_end(argument_list); yading@10: } yading@10: #endif /* FF_API_MISSING_SAMPLE */ yading@10: yading@10: static AVHWAccel *first_hwaccel = NULL; yading@10: yading@10: void av_register_hwaccel(AVHWAccel *hwaccel) yading@10: { yading@10: AVHWAccel **p = &first_hwaccel; yading@10: while (*p) yading@10: p = &(*p)->next; yading@10: *p = hwaccel; yading@10: hwaccel->next = NULL; yading@10: } yading@10: yading@10: AVHWAccel *av_hwaccel_next(AVHWAccel *hwaccel) yading@10: { yading@10: return hwaccel ? hwaccel->next : first_hwaccel; yading@10: } yading@10: yading@10: AVHWAccel *ff_find_hwaccel(enum AVCodecID codec_id, enum AVPixelFormat pix_fmt) yading@10: { yading@10: AVHWAccel *hwaccel = NULL; yading@10: yading@10: while ((hwaccel = av_hwaccel_next(hwaccel))) yading@10: if (hwaccel->id == codec_id yading@10: && hwaccel->pix_fmt == pix_fmt) yading@10: return hwaccel; yading@10: return NULL; yading@10: } yading@10: yading@10: int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)) yading@10: { yading@10: if (ff_lockmgr_cb) { yading@10: if (ff_lockmgr_cb(&codec_mutex, AV_LOCK_DESTROY)) yading@10: return -1; yading@10: if (ff_lockmgr_cb(&avformat_mutex, AV_LOCK_DESTROY)) yading@10: return -1; yading@10: } yading@10: yading@10: ff_lockmgr_cb = cb; yading@10: yading@10: if (ff_lockmgr_cb) { yading@10: if (ff_lockmgr_cb(&codec_mutex, AV_LOCK_CREATE)) yading@10: return -1; yading@10: if (ff_lockmgr_cb(&avformat_mutex, AV_LOCK_CREATE)) yading@10: return -1; yading@10: } yading@10: return 0; yading@10: } yading@10: yading@10: int ff_lock_avcodec(AVCodecContext *log_ctx) yading@10: { yading@10: if (ff_lockmgr_cb) { yading@10: if ((*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_OBTAIN)) yading@10: return -1; yading@10: } yading@10: entangled_thread_counter++; yading@10: if (entangled_thread_counter != 1) { yading@10: av_log(log_ctx, AV_LOG_ERROR, "Insufficient thread locking around avcodec_open/close()\n"); yading@10: ff_avcodec_locked = 1; yading@10: ff_unlock_avcodec(); yading@10: return AVERROR(EINVAL); yading@10: } yading@10: av_assert0(!ff_avcodec_locked); yading@10: ff_avcodec_locked = 1; yading@10: return 0; yading@10: } yading@10: yading@10: int ff_unlock_avcodec(void) yading@10: { yading@10: av_assert0(ff_avcodec_locked); yading@10: ff_avcodec_locked = 0; yading@10: entangled_thread_counter--; yading@10: if (ff_lockmgr_cb) { yading@10: if ((*ff_lockmgr_cb)(&codec_mutex, AV_LOCK_RELEASE)) yading@10: return -1; yading@10: } yading@10: return 0; yading@10: } yading@10: yading@10: int avpriv_lock_avformat(void) yading@10: { yading@10: if (ff_lockmgr_cb) { yading@10: if ((*ff_lockmgr_cb)(&avformat_mutex, AV_LOCK_OBTAIN)) yading@10: return -1; yading@10: } yading@10: return 0; yading@10: } yading@10: yading@10: int avpriv_unlock_avformat(void) yading@10: { yading@10: if (ff_lockmgr_cb) { yading@10: if ((*ff_lockmgr_cb)(&avformat_mutex, AV_LOCK_RELEASE)) yading@10: return -1; yading@10: } yading@10: return 0; yading@10: } yading@10: yading@10: unsigned int avpriv_toupper4(unsigned int x) yading@10: { yading@10: return av_toupper(x & 0xFF) + yading@10: (av_toupper((x >> 8) & 0xFF) << 8) + 
yading@10: (av_toupper((x >> 16) & 0xFF) << 16) + yading@10: (av_toupper((x >> 24) & 0xFF) << 24); yading@10: } yading@10: yading@10: int ff_thread_ref_frame(ThreadFrame *dst, ThreadFrame *src) yading@10: { yading@10: int ret; yading@10: yading@10: dst->owner = src->owner; yading@10: yading@10: ret = av_frame_ref(dst->f, src->f); yading@10: if (ret < 0) yading@10: return ret; yading@10: yading@10: if (src->progress && yading@10: !(dst->progress = av_buffer_ref(src->progress))) { yading@10: ff_thread_release_buffer(dst->owner, dst); yading@10: return AVERROR(ENOMEM); yading@10: } yading@10: yading@10: return 0; yading@10: } yading@10: yading@10: #if !HAVE_THREADS yading@10: yading@10: enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt) yading@10: { yading@10: return avctx->get_format(avctx, fmt); yading@10: } yading@10: yading@10: int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags) yading@10: { yading@10: f->owner = avctx; yading@10: return ff_get_buffer(avctx, f->f, flags); yading@10: } yading@10: yading@10: void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f) yading@10: { yading@10: av_frame_unref(f->f); yading@10: } yading@10: yading@10: void ff_thread_finish_setup(AVCodecContext *avctx) yading@10: { yading@10: } yading@10: yading@10: void ff_thread_report_progress(ThreadFrame *f, int progress, int field) yading@10: { yading@10: } yading@10: yading@10: void ff_thread_await_progress(ThreadFrame *f, int progress, int field) yading@10: { yading@10: } yading@10: yading@10: int ff_thread_can_start_frame(AVCodecContext *avctx) yading@10: { yading@10: return 1; yading@10: } yading@10: yading@10: #endif yading@10: yading@10: enum AVMediaType avcodec_get_type(enum AVCodecID codec_id) yading@10: { yading@10: AVCodec *c= avcodec_find_decoder(codec_id); yading@10: if(!c) yading@10: c= avcodec_find_encoder(codec_id); yading@10: if(c) yading@10: return c->type; yading@10: yading@10: if (codec_id <= AV_CODEC_ID_NONE) yading@10: return AVMEDIA_TYPE_UNKNOWN; yading@10: else if (codec_id < AV_CODEC_ID_FIRST_AUDIO) yading@10: return AVMEDIA_TYPE_VIDEO; yading@10: else if (codec_id < AV_CODEC_ID_FIRST_SUBTITLE) yading@10: return AVMEDIA_TYPE_AUDIO; yading@10: else if (codec_id < AV_CODEC_ID_FIRST_UNKNOWN) yading@10: return AVMEDIA_TYPE_SUBTITLE; yading@10: yading@10: return AVMEDIA_TYPE_UNKNOWN; yading@10: } yading@10: yading@10: int avcodec_is_open(AVCodecContext *s) yading@10: { yading@10: return !!s->internal; yading@10: } yading@10: yading@10: int avpriv_bprint_to_extradata(AVCodecContext *avctx, struct AVBPrint *buf) yading@10: { yading@10: int ret; yading@10: char *str; yading@10: yading@10: ret = av_bprint_finalize(buf, &str); yading@10: if (ret < 0) yading@10: return ret; yading@10: avctx->extradata = str; yading@10: /* Note: the string is NUL terminated (so extradata can be read as a yading@10: * string), but the ending character is not accounted in the size (in yading@10: * binary formats you are likely not supposed to mux that character). When yading@10: * extradata is copied, it is also padded with FF_INPUT_BUFFER_PADDING_SIZE yading@10: * zeros. 
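yading@10:      * For example, finalizing a bprint that holds the three characters "foo"
yading@10:      * leaves extradata_size == 3, while extradata[3] is the unaccounted NUL.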
*/ yading@10: avctx->extradata_size = buf->len; yading@10: return 0; yading@10: } yading@10: yading@10: const uint8_t *avpriv_find_start_code(const uint8_t *av_restrict p, yading@10: const uint8_t *end, yading@10: uint32_t *av_restrict state) yading@10: { yading@10: int i; yading@10: yading@10: assert(p <= end); yading@10: if (p >= end) yading@10: return end; yading@10: yading@10: for (i = 0; i < 3; i++) { yading@10: uint32_t tmp = *state << 8; yading@10: *state = tmp + *(p++); yading@10: if (tmp == 0x100 || p == end) yading@10: return p; yading@10: } yading@10: yading@10: while (p < end) { yading@10: if (p[-1] > 1 ) p += 3; yading@10: else if (p[-2] ) p += 2; yading@10: else if (p[-3]|(p[-1]-1)) p++; yading@10: else { yading@10: p++; yading@10: break; yading@10: } yading@10: } yading@10: yading@10: p = FFMIN(p, end) - 4; yading@10: *state = AV_RB32(p); yading@10: yading@10: return p + 4; yading@10: }
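yading@10: 
yading@10: /* avpriv_find_start_code() above scans for MPEG-style 0x000001 start codes:
yading@10:  * *state always holds the last four bytes consumed, and the inner loop skips
yading@10:  * up to three bytes at a time, since a byte greater than 1 at p[-1] cannot be
yading@10:  * part of a 00 00 01 sequence ending at p-1, p, or p+1.
yading@10:  *
yading@10:  * A hedged usage sketch (names are illustrative, not from this file): callers
yading@10:  * typically initialize the state to a value that cannot alias a start code
yading@10:  * and test it after every call, e.g.
yading@10:  *
yading@10:  *     uint32_t state = -1;
yading@10:  *     while (ptr < end) {
yading@10:  *         ptr = avpriv_find_start_code(ptr, end, &state);
yading@10:  *         if ((state & 0xffffff00) == 0x100)
yading@10:  *             handle_start_code(state & 0xff);   // hypothetical handler
yading@10:  *     }
yading@10:  */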