yading@10: /* yading@10: * Copyright (c) 2011 Michael Niedermayer yading@10: * yading@10: * This file is part of FFmpeg. yading@10: * yading@10: * FFmpeg is free software; you can redistribute it and/or modify yading@10: * it under the terms of the GNU General Public License as published by yading@10: * the Free Software Foundation; either version 2 of the License, or yading@10: * (at your option) any later version. yading@10: * yading@10: * FFmpeg is distributed in the hope that it will be useful, yading@10: * but WITHOUT ANY WARRANTY; without even the implied warranty of yading@10: * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the yading@10: * GNU General Public License for more details. yading@10: * yading@10: * You should have received a copy of the GNU General Public License yading@10: * along with FFmpeg; if not, write to the Free Software yading@10: * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA yading@10: * yading@10: * Parts of this file have been stolen from mplayer yading@10: */ yading@10: yading@10: /** yading@10: * @file yading@10: */ yading@10: yading@10: #include "avfilter.h" yading@10: #include "video.h" yading@10: #include "formats.h" yading@10: #include "internal.h" yading@10: #include "libavutil/avassert.h" yading@10: #include "libavutil/pixdesc.h" yading@10: #include "libavutil/intreadwrite.h" yading@10: #include "libavutil/imgutils.h" yading@10: #include "libavutil/opt.h" yading@10: yading@10: #include "libmpcodecs/vf.h" yading@10: #include "libmpcodecs/img_format.h" yading@10: #include "libmpcodecs/cpudetect.h" yading@10: #include "libmpcodecs/av_helpers.h" yading@10: #include "libmpcodecs/vf_scale.h" yading@10: #include "libmpcodecs/libvo/fastmemcpy.h" yading@10: yading@10: #include "libswscale/swscale.h" yading@10: yading@10: yading@10: //FIXME maybe link the orig in yading@10: //XXX: identical pix_fmt must be following with each others yading@10: static const struct { yading@10: int fmt; yading@10: enum AVPixelFormat pix_fmt; yading@10: } conversion_map[] = { yading@10: {IMGFMT_ARGB, AV_PIX_FMT_ARGB}, yading@10: {IMGFMT_BGRA, AV_PIX_FMT_BGRA}, yading@10: {IMGFMT_BGR24, AV_PIX_FMT_BGR24}, yading@10: {IMGFMT_BGR16BE, AV_PIX_FMT_RGB565BE}, yading@10: {IMGFMT_BGR16LE, AV_PIX_FMT_RGB565LE}, yading@10: {IMGFMT_BGR15BE, AV_PIX_FMT_RGB555BE}, yading@10: {IMGFMT_BGR15LE, AV_PIX_FMT_RGB555LE}, yading@10: {IMGFMT_BGR12BE, AV_PIX_FMT_RGB444BE}, yading@10: {IMGFMT_BGR12LE, AV_PIX_FMT_RGB444LE}, yading@10: {IMGFMT_BGR8, AV_PIX_FMT_RGB8}, yading@10: {IMGFMT_BGR4, AV_PIX_FMT_RGB4}, yading@10: {IMGFMT_BGR1, AV_PIX_FMT_MONOBLACK}, yading@10: {IMGFMT_RGB1, AV_PIX_FMT_MONOBLACK}, yading@10: {IMGFMT_RG4B, AV_PIX_FMT_BGR4_BYTE}, yading@10: {IMGFMT_BG4B, AV_PIX_FMT_RGB4_BYTE}, yading@10: {IMGFMT_RGB48LE, AV_PIX_FMT_RGB48LE}, yading@10: {IMGFMT_RGB48BE, AV_PIX_FMT_RGB48BE}, yading@10: {IMGFMT_ABGR, AV_PIX_FMT_ABGR}, yading@10: {IMGFMT_RGBA, AV_PIX_FMT_RGBA}, yading@10: {IMGFMT_RGB24, AV_PIX_FMT_RGB24}, yading@10: {IMGFMT_RGB16BE, AV_PIX_FMT_BGR565BE}, yading@10: {IMGFMT_RGB16LE, AV_PIX_FMT_BGR565LE}, yading@10: {IMGFMT_RGB15BE, AV_PIX_FMT_BGR555BE}, yading@10: {IMGFMT_RGB15LE, AV_PIX_FMT_BGR555LE}, yading@10: {IMGFMT_RGB12BE, AV_PIX_FMT_BGR444BE}, yading@10: {IMGFMT_RGB12LE, AV_PIX_FMT_BGR444LE}, yading@10: {IMGFMT_RGB8, AV_PIX_FMT_BGR8}, yading@10: {IMGFMT_RGB4, AV_PIX_FMT_BGR4}, yading@10: {IMGFMT_BGR8, AV_PIX_FMT_PAL8}, yading@10: {IMGFMT_YUY2, AV_PIX_FMT_YUYV422}, yading@10: {IMGFMT_UYVY, AV_PIX_FMT_UYVY422}, yading@10: {IMGFMT_NV12, 
    {IMGFMT_NV21,    AV_PIX_FMT_NV21},
    {IMGFMT_Y800,    AV_PIX_FMT_GRAY8},
    {IMGFMT_Y8,      AV_PIX_FMT_GRAY8},
    {IMGFMT_YVU9,    AV_PIX_FMT_YUV410P},
    {IMGFMT_IF09,    AV_PIX_FMT_YUV410P},
    {IMGFMT_YV12,    AV_PIX_FMT_YUV420P},
    {IMGFMT_I420,    AV_PIX_FMT_YUV420P},
    {IMGFMT_IYUV,    AV_PIX_FMT_YUV420P},
    {IMGFMT_411P,    AV_PIX_FMT_YUV411P},
    {IMGFMT_422P,    AV_PIX_FMT_YUV422P},
    {IMGFMT_444P,    AV_PIX_FMT_YUV444P},
    {IMGFMT_440P,    AV_PIX_FMT_YUV440P},

    {IMGFMT_420A,    AV_PIX_FMT_YUVA420P},

    {IMGFMT_420P16_LE, AV_PIX_FMT_YUV420P16LE},
    {IMGFMT_420P16_BE, AV_PIX_FMT_YUV420P16BE},
    {IMGFMT_422P16_LE, AV_PIX_FMT_YUV422P16LE},
    {IMGFMT_422P16_BE, AV_PIX_FMT_YUV422P16BE},
    {IMGFMT_444P16_LE, AV_PIX_FMT_YUV444P16LE},
    {IMGFMT_444P16_BE, AV_PIX_FMT_YUV444P16BE},

    // YUVJ are YUV formats that use the full Y range and not just
    // 16 - 235 (see colorspaces.txt).
    // Currently they are all treated the same way.
    {IMGFMT_YV12, AV_PIX_FMT_YUVJ420P},
    {IMGFMT_422P, AV_PIX_FMT_YUVJ422P},
    {IMGFMT_444P, AV_PIX_FMT_YUVJ444P},
    {IMGFMT_440P, AV_PIX_FMT_YUVJ440P},

    {IMGFMT_XVMC_MOCO_MPEG2, AV_PIX_FMT_XVMC_MPEG2_MC},
    {IMGFMT_XVMC_IDCT_MPEG2, AV_PIX_FMT_XVMC_MPEG2_IDCT},
    {IMGFMT_VDPAU_MPEG1,     AV_PIX_FMT_VDPAU_MPEG1},
    {IMGFMT_VDPAU_MPEG2,     AV_PIX_FMT_VDPAU_MPEG2},
    {IMGFMT_VDPAU_H264,      AV_PIX_FMT_VDPAU_H264},
    {IMGFMT_VDPAU_WMV3,      AV_PIX_FMT_VDPAU_WMV3},
    {IMGFMT_VDPAU_VC1,       AV_PIX_FMT_VDPAU_VC1},
    {IMGFMT_VDPAU_MPEG4,     AV_PIX_FMT_VDPAU_MPEG4},
    {0, AV_PIX_FMT_NONE}
};

extern const vf_info_t ff_vf_info_dint;
extern const vf_info_t ff_vf_info_down3dright;
extern const vf_info_t ff_vf_info_eq2;
extern const vf_info_t ff_vf_info_eq;
extern const vf_info_t ff_vf_info_fil;
extern const vf_info_t ff_vf_info_fspp;
extern const vf_info_t ff_vf_info_ilpack;
extern const vf_info_t ff_vf_info_mcdeint;
extern const vf_info_t ff_vf_info_ow;
extern const vf_info_t ff_vf_info_perspective;
extern const vf_info_t ff_vf_info_phase;
extern const vf_info_t ff_vf_info_pp7;
extern const vf_info_t ff_vf_info_pullup;
extern const vf_info_t ff_vf_info_qp;
extern const vf_info_t ff_vf_info_sab;
extern const vf_info_t ff_vf_info_softpulldown;
extern const vf_info_t ff_vf_info_spp;
extern const vf_info_t ff_vf_info_tinterlace;
extern const vf_info_t ff_vf_info_uspp;


static const vf_info_t* const filters[]={
    &ff_vf_info_dint,
    &ff_vf_info_down3dright,
    &ff_vf_info_eq2,
    &ff_vf_info_eq,
    &ff_vf_info_fil,
    &ff_vf_info_fspp,
    &ff_vf_info_ilpack,
    &ff_vf_info_mcdeint,
    &ff_vf_info_ow,
    &ff_vf_info_perspective,
    &ff_vf_info_phase,
    &ff_vf_info_pp7,
    &ff_vf_info_pullup,
    &ff_vf_info_qp,
    &ff_vf_info_sab,
    &ff_vf_info_softpulldown,
    &ff_vf_info_spp,
    &ff_vf_info_tinterlace,
    &ff_vf_info_uspp,

    NULL
};
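
/*
 * NULL-terminated registry of the wrapped libmpcodecs filters; init() below
 * resolves the user-supplied filter name against this table. Exposing a
 * further libmpcodecs filter through this wrapper would, in principle, only
 * require declaring its vf_info_t above and appending it here, e.g.
 * (purely illustrative, no such filter is provided):
 *
 *     extern const vf_info_t ff_vf_info_foobar;
 *     ...
 *     &ff_vf_info_foobar,
 */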

/*
Unsupported filters
1bpp
ass
bmovl
crop
dvbscale
flip
expand
format
halfpack
lavc
lavcdeint
noformat
pp
scale
tfields
vo
yadif
zrmjpeg
*/

CpuCaps ff_gCpuCaps; //FIXME initialize this so optims work

enum AVPixelFormat ff_mp2ff_pix_fmt(int mp){
    int i;
    for(i=0; conversion_map[i].fmt && mp != conversion_map[i].fmt; i++)
        ;
    return mp == conversion_map[i].fmt ? conversion_map[i].pix_fmt : AV_PIX_FMT_NONE;
}

static void ff_sws_getFlagsAndFilterFromCmdLine(int *flags, SwsFilter **srcFilterParam, SwsFilter **dstFilterParam)
{
    static int firstTime=1;
    *flags=0;

#if ARCH_X86
    if(ff_gCpuCaps.hasMMX)
        __asm__ volatile("emms\n\t"::: "memory"); //FIXME this should not be required but it IS (even for non-MMX versions)
#endif
    if(firstTime)
    {
        firstTime=0;
        *flags= SWS_PRINT_INFO;
    }
    else if( ff_mp_msg_test(MSGT_VFILTER,MSGL_DBG2) ) *flags= SWS_PRINT_INFO;

    switch(SWS_BILINEAR)
    {
    case 0: *flags|= SWS_FAST_BILINEAR; break;
    case 1: *flags|= SWS_BILINEAR; break;
    case 2: *flags|= SWS_BICUBIC; break;
    case 3: *flags|= SWS_X; break;
    case 4: *flags|= SWS_POINT; break;
    case 5: *flags|= SWS_AREA; break;
    case 6: *flags|= SWS_BICUBLIN; break;
    case 7: *flags|= SWS_GAUSS; break;
    case 8: *flags|= SWS_SINC; break;
    case 9: *flags|= SWS_LANCZOS; break;
    case 10:*flags|= SWS_SPLINE; break;
    default:*flags|= SWS_BILINEAR; break;
    }

    *srcFilterParam= NULL;
    *dstFilterParam= NULL;
}

//exact copy from vf_scale.c
// will use sws_flags & src_filter (from cmd line)
struct SwsContext *ff_sws_getContextFromCmdLine(int srcW, int srcH, int srcFormat, int dstW, int dstH, int dstFormat)
{
    int flags, i;
    SwsFilter *dstFilterParam, *srcFilterParam;
    enum AVPixelFormat dfmt, sfmt;

    for(i=0; conversion_map[i].fmt && dstFormat != conversion_map[i].fmt; i++);
    dfmt= conversion_map[i].pix_fmt;
    for(i=0; conversion_map[i].fmt && srcFormat != conversion_map[i].fmt; i++);
    sfmt= conversion_map[i].pix_fmt;

    if (srcFormat == IMGFMT_RGB8 || srcFormat == IMGFMT_BGR8) sfmt = AV_PIX_FMT_PAL8;
    ff_sws_getFlagsAndFilterFromCmdLine(&flags, &srcFilterParam, &dstFilterParam);

    return sws_getContext(srcW, srcH, sfmt, dstW, dstH, dfmt, flags, srcFilterParam, dstFilterParam, NULL);
}

typedef struct {
    const AVClass *class;
    vf_instance_t vf;
    vf_instance_t next_vf;
    AVFilterContext *avfctx;
    int frame_returned;
    char *filter;
} MPContext;

#define OFFSET(x) offsetof(MPContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption mp_options[] = {
    { "filter", "set MPlayer filter name and parameters", OFFSET(filter), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS },
filter name and parameters", OFFSET(filter), AV_OPT_TYPE_STRING, {.str=NULL}, .flags = FLAGS }, yading@10: { NULL } yading@10: }; yading@10: yading@10: AVFILTER_DEFINE_CLASS(mp); yading@10: yading@10: void ff_mp_msg(int mod, int lev, const char *format, ... ){ yading@10: va_list va; yading@10: va_start(va, format); yading@10: //FIXME convert lev/mod yading@10: av_vlog(NULL, AV_LOG_DEBUG, format, va); yading@10: va_end(va); yading@10: } yading@10: yading@10: int ff_mp_msg_test(int mod, int lev){ yading@10: return 123; yading@10: } yading@10: yading@10: void ff_init_avcodec(void) yading@10: { yading@10: //we maybe should init but its kinda 1. unneeded 2. a bit inpolite from here yading@10: } yading@10: yading@10: //Exact copy of vf.c yading@10: void ff_vf_clone_mpi_attributes(mp_image_t* dst, mp_image_t* src){ yading@10: dst->pict_type= src->pict_type; yading@10: dst->fields = src->fields; yading@10: dst->qscale_type= src->qscale_type; yading@10: if(dst->width == src->width && dst->height == src->height){ yading@10: dst->qstride= src->qstride; yading@10: dst->qscale= src->qscale; yading@10: } yading@10: } yading@10: yading@10: //Exact copy of vf.c yading@10: void ff_vf_next_draw_slice(struct vf_instance *vf,unsigned char** src, int * stride,int w, int h, int x, int y){ yading@10: if (vf->next->draw_slice) { yading@10: vf->next->draw_slice(vf->next,src,stride,w,h,x,y); yading@10: return; yading@10: } yading@10: if (!vf->dmpi) { yading@10: ff_mp_msg(MSGT_VFILTER,MSGL_ERR,"draw_slice: dmpi not stored by vf_%s\n", vf->info->name); yading@10: return; yading@10: } yading@10: if (!(vf->dmpi->flags & MP_IMGFLAG_PLANAR)) { yading@10: memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+vf->dmpi->bpp/8*x, yading@10: src[0], vf->dmpi->bpp/8*w, h, vf->dmpi->stride[0], stride[0]); yading@10: return; yading@10: } yading@10: memcpy_pic(vf->dmpi->planes[0]+y*vf->dmpi->stride[0]+x, src[0], yading@10: w, h, vf->dmpi->stride[0], stride[0]); yading@10: memcpy_pic(vf->dmpi->planes[1]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[1]+(x>>vf->dmpi->chroma_x_shift), yading@10: src[1], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[1], stride[1]); yading@10: memcpy_pic(vf->dmpi->planes[2]+(y>>vf->dmpi->chroma_y_shift)*vf->dmpi->stride[2]+(x>>vf->dmpi->chroma_x_shift), yading@10: src[2], w>>vf->dmpi->chroma_x_shift, h>>vf->dmpi->chroma_y_shift, vf->dmpi->stride[2], stride[2]); yading@10: } yading@10: yading@10: //Exact copy of vf.c yading@10: void ff_vf_mpi_clear(mp_image_t* mpi,int x0,int y0,int w,int h){ yading@10: int y; yading@10: if(mpi->flags&MP_IMGFLAG_PLANAR){ yading@10: y0&=~1;h+=h&1; yading@10: if(x0==0 && w==mpi->width){ yading@10: // full width clear: yading@10: memset(mpi->planes[0]+mpi->stride[0]*y0,0,mpi->stride[0]*h); yading@10: memset(mpi->planes[1]+mpi->stride[1]*(y0>>mpi->chroma_y_shift),128,mpi->stride[1]*(h>>mpi->chroma_y_shift)); yading@10: memset(mpi->planes[2]+mpi->stride[2]*(y0>>mpi->chroma_y_shift),128,mpi->stride[2]*(h>>mpi->chroma_y_shift)); yading@10: } else yading@10: for(y=y0;yplanes[0]+x0+mpi->stride[0]*y,0,w); yading@10: memset(mpi->planes[0]+x0+mpi->stride[0]*(y+1),0,w); yading@10: memset(mpi->planes[1]+(x0>>mpi->chroma_x_shift)+mpi->stride[1]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift)); yading@10: memset(mpi->planes[2]+(x0>>mpi->chroma_x_shift)+mpi->stride[2]*(y>>mpi->chroma_y_shift),128,(w>>mpi->chroma_x_shift)); yading@10: } yading@10: return; yading@10: } yading@10: // packed: yading@10: for(y=y0;yplanes[0]+mpi->stride[0]*y+(mpi->bpp>>3)*x0; 
        if(mpi->flags&MP_IMGFLAG_YUV){
            unsigned int* p=(unsigned int*) dst;
            int size=(mpi->bpp>>3)*w/4;
            int i;
#if HAVE_BIGENDIAN
#define CLEAR_PACKEDYUV_PATTERN 0x00800080
#define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x80008000
#else
#define CLEAR_PACKEDYUV_PATTERN 0x80008000
#define CLEAR_PACKEDYUV_PATTERN_SWAPPED 0x00800080
#endif
            if(mpi->flags&MP_IMGFLAG_SWAPPED){
                for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN_SWAPPED;
                for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN_SWAPPED;
            } else {
                for(i=0;i<size-3;i+=4) p[i]=p[i+1]=p[i+2]=p[i+3]=CLEAR_PACKEDYUV_PATTERN;
                for(;i<size;i++) p[i]=CLEAR_PACKEDYUV_PATTERN;
            }
        }else
            memset(dst,0,(mpi->bpp>>3)*w);
    }
}

int ff_vf_next_query_format(struct vf_instance *vf, unsigned int fmt){
    return 1;
}

//used by delogo
unsigned int ff_vf_match_csp(vf_instance_t** vfp,const unsigned int* list,unsigned int preferred){
    return preferred;
}

mp_image_t* ff_vf_get_image(vf_instance_t* vf, unsigned int outfmt, int mp_imgtype, int mp_imgflag, int w, int h){
    MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, next_vf));
    mp_image_t* mpi=NULL;
    int w2;
    int number = mp_imgtype >> 16;

    av_assert0(vf->next == NULL); // all existing filters call this just on next

    //vf_dint needs these as it calls ff_vf_get_image() before configuring the output
    if(vf->w==0 && w>0) vf->w=w;
    if(vf->h==0 && h>0) vf->h=h;

    av_assert0(w == -1 || w >= vf->w);
    av_assert0(h == -1 || h >= vf->h);
    av_assert0(vf->w > 0);
    av_assert0(vf->h > 0);

    av_log(m->avfctx, AV_LOG_DEBUG, "get_image: %d:%d, vf: %d:%d\n", w,h,vf->w,vf->h);

    if (w == -1) w = vf->w;
    if (h == -1) h = vf->h;

    w2=(mp_imgflag&MP_IMGFLAG_ACCEPT_ALIGNED_STRIDE)?((w+15)&(~15)):w;

    // Note: we should call libvo first to check if it supports direct rendering
    // and if not, then fallback to software buffers:
    switch(mp_imgtype & 0xff){
    case MP_IMGTYPE_EXPORT:
        if(!vf->imgctx.export_images[0]) vf->imgctx.export_images[0]=ff_new_mp_image(w2,h);
        mpi=vf->imgctx.export_images[0];
        break;
    case MP_IMGTYPE_STATIC:
        if(!vf->imgctx.static_images[0]) vf->imgctx.static_images[0]=ff_new_mp_image(w2,h);
        mpi=vf->imgctx.static_images[0];
        break;
    case MP_IMGTYPE_TEMP:
        if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=ff_new_mp_image(w2,h);
        mpi=vf->imgctx.temp_images[0];
        break;
    case MP_IMGTYPE_IPB:
        if(!(mp_imgflag&MP_IMGFLAG_READABLE)){ // B frame:
            if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=ff_new_mp_image(w2,h);
            mpi=vf->imgctx.temp_images[0];
            break;
        }
    case MP_IMGTYPE_IP:
        if(!vf->imgctx.static_images[vf->imgctx.static_idx]) vf->imgctx.static_images[vf->imgctx.static_idx]=ff_new_mp_image(w2,h);
        mpi=vf->imgctx.static_images[vf->imgctx.static_idx];
        vf->imgctx.static_idx^=1;
        break;
    case MP_IMGTYPE_NUMBERED:
        if (number == -1) {
            int i;
            for (i = 0; i < NUM_NUMBERED_MPI; i++)
                if (!vf->imgctx.numbered_images[i] || !vf->imgctx.numbered_images[i]->usage_count)
                    break;
            number = i;
        }
        if (number < 0 || number >= NUM_NUMBERED_MPI) return NULL;
        if (!vf->imgctx.numbered_images[number]) vf->imgctx.numbered_images[number] = ff_new_mp_image(w2,h);
        mpi = vf->imgctx.numbered_images[number];
        mpi->number = number;
        break;
    }
    if(mpi){
        mpi->type=mp_imgtype;
        mpi->w=vf->w; mpi->h=vf->h;
        // keep buffer allocation status & color flags only:
        // mpi->flags&=~(MP_IMGFLAG_PRESERVE|MP_IMGFLAG_READABLE|MP_IMGFLAG_DIRECT);
        mpi->flags&=MP_IMGFLAG_ALLOCATED|MP_IMGFLAG_TYPE_DISPLAYED|MP_IMGFLAGMASK_COLORS;
        // accept restrictions, draw_slice and palette flags only:
        mpi->flags|=mp_imgflag&(MP_IMGFLAGMASK_RESTRICTIONS|MP_IMGFLAG_DRAW_CALLBACK|MP_IMGFLAG_RGB_PALETTE);
        if(!vf->draw_slice) mpi->flags&=~MP_IMGFLAG_DRAW_CALLBACK;
        if(mpi->width!=w2 || mpi->height!=h){
            // printf("vf.c: MPI parameters changed! %dx%d -> %dx%d \n", mpi->width,mpi->height,w2,h);
            if(mpi->flags&MP_IMGFLAG_ALLOCATED){
                if(mpi->width<w2 || mpi->height<h){
                    // need to re-allocate buffer memory:
                    av_free(mpi->planes[0]);
                    mpi->flags&=~MP_IMGFLAG_ALLOCATED;
                    ff_mp_msg(MSGT_VFILTER,MSGL_V,"vf.c: have to REALLOCATE buffer memory :(\n");
                }
            // } else {
            } {
                mpi->width=w2; mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
                mpi->height=h; mpi->chroma_height=(h + (1<<mpi->chroma_y_shift) - 1)>>mpi->chroma_y_shift;
            }
        }
        if(!mpi->bpp) ff_mp_image_setfmt(mpi,outfmt);
        if(!(mpi->flags&MP_IMGFLAG_ALLOCATED) && mpi->type>MP_IMGTYPE_EXPORT){

            av_assert0(!vf->get_image);
            // check libvo first!
            if(vf->get_image) vf->get_image(vf,mpi);

            if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
                // non-direct and not yet allocated image. allocate it!
                if (!mpi->bpp) { // no way we can allocate this
                    ff_mp_msg(MSGT_DECVIDEO, MSGL_FATAL,
                              "ff_vf_get_image: Tried to allocate a format that can not be allocated!\n");
                    return NULL;
                }

                // check if codec prefers aligned stride:
                if(mp_imgflag&MP_IMGFLAG_PREFER_ALIGNED_STRIDE){
                    int align=(mpi->flags&MP_IMGFLAG_PLANAR &&
                               mpi->flags&MP_IMGFLAG_YUV) ?
                               (8<<mpi->chroma_x_shift)-1 : 15; // -- maybe FIXME
                    w2=((w+align)&(~align));
                    if(mpi->width!=w2){
#if 0
                        // we have to change width... check if we CAN do it:
                        int flags=vf->query_format(vf,outfmt); // should not fail
                        if(!(flags&3)) ff_mp_msg(MSGT_DECVIDEO,MSGL_WARN,"??? ff_vf_get_image{vf->query_format(outfmt)} failed!\n");
                        // printf("query -> 0x%X \n",flags);
                        if(flags&VFCAP_ACCEPT_STRIDE){
#endif
                            mpi->width=w2;
                            mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
                        // }
                    }
                }

                ff_mp_image_alloc_planes(mpi);
                // printf("clearing img!\n");
                ff_vf_mpi_clear(mpi,0,0,mpi->width,mpi->height);
            }
        }
        av_assert0(!vf->start_slice);
        if(mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)
            if(vf->start_slice) vf->start_slice(vf,mpi);
        if(!(mpi->flags&MP_IMGFLAG_TYPE_DISPLAYED)){
            ff_mp_msg(MSGT_DECVIDEO,MSGL_V,"*** [%s] %s%s mp_image_t, %dx%dx%dbpp %s %s, %d bytes\n",
                      "NULL"/*vf->info->name*/,
                      (mpi->type==MP_IMGTYPE_EXPORT)?"Exporting":
                      ((mpi->flags&MP_IMGFLAG_DIRECT)?"Direct Rendering":"Allocating"),
                      (mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)?" (slices)":"",
                      mpi->width,mpi->height,mpi->bpp,
                      (mpi->flags&MP_IMGFLAG_YUV)?"YUV":((mpi->flags&MP_IMGFLAG_SWAPPED)?"BGR":"RGB"),
                      (mpi->flags&MP_IMGFLAG_PLANAR)?"planar":"packed",
                      mpi->bpp*mpi->width*mpi->height/8);
            ff_mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"(imgfmt: %x, planes: %p,%p,%p strides: %d,%d,%d, chroma: %dx%d, shift: h:%d,v:%d)\n",
                      mpi->imgfmt, mpi->planes[0], mpi->planes[1], mpi->planes[2],
                      mpi->stride[0], mpi->stride[1], mpi->stride[2],
                      mpi->chroma_width, mpi->chroma_height, mpi->chroma_x_shift, mpi->chroma_y_shift);
            mpi->flags|=MP_IMGFLAG_TYPE_DISPLAYED;
        }

        mpi->qscale = NULL;
        mpi->usage_count++;
    }
    // printf("\rVF_MPI: %p %p %p %d %d %d \n",
    //     mpi->planes[0],mpi->planes[1],mpi->planes[2],
    //     mpi->stride[0],mpi->stride[1],mpi->stride[2]);
    return mpi;
}

static void dummy_free(void *opaque, uint8_t *data){}

int ff_vf_next_put_image(struct vf_instance *vf,mp_image_t *mpi, double pts){
    MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
    AVFilterLink *outlink = m->avfctx->outputs[0];
    AVFrame *picref = av_frame_alloc();
    int i;

    av_assert0(vf->next);

    av_log(m->avfctx, AV_LOG_DEBUG, "ff_vf_next_put_image\n");

    if (!picref)
        goto fail;

    picref->width  = mpi->w;
    picref->height = mpi->h;

    picref->type = AVMEDIA_TYPE_VIDEO;

    for(i=0; conversion_map[i].fmt && mpi->imgfmt != conversion_map[i].fmt; i++);
    picref->format = conversion_map[i].pix_fmt;

    memcpy(picref->linesize, mpi->stride, FFMIN(sizeof(picref->linesize), sizeof(mpi->stride)));

    for(i=0; i<4 && mpi->stride[i]; i++){
        picref->data[i] = mpi->planes[i];
    }

    if(pts != MP_NOPTS_VALUE)
        picref->pts= pts * av_q2d(outlink->time_base);

    if(1) { // mp buffers are currently unsupported in libavfilter, we thus must copy
        AVFrame *tofree = picref;
        picref = av_frame_clone(picref);
        av_frame_free(&tofree);
    }

    ff_filter_frame(outlink, picref);
    m->frame_returned++;

    return 1;

fail:
    av_frame_free(&picref);
    return 0;
}

int ff_vf_next_config(struct vf_instance *vf,
                      int width, int height, int d_width, int d_height,
                      unsigned int voflags, unsigned int outfmt){

    av_assert0(width>0 && height>0);
    vf->next->w = width; vf->next->h = height;

    return 1;
#if 0
    int flags=vf->next->query_format(vf->next,outfmt);
    if(!flags){
        // hmm. colorspace mismatch!!!
        //this is fatal for us ATM
        return 0;
    }
    ff_mp_msg(MSGT_VFILTER,MSGL_V,"REQ: flags=0x%X req=0x%X \n",flags,vf->default_reqs);
    miss=vf->default_reqs - (flags&vf->default_reqs);
    if(miss&VFCAP_ACCEPT_STRIDE){
        // vf requires stride support but vf->next doesn't support it!
        // let's insert the 'expand' filter, it does the job for us:
        vf_instance_t* vf2=vf_open_filter(vf->next,"expand",NULL);
        if(!vf2) return 0; // shouldn't happen!
        vf->next=vf2;
    }
    vf->next->w = width; vf->next->h = height;
    return 1;
#endif
}

int ff_vf_next_control(struct vf_instance *vf, int request, void* data){
    MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
    av_log(m->avfctx, AV_LOG_DEBUG, "Received control %d\n", request);
    return 0;
}

static int vf_default_query_format(struct vf_instance *vf, unsigned int fmt){
    MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, vf));
    int i;
    av_log(m->avfctx, AV_LOG_DEBUG, "query %X\n", fmt);

    for(i=0; conversion_map[i].fmt; i++){
        if(fmt==conversion_map[i].fmt)
            return 1; //we support all
    }
    return 0;
}


static av_cold int init(AVFilterContext *ctx)
{
    MPContext *m = ctx->priv;
    int cpu_flags = av_get_cpu_flags();
    char name[256];
    const char *args;
    int i;

    ff_gCpuCaps.hasMMX      = cpu_flags & AV_CPU_FLAG_MMX;
    ff_gCpuCaps.hasMMX2     = cpu_flags & AV_CPU_FLAG_MMX2;
    ff_gCpuCaps.hasSSE      = cpu_flags & AV_CPU_FLAG_SSE;
    ff_gCpuCaps.hasSSE2     = cpu_flags & AV_CPU_FLAG_SSE2;
    ff_gCpuCaps.hasSSE3     = cpu_flags & AV_CPU_FLAG_SSE3;
    ff_gCpuCaps.hasSSSE3    = cpu_flags & AV_CPU_FLAG_SSSE3;
    ff_gCpuCaps.hasSSE4     = cpu_flags & AV_CPU_FLAG_SSE4;
    ff_gCpuCaps.hasSSE42    = cpu_flags & AV_CPU_FLAG_SSE42;
    ff_gCpuCaps.hasAVX      = cpu_flags & AV_CPU_FLAG_AVX;
    ff_gCpuCaps.has3DNow    = cpu_flags & AV_CPU_FLAG_3DNOW;
    ff_gCpuCaps.has3DNowExt = cpu_flags & AV_CPU_FLAG_3DNOWEXT;

    m->avfctx= ctx;

    args = m->filter;
    if(!args || 1!=sscanf(args, "%255[^:=]", name)){
        av_log(ctx, AV_LOG_ERROR, "Invalid parameter.\n");
        return AVERROR(EINVAL);
    }
    args += strlen(name);
    if (args[0] == '=')
        args++;

    for(i=0; ;i++){
        if(!filters[i] || !strcmp(name, filters[i]->name))
            break;
    }

    if(!filters[i]){
        av_log(ctx, AV_LOG_ERROR, "Unknown filter %s\n", name);
        return AVERROR(EINVAL);
    }

    av_log(ctx, AV_LOG_WARNING,
           "'%s' is a wrapped MPlayer filter (libmpcodecs). This filter may be removed\n"
           "once it has been ported to a native libavfilter.\n", name);

    memset(&m->vf,0,sizeof(m->vf));
    m->vf.info= filters[i];

    m->vf.next        = &m->next_vf;
    m->vf.put_image   = ff_vf_next_put_image;
    m->vf.config      = ff_vf_next_config;
    m->vf.query_format= vf_default_query_format;
    m->vf.control     = ff_vf_next_control;
    m->vf.default_caps=VFCAP_ACCEPT_STRIDE;
    m->vf.default_reqs=0;
    if(m->vf.info->opts)
        av_log(ctx, AV_LOG_ERROR, "opts / m_struct_set is unsupported\n");
#if 0
    if(vf->info->opts) { // vf_vo get some special argument
        const m_struct_t* st = vf->info->opts;
        void* vf_priv = m_struct_alloc(st);
        int n;
        for(n = 0 ; args && args[2*n] ; n++)
            m_struct_set(st,vf_priv,args[2*n],args[2*n+1]);
        vf->priv = vf_priv;
        args = NULL;
    } else // Otherwise we should have the '_oldargs_'
    if(args && !strcmp(args[0],"_oldargs_"))
        args = (char**)args[1];
    else
        args = NULL;
#endif
    if(m->vf.info->vf_open(&m->vf, (char*)args)<=0){
        av_log(ctx, AV_LOG_ERROR, "vf_open() of %s with arg=%s failed\n", name, args);
        return -1;
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    MPContext *m = ctx->priv;
    vf_instance_t *vf = &m->vf;

    while(vf){
        vf_instance_t *next = vf->next;
        if(vf->uninit)
            vf->uninit(vf);
        ff_free_mp_image(vf->imgctx.static_images[0]);
        ff_free_mp_image(vf->imgctx.static_images[1]);
        ff_free_mp_image(vf->imgctx.temp_images[0]);
        ff_free_mp_image(vf->imgctx.export_images[0]);
        vf = next;
    }
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *avfmts=NULL;
    MPContext *m = ctx->priv;
    enum AVPixelFormat lastpixfmt = AV_PIX_FMT_NONE;
    int i;

    for(i=0; conversion_map[i].fmt; i++){
        av_log(ctx, AV_LOG_DEBUG, "query: %X\n", conversion_map[i].fmt);
        if(m->vf.query_format(&m->vf, conversion_map[i].fmt)){
            av_log(ctx, AV_LOG_DEBUG, "supported,adding\n");
            if (conversion_map[i].pix_fmt != lastpixfmt) {
                ff_add_format(&avfmts, conversion_map[i].pix_fmt);
                lastpixfmt = conversion_map[i].pix_fmt;
            }
        }
    }

    if (!avfmts)
        return -1;

    //We assume all allowed input formats are also allowed output formats
    ff_set_common_formats(ctx, avfmts);
    return 0;
}

static int config_inprops(AVFilterLink *inlink)
{
    MPContext *m = inlink->dst->priv;
    int i;
    for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++);

    av_assert0(conversion_map[i].fmt && inlink->w && inlink->h);

    m->vf.fmt.have_configured = 1;
    m->vf.fmt.orig_height     = inlink->h;
    m->vf.fmt.orig_width      = inlink->w;
    m->vf.fmt.orig_fmt        = conversion_map[i].fmt;

    if(m->vf.config(&m->vf, inlink->w, inlink->h, inlink->w, inlink->h, 0, conversion_map[i].fmt)<=0)
        return -1;

    return 0;
}

static int config_outprops(AVFilterLink *outlink)
{
    MPContext *m = outlink->src->priv;

    outlink->w = m->next_vf.w;
    outlink->h = m->next_vf.h;

    return 0;
}

static int request_frame(AVFilterLink *outlink)
{
    MPContext *m = outlink->src->priv;
    int ret;

    av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame\n");

    for(m->frame_returned=0; !m->frame_returned;){
        ret=ff_request_frame(outlink->src->inputs[0]);
        if(ret<0)
            break;
    }

    av_log(m->avfctx, AV_LOG_DEBUG, "mp request_frame ret=%d\n", ret);
    return ret;
}

static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
{
    MPContext *m = inlink->dst->priv;
    int i;
    double pts= MP_NOPTS_VALUE;
    mp_image_t* mpi = ff_new_mp_image(inpic->width, inpic->height);

    if(inpic->pts != AV_NOPTS_VALUE)
        pts= inpic->pts / av_q2d(inlink->time_base);

    for(i=0; conversion_map[i].fmt && conversion_map[i].pix_fmt != inlink->format; i++);
    ff_mp_image_setfmt(mpi,conversion_map[i].fmt);

    memcpy(mpi->planes, inpic->data,     FFMIN(sizeof(inpic->data),     sizeof(mpi->planes)));
    memcpy(mpi->stride, inpic->linesize, FFMIN(sizeof(inpic->linesize), sizeof(mpi->stride)));

    //FIXME pass interlaced & tff flags around

    // mpi->flags|=MP_IMGFLAG_ALLOCATED; ?
    mpi->flags |= MP_IMGFLAG_READABLE;
    if(!av_frame_is_writable(inpic))
        mpi->flags |= MP_IMGFLAG_PRESERVE;
    if(m->vf.put_image(&m->vf, mpi, pts) == 0){
        av_log(m->avfctx, AV_LOG_DEBUG, "put_image() says skip\n");
    }else{
        av_frame_free(&inpic);
    }
    ff_free_mp_image(mpi);
    return 0;
}

static const AVFilterPad mp_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_inprops,
    },
    { NULL }
};

static const AVFilterPad mp_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = request_frame,
        .config_props  = config_outprops,
    },
    { NULL }
};

AVFilter avfilter_vf_mp = {
    .name          = "mp",
    .description   = NULL_IF_CONFIG_SMALL("Apply a libmpcodecs filter to the input video."),
    .init          = init,
    .uninit        = uninit,
    .priv_size     = sizeof(MPContext),
    .query_formats = query_formats,
    .inputs        = mp_inputs,
    .outputs       = mp_outputs,
    .priv_class    = &mp_class,
};
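
/*
 * Illustrative usage sketch (not part of the filter itself): the wrapper is
 * registered as "mp" with a single "filter" option, so it can be instantiated
 * through the public libavfilter API roughly as follows. The graph setup and
 * the choice of the wrapped "pp7" filter are assumptions made for the example.
 *
 *     AVFilterGraph   *graph = avfilter_graph_alloc();
 *     AVFilterContext *mp_ctx;
 *     // create an instance of the "mp" wrapper running libmpcodecs pp7
 *     avfilter_graph_create_filter(&mp_ctx, avfilter_get_by_name("mp"),
 *                                  "wrapped", "filter=pp7", NULL, graph);
 *
 * On the ffmpeg command line this corresponds to something like
 * "-vf mp=filter=pp7".
 */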