h264dsp.h
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2003-2010 Michael Niedermayer <michaelni@gmx.at>
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * H.264 DSP functions.
24  * @author Michael Niedermayer <michaelni@gmx.at>
25  */
26 
27 #ifndef AVCODEC_H264DSP_H
28 #define AVCODEC_H264DSP_H
29 
30 #include <stdint.h>
31 
/**
 * Per-MB weighted-prediction function: scales and offsets one block of
 * pixels in place (weight/offset per H.264 explicit weighted prediction).
 */
typedef void (*h264_weight_func)(uint8_t *block, int stride, int height,
                                 int log2_denom, int weight, int offset);
/**
 * Bidirectional weighted-prediction function: blends src into dst using
 * two weights plus an offset.
 */
typedef void (*h264_biweight_func)(uint8_t *dst, uint8_t *src,
                                   int stride, int height, int log2_denom,
                                   int weightd, int weights, int offset);

/**
 * Context for storing H.264 DSP functions
 */
typedef struct H264DSPContext {
    /* weighted MC — indexed by block-size class (16/8/4/2 wide) */
    h264_weight_func weight_h264_pixels_tab[4];
    h264_biweight_func biweight_h264_pixels_tab[4];

    /* loop filter */
    void (*h264_v_loop_filter_luma)(uint8_t *pix /*align 16*/, int stride,
                                    int alpha, int beta, int8_t *tc0);
    void (*h264_h_loop_filter_luma)(uint8_t *pix /*align 4 */, int stride,
                                    int alpha, int beta, int8_t *tc0);
    void (*h264_h_loop_filter_luma_mbaff)(uint8_t *pix /*align 16*/, int stride,
                                          int alpha, int beta, int8_t *tc0);
    /* v/h_loop_filter_luma_intra: align 16 */
    void (*h264_v_loop_filter_luma_intra)(uint8_t *pix, int stride,
                                          int alpha, int beta);
    void (*h264_h_loop_filter_luma_intra)(uint8_t *pix, int stride,
                                          int alpha, int beta);
    void (*h264_h_loop_filter_luma_mbaff_intra)(uint8_t *pix /*align 16*/,
                                                int stride, int alpha, int beta);
    void (*h264_v_loop_filter_chroma)(uint8_t *pix /*align 8*/, int stride,
                                      int alpha, int beta, int8_t *tc0);
    void (*h264_h_loop_filter_chroma)(uint8_t *pix /*align 4*/, int stride,
                                      int alpha, int beta, int8_t *tc0);
    void (*h264_h_loop_filter_chroma_mbaff)(uint8_t *pix /*align 8*/,
                                            int stride, int alpha, int beta,
                                            int8_t *tc0);
    void (*h264_v_loop_filter_chroma_intra)(uint8_t *pix /*align 8*/,
                                            int stride, int alpha, int beta);
    void (*h264_h_loop_filter_chroma_intra)(uint8_t *pix /*align 8*/,
                                            int stride, int alpha, int beta);
    void (*h264_h_loop_filter_chroma_mbaff_intra)(uint8_t *pix /*align 8*/,
                                                  int stride, int alpha, int beta);
    // h264_loop_filter_strength: simd only. the C version is inlined in h264.c
    void (*h264_loop_filter_strength)(int16_t bS[2][4][4], uint8_t nnz[40],
                                      int8_t ref[2][40], int16_t mv[2][40][2],
                                      int bidir, int edges, int step,
                                      int mask_mv0, int mask_mv1, int field);

    /* IDCT */
    void (*h264_idct_add)(uint8_t *dst /*align 4*/,
                          int16_t *block /*align 16*/, int stride);
    void (*h264_idct8_add)(uint8_t *dst /*align 8*/,
                           int16_t *block /*align 16*/, int stride);
    void (*h264_idct_dc_add)(uint8_t *dst /*align 4*/,
                             int16_t *block /*align 16*/, int stride);
    void (*h264_idct8_dc_add)(uint8_t *dst /*align 8*/,
                              int16_t *block /*align 16*/, int stride);

    void (*h264_idct_add16)(uint8_t *dst /*align 16*/, const int *blockoffset,
                            int16_t *block /*align 16*/, int stride,
                            const uint8_t nnzc[15 * 8]);
    void (*h264_idct8_add4)(uint8_t *dst /*align 16*/, const int *blockoffset,
                            int16_t *block /*align 16*/, int stride,
                            const uint8_t nnzc[15 * 8]);
    void (*h264_idct_add8)(uint8_t **dst /*align 16*/, const int *blockoffset,
                           int16_t *block /*align 16*/, int stride,
                           const uint8_t nnzc[15 * 8]);
    void (*h264_idct_add16intra)(uint8_t *dst /*align 16*/, const int *blockoffset,
                                 int16_t *block /*align 16*/,
                                 int stride, const uint8_t nnzc[15 * 8]);
    void (*h264_luma_dc_dequant_idct)(int16_t *output /*align 16*/,
                                      int16_t *input /*align 16*/, int qmul);
    void (*h264_chroma_dc_dequant_idct)(int16_t *block, int qmul);

    /* bypass-transform */
    void (*h264_add_pixels8_clear)(uint8_t *dst, int16_t *block, int stride);
    void (*h264_add_pixels4_clear)(uint8_t *dst, int16_t *block, int stride);
} H264DSPContext;
110 void ff_h264dsp_init(H264DSPContext *c, const int bit_depth,
111  const int chroma_format_idc);
112 void ff_h264dsp_init_arm(H264DSPContext *c, const int bit_depth,
113  const int chroma_format_idc);
114 void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth,
115  const int chroma_format_idc);
116 void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth,
117  const int chroma_format_idc);
118 
119 #endif /* AVCODEC_H264DSP_H */
void(* h264_chroma_dc_dequant_idct)(int16_t *block, int qmul)
Definition: h264dsp.h:103
FIXME Range Coding of cr are ref
Definition: snow.txt:367
void(* h264_h_loop_filter_chroma_mbaff_intra)(uint8_t *pix, int stride, int alpha, int beta)
Definition: h264dsp.h:71
void ff_h264dsp_init(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp.c:63
int stride
Definition: mace.c:144
uint8_t
void(* h264_v_loop_filter_luma_intra)(uint8_t *pix, int stride, int alpha, int beta)
Definition: h264dsp.h:54
void ff_h264dsp_init_arm(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
h264_weight_func weight_h264_pixels_tab[4]
Definition: h264dsp.h:43
void(* h264_idct_add16intra)(uint8_t *dst, const int *blockoffset, int16_t *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:98
void(* h264_idct_add)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:80
void(* h264_h_loop_filter_luma_mbaff_intra)(uint8_t *pix, int stride, int alpha, int beta)
Definition: h264dsp.h:58
void(* h264_idct8_dc_add)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:86
static double alpha(void *priv, double x, double y)
Definition: vf_geq.c:86
these buffered frames must be flushed immediately if a new input produces new the filter must not call request_frame to get more It must just process the frame or queue it The task of requesting more frames is left to the filter s request_frame method or the application If a filter has several the filter must be ready for frames arriving randomly on any input any filter with several inputs will most likely require some kind of queuing mechanism It is perfectly acceptable to have a limited queue and to drop frames when the inputs are too unbalanced request_frame This method is called when a frame is wanted on an output For an input
void(* h264_h_loop_filter_chroma_mbaff)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
Definition: h264dsp.h:64
static const uint8_t offset[127][2]
Definition: vf_spp.c:70
void(* h264_add_pixels8_clear)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:106
void(* h264_idct_add16)(uint8_t *dst, const int *blockoffset, int16_t *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:89
h264_biweight_func biweight_h264_pixels_tab[4]
Definition: h264dsp.h:44
Context for storing H.264 DSP functions.
Definition: h264dsp.h:41
void(* h264_v_loop_filter_chroma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
Definition: h264dsp.h:60
void(* h264_h_loop_filter_chroma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
Definition: h264dsp.h:62
void(* h264_h_loop_filter_chroma_intra)(uint8_t *pix, int stride, int alpha, int beta)
Definition: h264dsp.h:69
void(* h264_h_loop_filter_luma_mbaff)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
Definition: h264dsp.h:51
void(* h264_v_loop_filter_luma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
Definition: h264dsp.h:47
void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264_altivec.c:725
void(* h264_idct8_add)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:82
static const int8_t mv[256][2]
AVS_Value src
Definition: avisynth_c.h:523
typedef void(RENAME(mix_any_func_type))
void(* h264_loop_filter_strength)(int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2], int bidir, int edges, int step, int mask_mv0, int mask_mv1, int field)
Definition: h264dsp.h:74
BYTE int const BYTE int int int height
Definition: avisynth_c.h:713
void(* h264_idct8_add4)(uint8_t *dst, const int *blockoffset, int16_t *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:92
void(* h264_luma_dc_dequant_idct)(int16_t *output, int16_t *input, int qmul)
Definition: h264dsp.h:101
void(* h264_add_pixels4_clear)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:107
static int weight(int i, int blen, int offset)
void(* h264_h_loop_filter_luma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
Definition: h264dsp.h:49
void ff_h264dsp_init_x86(H264DSPContext *c, const int bit_depth, const int chroma_format_idc)
Definition: h264dsp_init.c:211
static double c[64]
these buffered frames must be flushed immediately if a new input produces new output(Example:frame rate-doubling filter:filter_frame must(1) flush the second copy of the previous frame, if it is still there,(2) push the first copy of the incoming frame,(3) keep the second copy for later.) If the input frame is not enough to produce output
void(* h264_weight_func)(uint8_t *block, int stride, int height, int log2_denom, int weight, int offset)
Definition: h264dsp.h:32
void(* h264_h_loop_filter_luma_intra)(uint8_t *pix, int stride, int alpha, int beta)
Definition: h264dsp.h:56
else dst[i][x+y *dst_stride[i]]
Definition: vf_mcdeint.c:160
struct H264DSPContext H264DSPContext
Context for storing H.264 DSP functions.
void(* h264_idct_dc_add)(uint8_t *dst, int16_t *block, int stride)
Definition: h264dsp.h:84
void(* h264_v_loop_filter_chroma_intra)(uint8_t *pix, int stride, int alpha, int beta)
Definition: h264dsp.h:67
void(* h264_idct_add8)(uint8_t **dst, const int *blockoffset, int16_t *block, int stride, const uint8_t nnzc[15 *8])
Definition: h264dsp.h:95
void(* h264_biweight_func)(uint8_t *dst, uint8_t *src, int stride, int height, int log2_denom, int weightd, int weights, int offset)
Definition: h264dsp.h:34
trying all byte sequences megabyte in length and selecting the best looking sequence will yield cases to try But a word about which is also called distortion Distortion can be quantified by almost any quality measurement one chooses the sum of squared differences is used but more complex methods that consider psychovisual effects can be used as well It makes no difference in this discussion First step