f->mant = i? (i<<6) >> f->exp : 1<<5;
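/* Note: Float11 stores |i| as a 6-bit mantissa normalized to [32, 63] plus an
 * exponent, so the value is roughly mant * 2^(exp - 6).  Illustrative example:
 * i = 100 -> exp = 7, mant = (100<<6)>>7 = 50, and 50 * 2^(7-6) = 100.
 * A zero input gets the canonical encoding mant = 1<<5, exp = 0. */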
res = (((f1->mant * f2->mant) + 0x30) >> 4);
res = exp > 19 ? res << (exp - 19) : res >> (19 - exp);
return (f1->sign ^ f2->sign) ? -res : res;
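/* mult() multiplies two Float11 values: the mantissa product is rounded with
 * +0x30 and scaled by >>4 (apparently mirroring the FMULT procedure of the
 * G.726 recommendation), the combined exponent re-aligns the result around 19,
 * and the XOR of the sign bits sets the sign of the fixed-point product. */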
return (value < 0) ? -1 : 1;
static const int16_t iquant_tbl16[] =             /**< 16kbit/s 2bits per sample */
    { 116, 365, 365, 116 };
static const int16_t W_tbl16[] =
    { -22, 439, 439, -22 };

static const int quant_tbl24[] =                  /**< 24kbit/s 3bits per sample */
    { 7, 217, 330, INT_MAX };
static const int16_t iquant_tbl24[] =
    { INT16_MIN, 135, 273, 373, 373, 273, 135, INT16_MIN };
static const int16_t W_tbl24[] =
    { -4, 30, 137, 582, 582, 137, 30, -4 };
static const uint8_t F_tbl24[] =
    { 0, 1, 2, 7, 7, 2, 1, 0 };

static const int quant_tbl32[] =                  /**< 32kbit/s 4bits per sample */
    { -125, 79, 177, 245, 299, 348, 399, INT_MAX };
static const int16_t iquant_tbl32[] =
    { INT16_MIN, 4, 135, 213, 273, 323, 373, 425,
      425, 373, 323, 273, 213, 135, 4, INT16_MIN };
static const int16_t W_tbl32[] =
    { -12, 18, 41, 64, 112, 198, 355, 1122,
      1122, 355, 198, 112, 64, 41, 18, -12 };
static const uint8_t F_tbl32[] =
    { 0, 0, 0, 1, 1, 1, 3, 7, 7, 3, 1, 1, 1, 0, 0, 0 };

static const int quant_tbl40[] =                  /**< 40kbit/s 5bits per sample */
    { -122, -16, 67, 138, 197, 249, 297, 338,
      377, 412, 444, 474, 501, 527, 552, INT_MAX };
static const int16_t iquant_tbl40[] =
    { INT16_MIN, -66, 28, 104, 169, 224, 274, 318,
      358, 395, 429, 459, 488, 514, 539, 566,
      566, 539, 514, 488, 459, 429, 395, 358,
      318, 274, 224, 169, 104, 28, -66, INT16_MIN };
static const int16_t W_tbl40[] =
    { 14, 14, 24, 39, 40, 41, 58, 100,
      141, 179, 219, 280, 358, 440, 529, 696,
      696, 529, 440, 358, 280, 219, 179, 141,
      100, 58, 41, 40, 39, 24, 14, 14 };
static const uint8_t F_tbl40[] =
    { 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 3, 4, 5, 6, 6,
      6, 6, 5, 4, 3, 2, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 };
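/* Each bitrate (16/24/32/40 kbit/s, i.e. 2 to 5 bits per sample) gets four
 * parallel tables: quant_tbl* holds the log-domain decision thresholds,
 * iquant_tbl* the reconstruction values, W_tbl* the scale-factor adaptation
 * weights, and F_tbl* the values averaged into dms/dml for the adaptation
 * speed control.  The mirrored layout covers positive and negative code words. */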
dln = ((exp<<7) + (((d<<7)>>exp)&0x7f)) - (c->y>>2);
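/* dln approximates log2(|d|) in Q7: exp<<7 is the integer part and
 * ((d<<7)>>exp)&0x7f is a linear estimate of the fractional part, then y/4 is
 * subtracted to normalize by the current scale factor.  Illustrative example:
 * d = 100 -> exp = 6, fractional term ((100<<7)>>6)&0x7f = 72,
 * so dln = 768 + 72 - (y>>2). */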
dex = (dql>>7) & 0xf;
dqt = (1<<7) + (dql & 0x7f);
return (dql < 0) ? 0 : ((dqt<<dex) >> 7);
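/* This is the counterpart of the log-domain mapping used in quant(): dex is
 * the integer part of the Q7 logarithm, dqt re-attaches the implicit leading
 * one to the fractional part, and (dqt<<dex)>>7 converts back to a linear
 * magnitude (negative dql means a magnitude below 1 and yields 0).
 * Illustrative example: dql = 840 -> dex = 6, dqt = 128 + 72 = 200,
 * result = (200<<6)>>7 = 100. */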
int dq, re_signal, pk0, fa1, i, tr, ylint, ylfrac, thr2, al, dq0;
ylint = (c->yl >> 15);
ylfrac = (c->yl >> 10) & 0x1f;
thr2 = (ylint > 9) ? 0x1f << 10 : (0x20 + ylfrac) << ylint;
tr = (c->td == 1 && dq > ((3*thr2)>>2));
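/* yl is kept in a log-domain form: its top bits (ylint) act as an exponent and
 * the next five bits (ylfrac) as a mantissa, so thr2 is roughly
 * (32 + ylfrac) << ylint, a linear threshold.  tr then flags a transition when
 * the tone-detect flag td is set and the quantized difference dq exceeds 3/4
 * of that threshold. */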
re_signal = c->se + dq;
pk0 = (c->sez + dq) ? sgn(c->sez + dq) : 0;
dq0 = dq ? sgn(dq) : 0;
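/* sgn() above never returns 0, so the ternaries give pk0 and dq0 a proper
 * three-way sign (-1, 0, +1) for the sign-driven coefficient updates below. */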
fa1 = av_clip((-c->a[0]*c->pk[0]*pk0)>>5, -256, 255);
c->a[1] += 128*pk0*c->pk[1] + fa1 - (c->a[1]>>7);
c->a[1] = av_clip(c->a[1], -12288, 12288);
c->a[0] += 64*3*pk0*c->pk[0] - (c->a[0] >> 8);
c->a[0] = av_clip(c->a[0], -(15360 - c->a[1]), 15360 - c->a[1]);
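/* a[0] and a[1] are the second-order (pole) predictor coefficients, roughly in
 * Q14 (12288 ~ 0.75, 15360 ~ 0.9375).  Each update combines a sign-driven
 * correction with a small leak term (a[x]>>7 or >>8), and the clips keep the
 * pole pair inside the usual stability region. */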
c->b[i] += 128*dq0*sgn(-c->dq[i].sign) - (c->b[i]>>8);
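/* b[0..5] are the sixth-order (zero) predictor coefficients; this is a
 * sign-sign LMS step (128*dq0*sgn(...)) with a small leak (b[i]>>8) so the
 * coefficients decay toward zero when the input goes idle. */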
c->pk[0] = pk0 ? pk0 : 1;
i2f(re_signal, &c->sr[0]);
c->dq[i] = c->dq[i-1];
c->td = c->a[1] < -11776;
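/* Tone detect: td is raised when the second pole coefficient drops below
 * -11776 (about -0.71875 in Q14), i.e. when the input looks like a strongly
 * predictable single-frequency tone. */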
c->ap += (-c->ap) >> 4;
if (c->y <= 1535 || c->td || abs((c->dms << 2) - c->dml) >= (c->dml >> 3))
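/* Adaptation speed control: ap (scale factor control) decays toward zero via
 * ap += (-ap)>>4 and, per the condition above, is pushed back up (code not
 * shown here) whenever the scale factor y is small, a tone is detected, or the
 * short-term average dms of F[I] drifts away from the long-term average dml. */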
c->yu = av_clip(c->y + c->tbls.W[I] + ((-c->y)>>5), 544, 5120);
c->yl += c->yu + ((-c->yl)>>6);
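/* yu is the fast scale factor, clipped to [544, 5120] (about 1.06 to 10.0 with
 * 9 fractional bits); yl is a slow, leaky average of yu kept with 6 extra
 * fractional bits, which is why it is shifted down by 6 wherever it is
 * combined with yu or y. */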
al = (c->ap >= 256) ? 1<<6 : c->ap >> 2;
c->y = (c->yl + (c->yu - (c->yl>>6))*al) >> 6;
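/* The resulting y is a linear blend of the slow and fast scale factors:
 * with al = 0 it reduces to yl>>6, with al = 1<<6 it reduces exactly to yu,
 * so ap (through al) controls how quickly the quantizer step size reacts. */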
return av_clip(re_signal << 2, -0xffff, 0xffff);
for (i=0; i<2; i++) {
for (i=0; i<6; i++) {
#if CONFIG_ADPCM_G726_ENCODER
       "allowed when the compliance level is higher than unofficial. "
       "Resample or reduce the compliance level.\n");
const int16_t *samples = (const int16_t *)frame->data[0];
int i, ret, out_size;
avpkt->size = out_size;
#define OFFSET(x) offsetof(G726Context, x)
#define AE AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

AVCodec ff_adpcm_g726_encoder = {
.init       = g726_encode_init,
.encode2    = g726_encode_frame,
.priv_class = &class,
#if CONFIG_ADPCM_G726_DECODER
                         int *got_frame_ptr, AVPacket *avpkt)
int buf_size = avpkt->size;
int out_samples, ret;
out_samples = buf_size * 8 / c->code_size;
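/* Each code word is code_size bits (2, 3, 4 or 5 depending on the bitrate), so
 * a packet of buf_size bytes carries buf_size*8/code_size samples; e.g. a
 * 100-byte packet at 32 kbit/s (4 bits per sample) decodes to 200 samples. */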
samples = (int16_t *)frame->data[0];
while (out_samples--)
AVCodec ff_adpcm_g726_decoder = {
.init   = g726_decode_init,
.decode = g726_decode_frame,
.flush  = g726_decode_flush,