/*
 * Copyright (c) 2001-2003 The ffmpeg Project
 *
 * first version by Francois Revol (revol@free.fr)
 * fringe ADPCM codecs (e.g., DK3, DK4, Westwood)
 *   by Mike Melanson (melanson@pcisys.net)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "avcodec.h"
#include "put_bits.h"
#include "bytestream.h"
#include "adpcm.h"
#include "adpcm_data.h"
#include "internal.h"

/**
 * @file
 * ADPCM encoders
 * See ADPCM decoder reference documents for codec information.
 */

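/*
 * State for the optional trellis (Viterbi-style) search: TrellisPath holds
 * one emitted nibble plus a back-pointer to its predecessor, forming the
 * chains that are walked backwards when a path is frozen; TrellisNode holds
 * one surviving encoder state (accumulated squared error, path index, the
 * last decoded sample(s) and the current step/delta).
 */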
typedef struct TrellisPath {
    int nibble;
    int prev;
} TrellisPath;

typedef struct TrellisNode {
    uint32_t ssd;
    int path;
    int sample1;
    int sample2;
    int step;
} TrellisNode;

typedef struct ADPCMEncodeContext {
    ADPCMChannelStatus status[6];
    TrellisPath *paths;
    TrellisNode *node_buf;
    TrellisNode **nodep_buf;
    uint8_t *trellis_hash;
} ADPCMEncodeContext;

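/*
 * Every FREEZE_INTERVAL samples the best surviving path is committed to the
 * output and the path table is reused, which keeps the path buffer bounded
 * at (1 << trellis) * FREEZE_INTERVAL entries.
 */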
#define FREEZE_INTERVAL 128

static av_cold int adpcm_encode_close(AVCodecContext *avctx);

static av_cold int adpcm_encode_init(AVCodecContext *avctx)
{
    ADPCMEncodeContext *s = avctx->priv_data;
    uint8_t *extradata;
    int i;
    int ret = AVERROR(ENOMEM);

    if (avctx->channels > 2) {
        av_log(avctx, AV_LOG_ERROR, "only stereo or mono is supported\n");
        return AVERROR(EINVAL);
    }

    if (avctx->trellis && (unsigned)avctx->trellis > 16U) {
        av_log(avctx, AV_LOG_ERROR, "invalid trellis size\n");
        return AVERROR(EINVAL);
    }

    if (avctx->trellis) {
        int frontier  = 1 << avctx->trellis;
        int max_paths = frontier * FREEZE_INTERVAL;
        FF_ALLOC_OR_GOTO(avctx, s->paths,
                         max_paths * sizeof(*s->paths), error);
        FF_ALLOC_OR_GOTO(avctx, s->node_buf,
                         2 * frontier * sizeof(*s->node_buf), error);
        FF_ALLOC_OR_GOTO(avctx, s->nodep_buf,
                         2 * frontier * sizeof(*s->nodep_buf), error);
        FF_ALLOC_OR_GOTO(avctx, s->trellis_hash,
                         65536 * sizeof(*s->trellis_hash), error);
    }

    avctx->bits_per_coded_sample = av_get_bits_per_sample(avctx->codec->id);

    switch (avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_IMA_WAV:
        /* each 16-bit sample gives one nibble
           and we have 4 bytes of per-channel overhead */
        avctx->frame_size = (BLKSIZE - 4 * avctx->channels) * 8 /
                            (4 * avctx->channels) + 1;
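        /* worked example, assuming the usual BLKSIZE of 1024: a mono stream
           gets (1024 - 4) * 8 / 4 + 1 = 2041 samples per block, one stored
           verbatim in the block header plus 2040 nibbles of data */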
        /* seems frame_size isn't taken into account...
           have to buffer the samples :-( */
        avctx->block_align = BLKSIZE;
        avctx->bits_per_coded_sample = 4;
        break;
    case AV_CODEC_ID_ADPCM_IMA_QT:
        avctx->frame_size  = 64;
        avctx->block_align = 34 * avctx->channels;
        break;
    case AV_CODEC_ID_ADPCM_MS:
        /* each 16-bit sample gives one nibble
           and we have 7 bytes of per-channel overhead */
        avctx->frame_size = (BLKSIZE - 7 * avctx->channels) * 2 / avctx->channels + 2;
        avctx->bits_per_coded_sample = 4;
        avctx->block_align = BLKSIZE;
        if (!(avctx->extradata = av_malloc(32 + FF_INPUT_BUFFER_PADDING_SIZE)))
            goto error;
        avctx->extradata_size = 32;
        extradata = avctx->extradata;
        bytestream_put_le16(&extradata, avctx->frame_size);
        bytestream_put_le16(&extradata, 7); /* wNumCoef */
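        /* ff_adpcm_AdaptCoeff1/2 store the standard MS ADPCM predictor
           coefficients divided by 4, so multiply by 4 here to write the
           usual wCoef pairs (256/0, 512/-256, ...) into the extradata */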
        for (i = 0; i < 7; i++) {
            bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff1[i] * 4);
            bytestream_put_le16(&extradata, ff_adpcm_AdaptCoeff2[i] * 4);
        }
        break;
    case AV_CODEC_ID_ADPCM_YAMAHA:
        avctx->frame_size  = BLKSIZE * 2 / avctx->channels;
        avctx->block_align = BLKSIZE;
        break;
    case AV_CODEC_ID_ADPCM_SWF:
        if (avctx->sample_rate != 11025 &&
            avctx->sample_rate != 22050 &&
            avctx->sample_rate != 44100) {
            av_log(avctx, AV_LOG_ERROR, "Sample rate must be 11025, "
                   "22050 or 44100\n");
            ret = AVERROR(EINVAL);
            goto error;
        }
        avctx->frame_size = 512 * (avctx->sample_rate / 11025);
        break;
    default:
        ret = AVERROR(EINVAL);
        goto error;
    }

    return 0;
error:
    adpcm_encode_close(avctx);
    return ret;
}

static av_cold int adpcm_encode_close(AVCodecContext *avctx)
{
    ADPCMEncodeContext *s = avctx->priv_data;
    av_freep(&s->paths);
    av_freep(&s->node_buf);
    av_freep(&s->nodep_buf);
    av_freep(&s->trellis_hash);

    return 0;
}


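/*
 * IMA ADPCM quantization: the nibble's low three bits encode
 * FFMIN(7, |delta| * 4 / step), bit 3 carries the sign, and the predictor is
 * advanced with the same difflookup table the decoder uses, so encoder and
 * decoder state stay in sync.  The step index then moves by
 * ff_adpcm_index_table[nibble], clipped to [0, 88].
 */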
static inline uint8_t adpcm_ima_compress_sample(ADPCMChannelStatus *c,
                                                int16_t sample)
{
    int delta  = sample - c->prev_sample;
    int nibble = FFMIN(7, abs(delta) * 4 /
                       ff_adpcm_step_table[c->step_index]) + (delta < 0) * 8;
    c->prev_sample += ((ff_adpcm_step_table[c->step_index] *
                        ff_adpcm_yamaha_difflookup[nibble]) / 8);
    c->prev_sample  = av_clip_int16(c->prev_sample);
    c->step_index   = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);
    return nibble;
}

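/*
 * The QT variant quantizes bit by bit (successive approximation): each of
 * the three magnitude bits tests the remaining delta against step, step/2
 * and step/4, while diff accumulates the value the decoder will reconstruct,
 * including the step >> 3 rounding term, before it is applied to prev_sample
 * with the sign from bit 3.
 */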
static inline uint8_t adpcm_ima_qt_compress_sample(ADPCMChannelStatus *c,
                                                   int16_t sample)
{
    int delta = sample - c->prev_sample;
    int diff, step = ff_adpcm_step_table[c->step_index];
    int nibble = 8 * (delta < 0);

    delta = abs(delta);
    diff  = delta + (step >> 3);

    if (delta >= step) {
        nibble |= 4;
        delta  -= step;
    }
    step >>= 1;
    if (delta >= step) {
        nibble |= 2;
        delta  -= step;
    }
    step >>= 1;
    if (delta >= step) {
        nibble |= 1;
        delta  -= step;
    }
    diff -= delta;

    if (nibble & 8)
        c->prev_sample -= diff;
    else
        c->prev_sample += diff;

    c->prev_sample = av_clip_int16(c->prev_sample);
    c->step_index  = av_clip(c->step_index + ff_adpcm_index_table[nibble], 0, 88);

    return nibble;
}

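/*
 * MS ADPCM predicts each sample from the previous two via coeff1/coeff2
 * (stored pre-divided by 4, hence the division by 64 rather than 256),
 * quantizes the residual by the current idelta with round-to-nearest
 * (the +-idelta/2 bias), and adapts idelta multiplicatively through
 * ff_adpcm_AdaptationTable, never letting it drop below 16.
 */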
static inline uint8_t adpcm_ms_compress_sample(ADPCMChannelStatus *c,
                                               int16_t sample)
{
    int predictor, nibble, bias;

    predictor = (((c->sample1) * (c->coeff1)) +
                 ((c->sample2) * (c->coeff2))) / 64;

    nibble = sample - predictor;
    if (nibble >= 0)
        bias =  c->idelta / 2;
    else
        bias = -c->idelta / 2;

    nibble = (nibble + bias) / c->idelta;
    nibble = av_clip(nibble, -8, 7) & 0x0F;

    predictor += ((nibble & 0x08) ? (nibble - 0x10) : nibble) * c->idelta;

    c->sample2 = c->sample1;
    c->sample1 = av_clip_int16(predictor);

    c->idelta = (ff_adpcm_AdaptationTable[nibble] * c->idelta) >> 8;
    if (c->idelta < 16)
        c->idelta = 16;

    return nibble;
}

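/*
 * Yamaha (AICA) ADPCM uses the same sign+magnitude nibble layout as IMA but
 * adapts the step multiplicatively: step is scaled by
 * ff_adpcm_yamaha_indexscale[nibble] / 256 and clipped to [127, 24567].
 * A zero step marks an uninitialized channel and resets the predictor.
 */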
static inline uint8_t adpcm_yamaha_compress_sample(ADPCMChannelStatus *c,
                                                   int16_t sample)
{
    int nibble, delta;

    if (!c->step) {
        c->predictor = 0;
        c->step      = 127;
    }

    delta = sample - c->predictor;

    nibble = FFMIN(7, abs(delta) * 4 / c->step) + (delta < 0) * 8;

    c->predictor += ((c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8);
    c->predictor  = av_clip_int16(c->predictor);
    c->step       = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
    c->step       = av_clip(c->step, 127, 24567);

    return nibble;
}

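/*
 * Trellis search over the nibble sequence: for every input sample the
 * (1 << avctx->trellis) best encoder states are kept, ordered by accumulated
 * squared error.  Each surviving state is expanded with a small range of
 * candidate nibbles around the greedy choice, states that decode to the same
 * sample value are collapsed through trellis_hash, and every FREEZE_INTERVAL
 * samples the best path is committed to dst so the path buffer can be
 * reused.
 */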
static void adpcm_compress_trellis(AVCodecContext *avctx,
                                   const int16_t *samples, uint8_t *dst,
                                   ADPCMChannelStatus *c, int n, int stride)
{
    //FIXME 6% faster if frontier is a compile-time constant
    ADPCMEncodeContext *s = avctx->priv_data;
    const int frontier = 1 << avctx->trellis;
    const int version  = avctx->codec->id;
    TrellisPath *paths       = s->paths, *p;
    TrellisNode *node_buf    = s->node_buf;
    TrellisNode **nodep_buf  = s->nodep_buf;
    TrellisNode **nodes      = nodep_buf; // nodes[] is always sorted by .ssd
    TrellisNode **nodes_next = nodep_buf + frontier;
    int pathn = 0, froze = -1, i, j, k, generation = 0;
    uint8_t *hash = s->trellis_hash;
    memset(hash, 0xff, 65536 * sizeof(*hash));

    memset(nodep_buf, 0, 2 * frontier * sizeof(*nodep_buf));
    nodes[0]          = node_buf + frontier;
    nodes[0]->ssd     = 0;
    nodes[0]->path    = 0;
    nodes[0]->step    = c->step_index;
    nodes[0]->sample1 = c->sample1;
    nodes[0]->sample2 = c->sample2;
    if (version == AV_CODEC_ID_ADPCM_IMA_WAV ||
        version == AV_CODEC_ID_ADPCM_IMA_QT  ||
        version == AV_CODEC_ID_ADPCM_SWF)
        nodes[0]->sample1 = c->prev_sample;
    if (version == AV_CODEC_ID_ADPCM_MS)
        nodes[0]->step = c->idelta;
    if (version == AV_CODEC_ID_ADPCM_YAMAHA) {
        if (c->step == 0) {
            nodes[0]->step    = 127;
            nodes[0]->sample1 = 0;
        } else {
            nodes[0]->step    = c->step;
            nodes[0]->sample1 = c->predictor;
        }
    }

    for (i = 0; i < n; i++) {
        TrellisNode *t = node_buf + frontier * (i & 1);
        TrellisNode **u;
        int sample   = samples[i * stride];
        int heap_pos = 0;
        memset(nodes_next, 0, frontier * sizeof(TrellisNode*));
        for (j = 0; j < frontier && nodes[j]; j++) {
            // higher j have higher ssd already, so they're likely
            // to yield a suboptimal next sample too
            const int range = (j < frontier / 2) ? 1 : 0;
            const int step  = nodes[j]->step;
            int nidx;
            if (version == AV_CODEC_ID_ADPCM_MS) {
                const int predictor = ((nodes[j]->sample1 * c->coeff1) +
                                       (nodes[j]->sample2 * c->coeff2)) / 64;
                const int div  = (sample - predictor) / step;
                const int nmin = av_clip(div - range, -8, 6);
                const int nmax = av_clip(div + range, -7, 7);
                for (nidx = nmin; nidx <= nmax; nidx++) {
                    const int nibble = nidx & 0xf;
                    int dec_sample   = predictor + nidx * step;
#define STORE_NODE(NAME, STEP_INDEX)\
                    int d;\
                    uint32_t ssd;\
                    int pos;\
                    TrellisNode *u;\
                    uint8_t *h;\
                    dec_sample = av_clip_int16(dec_sample);\
                    d = sample - dec_sample;\
                    ssd = nodes[j]->ssd + d*d;\
                    /* Check for wraparound, skip such samples completely. \
                     * Note, changing ssd to a 64 bit variable would be \
                     * simpler, avoiding this check, but it's slower on \
                     * x86 32 bit at the moment. */\
                    if (ssd < nodes[j]->ssd)\
                        goto next_##NAME;\
                    /* Collapse any two states with the same previous sample value. \
                     * One could also distinguish states by step and by 2nd to last
                     * sample, but the effects of that are negligible.
                     * Since nodes in the previous generation are iterated
                     * through a heap, they're roughly ordered from better to
                     * worse, but not strictly ordered. Therefore, an earlier
                     * node with the same sample value is better in most cases
                     * (and thus the current is skipped), but not strictly
                     * in all cases. Only skipping samples where ssd >=
                     * ssd of the earlier node with the same sample gives
                     * slightly worse quality, though, for some reason. */ \
                    h = &hash[(uint16_t) dec_sample];\
                    if (*h == generation)\
                        goto next_##NAME;\
                    if (heap_pos < frontier) {\
                        pos = heap_pos++;\
                    } else {\
                        /* Try to replace one of the leaf nodes with the new \
                         * one, but try a different slot each time. */\
                        pos = (frontier >> 1) +\
                              (heap_pos & ((frontier >> 1) - 1));\
                        if (ssd > nodes_next[pos]->ssd)\
                            goto next_##NAME;\
                        heap_pos++;\
                    }\
                    *h = generation;\
                    u  = nodes_next[pos];\
                    if (!u) {\
                        av_assert1(pathn < FREEZE_INTERVAL << avctx->trellis);\
                        u = t++;\
                        nodes_next[pos] = u;\
                        u->path = pathn++;\
                    }\
                    u->ssd  = ssd;\
                    u->step = STEP_INDEX;\
                    u->sample2 = nodes[j]->sample1;\
                    u->sample1 = dec_sample;\
                    paths[u->path].nibble = nibble;\
                    paths[u->path].prev   = nodes[j]->path;\
                    /* Sift the newly inserted node up in the heap to \
                     * restore the heap property. */\
                    while (pos > 0) {\
                        int parent = (pos - 1) >> 1;\
                        if (nodes_next[parent]->ssd <= ssd)\
                            break;\
                        FFSWAP(TrellisNode*, nodes_next[parent], nodes_next[pos]);\
                        pos = parent;\
                    }\
                    next_##NAME:;
                    STORE_NODE(ms, FFMAX(16,
                               (ff_adpcm_AdaptationTable[nibble] * step) >> 8));
                }
            } else if (version == AV_CODEC_ID_ADPCM_IMA_WAV ||
                       version == AV_CODEC_ID_ADPCM_IMA_QT  ||
                       version == AV_CODEC_ID_ADPCM_SWF) {
#define LOOP_NODES(NAME, STEP_TABLE, STEP_INDEX)\
                const int predictor = nodes[j]->sample1;\
                const int div = (sample - predictor) * 4 / STEP_TABLE;\
                int nmin = av_clip(div - range, -7, 6);\
                int nmax = av_clip(div + range, -6, 7);\
                if (nmin <= 0)\
                    nmin--; /* distinguish -0 from +0 */\
                if (nmax < 0)\
                    nmax--;\
                for (nidx = nmin; nidx <= nmax; nidx++) {\
                    const int nibble = nidx < 0 ? 7 - nidx : nidx;\
                    int dec_sample = predictor +\
                                    (STEP_TABLE *\
                                     ff_adpcm_yamaha_difflookup[nibble]) / 8;\
                    STORE_NODE(NAME, STEP_INDEX);\
                }
                LOOP_NODES(ima, ff_adpcm_step_table[step],
                           av_clip(step + ff_adpcm_index_table[nibble], 0, 88));
            } else { //AV_CODEC_ID_ADPCM_YAMAHA
                LOOP_NODES(yamaha, step,
                           av_clip((step * ff_adpcm_yamaha_indexscale[nibble]) >> 8,
                                   127, 24567));
#undef LOOP_NODES
#undef STORE_NODE
            }
        }

        u          = nodes;
        nodes      = nodes_next;
        nodes_next = u;

        generation++;
        if (generation == 255) {
            memset(hash, 0xff, 65536 * sizeof(*hash));
            generation = 0;
        }

        // prevent overflow
        if (nodes[0]->ssd > (1 << 28)) {
            for (j = 1; j < frontier && nodes[j]; j++)
                nodes[j]->ssd -= nodes[0]->ssd;
            nodes[0]->ssd = 0;
        }

        // merge old paths to save memory
        if (i == froze + FREEZE_INTERVAL) {
            p = &paths[nodes[0]->path];
            for (k = i; k > froze; k--) {
                dst[k] = p->nibble;
                p      = &paths[p->prev];
            }
            froze = i;
            pathn = 0;
            // other nodes might use paths that don't coincide with the frozen one.
            // checking which nodes do so is too slow, so just kill them all.
            // this also slightly improves quality, but I don't know why.
            memset(nodes + 1, 0, (frontier - 1) * sizeof(TrellisNode*));
        }
    }

    p = &paths[nodes[0]->path];
    for (i = n - 1; i > froze; i--) {
        dst[i] = p->nibble;
        p      = &paths[p->prev];
    }

    c->predictor  = nodes[0]->sample1;
    c->sample1    = nodes[0]->sample1;
    c->sample2    = nodes[0]->sample2;
    c->step_index = nodes[0]->step;
    c->step       = nodes[0]->step;
    c->idelta     = nodes[0]->step;
}

static int adpcm_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
                              const AVFrame *frame, int *got_packet_ptr)
{
    int n, i, ch, st, pkt_size, ret;
    const int16_t *samples;
    int16_t **samples_p;
    uint8_t *dst;
    ADPCMEncodeContext *c = avctx->priv_data;
    uint8_t *buf;

    samples   = (const int16_t *)frame->data[0];
    samples_p = (int16_t **)frame->extended_data;
    st        = avctx->channels == 2;

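    /* An SWF packet carries a 2-bit format header plus, per channel, a
       16-bit initial sample and a 6-bit step index (22 bits), followed by
       one 4-bit nibble for each remaining sample; the expression below is
       that bit count rounded up to whole bytes. */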
    if (avctx->codec_id == AV_CODEC_ID_ADPCM_SWF)
        pkt_size = (2 + avctx->channels * (22 + 4 * (frame->nb_samples - 1)) + 7) / 8;
    else
        pkt_size = avctx->block_align;
    if ((ret = ff_alloc_packet2(avctx, avpkt, pkt_size)) < 0)
        return ret;
    dst = avpkt->data;

    switch (avctx->codec->id) {
    case AV_CODEC_ID_ADPCM_IMA_WAV:
    {
        int blocks, j;

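        /* The first sample of each channel is stored verbatim in the block
           header, so only the remaining nb_samples - 1 samples are encoded,
           in groups of 8 nibbles (4 bytes) per channel. */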
        blocks = (frame->nb_samples - 1) / 8;

        for (ch = 0; ch < avctx->channels; ch++) {
            ADPCMChannelStatus *status = &c->status[ch];
            status->prev_sample = samples_p[ch][0];
            /* status->step_index = 0;
               XXX: not sure how to init the state machine */
            bytestream_put_le16(&dst, status->prev_sample);
            *dst++ = status->step_index;
            *dst++ = 0; /* unknown */
        }

        /* stereo: 4 bytes (8 samples) for left, 4 bytes for right */
        if (avctx->trellis > 0) {
            FF_ALLOC_OR_GOTO(avctx, buf, avctx->channels * blocks * 8, error);
            for (ch = 0; ch < avctx->channels; ch++) {
                adpcm_compress_trellis(avctx, &samples_p[ch][1],
                                       buf + ch * blocks * 8, &c->status[ch],
                                       blocks * 8, 1);
            }
            for (i = 0; i < blocks; i++) {
                for (ch = 0; ch < avctx->channels; ch++) {
                    uint8_t *buf1 = buf + ch * blocks * 8 + i * 8;
                    for (j = 0; j < 8; j += 2)
                        *dst++ = buf1[j] | (buf1[j + 1] << 4);
                }
            }
            av_free(buf);
        } else {
            for (i = 0; i < blocks; i++) {
                for (ch = 0; ch < avctx->channels; ch++) {
                    ADPCMChannelStatus *status = &c->status[ch];
                    const int16_t *smp = &samples_p[ch][1 + i * 8];
                    for (j = 0; j < 8; j += 2) {
                        uint8_t v = adpcm_ima_compress_sample(status, smp[j    ]);
                        v        |= adpcm_ima_compress_sample(status, smp[j + 1]) << 4;
                        *dst++ = v;
                    }
                }
            }
        }
        break;
    }
    case AV_CODEC_ID_ADPCM_IMA_QT:
    {
        PutBitContext pb;
        init_put_bits(&pb, dst, pkt_size * 8);

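        /* Each channel contributes a 34-byte block: the top 9 bits of the
           previous sample plus the 7-bit step index packed into two bytes,
           followed by 64 nibbles (32 bytes), with the earlier sample of each
           pair in the low nibble of its byte. */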
        for (ch = 0; ch < avctx->channels; ch++) {
            ADPCMChannelStatus *status = &c->status[ch];
            put_bits(&pb, 9, (status->prev_sample & 0xFFFF) >> 7);
            put_bits(&pb, 7,  status->step_index);
            if (avctx->trellis > 0) {
                uint8_t buf[64];
                adpcm_compress_trellis(avctx, &samples_p[ch][1], buf, status,
                                       64, 1);
                for (i = 0; i < 64; i++)
                    put_bits(&pb, 4, buf[i ^ 1]);
            } else {
                for (i = 0; i < 64; i += 2) {
                    int t1, t2;
                    t1 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i    ]);
                    t2 = adpcm_ima_qt_compress_sample(status, samples_p[ch][i + 1]);
                    put_bits(&pb, 4, t2);
                    put_bits(&pb, 4, t1);
                }
            }
        }

        flush_put_bits(&pb);
        break;
    }
    case AV_CODEC_ID_ADPCM_SWF:
    {
        PutBitContext pb;
        init_put_bits(&pb, dst, pkt_size * 8);

        n = frame->nb_samples - 1;

        // store AdpcmCodeSize
        put_bits(&pb, 2, 2);    // set 4-bit flash adpcm format

        // init the encoder state
        for (i = 0; i < avctx->channels; i++) {
            // clip step so it fits 6 bits
            c->status[i].step_index = av_clip(c->status[i].step_index, 0, 63);
            put_sbits(&pb, 16, samples[i]);
            put_bits(&pb, 6, c->status[i].step_index);
            c->status[i].prev_sample = samples[i];
        }

        if (avctx->trellis > 0) {
            FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error);
            adpcm_compress_trellis(avctx, samples + avctx->channels, buf,
                                   &c->status[0], n, avctx->channels);
            if (avctx->channels == 2)
                adpcm_compress_trellis(avctx, samples + avctx->channels + 1,
                                       buf + n, &c->status[1], n,
                                       avctx->channels);
            for (i = 0; i < n; i++) {
                put_bits(&pb, 4, buf[i]);
                if (avctx->channels == 2)
                    put_bits(&pb, 4, buf[n + i]);
            }
            av_free(buf);
        } else {
            for (i = 1; i < frame->nb_samples; i++) {
                put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[0],
                         samples[avctx->channels * i]));
                if (avctx->channels == 2)
                    put_bits(&pb, 4, adpcm_ima_compress_sample(&c->status[1],
                             samples[2 * i + 1]));
            }
        }
        flush_put_bits(&pb);
        break;
    }
    case AV_CODEC_ID_ADPCM_MS:
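        /* Block header: a one-byte predictor (coefficient set) index per
           channel, always 0 here, followed by each channel's idelta, sample1
           and sample2 as little-endian 16-bit values; the first two samples
           of every channel are consumed from the input and stored verbatim. */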
        for (i = 0; i < avctx->channels; i++) {
            int predictor = 0;
            *dst++ = predictor;
            c->status[i].coeff1 = ff_adpcm_AdaptCoeff1[predictor];
            c->status[i].coeff2 = ff_adpcm_AdaptCoeff2[predictor];
        }
        for (i = 0; i < avctx->channels; i++) {
            if (c->status[i].idelta < 16)
                c->status[i].idelta = 16;
            bytestream_put_le16(&dst, c->status[i].idelta);
        }
        for (i = 0; i < avctx->channels; i++)
            c->status[i].sample2 = *samples++;
        for (i = 0; i < avctx->channels; i++) {
            c->status[i].sample1 = *samples++;
            bytestream_put_le16(&dst, c->status[i].sample1);
        }
        for (i = 0; i < avctx->channels; i++)
            bytestream_put_le16(&dst, c->status[i].sample2);

        if (avctx->trellis > 0) {
            n = avctx->block_align - 7 * avctx->channels;
            FF_ALLOC_OR_GOTO(avctx, buf, 2 * n, error);
            if (avctx->channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
                                       avctx->channels);
                for (i = 0; i < n; i += 2)
                    *dst++ = (buf[i] << 4) | buf[i + 1];
            } else {
                adpcm_compress_trellis(avctx, samples,     buf,
                                       &c->status[0], n, avctx->channels);
                adpcm_compress_trellis(avctx, samples + 1, buf + n,
                                       &c->status[1], n, avctx->channels);
                for (i = 0; i < n; i++)
                    *dst++ = (buf[i] << 4) | buf[n + i];
            }
            av_free(buf);
        } else {
            for (i = 7 * avctx->channels; i < avctx->block_align; i++) {
                int nibble;
                nibble  = adpcm_ms_compress_sample(&c->status[ 0], *samples++) << 4;
                nibble |= adpcm_ms_compress_sample(&c->status[st], *samples++);
                *dst++ = nibble;
            }
        }
        break;
    case AV_CODEC_ID_ADPCM_YAMAHA:
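        /* Two samples are packed per output byte, with the earlier sample
           (or the left channel, for stereo) in the low nibble. */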
        n = frame->nb_samples / 2;
        if (avctx->trellis > 0) {
            FF_ALLOC_OR_GOTO(avctx, buf, 2 * n * 2, error);
            n *= 2;
            if (avctx->channels == 1) {
                adpcm_compress_trellis(avctx, samples, buf, &c->status[0], n,
                                       avctx->channels);
                for (i = 0; i < n; i += 2)
                    *dst++ = buf[i] | (buf[i + 1] << 4);
            } else {
                adpcm_compress_trellis(avctx, samples,     buf,
                                       &c->status[0], n, avctx->channels);
                adpcm_compress_trellis(avctx, samples + 1, buf + n,
                                       &c->status[1], n, avctx->channels);
                for (i = 0; i < n; i++)
                    *dst++ = buf[i] | (buf[n + i] << 4);
            }
            av_free(buf);
        } else
            for (n *= avctx->channels; n > 0; n--) {
                int nibble;
                nibble  = adpcm_yamaha_compress_sample(&c->status[ 0], *samples++);
                nibble |= adpcm_yamaha_compress_sample(&c->status[st], *samples++) << 4;
                *dst++ = nibble;
            }
        break;
    default:
        return AVERROR(EINVAL);
    }

    avpkt->size = pkt_size;
    *got_packet_ptr = 1;
    return 0;
error:
    return AVERROR(ENOMEM);
}

static const enum AVSampleFormat sample_fmts[] = {
    AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
};

static const enum AVSampleFormat sample_fmts_p[] = {
    AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_NONE
};

#define ADPCM_ENCODER(id_, name_, sample_fmts_, long_name_)        \
AVCodec ff_ ## name_ ## _encoder = {                               \
    .name           = #name_,                                      \
    .type           = AVMEDIA_TYPE_AUDIO,                          \
    .id             = id_,                                         \
    .priv_data_size = sizeof(ADPCMEncodeContext),                  \
    .init           = adpcm_encode_init,                           \
    .encode2        = adpcm_encode_frame,                          \
    .close          = adpcm_encode_close,                          \
    .sample_fmts    = sample_fmts_,                                \
    .long_name      = NULL_IF_CONFIG_SMALL(long_name_),            \
}

ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_QT,  adpcm_ima_qt,  sample_fmts_p, "ADPCM IMA QuickTime");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_IMA_WAV, adpcm_ima_wav, sample_fmts_p, "ADPCM IMA WAV");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_MS,      adpcm_ms,      sample_fmts,   "ADPCM Microsoft");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_SWF,     adpcm_swf,     sample_fmts,   "ADPCM Shockwave Flash");
ADPCM_ENCODER(AV_CODEC_ID_ADPCM_YAMAHA,  adpcm_yamaha,  sample_fmts,   "ADPCM Yamaha");