ADPCM Codecs
ADPCMDecoder.h
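This header declares a common ADPCMDecoder base class plus one subclass per ADPCM variant. As a quick orientation before the listing, here is a minimal usage sketch; it assumes the subclass naming pattern used in this header (DecoderADPCM_<variant>), a caller-supplied buffer of encoded block data, and omits error handling. The function and buffer names (decode_block, adpcm_block) are placeholders.

#include "ADPCMDecoder.h"

using namespace adpcm_ffmpeg;

void decode_block(uint8_t *adpcm_block, size_t adpcm_block_size) {
  DecoderADPCM_IMA_WAV decoder;              // any Decoder* subclass from this header
  decoder.begin(44100, 2);                   // sample rate, channel count
  AVFrame &result = decoder.decode(adpcm_block, adpcm_block_size);
  int16_t *pcm = (int16_t *)result.data[0];  // interleaved 16-bit PCM
  // result.nb_samples samples per channel are now available in pcm
  decoder.end();
}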
1 #pragma once
2 #include "ADPCM.h"
3 #include "ADPCMCodec.h"
4 #include "ADPCMEncoder.h"
5 #include "adpcm-ffmpeg/adpcm.h"
6 #include "adpcm-ffmpeg/bytestream.h"
8 
9 namespace adpcm_ffmpeg {
10 
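11 /**
12  * @brief Common base for the ADPCM decoders below: it prepares the output
13  * AVFrame buffers, runs the codec-specific decode_frame_impl() and copies
14  * planar results into interleaved 16-bit samples.
15  */
16 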
17 class ADPCMDecoder : public ADPCMCodec {
18  public:
19  ADPCMDecoder() : ADPCMCodec() {
20  setBlockSize(ADAPCM_DEFAULT_BLOCK_SIZE);
21  avctx.bits_per_coded_sample = av_get_bits_per_sample();
22  avctx.priv_data = (uint8_t *)&enc_ctx;
23  }
24 
25  bool begin(int sampleRate, int channels) {
26  avctx.sample_rate = sampleRate;
27  avctx.nb_channels = channels;
28  avctx.bits_per_coded_sample = av_get_bits_per_sample();
29 
30  data_source = Undefined;
31  // determine frame size
32  int rc = adpcm_decode_init();
33  if (rc != 0) return false;
34 
35  // if frame size has not been defined, get it from encoder
36  int frame_size = frameSize();
37  if (frame_size == 0) {
38  ADPCMEncoder &enc = *ADPCMEncoderFactory::create(codecID());
39  enc.begin(sampleRate, channels);
40  setFrameSize(enc.frameSize());
41  setBlockSize(enc.blockSize());
42  frame_size = frameSize();
43  avctx.block_align = enc.blockAlign();
44  }
45 
46  assert(frame_size != 0);
47  // avctx.frame_size = frameSize;
48  avctx.sample_fmt = sample_formats[0];
49  // setup result frame data
50  frame_data_vector.resize(frame_size * channels);
51  frame.data[0] = (uint8_t *)&frame_data_vector[0];
52  // setup extra_data
53  frame_extended_data_vectors.resize(channels);
54  for (int ch = 0; ch < channels; ch++) {
55  frame_extended_data_vectors[ch].resize(frame_size);
56  extended_data[ch] = &frame_extended_data_vectors[ch][0];
57  }
58  frame.extended_data = extended_data;
59  // set result samples
60  return true;
61  }
62 
63  void end() {
64  flush();
65  // release memory
66  for (int ch = 0; ch < frame_extended_data_vectors.size(); ch++)
67  frame_extended_data_vectors[ch].resize(0);
68  frame_data_vector.resize(0);
69  }
70 
71  AVFrame &decode(uint8_t *data, size_t size) {
72  packet.size = size;
73  packet.data = (uint8_t *)data;
74  return decode(packet);
75  }
76 
77  AVFrame &decode(AVPacket &packet) {
78  int got_packet_ptr = 0;
79 
80  // clear frame data result
81  // just reset the data, for the subsequent source determination
82  std::fill(frame_data_vector.begin(), frame_data_vector.end(), 0);
83  for (int ch = 0; ch < channels(); ch++) {
84  std::fill(frame_extended_data_vectors[ch].begin(),
85  frame_extended_data_vectors[ch].end(), 0);
86  }
87 
88  int rc = adpcm_decode_frame(&frame, &got_packet_ptr, &packet);
89  if (rc == 0 || !got_packet_ptr) {
90  frame.nb_samples = 0;
91  }
92 
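93  // determine (once per begin) whether this codec writes to frame.data or extended_data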
94  if (data_source == Undefined) {
95  data_source = getDataSource((int16_t *)(frame.data[0]),
96  frame.extended_data[0], frame.nb_samples);
97  }
98 
99  // if data is in extended data, we copy it to the frame_data
100  if (data_source == FromExtended) {
101  int16_t *result16 = (int16_t *)frame.data[0];
102  int pos = 0;
103  for (int j = 0; j < frame.nb_samples; j++) {
104  for (int ch = 0; ch < channels(); ch++) {
105  result16[pos++] = frame.extended_data[ch][j];
106  }
107  }
108  }
109 
110  return frame;
111  }
112 
113  virtual std::vector<AVSampleFormat> get_sample_format() {
114  return sample_formats;
115  }
116 
117  void flush() {
118  adpcm_flush();
119  }
120 
121  protected:
122  enum DataSource { Undefined, FromFrame, FromExtended };
123  AVPacket packet;
124  AVFrame frame;
125  DataSource data_source = Undefined;
126  bool is_frame_data = true;
127  std::vector<int16_t> frame_data_vector;
128  std::vector<std::vector<int16_t>> frame_extended_data_vectors;
129  int16_t *extended_data[2] = {NULL};
130  uint8_t *data[AV_NUM_DATA_POINTERS] = {NULL};
131  // decoding
132  const uint8_t *buf;
133  int buf_size;
134  ADPCMDecodeContext *c;
135  int16_t *samples;
136  int16_t **samples_p;
137  int st; /* stereo */
138  int nb_samples, coded_samples, approx_nb_samples, ret;
139  GetByteContext gb;
140 
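141  /// Determines whether the decoded samples are delivered in frame.data
142  /// (interleaved formats) or in frame.extended_data (planar formats);
143  /// the arguments are kept from an earlier sample-based detection (see below).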
144  DataSource getDataSource(int16_t *frame_data, int16_t *ext_data, int len) {
145  // for (int j = 0; j < len; j++) {
146  // if (data[j] != 0) return FromFrame;
147  // if (ext_data[j] != 0) return FromExtended;
148  // }
149  // return FromFrame;
150  return isPlanar() ? FromExtended : FromFrame;
151  }
152 
153  // decoder
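154  /// Validates the channel count and bits per coded sample and selects the output sample format.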
155  virtual int adpcm_decode_init() {
156  ADPCMDecodeContext *c = (ADPCMDecodeContext *)avctx.priv_data;
157  unsigned int min_channels = 1;
158  unsigned int max_channels = 2;
159 
160  if (c == NULL) {
161  av_log(avctx, AV_LOG_ERROR, "priv_data is null");
162  return -1;
163  }
164 
165  // adpcm_flush(avctx);
166 
167  switch (avctx.codec_id) {
168  case AV_CODEC_ID_ADPCM_IMA_AMV:
169  max_channels = 1;
170  break;
171  case AV_CODEC_ID_ADPCM_DTK:
172  case AV_CODEC_ID_ADPCM_EA:
173  min_channels = 2;
174  break;
175  case AV_CODEC_ID_ADPCM_AFC:
176  case AV_CODEC_ID_ADPCM_EA_R1:
177  case AV_CODEC_ID_ADPCM_EA_R2:
178  case AV_CODEC_ID_ADPCM_EA_R3:
179  case AV_CODEC_ID_ADPCM_EA_XAS:
180  case AV_CODEC_ID_ADPCM_MS:
181  max_channels = 6;
182  break;
183  case AV_CODEC_ID_ADPCM_MTAF:
184  min_channels = 2;
185  max_channels = 8;
186  if (avctx.nb_channels & 1) {
187  avpriv_request_sample(&avctx, "channel count %d", avctx.nb_channels);
188  return AVERROR_PATCHWELCOME;
189  }
190  break;
191  case AV_CODEC_ID_ADPCM_PSX:
192  max_channels = 8;
193  if (avctx.nb_channels <= 0 ||
194  avctx.block_align % (16 * avctx.nb_channels))
195  return AVERROR_INVALIDDATA;
196  break;
197  case AV_CODEC_ID_ADPCM_IMA_DAT4:
198  case AV_CODEC_ID_ADPCM_THP:
199  case AV_CODEC_ID_ADPCM_THP_LE:
200  max_channels = 14;
201  break;
202  }
203  if (avctx.nb_channels < min_channels || avctx.nb_channels > max_channels) {
204  av_log(avctx, AV_LOG_ERROR, "Invalid number of channels\n");
205  return AVERROR(AVERROR_INVALID);
206  }
207 
208  switch (avctx.codec_id) {
209  case AV_CODEC_ID_ADPCM_IMA_WAV:
210  // ps: define default value
211  if (avctx.bits_per_coded_sample == 0) avctx.bits_per_coded_sample = 4;
212  if (avctx.bits_per_coded_sample < 2 || avctx.bits_per_coded_sample > 5)
213  return AVERROR_INVALIDDATA;
214  break;
215  case AV_CODEC_ID_ADPCM_ARGO:
216  // ps: define default value
217  if (avctx.bits_per_coded_sample == 0) avctx.bits_per_coded_sample = 4;
218  if (avctx.block_align == 0) avctx.block_align = 17 * avctx.nb_channels;
219 
220  if (avctx.bits_per_coded_sample != 4 ||
221  avctx.block_align != 17 * avctx.nb_channels)
222  return AVERROR_INVALIDDATA;
223  break;
224  case AV_CODEC_ID_ADPCM_ZORK:
225  if (avctx.bits_per_coded_sample == 0) avctx.bits_per_coded_sample = 8;
226  if (avctx.bits_per_coded_sample != 8) return AVERROR_INVALIDDATA;
227  break;
228  default:
229  break;
230  }
231 
232  switch (avctx.codec_id) {
233  case AV_CODEC_ID_ADPCM_AICA:
234  case AV_CODEC_ID_ADPCM_IMA_CUNNING:
235  case AV_CODEC_ID_ADPCM_IMA_DAT4:
236  case AV_CODEC_ID_ADPCM_IMA_QT:
237  case AV_CODEC_ID_ADPCM_IMA_WAV:
238  case AV_CODEC_ID_ADPCM_4XM:
239  case AV_CODEC_ID_ADPCM_XA:
240  case AV_CODEC_ID_ADPCM_XMD:
241  case AV_CODEC_ID_ADPCM_EA_R1:
242  case AV_CODEC_ID_ADPCM_EA_R2:
243  case AV_CODEC_ID_ADPCM_EA_R3:
244  case AV_CODEC_ID_ADPCM_EA_XAS:
245  case AV_CODEC_ID_ADPCM_THP:
246  case AV_CODEC_ID_ADPCM_THP_LE:
247  case AV_CODEC_ID_ADPCM_AFC:
248  case AV_CODEC_ID_ADPCM_DTK:
249  case AV_CODEC_ID_ADPCM_PSX:
250  case AV_CODEC_ID_ADPCM_MTAF:
251  case AV_CODEC_ID_ADPCM_ARGO:
252  case AV_CODEC_ID_ADPCM_IMA_MOFLEX:
253  avctx.sample_fmt = AV_SAMPLE_FMT_S16P;
254  break;
255  case AV_CODEC_ID_ADPCM_IMA_WS:
256  avctx.sample_fmt =
257  c->vqa_version == 3 ? AV_SAMPLE_FMT_S16P : AV_SAMPLE_FMT_S16;
258  break;
259  case AV_CODEC_ID_ADPCM_MS:
260  avctx.sample_fmt =
261  avctx.nb_channels > 2 ? AV_SAMPLE_FMT_S16P : AV_SAMPLE_FMT_S16;
262  break;
263  default:
264  avctx.sample_fmt = AV_SAMPLE_FMT_S16;
265  }
266  return 0;
267  }
268 
269  int16_t adpcm_ima_expand_nibble(ADPCMChannelStatus *c, int8_t nibble,
270  int shift) {
271  int step_index;
272  int predictor;
273  int sign, delta, diff, step;
274 
275  step = ff_adpcm_step_table[c->step_index];
276  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
277  step_index = av_clip(step_index, 0, 88);
278 
279  sign = nibble & 8;
280  delta = nibble & 7;
281  /* perform direct multiplication instead of series of jumps proposed by
282  * the reference ADPCM implementation since modern CPUs can do the mults
283  * quickly enough */
284  diff = ((2 * delta + 1) * step) >> shift;
285  predictor = c->predictor;
286  if (sign)
287  predictor -= diff;
288  else
289  predictor += diff;
290 
291  c->predictor = av_clip_int16(predictor);
292  c->step_index = step_index;
293 
294  return (int16_t)c->predictor;
295  }
296 
297  int16_t adpcm_ima_mtf_expand_nibble(ADPCMChannelStatus *c, int nibble) {
298  int step_index, step, delta, predictor;
299 
300  step = ff_adpcm_step_table[c->step_index];
301 
302  delta = step * (2 * nibble - 15);
303  predictor = c->predictor + delta;
304 
305  step_index = c->step_index + mtf_index_table[(unsigned)nibble];
306  c->predictor = av_clip_int16(predictor >> 4);
307  c->step_index = av_clip(step_index, 0, 88);
308 
309  return (int16_t)c->predictor;
310  }
311 
312  int16_t adpcm_ima_cunning_expand_nibble(ADPCMChannelStatus *c,
313  int8_t nibble) {
314  int step_index;
315  int predictor;
316  int step;
317 
318  nibble = sign_extend(nibble & 0xF, 4);
319 
320  step = ima_cunning_step_table[c->step_index];
321  step_index = c->step_index + ima_cunning_index_table[abs(nibble)];
322  step_index = av_clip(step_index, 0, 60);
323 
324  predictor = c->predictor + step * nibble;
325 
326  c->predictor = av_clip_int16(predictor);
327  c->step_index = step_index;
328 
329  return c->predictor;
330  }
331 
332  int adpcm_ima_qt_expand_nibble(ADPCMChannelStatus *c, int nibble) {
333  int step_index;
334  int predictor;
335  int diff, step;
336 
337  step = ff_adpcm_step_table[c->step_index];
338  step_index = c->step_index + ff_adpcm_index_table[nibble];
339  step_index = av_clip(step_index, 0, 88);
340 
341  diff = step >> 3;
342  if (nibble & 4) diff += step;
343  if (nibble & 2) diff += step >> 1;
344  if (nibble & 1) diff += step >> 2;
345 
346  if (nibble & 8)
347  predictor = c->predictor - diff;
348  else
349  predictor = c->predictor + diff;
350 
351  c->predictor = av_clip_int16(predictor);
352  c->step_index = step_index;
353 
354  return c->predictor;
355  }
356 
357  int16_t adpcm_yamaha_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble) {
358  if (!c->step) {
359  c->predictor = 0;
360  c->step = 127;
361  }
362 
363  c->predictor += (c->step * ff_adpcm_yamaha_difflookup[nibble]) / 8;
364  c->predictor = av_clip_int16(c->predictor);
365  c->step = (c->step * ff_adpcm_yamaha_indexscale[nibble]) >> 8;
366  c->step = av_clip(c->step, 127, 24576);
367  return c->predictor;
368  }
369 
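370  /**
371  * Get the number of samples (per channel) that will be decoded from the
372  * packet. In one case, this is actually the maximum number of samples
373  * possible to decode with the given buf_size.
374  *
375  * @param[out] coded_samples set to the number of samples as coded in the
376  * packet, or 0 if the codec does not encode the number of samples
377  * in each frame.
378  * @param[out] approx_nb_samples set to non-zero if the number of samples
379  * returned is an approximation.
380  */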
381  int get_nb_samples(GetByteContext *gb, int buf_size, int *coded_samples,
382  int *approx_nb_samples) {
383  ADPCMDecodeContext *s = (ADPCMDecodeContext *)avctx.priv_data;
384  int nb_samples = 0;
385  int ch = avctx.nb_channels;
386  int has_coded_samples = 0;
387  int header_size;
388 
389  *coded_samples = 0;
390  *approx_nb_samples = 0;
391 
392  if (ch <= 0) return 0;
393 
394  switch (avctx.codec_id) {
395  /* constant, only check buf_size */
396  case AV_CODEC_ID_ADPCM_EA_XAS:
397  if (buf_size < 76 * ch) return 0;
398  nb_samples = 128;
399  break;
400  case AV_CODEC_ID_ADPCM_IMA_QT:
401  if (buf_size < 34 * ch) return 0;
402  nb_samples = 64;
403  break;
404  /* simple 4-bit adpcm */
405  case AV_CODEC_ID_ADPCM_CT:
406  case AV_CODEC_ID_ADPCM_IMA_APC:
407  case AV_CODEC_ID_ADPCM_IMA_CUNNING:
408  case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
409  case AV_CODEC_ID_ADPCM_IMA_OKI:
410  case AV_CODEC_ID_ADPCM_IMA_WS:
411  case AV_CODEC_ID_ADPCM_YAMAHA:
412  case AV_CODEC_ID_ADPCM_AICA:
413  case AV_CODEC_ID_ADPCM_IMA_SSI:
414  case AV_CODEC_ID_ADPCM_IMA_APM:
415  case AV_CODEC_ID_ADPCM_IMA_ALP:
416  case AV_CODEC_ID_ADPCM_IMA_MTF:
417  nb_samples = buf_size * 2 / ch;
418  break;
419  }
420  if (nb_samples) return nb_samples;
421 
422  /* simple 4-bit adpcm, with header */
423  header_size = 0;
424  switch (avctx.codec_id) {
425  case AV_CODEC_ID_ADPCM_4XM:
426  case AV_CODEC_ID_ADPCM_AGM:
427  case AV_CODEC_ID_ADPCM_IMA_ACORN:
428  case AV_CODEC_ID_ADPCM_IMA_DAT4:
429  case AV_CODEC_ID_ADPCM_IMA_MOFLEX:
430  case AV_CODEC_ID_ADPCM_IMA_ISS:
431  header_size = 4 * ch;
432  break;
433  case AV_CODEC_ID_ADPCM_IMA_SMJPEG:
434  header_size = 4 * ch;
435  break;
436  }
437  if (header_size > 0) return (buf_size - header_size) * 2 / ch;
438 
439  /* more complex formats */
440  switch (avctx.codec_id) {
441  case AV_CODEC_ID_ADPCM_IMA_AMV:
442  bytestream2_skip(gb, 4);
443  has_coded_samples = 1;
444  *coded_samples = bytestream2_get_le32u(gb);
445  nb_samples = FFMIN((buf_size - 8) * 2, *coded_samples);
446  bytestream2_seek(gb, -8, SEEK_CUR);
447  break;
448  case AV_CODEC_ID_ADPCM_EA:
449  has_coded_samples = 1;
450  *coded_samples = bytestream2_get_le32(gb);
451  *coded_samples -= *coded_samples % 28;
452  nb_samples = (buf_size - 12) / 30 * 28;
453  break;
454  case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
455  has_coded_samples = 1;
456  *coded_samples = bytestream2_get_le32(gb);
457  nb_samples = (buf_size - (4 + 8 * ch)) * 2 / ch;
458  break;
459  case AV_CODEC_ID_ADPCM_EA_MAXIS_XA:
460  nb_samples = (buf_size - ch) / ch * 2;
461  break;
462  case AV_CODEC_ID_ADPCM_EA_R1:
463  case AV_CODEC_ID_ADPCM_EA_R2:
464  case AV_CODEC_ID_ADPCM_EA_R3:
465  /* maximum number of samples */
466  /* has internal offsets and a per-frame switch to signal raw 16-bit */
467  has_coded_samples = 1;
468  switch (avctx.codec_id) {
469  case AV_CODEC_ID_ADPCM_EA_R1:
470  header_size = 4 + 9 * ch;
471  *coded_samples = bytestream2_get_le32(gb);
472  break;
473  case AV_CODEC_ID_ADPCM_EA_R2:
474  header_size = 4 + 5 * ch;
475  *coded_samples = bytestream2_get_le32(gb);
476  break;
477  case AV_CODEC_ID_ADPCM_EA_R3:
478  header_size = 4 + 5 * ch;
479  *coded_samples = bytestream2_get_be32(gb);
480  break;
481  }
482  *coded_samples -= *coded_samples % 28;
483  nb_samples = (buf_size - header_size) * 2 / ch;
484  nb_samples -= nb_samples % 28;
485  *approx_nb_samples = 1;
486  break;
487  case AV_CODEC_ID_ADPCM_IMA_DK3:
488  if (avctx.block_align > 0)
489  buf_size = FFMIN(buf_size, avctx.block_align);
490  nb_samples = ((buf_size - 16) * 2 / 3 * 4) / ch;
491  break;
492  case AV_CODEC_ID_ADPCM_IMA_DK4:
493  if (avctx.block_align > 0)
494  buf_size = FFMIN(buf_size, avctx.block_align);
495  if (buf_size < 4 * ch) return AVERROR_INVALIDDATA;
496  nb_samples = 1 + (buf_size - 4 * ch) * 2 / ch;
497  break;
498  case AV_CODEC_ID_ADPCM_IMA_RAD:
499  if (avctx.block_align > 0)
500  buf_size = FFMIN(buf_size, avctx.block_align);
501  nb_samples = (buf_size - 4 * ch) * 2 / ch;
502  break;
503  case AV_CODEC_ID_ADPCM_IMA_WAV: {
504  int bsize = ff_adpcm_ima_block_sizes[avctx.bits_per_coded_sample - 2];
505  int bsamples =
506  ff_adpcm_ima_block_samples[avctx.bits_per_coded_sample - 2];
507  if (avctx.block_align > 0)
508  buf_size = FFMIN(buf_size, avctx.block_align);
509  if (buf_size < 4 * ch) return AVERROR_INVALIDDATA;
510  nb_samples = 1 + (buf_size - 4 * ch) / (bsize * ch) * bsamples;
511  } break; /* End of CASE */
512  case AV_CODEC_ID_ADPCM_MS:
513  if (avctx.block_align > 0)
514  buf_size = FFMIN(buf_size, avctx.block_align);
515  nb_samples = (buf_size - 6 * ch) * 2 / ch;
516  break;
517  case AV_CODEC_ID_ADPCM_MTAF:
518  if (avctx.block_align > 0)
519  buf_size = FFMIN(buf_size, avctx.block_align);
520  nb_samples = (buf_size - 16 * (ch / 2)) * 2 / ch;
521  break;
522  case AV_CODEC_ID_ADPCM_SBPRO_2:
523  case AV_CODEC_ID_ADPCM_SBPRO_3:
524  case AV_CODEC_ID_ADPCM_SBPRO_4: {
525  int samples_per_byte;
526  switch (avctx.codec_id) {
527  case AV_CODEC_ID_ADPCM_SBPRO_2:
528  samples_per_byte = 4;
529  break;
530  case AV_CODEC_ID_ADPCM_SBPRO_3:
531  samples_per_byte = 3;
532  break;
533  case AV_CODEC_ID_ADPCM_SBPRO_4:
534  samples_per_byte = 2;
535  break;
536  }
537  if (!s->status[0].step_index) {
538  if (buf_size < ch) return AVERROR_INVALIDDATA;
539  nb_samples++;
540  buf_size -= ch;
541  }
542  nb_samples += buf_size * samples_per_byte / ch;
543  break;
544  }
545  case AV_CODEC_ID_ADPCM_SWF: {
546  int buf_bits = buf_size * 8 - 2;
547  int nbits = (bytestream2_get_byte(gb) >> 6) + 2;
548  int block_hdr_size = 22 * ch;
549  int block_size = block_hdr_size + nbits * ch * 4095;
550  int nblocks = buf_bits / block_size;
551  int bits_left = buf_bits - nblocks * block_size;
552  nb_samples = nblocks * 4096;
553  if (bits_left >= block_hdr_size)
554  nb_samples += 1 + (bits_left - block_hdr_size) / (nbits * ch);
555  break;
556  }
557  case AV_CODEC_ID_ADPCM_THP:
558  case AV_CODEC_ID_ADPCM_THP_LE:
559  if (avctx.extradata) {
560  nb_samples = buf_size * 14 / (8 * ch);
561  break;
562  }
563  has_coded_samples = 1;
564  bytestream2_skip(gb, 4); // channel size
565  *coded_samples = (avctx.codec_id == AV_CODEC_ID_ADPCM_THP_LE)
566  ? bytestream2_get_le32(gb)
567  : bytestream2_get_be32(gb);
568  buf_size -= 8 + 36 * ch;
569  buf_size /= ch;
570  nb_samples = buf_size / 8 * 14;
571  if (buf_size % 8 > 1) nb_samples += (buf_size % 8 - 1) * 2;
572  *approx_nb_samples = 1;
573  break;
574  case AV_CODEC_ID_ADPCM_AFC:
575  nb_samples = buf_size / (9 * ch) * 16;
576  break;
577  case AV_CODEC_ID_ADPCM_XA:
578  nb_samples = (buf_size / 128) * 224 / ch;
579  break;
580  case AV_CODEC_ID_ADPCM_XMD:
581  nb_samples = buf_size / (21 * ch) * 32;
582  break;
583  case AV_CODEC_ID_ADPCM_DTK:
584  case AV_CODEC_ID_ADPCM_PSX:
585  nb_samples = buf_size / (16 * ch) * 28;
586  break;
587  case AV_CODEC_ID_ADPCM_ARGO:
588  nb_samples = buf_size / avctx.block_align * 32;
589  break;
590  case AV_CODEC_ID_ADPCM_ZORK:
591  nb_samples = buf_size / ch;
592  break;
593  }
594 
595  /* validate coded sample count */
596  if (has_coded_samples &&
597  (*coded_samples <= 0 || *coded_samples > nb_samples))
598  return AVERROR_INVALIDDATA;
599 
600  return nb_samples;
601  }
602 
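603  /// Resets the codec state; initial predictors/step indices are restored from extradata where available.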
604  virtual void adpcm_flush() {
605  ADPCMDecodeContext *c = (ADPCMDecodeContext *)avctx.priv_data;
606 
607  /* Just nuke the entire state and re-init. */
608  memset(c, 0, sizeof(ADPCMDecodeContext));
609 
610  switch (avctx.codec_id) {
611  case AV_CODEC_ID_ADPCM_CT:
612  c->status[0].step = c->status[1].step = 511;
613  break;
614 
615  case AV_CODEC_ID_ADPCM_IMA_APC:
616  if (avctx.extradata && avctx.extradata_size >= 8) {
617  c->status[0].predictor = av_clip_intp2(AV_RL32(avctx.extradata), 18);
618  c->status[1].predictor =
619  av_clip_intp2(AV_RL32(avctx.extradata + 4), 18);
620  }
621  break;
622 
623  case AV_CODEC_ID_ADPCM_IMA_APM:
624  if (avctx.extradata && avctx.extradata_size >= 28) {
625  c->status[0].predictor =
626  av_clip_intp2(AV_RL32(avctx.extradata + 16), 18);
627  c->status[0].step_index =
628  av_clip(AV_RL32(avctx.extradata + 20), 0, 88);
629  c->status[1].predictor =
630  av_clip_intp2(AV_RL32(avctx.extradata + 4), 18);
631  c->status[1].step_index =
632  av_clip(AV_RL32(avctx.extradata + 8), 0, 88);
633  }
634  break;
635 
636  case AV_CODEC_ID_ADPCM_IMA_WS:
637  if (avctx.extradata && avctx.extradata_size >= 2)
638  c->vqa_version = AV_RL16(avctx.extradata);
639  break;
640  default:
641  /* Other codecs may want to handle this during decoding. */
642  c->has_status = 0;
643  return;
644  }
645 
646  c->has_status = 1;
647  }
648 
649  virtual int decode_frame_impl(AVFrame *frame, int *got_frame_ptr,
650  AVPacket *avpkt) = 0;
651 
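652  /// Wraps decode_frame_impl() with common setup (decode_frame_init) and checks on the number of bytes consumed.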
653  virtual int adpcm_decode_frame(AVFrame *frame, int *got_frame_ptr,
654  AVPacket *avpkt) {
655  decode_frame_init(frame, got_frame_ptr, avpkt);
656 
657  int rc = decode_frame_impl(frame, got_frame_ptr, avpkt);
658  if (rc != AV_OK) return rc;
659 
660  if (avpkt->size && bytestream2_tell(&gb) == 0) {
661  av_log(avctx, AV_LOG_ERROR, "Nothing consumed\n");
662  return AVERROR_INVALIDDATA;
663  }
664 
665  *got_frame_ptr = 1;
666 
667  if (avpkt->size < bytestream2_tell(&gb)) {
668  av_log(avctx, AV_LOG_ERROR, "Overread of %d < %d\n", avpkt->size,
669  bytestream2_tell(&gb));
670  return avpkt->size;
671  }
672 
673  return bytestream2_tell(&gb);
674  }
675 
676  int decode_frame_init(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
677  buf = avpkt->data;
678  buf_size = avpkt->size;
679  c = (ADPCMDecodeContext *)avctx.priv_data;
680 
681  bytestream2_init(&gb, buf, buf_size);
682  nb_samples =
683  get_nb_samples(&gb, buf_size, &coded_samples, &approx_nb_samples);
684  if (nb_samples <= 0) {
685  av_log(avctx, AV_LOG_ERROR, "invalid number of samples in packet\n");
686  return AVERROR_INVALIDDATA;
687  }
688 
689  /* get output buffer */
690  frame->nb_samples = nb_samples;
691  if ((ret = ff_get_buffer(&avctx, frame, 0)) < 0) return ret;
692  samples = (int16_t *)frame->data[0];
693  samples_p = (int16_t **)frame->extended_data;
694 
695  /* use coded_samples when applicable */
696  /* it is always <= nb_samples, so the output buffer will be large enough */
697  if (coded_samples) {
698  if (!approx_nb_samples && coded_samples != nb_samples)
699  av_log(avctx, AV_LOG_WARNING, "mismatch in coded sample count\n");
700  frame->nb_samples = nb_samples = coded_samples;
701  }
702 
703  st = channels() == 2 ? 1 : 0;
704  return AV_OK;
705  }
706 };
707 
708 class DecoderADPCM_IMA_QT : public ADPCMDecoder {
709  public:
710  DecoderADPCM_IMA_QT() {
711  setCodecID(AV_CODEC_ID_ADPCM_IMA_QT);
712  sample_formats.push_back(AV_SAMPLE_FMT_S16P);
713  }
714  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
715  /* In QuickTime, IMA is encoded by chunks of 34 bytes (=64 samples).
716  Channel data is interleaved per-chunk. */
717  for (int channel = 0; channel < channels(); channel++) {
718  ADPCMChannelStatus *cs = &c->status[channel];
719  int predictor;
720  int step_index;
721  /* (pppppp) (piiiiiii) */
722 
723  /* Bits 15-7 are the _top_ 9 bits of the 16-bit initial predictor value */
724  predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
725  step_index = predictor & 0x7F;
726  predictor &= ~0x7F;
727 
728  if (cs->step_index == step_index) {
729  int diff = predictor - cs->predictor;
730  if (diff < 0) diff = -diff;
731  if (diff > 0x7f) goto update;
732  } else {
733  update:
734  cs->step_index = step_index;
735  cs->predictor = predictor;
736  }
737 
738  if (cs->step_index > 88u) {
739  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n", channel,
740  cs->step_index);
741  // return AVERROR_INVALIDDATA;
742  }
743 
744  samples = samples_p[channel];
745 
746  for (int m = 0; m < 64; m += 2) {
747  int byte = bytestream2_get_byteu(&gb);
748  samples[m] = adpcm_ima_qt_expand_nibble(cs, byte & 0x0F);
749  samples[m + 1] = adpcm_ima_qt_expand_nibble(cs, byte >> 4);
750  }
751  }
752  return AV_OK;
753  }
754 };
755 
756 class DecoderADPCM_IMA_WAV : public ADPCMDecoder {
757  public:
758  DecoderADPCM_IMA_WAV() {
759  setCodecID(AV_CODEC_ID_ADPCM_IMA_WAV);
760  sample_formats.push_back(AV_SAMPLE_FMT_S16P);
761  }
762  int16_t adpcm_ima_wav_expand_nibble(ADPCMChannelStatus *c, GetBitContext *gb,
763  int bps) {
764  int nibble, step_index, predictor, sign, delta, diff, step, shift;
765 
766  shift = bps - 1;
767  nibble = get_bits_le(gb, bps), step = ff_adpcm_step_table[c->step_index];
768  step_index = c->step_index + adpcm_index_tables[bps - 2][nibble];
769  step_index = av_clip(step_index, 0, 88);
770 
771  sign = nibble & (1 << shift);
772  delta = av_mod_uintp2(nibble, shift);
773  diff = ((2 * delta + 1) * step) >> shift;
774  predictor = c->predictor;
775  if (sign)
776  predictor -= diff;
777  else
778  predictor += diff;
779 
780  c->predictor = av_clip_int16(predictor);
781  c->step_index = step_index;
782 
783  return (int16_t)c->predictor;
784  }
785 
786  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
787  for (int i = 0; i < channels(); i++) {
788  ADPCMChannelStatus *cs = &c->status[i];
789  cs->predictor = samples_p[i][0] =
790  sign_extend(bytestream2_get_le16u(&gb), 16);
791 
792  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
793  if (cs->step_index > 88u) {
794  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n", i,
795  cs->step_index);
796  return AVERROR_INVALIDDATA;
797  }
798  }
799 
800  if (avctx.bits_per_coded_sample != 4) {
801  int samples_per_block =
802  ff_adpcm_ima_block_samples[avctx.bits_per_coded_sample - 2];
803  int block_size =
804  ff_adpcm_ima_block_sizes[avctx.bits_per_coded_sample - 2];
805  uint8_t temp[20 + AV_INPUT_BUFFER_PADDING_SIZE] = {0};
806  GetBitContext g;
807 
808  for (int n = 0; n < (nb_samples - 1) / samples_per_block; n++) {
809  for (int i = 0; i < channels(); i++) {
810  ADPCMChannelStatus *cs = &c->status[i];
811  samples = &samples_p[i][1 + n * samples_per_block];
812  for (int j = 0; j < block_size; j++) {
813  temp[j] = buf[4 * channels() + block_size * n * channels() + (j % 4) +
814  (j / 4) * (channels() * 4) + i * 4];
815  }
816  ret = init_get_bits8(&g, (const uint8_t *)&temp, block_size);
817  if (ret < 0) return ret;
818  for (int m = 0; m < samples_per_block; m++) {
819  samples[m] = adpcm_ima_wav_expand_nibble(
820  cs, &g, avctx.bits_per_coded_sample);
821  }
822  }
823  }
824  bytestream2_skip(&gb, avctx.block_align - channels() * 4);
825  } else {
826  for (int n = 0; n < (nb_samples - 1) / 8; n++) {
827  for (int i = 0; i < channels(); i++) {
828  ADPCMChannelStatus *cs = &c->status[i];
829  samples = &samples_p[i][1 + n * 8];
830  for (int m = 0; m < 8; m += 2) {
831  int v = bytestream2_get_byteu(&gb);
832  samples[m] = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
833  samples[m + 1] = adpcm_ima_expand_nibble(cs, v >> 4, 3);
834  }
835  }
836  }
837  }
838  return AV_OK;
839  }
840 };
841 
842 class DecoderADPCM_4XM : public ADPCMDecoder {
843  public:
844  DecoderADPCM_4XM() {
845  setCodecID(AV_CODEC_ID_ADPCM_4XM);
846  sample_formats.push_back(AV_SAMPLE_FMT_S16P);
847  }
848  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
849  for (int i = 0; i < channels(); i++)
850  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
851 
852  for (int i = 0; i < channels(); i++) {
853  c->status[i].step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
854  if (c->status[i].step_index > 88u) {
855  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n", i,
856  c->status[i].step_index);
857  return AVERROR_INVALIDDATA;
858  }
859  }
860 
861  for (int i = 0; i < channels(); i++) {
862  ADPCMChannelStatus *cs = &c->status[i];
863  samples = (int16_t *)frame->data[i];
864  for (int n = nb_samples >> 1; n > 0; n--) {
865  int v = bytestream2_get_byteu(&gb);
866  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 4);
867  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4, 4);
868  }
869  }
870  return AV_OK;
871  }
872 };
873 
874 class DecoderADPCM_AGM : public ADPCMDecoder {
875  public:
876  DecoderADPCM_AGM() {
877  setCodecID(AV_CODEC_ID_ADPCM_AGM);
878  sample_formats.push_back(AV_SAMPLE_FMT_S16);
879  }
880  int16_t adpcm_agm_expand_nibble(ADPCMChannelStatus *c, int8_t nibble) {
881  int delta, pred, step, add;
882 
883  pred = c->predictor;
884  delta = nibble & 7;
885  step = c->step;
886  add = (delta * 2 + 1) * step;
887  if (add < 0) add = add + 7;
888 
889  if ((nibble & 8) == 0)
890  pred = av_clip(pred + (add >> 3), -32767, 32767);
891  else
892  pred = av_clip(pred - (add >> 3), -32767, 32767);
893 
894  switch (delta) {
895  case 7:
896  step *= 0x99;
897  break;
898  case 6:
899  c->step = av_clip(c->step * 2, 127, 24576);
900  c->predictor = pred;
901  return pred;
902  case 5:
903  step *= 0x66;
904  break;
905  case 4:
906  step *= 0x4d;
907  break;
908  default:
909  step *= 0x39;
910  break;
911  }
912 
913  if (step < 0) step += 0x3f;
914 
915  c->step = step >> 6;
916  c->step = av_clip(c->step, 127, 24576);
917  c->predictor = pred;
918  return pred;
919  }
920 
921  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
922  for (int i = 0; i < channels(); i++)
923  c->status[i].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
924  for (int i = 0; i < channels(); i++)
925  c->status[i].step = sign_extend(bytestream2_get_le16u(&gb), 16);
926 
927  for (int n = 0; n < nb_samples >> (1 - st); n++) {
928  int v = bytestream2_get_byteu(&gb);
929  *samples++ = adpcm_agm_expand_nibble(&c->status[0], v & 0xF);
930  *samples++ = adpcm_agm_expand_nibble(&c->status[st], v >> 4);
931  }
932  return AV_OK;
933  }
934 };
935 
936 class DecoderADPCM_MS : public ADPCMDecoder {
937  public:
938  DecoderADPCM_MS() {
939  setCodecID(AV_CODEC_ID_ADPCM_MS);
940  sample_formats.push_back(AV_SAMPLE_FMT_S16);
941  //sample_formats.push_back(AV_SAMPLE_FMT_S16P);
942  }
943  int16_t adpcm_ms_expand_nibble(ADPCMChannelStatus *c, int nibble) {
944  int predictor;
945 
946  predictor =
947  (((c->sample1) * (c->coeff1)) + ((c->sample2) * (c->coeff2))) / 64;
948  predictor += ((nibble & 0x08) ? (nibble - 0x10) : (nibble)) * c->idelta;
949 
950  c->sample2 = c->sample1;
951  c->sample1 = av_clip_int16(predictor);
952  c->idelta = (ff_adpcm_AdaptationTable[(int)nibble] * c->idelta) >> 8;
953  if (c->idelta < 16) c->idelta = 16;
954  if (c->idelta > INT_MAX / 768) {
955  av_log(NULL, AV_LOG_WARNING, "idelta overflow\n");
956  c->idelta = INT_MAX / 768;
957  }
958 
959  return c->sample1;
960  }
961 
962  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
963  int block_predictor;
964 
965  if (avctx.nb_channels > 2) {
966  for (int channel = 0; channel < avctx.nb_channels; channel++) {
967  samples = samples_p[channel];
968  block_predictor = bytestream2_get_byteu(&gb);
969  if (block_predictor > 6) {
970  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[%d] = %d\n",
971  channel, block_predictor);
972  return AVERROR_INVALIDDATA;
973  }
974  c->status[channel].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
975  c->status[channel].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
976  c->status[channel].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
977  c->status[channel].sample1 =
978  sign_extend(bytestream2_get_le16u(&gb), 16);
979  c->status[channel].sample2 =
980  sign_extend(bytestream2_get_le16u(&gb), 16);
981  *samples++ = c->status[channel].sample2;
982  *samples++ = c->status[channel].sample1;
983  for (int n = (nb_samples - 2) >> 1; n > 0; n--) {
984  int byte = bytestream2_get_byteu(&gb);
985  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte >> 4);
986  *samples++ = adpcm_ms_expand_nibble(&c->status[channel], byte & 0x0F);
987  }
988  }
989  } else {
990  block_predictor = bytestream2_get_byteu(&gb);
991  if (block_predictor > 6) {
992  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[0] = %d\n",
993  block_predictor);
994  return AVERROR_INVALIDDATA;
995  }
996  c->status[0].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
997  c->status[0].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
998  if (st) {
999  block_predictor = bytestream2_get_byteu(&gb);
1000  if (block_predictor > 6) {
1001  av_log(avctx, AV_LOG_ERROR, "ERROR: block_predictor[1] = %d\n",
1002  block_predictor);
1003  return AVERROR_INVALIDDATA;
1004  }
1005  c->status[1].coeff1 = ff_adpcm_AdaptCoeff1[block_predictor];
1006  c->status[1].coeff2 = ff_adpcm_AdaptCoeff2[block_predictor];
1007  }
1008  c->status[0].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1009  if (st) {
1010  c->status[1].idelta = sign_extend(bytestream2_get_le16u(&gb), 16);
1011  }
1012 
1013  c->status[0].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1014  if (st)
1015  c->status[1].sample1 = sign_extend(bytestream2_get_le16u(&gb), 16);
1016  c->status[0].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1017  if (st)
1018  c->status[1].sample2 = sign_extend(bytestream2_get_le16u(&gb), 16);
1019 
1020  *samples++ = c->status[0].sample2;
1021  if (st) *samples++ = c->status[1].sample2;
1022  *samples++ = c->status[0].sample1;
1023  if (st) *samples++ = c->status[1].sample1;
1024  for (int n = (nb_samples - 2) >> (1 - st); n > 0; n--) {
1025  int byte = bytestream2_get_byteu(&gb);
1026  *samples++ = adpcm_ms_expand_nibble(&c->status[0], byte >> 4);
1027  *samples++ = adpcm_ms_expand_nibble(&c->status[st], byte & 0x0F);
1028  }
1029  }
1030  return AV_OK;
1031  }
1032 };
1033 
1034 class DecoderADPCM_MTAF : public ADPCMDecoder {
1035  public:
1036  DecoderADPCM_MTAF() {
1037  setCodecID(AV_CODEC_ID_ADPCM_MTAF);
1038  sample_formats.push_back(AV_SAMPLE_FMT_S16P);
1039  }
1040  int16_t adpcm_mtaf_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble) {
1041  c->predictor += mtaf_stepsize[c->step][nibble];
1042  c->predictor = av_clip_int16(c->predictor);
1043  c->step += ff_adpcm_index_table[nibble];
1044  c->step = av_clip_uintp2(c->step, 5);
1045  return c->predictor;
1046  }
1047 
1048  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1049  for (int channel = 0; channel < channels(); channel += 2) {
1050  bytestream2_skipu(&gb, 4);
1051  c->status[channel].step = bytestream2_get_le16u(&gb) & 0x1f;
1052  c->status[channel + 1].step = bytestream2_get_le16u(&gb) & 0x1f;
1053  c->status[channel].predictor =
1054  sign_extend(bytestream2_get_le16u(&gb), 16);
1055  bytestream2_skipu(&gb, 2);
1056  c->status[channel + 1].predictor =
1057  sign_extend(bytestream2_get_le16u(&gb), 16);
1058  bytestream2_skipu(&gb, 2);
1059  for (int n = 0; n < nb_samples; n += 2) {
1060  int v = bytestream2_get_byteu(&gb);
1061  samples_p[channel][n] =
1062  adpcm_mtaf_expand_nibble(&c->status[channel], v & 0x0F);
1063  samples_p[channel][n + 1] =
1064  adpcm_mtaf_expand_nibble(&c->status[channel], v >> 4);
1065  }
1066  for (int n = 0; n < nb_samples; n += 2) {
1067  int v = bytestream2_get_byteu(&gb);
1068  samples_p[channel + 1][n] =
1069  adpcm_mtaf_expand_nibble(&c->status[channel + 1], v & 0x0F);
1070  samples_p[channel + 1][n + 1] =
1071  adpcm_mtaf_expand_nibble(&c->status[channel + 1], v >> 4);
1072  }
1073  }
1074  return AV_OK;
1075  }
1076 };
1077 
1078 class DecoderADPCM_IMA_DK4 : public ADPCMDecoder {
1079  public:
1080  DecoderADPCM_IMA_DK4() {
1081  setCodecID(AV_CODEC_ID_ADPCM_IMA_DK4);
1082  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1083  }
1084  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1085  for (int channel = 0; channel < channels(); channel++) {
1086  ADPCMChannelStatus *cs = &c->status[channel];
1087  cs->predictor = *samples++ = sign_extend(bytestream2_get_le16u(&gb), 16);
1088  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1089  if (cs->step_index > 88u) {
1090  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n", channel,
1091  cs->step_index);
1092  return AVERROR_INVALIDDATA;
1093  }
1094  }
1095  for (int n = (nb_samples - 1) >> (1 - st); n > 0; n--) {
1096  int v = bytestream2_get_byteu(&gb);
1097  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1098  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1099  }
1100  /* DK3 ADPCM support macro */
1101  return AV_OK;
1102  }
1103 };
1104 
1105 class DecoderADPCM_IMA_DK3 : public ADPCMDecoder {
1106  public:
1107  DecoderADPCM_IMA_DK3() {
1108  setCodecID(AV_CODEC_ID_ADPCM_IMA_DK3);
1109  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1110  }
1111 
1112  inline void dk3_get_next_nibble(int &decode_top_nibble_next, int &nibble,
1113  int &last_byte) {
1114  if (decode_top_nibble_next) {
1115  nibble = last_byte >> 4;
1116  decode_top_nibble_next = 0;
1117  } else {
1118  last_byte = bytestream2_get_byteu(&gb);
1119  nibble = last_byte & 0x0F;
1120  decode_top_nibble_next = 1;
1121  }
1122  }
1123 
1124  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1125  int last_byte = 0;
1126  int nibble;
1127  int decode_top_nibble_next = 0;
1128  int diff_channel;
1129  const int16_t *samples_end = samples + channels() * nb_samples;
1130 
1131  bytestream2_skipu(&gb, 10);
1132  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1133  c->status[1].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1134  c->status[0].step_index = bytestream2_get_byteu(&gb);
1135  c->status[1].step_index = bytestream2_get_byteu(&gb);
1136  if (c->status[0].step_index > 88u || c->status[1].step_index > 88u) {
1137  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i/%i\n",
1138  c->status[0].step_index, c->status[1].step_index);
1139  return AVERROR_INVALIDDATA;
1140  }
1141  /* sign extend the predictors */
1142  diff_channel = c->status[1].predictor;
1143 
1144  while (samples < samples_end) {
1145  /* for this algorithm, c->status[0] is the sum channel and
1146  * c->status[1] is the diff channel */
1147 
1148  /* process the first predictor of the sum channel */
1149  dk3_get_next_nibble(decode_top_nibble_next, nibble, last_byte);
1150  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1151 
1152  /* process the diff channel predictor */
1153  dk3_get_next_nibble(decode_top_nibble_next, nibble, last_byte);
1154  adpcm_ima_expand_nibble(&c->status[1], nibble, 3);
1155 
1156  /* process the first pair of stereo PCM samples */
1157  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1158  *samples++ = c->status[0].predictor + c->status[1].predictor;
1159  *samples++ = c->status[0].predictor - c->status[1].predictor;
1160 
1161  /* process the second predictor of the sum channel */
1162  dk3_get_next_nibble(decode_top_nibble_next, nibble, last_byte);
1163  adpcm_ima_expand_nibble(&c->status[0], nibble, 3);
1164 
1165  /* process the second pair of stereo PCM samples */
1166  diff_channel = (diff_channel + c->status[1].predictor) / 2;
1167  *samples++ = c->status[0].predictor + c->status[1].predictor;
1168  *samples++ = c->status[0].predictor - c->status[1].predictor;
1169  }
1170 
1171  if ((bytestream2_tell(&gb) & 1)) bytestream2_skip(&gb, 1);
1172  return AV_OK;
1173  }
1174 };
1175 
1176 class DecoderADPCM_IMA_ISS : public ADPCMDecoder {
1177  public:
1178  DecoderADPCM_IMA_ISS() {
1179  setCodecID(AV_CODEC_ID_ADPCM_IMA_ISS);
1180  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1181  }
1182  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1183  for (int channel = 0; channel < channels(); channel++) {
1184  ADPCMChannelStatus *cs = &c->status[channel];
1185  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1186  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1187  if (cs->step_index > 88u) {
1188  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n", channel,
1189  cs->step_index);
1190  return AVERROR_INVALIDDATA;
1191  }
1192  }
1193 
1194  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1195  int v1, v2;
1196  int v = bytestream2_get_byteu(&gb);
1197  /* nibbles are swapped for mono */
1198  if (st) {
1199  v1 = v >> 4;
1200  v2 = v & 0x0F;
1201  } else {
1202  v2 = v >> 4;
1203  v1 = v & 0x0F;
1204  }
1205  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v1, 3);
1206  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v2, 3);
1207  }
1208  return AV_OK;
1209  }
1210 };
1211 
1212 class DecoderADPCM_IMA_MOFLEX : public ADPCMDecoder {
1213  public:
1214  DecoderADPCM_IMA_MOFLEX() {
1215  setCodecID(AV_CODEC_ID_ADPCM_IMA_MOFLEX);
1216  sample_formats.push_back(AV_SAMPLE_FMT_S16P);
1217  }
1218  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1219  for (int channel = 0; channel < channels(); channel++) {
1220  ADPCMChannelStatus *cs = &c->status[channel];
1221  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1222  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1223  if (cs->step_index > 88u) {
1224  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n", channel,
1225  cs->step_index);
1226  return AVERROR_INVALIDDATA;
1227  }
1228  }
1229 
1230  for (int subframe = 0; subframe < nb_samples / 256; subframe++) {
1231  for (int channel = 0; channel < channels(); channel++) {
1232  samples = samples_p[channel] + 256 * subframe;
1233  for (int n = 0; n < 256; n += 2) {
1234  int v = bytestream2_get_byteu(&gb);
1235  *samples++ =
1236  adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1237  *samples++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4, 3);
1238  }
1239  }
1240  }
1241  return AV_OK;
1242  }
1243 };
1244 
1245 class DecoderADPCM_IMA_DAT4 : public ADPCMDecoder {
1246  public:
1247  DecoderADPCM_IMA_DAT4() {
1248  setCodecID(AV_CODEC_ID_ADPCM_IMA_DAT4);
1249  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1250  }
1251  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1252  for (int channel = 0; channel < channels(); channel++) {
1253  ADPCMChannelStatus *cs = &c->status[channel];
1254  samples = samples_p[channel];
1255  bytestream2_skip(&gb, 4);
1256  for (int n = 0; n < nb_samples; n += 2) {
1257  int v = bytestream2_get_byteu(&gb);
1258  *samples++ = adpcm_ima_expand_nibble(cs, v >> 4, 3);
1259  *samples++ = adpcm_ima_expand_nibble(cs, v & 0x0F, 3);
1260  }
1261  }
1262  return AV_OK;
1263  }
1264 };
1265 
1266 class DecoderADPCM_IMA_APC : public ADPCMDecoder {
1267  public:
1268  DecoderADPCM_IMA_APC() {
1269  setCodecID(AV_CODEC_ID_ADPCM_IMA_APC);
1270  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1271  }
1272  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1273  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1274  int v = bytestream2_get_byteu(&gb);
1275  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1276  *samples++ = adpcm_ima_expand_nibble(&c->status[st], v & 0x0F, 3);
1277  }
1278  return AV_OK;
1279  }
1280 };
1281 
1282 class DecoderADPCM_IMA_SSI : public ADPCMDecoder {
1283  public:
1284  DecoderADPCM_IMA_SSI() {
1285  setCodecID(AV_CODEC_ID_ADPCM_IMA_SSI);
1286  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1287  }
1288  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1289  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1290  int v = bytestream2_get_byteu(&gb);
1291  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4);
1292  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0x0F);
1293  }
1294  return AV_OK;
1295  }
1296 };
1297 
1298 class DecoderADPCM_IMA_APM : public ADPCMDecoder {
1299  public:
1300  DecoderADPCM_IMA_APM() {
1301  setCodecID(AV_CODEC_ID_ADPCM_IMA_APM);
1302  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1303  }
1304  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1305  for (int n = nb_samples / 2; n > 0; n--) {
1306  for (int channel = 0; channel < channels(); channel++) {
1307  int v = bytestream2_get_byteu(&gb);
1308  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[channel], v >> 4);
1309  samples[st] = adpcm_ima_qt_expand_nibble(&c->status[channel], v & 0x0F);
1310  }
1311  samples += channels();
1312  }
1313  return AV_OK;
1314  }
1315 };
1316 
1317 class DecoderADPCM_IMA_ALP : public ADPCMDecoder {
1318  public:
1319  DecoderADPCM_IMA_ALP() {
1320  setCodecID(AV_CODEC_ID_ADPCM_IMA_ALP);
1321  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1322  }
1323  int16_t adpcm_ima_alp_expand_nibble(ADPCMChannelStatus *c, int8_t nibble,
1324  int shift) {
1325  int step_index;
1326  int predictor;
1327  int sign, delta, diff, step;
1328 
1329  step = ff_adpcm_step_table[c->step_index];
1330  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
1331  step_index = av_clip(step_index, 0, 88);
1332 
1333  sign = nibble & 8;
1334  delta = nibble & 7;
1335  diff = (delta * step) >> shift;
1336  predictor = c->predictor;
1337  if (sign)
1338  predictor -= diff;
1339  else
1340  predictor += diff;
1341 
1342  c->predictor = av_clip_int16(predictor);
1343  c->step_index = step_index;
1344 
1345  return (int16_t)c->predictor;
1346  }
1347 
1348  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1349  for (int n = nb_samples / 2; n > 0; n--) {
1350  for (int channel = 0; channel < channels(); channel++) {
1351  int v = bytestream2_get_byteu(&gb);
1352  *samples++ =
1353  adpcm_ima_alp_expand_nibble(&c->status[channel], v >> 4, 2);
1354  samples[st] =
1355  adpcm_ima_alp_expand_nibble(&c->status[channel], v & 0x0F, 2);
1356  }
1357  samples += channels();
1358  }
1359  return AV_OK;
1360  }
1361 };
1362 
1363 class DecoderADPCM_IMA_CUNNING : public ADPCMDecoder {
1364  public:
1365  DecoderADPCM_IMA_CUNNING() {
1366  setCodecID(AV_CODEC_ID_ADPCM_IMA_CUNNING);
1367  sample_formats.push_back(AV_SAMPLE_FMT_S16P);
1368  }
1369  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1370  for (int channel = 0; channel < channels(); channel++) {
1371  int16_t *smp = samples_p[channel];
1372  for (int n = 0; n < nb_samples / 2; n++) {
1373  int v = bytestream2_get_byteu(&gb);
1374  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v & 0x0F);
1375  *smp++ = adpcm_ima_cunning_expand_nibble(&c->status[channel], v >> 4);
1376  }
1377  }
1378  return AV_OK;
1379  }
1380 };
1381 
1382 class DecoderADPCM_IMA_OKI : public ADPCMDecoder {
1383  public:
1384  DecoderADPCM_IMA_OKI() {
1385  setCodecID(AV_CODEC_ID_ADPCM_IMA_OKI);
1386  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1387  }
1388  int16_t adpcm_ima_oki_expand_nibble(ADPCMChannelStatus *c, int nibble) {
1389  int step_index, predictor, sign, delta, diff, step;
1390 
1391  step = oki_step_table[c->step_index];
1392  step_index = c->step_index + ff_adpcm_index_table[(unsigned)nibble];
1393  step_index = av_clip(step_index, 0, 48);
1394 
1395  sign = nibble & 8;
1396  delta = nibble & 7;
1397  diff = ((2 * delta + 1) * step) >> 3;
1398  predictor = c->predictor;
1399  if (sign)
1400  predictor -= diff;
1401  else
1402  predictor += diff;
1403 
1404  c->predictor = av_clip_intp2(predictor, 11);
1405  c->step_index = step_index;
1406 
1407  return c->predictor * 16;
1408  }
1409 
1410  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1411  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1412  int v = bytestream2_get_byteu(&gb);
1413  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[0], v >> 4);
1414  *samples++ = adpcm_ima_oki_expand_nibble(&c->status[st], v & 0x0F);
1415  }
1416  return AV_OK;
1417  }
1418 };
1419 
1420 class DecoderADPCM_IMA_RAD : public ADPCMDecoder {
1421  public:
1422  DecoderADPCM_IMA_RAD() {
1423  setCodecID(AV_CODEC_ID_ADPCM_IMA_RAD);
1424  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1425  }
1426  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1427  for (int channel = 0; channel < channels(); channel++) {
1428  ADPCMChannelStatus *cs = &c->status[channel];
1429  cs->step_index = sign_extend(bytestream2_get_le16u(&gb), 16);
1430  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1431  if (cs->step_index > 88u) {
1432  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n", channel,
1433  cs->step_index);
1434  return AVERROR_INVALIDDATA;
1435  }
1436  }
1437  for (int n = 0; n < nb_samples / 2; n++) {
1438  int byte[2];
1439 
1440  byte[0] = bytestream2_get_byteu(&gb);
1441  if (st) byte[1] = bytestream2_get_byteu(&gb);
1442  for (int channel = 0; channel < channels(); channel++) {
1443  *samples++ = adpcm_ima_expand_nibble(&c->status[channel],
1444  byte[channel] & 0x0F, 3);
1445  }
1446  for (int channel = 0; channel < channels(); channel++) {
1447  *samples++ =
1448  adpcm_ima_expand_nibble(&c->status[channel], byte[channel] >> 4, 3);
1449  }
1450  }
1451  return AV_OK;
1452  }
1453 };
1454 
1455 class DecoderADPCM_IMA_WS : public ADPCMDecoder {
1456  public:
1457  DecoderADPCM_IMA_WS() {
1458  setCodecID(AV_CODEC_ID_ADPCM_IMA_WS);
1459  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1460  //sample_formats.push_back(AV_SAMPLE_FMT_S16P);
1461  }
1462  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1463  if (c->vqa_version == 3) {
1464  for (int channel = 0; channel < channels(); channel++) {
1465  int16_t *smp = samples_p[channel];
1466 
1467  for (int n = nb_samples / 2; n > 0; n--) {
1468  int v = bytestream2_get_byteu(&gb);
1469  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1470  *smp++ = adpcm_ima_expand_nibble(&c->status[channel], v >> 4, 3);
1471  }
1472  }
1473  } else {
1474  for (int n = nb_samples / 2; n > 0; n--) {
1475  for (int channel = 0; channel < channels(); channel++) {
1476  int v = bytestream2_get_byteu(&gb);
1477  *samples++ =
1478  adpcm_ima_expand_nibble(&c->status[channel], v & 0x0F, 3);
1479  samples[st] = adpcm_ima_expand_nibble(&c->status[channel], v >> 4, 3);
1480  }
1481  samples += channels();
1482  }
1483  }
1484  bytestream2_seek(&gb, 0, SEEK_END);
1485  return AV_OK;
1486  }
1487 };
1488 
1489 class DecoderADPCM_XMD : public ADPCMDecoder {
1490  public:
1491  DecoderADPCM_XMD() {
1492  setCodecID(AV_CODEC_ID_ADPCM_XMD);
1493  sample_formats.push_back(AV_SAMPLE_FMT_S16P);
1494  }
1495  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1496  int bytes_remaining, block = 0;
1497  while (bytestream2_get_bytes_left(&gb) >= 21 * channels()) {
1498  for (int channel = 0; channel < channels(); channel++) {
1499  int16_t *out = samples_p[channel] + block * 32;
1500  int16_t history[2];
1501  uint16_t scale;
1502 
1503  history[1] = sign_extend(bytestream2_get_le16(&gb), 16);
1504  history[0] = sign_extend(bytestream2_get_le16(&gb), 16);
1505  scale = bytestream2_get_le16(&gb);
1506 
1507  out[0] = history[1];
1508  out[1] = history[0];
1509 
1510  for (int n = 0; n < 15; n++) {
1511  unsigned byte = bytestream2_get_byte(&gb);
1512  int32_t nibble[2];
1513 
1514  nibble[0] = sign_extend(byte & 15, 4);
1515  nibble[1] = sign_extend(byte >> 4, 4);
1516 
1517  out[2 + n * 2] = nibble[0] * scale +
1518  ((history[0] * 3667 - history[1] * 1642) >> 11);
1519  history[1] = history[0];
1520  history[0] = out[2 + n * 2];
1521 
1522  out[2 + n * 2 + 1] = nibble[1] * scale +
1523  ((history[0] * 3667 - history[1] * 1642) >> 11);
1524  history[1] = history[0];
1525  history[0] = out[2 + n * 2 + 1];
1526  }
1527  }
1528 
1529  block++;
1530  }
1531  bytes_remaining = bytestream2_get_bytes_left(&gb);
1532  if (bytes_remaining > 0) {
1533  bytestream2_skip(&gb, bytes_remaining);
1534  }
1535  return AV_OK;
1536  }
1537 };
1538 
1539 class DecoderADPCM_XA : public ADPCMDecoder {
1540  public:
1541  DecoderADPCM_XA() {
1542  setCodecID(AV_CODEC_ID_ADPCM_XA);
1543  sample_formats.push_back(AV_SAMPLE_FMT_S16P);
1544  }
1545  int xa_decode(int16_t *out0, int16_t *out1, const uint8_t *in,
1546  ADPCMChannelStatus *left, ADPCMChannelStatus *right,
1547  int channels, int sample_offset) {
1548  int i, j;
1549  int shift, filter, f0, f1;
1550  int s_1, s_2;
1551  int d, s, t;
1552 
1553  out0 += sample_offset;
1554  if (channels == 1)
1555  out1 = out0 + 28;
1556  else
1557  out1 += sample_offset;
1558 
1559  for (i = 0; i < 4; i++) {
1560  shift = 12 - (in[4 + i * 2] & 15);
1561  filter = in[4 + i * 2] >> 4;
1562  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
1563  avpriv_request_sample(&avctx, "unknown XA-ADPCM filter %d", filter);
1564  filter = 0;
1565  }
1566  if (shift < 0) {
1567  avpriv_request_sample(&avctx, "unknown XA-ADPCM shift %d", shift);
1568  shift = 0;
1569  }
1570  f0 = xa_adpcm_table[filter][0];
1571  f1 = xa_adpcm_table[filter][1];
1572 
1573  s_1 = left->sample1;
1574  s_2 = left->sample2;
1575 
1576  for (j = 0; j < 28; j++) {
1577  d = in[16 + i + j * 4];
1578 
1579  t = sign_extend(d, 4);
1580  s = t * (1 << shift) + ((s_1 * f0 + s_2 * f1 + 32) >> 6);
1581  s_2 = s_1;
1582  s_1 = av_clip_int16(s);
1583  out0[j] = s_1;
1584  }
1585 
1586  if (channels == 2) {
1587  left->sample1 = s_1;
1588  left->sample2 = s_2;
1589  s_1 = right->sample1;
1590  s_2 = right->sample2;
1591  }
1592 
1593  shift = 12 - (in[5 + i * 2] & 15);
1594  filter = in[5 + i * 2] >> 4;
1595  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table)) {
1596  avpriv_request_sample(&avctx, "unknown XA-ADPCM filter %d", filter);
1597  filter = 0;
1598  }
1599  if (shift < 0) {
1600  avpriv_request_sample(&avctx, "unknown XA-ADPCM shift %d", shift);
1601  shift = 0;
1602  }
1603 
1604  f0 = xa_adpcm_table[filter][0];
1605  f1 = xa_adpcm_table[filter][1];
1606 
1607  for (j = 0; j < 28; j++) {
1608  d = in[16 + i + j * 4];
1609 
1610  t = sign_extend(d >> 4, 4);
1611  s = t * (1 << shift) + ((s_1 * f0 + s_2 * f1 + 32) >> 6);
1612  s_2 = s_1;
1613  s_1 = av_clip_int16(s);
1614  out1[j] = s_1;
1615  }
1616 
1617  if (channels == 2) {
1618  right->sample1 = s_1;
1619  right->sample2 = s_2;
1620  } else {
1621  left->sample1 = s_1;
1622  left->sample2 = s_2;
1623  }
1624 
1625  out0 += 28 * (3 - channels);
1626  out1 += 28 * (3 - channels);
1627  }
1628 
1629  return 0;
1630  }
1631 
1632  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1633  int16_t *out0 = samples_p[0];
1634  int16_t *out1 = samples_p[1];
1635  int samples_per_block = 28 * (3 - channels()) * 4;
1636  int sample_offset = 0;
1637  int bytes_remaining;
1638  while (bytestream2_get_bytes_left(&gb) >= 128) {
1639  if ((ret =
1640  xa_decode(out0, out1, buf + bytestream2_tell(&gb), &c->status[0],
1641  &c->status[1], channels(), sample_offset)) < 0)
1642  return ret;
1643  bytestream2_skipu(&gb, 128);
1644  sample_offset += samples_per_block;
1645  }
1646  /* Less than a full block of data left, e.g. when reading from
1647  * 2324 byte per sector XA; the remainder is padding */
1648  bytes_remaining = bytestream2_get_bytes_left(&gb);
1649  if (bytes_remaining > 0) {
1650  bytestream2_skip(&gb, bytes_remaining);
1651  }
1652  return AV_OK;
1653  }
1654 };
1655 
1656 class DecoderADPCM_IMA_EA_EACS : public ADPCMDecoder {
1657  public:
1658  DecoderADPCM_IMA_EA_EACS() {
1659  setCodecID(AV_CODEC_ID_ADPCM_IMA_EA_EACS);
1660  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1661  }
1662  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1663  for (int i = 0; i <= st; i++) {
1664  c->status[i].step_index = bytestream2_get_le32u(&gb);
1665  if (c->status[i].step_index > 88u) {
1666  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n", i,
1667  c->status[i].step_index);
1668  return AVERROR_INVALIDDATA;
1669  }
1670  }
1671  for (int i = 0; i <= st; i++) {
1672  c->status[i].predictor = bytestream2_get_le32u(&gb);
1673  if (FFABS((int64_t)c->status[i].predictor) > (1 << 16))
1674  return AVERROR_INVALIDDATA;
1675  }
1676 
1677  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1678  int byte = bytestream2_get_byteu(&gb);
1679  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 3);
1680  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 3);
1681  }
1682  return AV_OK;
1683  }
1684 };
1685 
1686 class DecoderADPCM_IMA_EA_SEAD : public ADPCMDecoder {
1687  public:
1688  DecoderADPCM_IMA_EA_SEAD() {
1689  setCodecID(AV_CODEC_ID_ADPCM_IMA_EA_SEAD);
1690  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1691  }
1692  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1693  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1694  int byte = bytestream2_get_byteu(&gb);
1695  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte >> 4, 6);
1696  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte & 0x0F, 6);
1697  }
1698  return AV_OK;
1699  }
1700 };
1701 
1702 class DecoderADPCM_EA : public ADPCMDecoder {
1703  public:
1704  DecoderADPCM_EA() {
1705  setCodecID(AV_CODEC_ID_ADPCM_EA);
1706  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1707  }
1708  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1709  int previous_left_sample, previous_right_sample;
1710  int current_left_sample, current_right_sample;
1711  int next_left_sample, next_right_sample;
1712  int coeff1l, coeff2l, coeff1r, coeff2r;
1713  int shift_left, shift_right;
1714 
1715  /* Each EA ADPCM frame has a 12-byte header followed by 30-byte pieces,
1716  each coding 28 stereo samples. */
1717 
1718  if (channels() != 2) return AVERROR_INVALIDDATA;
1719 
1720  current_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1721  previous_left_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1722  current_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1723  previous_right_sample = sign_extend(bytestream2_get_le16u(&gb), 16);
1724 
1725  for (int count1 = 0; count1 < nb_samples / 28; count1++) {
1726  int byte = bytestream2_get_byteu(&gb);
1727  coeff1l = ea_adpcm_table[byte >> 4];
1728  coeff2l = ea_adpcm_table[(byte >> 4) + 4];
1729  coeff1r = ea_adpcm_table[byte & 0x0F];
1730  coeff2r = ea_adpcm_table[(byte & 0x0F) + 4];
1731 
1732  byte = bytestream2_get_byteu(&gb);
1733  shift_left = 20 - (byte >> 4);
1734  shift_right = 20 - (byte & 0x0F);
1735 
1736  for (int count2 = 0; count2 < 28; count2++) {
1737  byte = bytestream2_get_byteu(&gb);
1738  next_left_sample = sign_extend(byte >> 4, 4) * (1 << shift_left);
1739  next_right_sample = sign_extend(byte, 4) * (1 << shift_right);
1740 
1741  next_left_sample = (next_left_sample + (current_left_sample * coeff1l) +
1742  (previous_left_sample * coeff2l) + 0x80) >>
1743  8;
1744  next_right_sample =
1745  (next_right_sample + (current_right_sample * coeff1r) +
1746  (previous_right_sample * coeff2r) + 0x80) >>
1747  8;
1748 
1749  previous_left_sample = current_left_sample;
1750  current_left_sample = av_clip_int16(next_left_sample);
1751  previous_right_sample = current_right_sample;
1752  current_right_sample = av_clip_int16(next_right_sample);
1753  *samples++ = current_left_sample;
1754  *samples++ = current_right_sample;
1755  }
1756  }
1757 
1758  bytestream2_skip(&gb, 2); // Skip terminating 0x0000
1759  return AV_OK;
1760  }
1761 };
1762 
1763 class DecoderADPCM_EA_MAXIS_XA : public ADPCMDecoder {
1764  public:
1765  DecoderADPCM_EA_MAXIS_XA() {
1766  setCodecID(AV_CODEC_ID_ADPCM_EA_MAXIS_XA);
1767  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1768  }
1769  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1770  int coeff[2][2], shift[2];
1771 
1772  for (int channel = 0; channel < channels(); channel++) {
1773  int byte = bytestream2_get_byteu(&gb);
1774  for (int i = 0; i < 2; i++)
1775  coeff[channel][i] = ea_adpcm_table[(byte >> 4) + 4 * i];
1776  shift[channel] = 20 - (byte & 0x0F);
1777  }
1778  for (int count1 = 0; count1 < nb_samples / 2; count1++) {
1779  int byte[2];
1780 
1781  byte[0] = bytestream2_get_byteu(&gb);
1782  if (st) byte[1] = bytestream2_get_byteu(&gb);
1783  for (int i = 4; i >= 0;
1784  i -= 4) { /* Pairwise samples LL RR (st) or LL LL (mono) */
1785  for (int channel = 0; channel < channels(); channel++) {
1786  int sample =
1787  sign_extend(byte[channel] >> i, 4) * (1 << shift[channel]);
1788  sample = (sample + c->status[channel].sample1 * coeff[channel][0] +
1789  c->status[channel].sample2 * coeff[channel][1] + 0x80) >>
1790  8;
1791  c->status[channel].sample2 = c->status[channel].sample1;
1792  c->status[channel].sample1 = av_clip_int16(sample);
1793  *samples++ = c->status[channel].sample1;
1794  }
1795  }
1796  }
1797  bytestream2_seek(&gb, 0, SEEK_END);
1798  return AV_OK;
1799  }
1800 };
1801 
1802 class DecoderADPCM_EA_XAS : public ADPCMDecoder {
1803  public:
1804  DecoderADPCM_EA_XAS() {
1805  setCodecID(AV_CODEC_ID_ADPCM_EA_XAS);
1806  sample_formats.push_back(AV_SAMPLE_FMT_S16P);
1807  }
1808  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1809  for (int channel = 0; channel < channels(); channel++) {
1810  int coeff[2][4], shift[4];
1811  int16_t *s = samples_p[channel];
1812  for (int n = 0; n < 4; n++, s += 32) {
1813  int val = sign_extend(bytestream2_get_le16u(&gb), 16);
1814  for (int i = 0; i < 2; i++)
1815  coeff[i][n] = ea_adpcm_table[(val & 0x0F) + 4 * i];
1816  s[0] = val & ~0x0F;
1817 
1818  val = sign_extend(bytestream2_get_le16u(&gb), 16);
1819  shift[n] = 20 - (val & 0x0F);
1820  s[1] = val & ~0x0F;
1821  }
1822 
1823  for (int m = 2; m < 32; m += 2) {
1824  s = &samples_p[channel][m];
1825  for (int n = 0; n < 4; n++, s += 32) {
1826  int level, pred;
1827  int byte = bytestream2_get_byteu(&gb);
1828 
1829  level = sign_extend(byte >> 4, 4) * (1 << shift[n]);
1830  pred = s[-1] * coeff[0][n] + s[-2] * coeff[1][n];
1831  s[0] = av_clip_int16((level + pred + 0x80) >> 8);
1832 
1833  level = sign_extend(byte, 4) * (1 << shift[n]);
1834  pred = s[0] * coeff[0][n] + s[-1] * coeff[1][n];
1835  s[1] = av_clip_int16((level + pred + 0x80) >> 8);
1836  }
1837  }
1838  }
1839  return AV_OK;
1840  }
1841 };
1842 
1843 class DecoderADPCM_IMA_ACORN : public ADPCMDecoder {
1844  public:
1845  DecoderADPCM_IMA_ACORN() {
1846  setCodecID(AV_CODEC_ID_ADPCM_IMA_ACORN);
1847  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1848  }
1849  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1850  for (int channel = 0; channel < channels(); channel++) {
1851  ADPCMChannelStatus *cs = &c->status[channel];
1852  cs->predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1853  cs->step_index = bytestream2_get_le16u(&gb) & 0xFF;
1854  if (cs->step_index > 88u) {
1855  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index[%d] = %i\n", channel,
1856  cs->step_index);
1857  return AVERROR_INVALIDDATA;
1858  }
1859  }
1860  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1861  int byte = bytestream2_get_byteu(&gb);
1862  *samples++ = adpcm_ima_expand_nibble(&c->status[0], byte & 0x0F, 3);
1863  *samples++ = adpcm_ima_expand_nibble(&c->status[st], byte >> 4, 3);
1864  }
1865  return AV_OK;
1866  }
1867 };
1868 
1869 class DecoderADPCM_IMA_AMV : public ADPCMDecoder {
1870  public:
1871  DecoderADPCM_IMA_AMV() {
1872  setCodecID(AV_CODEC_ID_ADPCM_IMA_AMV);
1873  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1874  }
1875  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1876  av_assert(channels() == 1);
1877 
1878  /*
1879  * Header format:
1880  * int16_t predictor;
1881  * uint8_t step_index;
1882  * uint8_t reserved;
1883  * uint32_t frame_size;
1884  *
1885  * Some implementations have step_index as 16-bits, but others
1886  * only use the lower 8 and store garbage in the upper 8.
1887  */
1888  c->status[0].predictor = sign_extend(bytestream2_get_le16u(&gb), 16);
1889  c->status[0].step_index = bytestream2_get_byteu(&gb);
1890  bytestream2_skipu(&gb, 5);
1891  if (c->status[0].step_index > 88u) {
1892  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1893  c->status[0].step_index);
1894  return AVERROR_INVALIDDATA;
1895  }
1896 
1897  for (int n = nb_samples >> 1; n > 0; n--) {
1898  int v = bytestream2_get_byteu(&gb);
1899 
1900  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1901  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v & 0xf, 3);
1902  }
1903 
1904  if (nb_samples & 1) {
1905  int v = bytestream2_get_byteu(&gb);
1906  *samples++ = adpcm_ima_expand_nibble(&c->status[0], v >> 4, 3);
1907 
1908  if (v & 0x0F) {
1909  /* Holds true on all the http://samples.mplayerhq.hu/amv samples. */
1910  av_log(avctx, AV_LOG_WARNING,
1911  "Last nibble set on packet with odd sample count.\n");
1912  av_log(avctx, AV_LOG_WARNING, "Sample will be skipped.\n");
1913  }
1914  }
1915  return AV_OK;
1916  }
1917 };
1918 
1919 class DecoderADPCM_IMA_SMJPEG : public ADPCMDecoder {
1920  public:
1921  DecoderADPCM_IMA_SMJPEG() {
1922  setCodecID(AV_CODEC_ID_ADPCM_IMA_SMJPEG);
1923  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1924  }
1925  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1926  for (int i = 0; i < channels(); i++) {
1927  c->status[i].predictor = sign_extend(bytestream2_get_be16u(&gb), 16);
1928  c->status[i].step_index = bytestream2_get_byteu(&gb);
1929  bytestream2_skipu(&gb, 1);
1930  if (c->status[i].step_index > 88u) {
1931  av_log(avctx, AV_LOG_ERROR, "ERROR: step_index = %i\n",
1932  c->status[i].step_index);
1933  return AVERROR_INVALIDDATA;
1934  }
1935  }
1936 
1937  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1938  int v = bytestream2_get_byteu(&gb);
1939 
1940  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[0], v >> 4);
1941  *samples++ = adpcm_ima_qt_expand_nibble(&c->status[st], v & 0xf);
1942  }
1943  return AV_OK;
1944  }
1945 };
1946 
1947 class DecoderADPCM_CT : public ADPCMDecoder {
1948  public:
1949  DecoderADPCM_CT() {
1950  setCodecID(AV_CODEC_ID_ADPCM_CT);
1951  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1952  }
1953  int16_t adpcm_ct_expand_nibble(ADPCMChannelStatus *c, int8_t nibble) {
1954  int sign, delta, diff;
1955  int new_step;
1956 
1957  sign = nibble & 8;
1958  delta = nibble & 7;
1959  /* perform direct multiplication instead of series of jumps proposed by
1960  * the reference ADPCM implementation since modern CPUs can do the mults
1961  * quickly enough */
1962  diff = ((2 * delta + 1) * c->step) >> 3;
1963  /* predictor update is not so trivial: the predictor is multiplied by 254/256
1964  * before updating */
1965  c->predictor = ((c->predictor * 254) >> 8) + (sign ? -diff : diff);
1966  c->predictor = av_clip_int16(c->predictor);
1967  /* calculate new step and clamp it to range 511..32767 */
1968  new_step = (ff_adpcm_AdaptationTable[nibble & 7] * c->step) >> 8;
1969  c->step = av_clip(new_step, 511, 32767);
1970 
1971  return (int16_t)c->predictor;
1972  }
1973 
1974  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
1975  for (int n = nb_samples >> (1 - st); n > 0; n--) {
1976  int v = bytestream2_get_byteu(&gb);
1977  *samples++ = adpcm_ct_expand_nibble(&c->status[0], v >> 4);
1978  *samples++ = adpcm_ct_expand_nibble(&c->status[st], v & 0x0F);
1979  }
1980  return AV_OK;
1981  }
1982 };
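The CT expander above leaks the predictor by 254/256 on every step and adapts the step size with the shared adaptation table, clamped to the range 511..32767. The following sketch works one step with made-up state; the adaptation values are assumed to be the first eight entries of FFmpeg's ff_adpcm_AdaptationTable.

#include <algorithm>
#include <cstdio>

// Illustrative only: one Creative (CT) ADPCM expansion step, same arithmetic
// as adpcm_ct_expand_nibble above.
int main() {
  int predictor = 2000, step = 1000;
  int nibble = 0x0A;                        // sign bit (8) set, delta = 2
  int sign = nibble & 8, delta = nibble & 7;

  int diff = ((2 * delta + 1) * step) >> 3;                      // (5 * 1000) >> 3 = 625
  predictor = ((predictor * 254) >> 8) + (sign ? -diff : diff);  // leak by 254/256, then step
  predictor = std::min(32767, std::max(-32768, predictor));      // 1984 - 625 = 1359

  static const int adapt[8] = {230, 230, 230, 230, 307, 409, 512, 614};
  step = std::min(32767, std::max(511, (adapt[delta] * step) >> 8));  // (230*1000) >> 8 = 898

  std::printf("predictor=%d step=%d\n", predictor, step);        // 1359 898
  return 0;
}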
1983 
1984 class DecoderADPCM_SWF : public ADPCMDecoder {
1985  public:
1986  DecoderADPCM_SWF() {
1987  setCodecID(AV_CODEC_ID_ADPCM_SWF);
1988  sample_formats.push_back(AV_SAMPLE_FMT_S16);
1989  }
1990  void adpcm_swf_decode(const uint8_t *buf, int buf_size, int16_t *samples) {
1991  ADPCMDecodeContext *c = (ADPCMDecodeContext *)avctx.priv_data;
1992  GetBitContext gb;
1993  const int8_t *table;
1994  int channels = avctx.nb_channels;
1995  int k0, signmask, nb_bits, count;
1996  int size = buf_size * 8;
1997  int i;
1998 
1999  init_get_bits(&gb, buf, size);
2000 
2001  // read bits & initial values
2002  nb_bits = get_bits(&gb, 2) + 2;
2003  table = swf_index_tables[nb_bits - 2];
2004  k0 = 1 << (nb_bits - 2);
2005  signmask = 1 << (nb_bits - 1);
2006 
2007  while (get_bits_count(&gb) <= size - 22 * channels) {
2008  for (i = 0; i < channels; i++) {
2009  *samples++ = c->status[i].predictor = get_sbits(&gb, 16);
2010  c->status[i].step_index = get_bits(&gb, 6);
2011  }
2012 
2013  for (count = 0;
2014  get_bits_count(&gb) <= size - nb_bits * channels && count < 4095;
2015  count++) {
2016  int i;
2017 
2018  for (i = 0; i < channels; i++) {
2019  // similar to IMA adpcm
2020  int delta = get_bits(&gb, nb_bits);
2021  int step = ff_adpcm_step_table[c->status[i].step_index];
2022  int vpdiff = 0; // vpdiff = (delta+0.5)*step/4
2023  int k = k0;
2024 
2025  do {
2026  if (delta & k) vpdiff += step;
2027  step >>= 1;
2028  k >>= 1;
2029  } while (k);
2030  vpdiff += step;
2031 
2032  if (delta & signmask)
2033  c->status[i].predictor -= vpdiff;
2034  else
2035  c->status[i].predictor += vpdiff;
2036 
2037  c->status[i].step_index += table[delta & (~signmask)];
2038 
2039  c->status[i].step_index = av_clip(c->status[i].step_index, 0, 88);
2040  c->status[i].predictor = av_clip_int16(c->status[i].predictor);
2041 
2042  *samples++ = c->status[i].predictor;
2043  }
2044  }
2045  }
2046  }
2047  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
2048  adpcm_swf_decode(buf, buf_size, samples);
2049  bytestream2_seek(&gb, 0, SEEK_END);
2050  return AV_OK;
2051  }
2052 };
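The bit-serial do/while loop in adpcm_swf_decode accumulates halved copies of the step for each set magnitude bit, which is what the inline comment vpdiff = (delta+0.5)*step/4 describes. The sketch below checks that claim for nb_bits = 4 with a step value that halves without truncation (illustrative values only).

#include <cstdio>

// Illustrative only: compares the loop above with its closed form for nb_bits = 4.
int main() {
  const int nb_bits = 4;
  const int k0 = 1 << (nb_bits - 2);   // 4
  int delta = 5;                       // magnitude bits of the code word (0b101)
  int step = 1000;

  int vpdiff = 0;
  int k = k0;
  do {
    if (delta & k) vpdiff += step;
    step >>= 1;
    k >>= 1;
  } while (k);
  vpdiff += step;

  std::printf("loop=%d  closed form=%g\n", vpdiff, (5 + 0.5) * 1000 / 4.0);  // 1375 1375
  return 0;
}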
2053 
2054 class DecoderADPCM_YAMAHA : public ADPCMDecoder {
2055  public:
2056  DecoderADPCM_YAMAHA() {
2057  setCodecID(AV_CODEC_ID_ADPCM_YAMAHA);
2058  sample_formats.push_back(AV_SAMPLE_FMT_S16);
2059  }
2060  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
2061  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2062  int v = bytestream2_get_byteu(&gb);
2063  *samples++ = adpcm_yamaha_expand_nibble(&c->status[0], v & 0x0F);
2064  *samples++ = adpcm_yamaha_expand_nibble(&c->status[st], v >> 4);
2065  }
2066  return AV_OK;
2067  }
2068 };
2069 
2070 class DecoderADPCM_AICA : public ADPCMDecoder {
2071  public:
2072  DecoderADPCM_AICA() {
2073  setCodecID(AV_CODEC_ID_ADPCM_AICA);
2074  sample_formats.push_back(AV_SAMPLE_FMT_S16P);
2075  }
2076  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
2077  for (int channel = 0; channel < channels(); channel++) {
2078  samples = samples_p[channel];
2079  for (int n = nb_samples >> 1; n > 0; n--) {
2080  int v = bytestream2_get_byteu(&gb);
2081  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v & 0x0F);
2082  *samples++ = adpcm_yamaha_expand_nibble(&c->status[channel], v >> 4);
2083  }
2084  }
2085  return AV_OK;
2086  }
2087 };
2088 
2089 class DecoderADPCM_AFC : public ADPCMDecoder {
2090  public:
2091  DecoderADPCM_AFC() {
2092  setCodecID(AV_CODEC_ID_ADPCM_AFC);
2093  sample_formats.push_back(AV_SAMPLE_FMT_S16P);
2094  }
2095  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
2096  int samples_per_block;
2097  int blocks;
2098 
2099  if (avctx.extradata && avctx.extradata_size == 1 && avctx.extradata[0]) {
2100  samples_per_block = avctx.extradata[0] / 16;
2101  blocks = nb_samples / avctx.extradata[0];
2102  } else {
2103  samples_per_block = nb_samples / 16;
2104  blocks = 1;
2105  }
2106 
2107  for (int m = 0; m < blocks; m++) {
2108  for (int channel = 0; channel < channels(); channel++) {
2109  int prev1 = c->status[channel].sample1;
2110  int prev2 = c->status[channel].sample2;
2111 
2112  samples = samples_p[channel] + m * 16;
2113  /* Read in every sample for this channel. */
2114  for (int i = 0; i < samples_per_block; i++) {
2115  int byte = bytestream2_get_byteu(&gb);
2116  int scale = 1 << (byte >> 4);
2117  int index = byte & 0xf;
2118  int factor1 = afc_coeffs[0][index];
2119  int factor2 = afc_coeffs[1][index];
2120 
2121  /* Decode 16 samples. */
2122  for (int n = 0; n < 16; n++) {
2123  int32_t sampledat;
2124 
2125  if (n & 1) {
2126  sampledat = sign_extend(byte, 4);
2127  } else {
2128  byte = bytestream2_get_byteu(&gb);
2129  sampledat = sign_extend(byte >> 4, 4);
2130  }
2131 
2132  sampledat =
2133  ((prev1 * factor1 + prev2 * factor2) >> 11) + sampledat * scale;
2134  *samples = av_clip_int16(sampledat);
2135  prev2 = prev1;
2136  prev1 = *samples++;
2137  }
2138  }
2139 
2140  c->status[channel].sample1 = prev1;
2141  c->status[channel].sample2 = prev2;
2142  }
2143  }
2144  bytestream2_seek(&gb, 0, SEEK_END);
2145  return AV_OK;
2146  }
2147 };
2148 
2149 class DecoderADPCM_DTK : public ADPCMDecoder {
2150  public:
2151  DecoderADPCM_DTK() {
2152  setCodecID(AV_CODEC_ID_ADPCM_DTK);
2153  sample_formats.push_back(AV_SAMPLE_FMT_S16P);
2154  }
2155  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
2156  for (int channel = 0; channel < channels(); channel++) {
2157  samples = samples_p[channel];
2158 
2159  /* Read in every sample for this channel. */
2160  for (int i = 0; i < nb_samples / 28; i++) {
2161  int byte, header;
2162  if (channel) bytestream2_skipu(&gb, 1);
2163  header = bytestream2_get_byteu(&gb);
2164  bytestream2_skipu(&gb, 3 - channel);
2165 
2166  /* Decode 28 samples. */
2167  for (int n = 0; n < 28; n++) {
2168  int32_t sampledat, prev;
2169 
2170  switch (header >> 4) {
2171  case 1:
2172  prev = (c->status[channel].sample1 * 0x3c);
2173  break;
2174  case 2:
2175  prev = (c->status[channel].sample1 * 0x73) -
2176  (c->status[channel].sample2 * 0x34);
2177  break;
2178  case 3:
2179  prev = (c->status[channel].sample1 * 0x62) -
2180  (c->status[channel].sample2 * 0x37);
2181  break;
2182  default:
2183  prev = 0;
2184  }
2185 
2186  prev = av_clip_intp2((prev + 0x20) >> 6, 21);
2187 
2188  byte = bytestream2_get_byteu(&gb);
2189  if (!channel)
2190  sampledat = sign_extend(byte, 4);
2191  else
2192  sampledat = sign_extend(byte >> 4, 4);
2193 
2194  sampledat =
2195  ((sampledat * (1 << 12)) >> (header & 0xf)) * (1 << 6) + prev;
2196  *samples++ = av_clip_int16(sampledat >> 6);
2197  c->status[channel].sample2 = c->status[channel].sample1;
2198  c->status[channel].sample1 = sampledat;
2199  }
2200  }
2201  if (!channel) bytestream2_seek(&gb, 0, SEEK_SET);
2202  }
2203  return AV_OK;
2204  }
2205 };
2206 
2207 class DecoderADPCM_PSX : public ADPCMDecoder {
2208  public:
2209  DecoderADPCM_PSX() {
2210  setCodecID(AV_CODEC_ID_ADPCM_PSX);
2211  sample_formats.push_back(AV_SAMPLE_FMT_S16P);
2212  }
2213  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
2214  for (int block = 0;
2215  block < avpkt->size / FFMAX(avctx.block_align, 16 * channels());
2216  block++) {
2217  int nb_samples_per_block =
2218  28 * FFMAX(avctx.block_align, 16 * channels()) / (16 * channels());
2219  for (int channel = 0; channel < channels(); channel++) {
2220  samples = samples_p[channel] + block * nb_samples_per_block;
2221  av_assert((block + 1) * nb_samples_per_block <= nb_samples);
2222 
2223  /* Read in every sample for this channel. */
2224  for (int i = 0; i < nb_samples_per_block / 28; i++) {
2225  int filter, shift, flag, byte;
2226 
2227  filter = bytestream2_get_byteu(&gb);
2228  shift = filter & 0xf;
2229  filter = filter >> 4;
2230  if (filter >= FF_ARRAY_ELEMS(xa_adpcm_table))
2231  return AVERROR_INVALIDDATA;
2232  flag = bytestream2_get_byteu(&gb) & 0x7;
2233 
2234  /* Decode 28 samples. */
2235  for (int n = 0; n < 28; n++) {
2236  int sample = 0, scale;
2237 
2238  if (n & 1) {
2239  scale = sign_extend(byte >> 4, 4);
2240  } else {
2241  byte = bytestream2_get_byteu(&gb);
2242  scale = sign_extend(byte, 4);
2243  }
2244 
2245  if (flag < 0x07) {
2246  scale = scale * (1 << 12);
2247  sample =
2248  (int)((scale >> shift) + (c->status[channel].sample1 *
2249  xa_adpcm_table[filter][0] +
2250  c->status[channel].sample2 *
2251  xa_adpcm_table[filter][1]) /
2252  64);
2253  }
2254  *samples++ = av_clip_int16(sample);
2255  c->status[channel].sample2 = c->status[channel].sample1;
2256  c->status[channel].sample1 = sample;
2257  }
2258  }
2259  }
2260  }
2261  return AV_OK;
2262  }
2263 };
2264 
2265 class DecoderADPCM_ARGO : public ADPCMDecoder {
2266  public:
2267  DecoderADPCM_ARGO() {
2268  setCodecID(AV_CODEC_ID_ADPCM_ARGO);
2269  sample_formats.push_back(AV_SAMPLE_FMT_S16P);
2270  }
2271  int16_t ff_adpcm_argo_expand_nibble(ADPCMChannelStatus *cs, int nibble,
2272  int shift, int flag) {
2273  int sample = sign_extend(nibble, 4) * (1 << shift);
2274 
2275  if (flag)
2276  sample += (8 * cs->sample1) - (4 * cs->sample2);
2277  else
2278  sample += 4 * cs->sample1;
2279 
2280  sample = av_clip_int16(sample >> 2);
2281 
2282  cs->sample2 = cs->sample1;
2283  cs->sample1 = sample;
2284 
2285  return sample;
2286  }
2287 
2288  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
2289  /*
2290  * The format of each block:
2291  * uint8_t left_control;
2292  * uint4_t left_samples[nb_samples];
2293  * ---- and if stereo ----
2294  * uint8_t right_control;
2295  * uint4_t right_samples[nb_samples];
2296  *
2297  * Format of the control byte:
2298  * MSB [SSSSRDRR] LSB
2299  * S = (Shift Amount - 2)
2300  * D = Decoder flag.
2301  * R = Reserved
2302  *
2303  * Each block relies on the previous two samples of each channel.
2304  * They should be 0 initially.
2305  */
2306  for (int block = 0; block < avpkt->size / avctx.block_align; block++) {
2307  for (int channel = 0; channel < avctx.nb_channels; channel++) {
2308  ADPCMChannelStatus *cs = c->status + channel;
2309  int control, shift;
2310 
2311  samples = samples_p[channel] + block * 32;
2312 
2313  /* Get the control byte and decode the samples, 2 at a time. */
2314  control = bytestream2_get_byteu(&gb);
2315  shift = (control >> 4) + 2;
2316 
2317  for (int n = 0; n < 16; n++) {
2318  int sample = bytestream2_get_byteu(&gb);
2319  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 4, shift,
2320  control & 0x04);
2321  *samples++ = ff_adpcm_argo_expand_nibble(cs, sample >> 0, shift,
2322  control & 0x04);
2323  }
2324  }
2325  }
2326  return AV_OK;
2327  }
2328 };
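Because of the final >> 2 in ff_adpcm_argo_expand_nibble, the decoder flag from the control byte effectively selects between a second-order predictor (2*sample1 - sample2) and a first-order one (sample1). The tiny check below shows only the prediction terms with made-up history values; the real expander also adds the scaled nibble before the shift.

#include <cstdio>

// Illustrative only: the two prediction modes of the ARGO expander, nibble term omitted.
int main() {
  int sample1 = 500, sample2 = 300;
  int flag_set   = (8 * sample1 - 4 * sample2) >> 2;  // 700 = 2*500 - 300 (linear extrapolation)
  int flag_clear = (4 * sample1) >> 2;                // 500 (hold the previous sample)
  std::printf("flag set: %d, flag clear: %d\n", flag_set, flag_clear);
  return 0;
}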
2329 
2330 class DecoderADPCM_ZORK : public ADPCMDecoder {
2331  public:
2332  DecoderADPCM_ZORK() {
2333  setCodecID(AV_CODEC_ID_ADPCM_ZORK);
2334  sample_formats.push_back(AV_SAMPLE_FMT_S16);
2335  }
2336  int16_t adpcm_zork_expand_nibble(ADPCMChannelStatus *c, uint8_t nibble) {
2337  int16_t index = c->step_index;
2338  uint32_t lookup_sample = ff_adpcm_step_table[index];
2339  int32_t sample = 0;
2340 
2341  if (nibble & 0x40) sample += lookup_sample;
2342  if (nibble & 0x20) sample += lookup_sample >> 1;
2343  if (nibble & 0x10) sample += lookup_sample >> 2;
2344  if (nibble & 0x08) sample += lookup_sample >> 3;
2345  if (nibble & 0x04) sample += lookup_sample >> 4;
2346  if (nibble & 0x02) sample += lookup_sample >> 5;
2347  if (nibble & 0x01) sample += lookup_sample >> 6;
2348  if (nibble & 0x80) sample = -sample;
2349 
2350  sample += c->predictor;
2351  sample = av_clip_int16(sample);
2352 
2353  index += zork_index_table[(nibble >> 4) & 7];
2354  index = av_clip(index, 0, 88);
2355 
2356  c->predictor = sample;
2357  c->step_index = index;
2358 
2359  return sample;
2360  }
2361  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
2362  for (int n = 0; n < nb_samples * channels(); n++) {
2363  int v = bytestream2_get_byteu(&gb);
2364  *samples++ = adpcm_zork_expand_nibble(&c->status[n % channels()], v);
2365  }
2366  return AV_OK;
2367  }
2368 };
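adpcm_zork_expand_nibble treats the full code byte as a sign bit plus seven magnitude bits, each weighted by a progressively halved copy of the current step, so the decoded delta is roughly step * magnitude / 64. A standalone illustration with made-up step and code values:

#include <cstdio>

// Illustrative only: the bit-weighted delta used by the ZORK expander above.
int main() {
  int step = 1000;        // ff_adpcm_step_table[step_index] in the real decoder
  int code = 0xCA;        // sign bit (0x80) set, magnitude bits 0x4A

  int delta = 0;
  for (int bit = 0; bit < 7; bit++)
    if (code & (0x40 >> bit)) delta += step >> bit;   // step, step>>1, ..., step>>6
  if (code & 0x80) delta = -delta;

  // 0x4A = 0b1001010 -> step + (step>>3) + (step>>5) = 1000 + 125 + 31 = 1156
  std::printf("delta=%d  approx=%g\n", delta, -(0x4A) * 1000 / 64.0);  // -1156  -1156.25
  return 0;
}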
2369 
2370 class DecoderADPCM_IMA_MTF : public ADPCMDecoder {
2371  public:
2372  DecoderADPCM_IMA_MTF() {
2373  setCodecID(AV_CODEC_ID_ADPCM_IMA_MTF);
2374  sample_formats.push_back(AV_SAMPLE_FMT_S16);
2375  }
2376  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
2377  for (int n = nb_samples / 2; n > 0; n--) {
2378  for (int channel = 0; channel < channels(); channel++) {
2379  int v = bytestream2_get_byteu(&gb);
2380  *samples++ = adpcm_ima_mtf_expand_nibble(&c->status[channel], v >> 4);
2381  samples[st] =
2382  adpcm_ima_mtf_expand_nibble(&c->status[channel], v & 0x0F);
2383  }
2384  samples += channels();
2385  }
2386  return AV_OK;
2387  }
2388 };
2389 
2390 class DecoderADPCM_SBPRO_X : public ADPCMDecoder {
2391  public:
2392  DecoderADPCM_SBPRO_X(AVCodecID id) {
2393  setCodecID(id);
2394  sample_formats.push_back(AV_SAMPLE_FMT_S16);
2395  assert(id >= AV_CODEC_ID_ADPCM_SBPRO_2 && id <= AV_CODEC_ID_ADPCM_SBPRO_4);
2396  }
2397  int16_t adpcm_sbpro_expand_nibble(ADPCMChannelStatus *c, int8_t nibble,
2398  int size, int shift) {
2399  int sign, delta, diff;
2400 
2401  sign = nibble & (1 << (size - 1));
2402  delta = nibble & ((1 << (size - 1)) - 1);
2403  diff = delta << (7 + c->step + shift);
2404 
2405  /* clamp result */
2406  c->predictor = av_clip(c->predictor + (sign ? -diff : diff), -16384, 16256);
2407 
2408  /* calculate new step */
2409  if (delta >= (2 * size - 3) && c->step < 3)
2410  c->step++;
2411  else if (delta == 0 && c->step > 0)
2412  c->step--;
2413 
2414  return (int16_t)c->predictor;
2415  }
2416 
2417  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
2418  if (!c->status[0].step_index) {
2419  /* the first byte is a raw sample */
2420  *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
2421  if (st) *samples++ = 128 * (bytestream2_get_byteu(&gb) - 0x80);
2422  c->status[0].step_index = 1;
2423  nb_samples--;
2424  }
2425  if (avctx.codec_id == AV_CODEC_ID_ADPCM_SBPRO_4) {
2426  for (int n = nb_samples >> (1 - st); n > 0; n--) {
2427  int byte = bytestream2_get_byteu(&gb);
2428  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], byte >> 4, 4, 0);
2429  *samples++ =
2430  adpcm_sbpro_expand_nibble(&c->status[st], byte & 0x0F, 4, 0);
2431  }
2432  } else if (avctx.codec_id == AV_CODEC_ID_ADPCM_SBPRO_3) {
2433  for (int n = (nb_samples << st) / 3; n > 0; n--) {
2434  int byte = bytestream2_get_byteu(&gb);
2435  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], byte >> 5, 3, 0);
2436  *samples++ =
2437  adpcm_sbpro_expand_nibble(&c->status[0], (byte >> 2) & 0x07, 3, 0);
2438  *samples++ =
2439  adpcm_sbpro_expand_nibble(&c->status[0], byte & 0x03, 2, 0);
2440  }
2441  } else {
2442  for (int n = nb_samples >> (2 - st); n > 0; n--) {
2443  int byte = bytestream2_get_byteu(&gb);
2444  *samples++ = adpcm_sbpro_expand_nibble(&c->status[0], byte >> 6, 2, 2);
2445  *samples++ =
2446  adpcm_sbpro_expand_nibble(&c->status[st], (byte >> 4) & 0x03, 2, 2);
2447  *samples++ =
2448  adpcm_sbpro_expand_nibble(&c->status[0], (byte >> 2) & 0x03, 2, 2);
2449  *samples++ =
2450  adpcm_sbpro_expand_nibble(&c->status[st], byte & 0x03, 2, 2);
2451  }
2452  }
2453  return AV_OK;
2454  }
2455 };
2456 
2457 class DecoderADPCM_SBPRO_2 : public DecoderADPCM_SBPRO_X {
2458  public:
2459  DecoderADPCM_SBPRO_2() : DecoderADPCM_SBPRO_X(AV_CODEC_ID_ADPCM_SBPRO_2) {}
2460 };
2461 
2462 class DecoderADPCM_SBPRO_3 : public DecoderADPCM_SBPRO_X {
2463  public:
2464  DecoderADPCM_SBPRO_3() : DecoderADPCM_SBPRO_X(AV_CODEC_ID_ADPCM_SBPRO_3) {}
2465 };
2466 
2467 class DecoderADPCM_SBPRO_4 : public DecoderADPCM_SBPRO_X {
2468  public:
2469  DecoderADPCM_SBPRO_4() : DecoderADPCM_SBPRO_X(AV_CODEC_ID_ADPCM_SBPRO_4) {}
2470 };
2471 
2472 class DecoderADPCM_THP : public ADPCMDecoder {
2473  public:
2474  DecoderADPCM_THP() : DecoderADPCM_THP(AV_CODEC_ID_ADPCM_THP) {}
2475  DecoderADPCM_THP(AVCodecID id) {
2476  setCodecID(id);
2477  assert(id == AV_CODEC_ID_ADPCM_THP || id == AV_CODEC_ID_ADPCM_THP_LE);
2478  sample_formats.push_back(AV_SAMPLE_FMT_S16P);
2479  }
2480  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
2481  int table[14][16];
2482 
2483 #define THP_GET16(g) \
2484  sign_extend(avctx.codec_id == AV_CODEC_ID_ADPCM_THP_LE \
2485  ? bytestream2_get_le16u(&(g)) \
2486  : bytestream2_get_be16u(&(g)), \
2487  16)
2488 
2489  if (avctx.extradata) {
2490  GetByteContext tb;
2491  if (avctx.extradata_size < 32 * channels()) {
2492  av_log(avctx, AV_LOG_ERROR, "Missing coeff table\n");
2493  return AVERROR_INVALIDDATA;
2494  }
2495 
2496  bytestream2_init(&tb, avctx.extradata, avctx.extradata_size);
2497  for (int i = 0; i < channels(); i++)
2498  for (int n = 0; n < 16; n++) table[i][n] = THP_GET16(tb);
2499  } else {
2500  for (int i = 0; i < channels(); i++)
2501  for (int n = 0; n < 16; n++) table[i][n] = THP_GET16(gb);
2502 
2503  if (!c->has_status) {
2504  /* Initialize the previous sample. */
2505  for (int i = 0; i < channels(); i++) {
2506  c->status[i].sample1 = THP_GET16(gb);
2507  c->status[i].sample2 = THP_GET16(gb);
2508  }
2509  c->has_status = 1;
2510  } else {
2511  bytestream2_skip(&gb, channels() * 4);
2512  }
2513  }
2514 
2515  for (int ch = 0; ch < channels(); ch++) {
2516  samples = samples_p[ch];
2517 
2518  /* Read in every sample for this channel. */
2519  for (int i = 0; i < (nb_samples + 13) / 14; i++) {
2520  int byte = bytestream2_get_byteu(&gb);
2521  int index = (byte >> 4) & 7;
2522  unsigned int exp = byte & 0x0F;
2523  int64_t factor1 = table[ch][index * 2];
2524  int64_t factor2 = table[ch][index * 2 + 1];
2525 
2526  /* Decode 14 samples. */
2527  for (int n = 0; n < 14 && (i * 14 + n < nb_samples); n++) {
2528  int32_t sampledat;
2529 
2530  if (n & 1) {
2531  sampledat = sign_extend(byte, 4);
2532  } else {
2533  byte = bytestream2_get_byteu(&gb);
2534  sampledat = sign_extend(byte >> 4, 4);
2535  }
2536 
2537  sampledat = ((c->status[ch].sample1 * factor1 +
2538  c->status[ch].sample2 * factor2) >>
2539  11) +
2540  sampledat * (1 << exp);
2541  *samples = av_clip_int16(sampledat);
2542  c->status[ch].sample2 = c->status[ch].sample1;
2543  c->status[ch].sample1 = *samples++;
2544  }
2545  }
2546  }
2547  return AV_OK;
2548  }
2549 };
2550 
2551 class DecoderADPCM_THP_LE : public DecoderADPCM_THP {
2552  public:
2553  DecoderADPCM_THP_LE() : DecoderADPCM_THP(AV_CODEC_ID_ADPCM_THP_LE) {}
2554 };
2555 
2556 // Common base class for the EA R1 - R3 decoders
2557 class DecoderADPCM_EA_RX : public ADPCMDecoder {
2558  public:
2559  DecoderADPCM_EA_RX(AVCodecID id) {
2560  setCodecID(id);
2561  assert(id == AV_CODEC_ID_ADPCM_EA_R1 || id == AV_CODEC_ID_ADPCM_EA_R2 ||
2562  id == AV_CODEC_ID_ADPCM_EA_R3);
2563  sample_formats.push_back(AV_SAMPLE_FMT_S16P);
2564  }
2565  int decode_frame_impl(AVFrame *frame, int *got_frame_ptr, AVPacket *avpkt) {
2566  /* channel numbering
2567  2chan: 0=fl, 1=fr
2568  4chan: 0=fl, 1=rl, 2=fr, 3=rr
2569  6chan: 0=fl, 1=c, 2=fr, 3=rl, 4=rr, 5=sub */
2570  const int big_endian = avctx.codec_id == AV_CODEC_ID_ADPCM_EA_R3;
2571  int previous_sample, current_sample, next_sample;
2572  int coeff1, coeff2;
2573  int shift;
2574  int16_t *samplesC;
2575  int count = 0;
2576  int offsets[6];
2577 
2578  for (unsigned channel = 0; channel < channels(); channel++)
2579  offsets[channel] =
2580  (big_endian ? bytestream2_get_be32(&gb) : bytestream2_get_le32(&gb)) +
2581  (channels() + 1) * 4;
2582 
2583  for (unsigned channel = 0; channel < channels(); channel++) {
2584  int count1;
2585 
2586  bytestream2_seek(&gb, offsets[channel], SEEK_SET);
2587  samplesC = samples_p[channel];
2588 
2589  if (avctx.codec_id == AV_CODEC_ID_ADPCM_EA_R1) {
2590  current_sample = sign_extend(bytestream2_get_le16(&gb), 16);
2591  previous_sample = sign_extend(bytestream2_get_le16(&gb), 16);
2592  } else {
2593  current_sample = c->status[channel].predictor;
2594  previous_sample = c->status[channel].prev_sample;
2595  }
2596 
2597  for (count1 = 0; count1 < nb_samples / 28; count1++) {
2598  int byte = bytestream2_get_byte(&gb);
2599  if (byte == 0xEE) { /* only seen in R2 and R3 */
2600  current_sample = sign_extend(bytestream2_get_be16(&gb), 16);
2601  previous_sample = sign_extend(bytestream2_get_be16(&gb), 16);
2602 
2603  for (int count2 = 0; count2 < 28; count2++)
2604  *samplesC++ = sign_extend(bytestream2_get_be16(&gb), 16);
2605  } else {
2606  coeff1 = ea_adpcm_table[byte >> 4];
2607  coeff2 = ea_adpcm_table[(byte >> 4) + 4];
2608  shift = 20 - (byte & 0x0F);
2609 
2610  for (int count2 = 0; count2 < 28; count2++) {
2611  if (count2 & 1)
2612  next_sample = (unsigned)sign_extend(byte, 4) << shift;
2613  else {
2614  byte = bytestream2_get_byte(&gb);
2615  next_sample = (unsigned)sign_extend(byte >> 4, 4) << shift;
2616  }
2617 
2618  next_sample +=
2619  (current_sample * coeff1) + (previous_sample * coeff2);
2620  next_sample = av_clip_int16(next_sample >> 8);
2621 
2622  previous_sample = current_sample;
2623  current_sample = next_sample;
2624  *samplesC++ = current_sample;
2625  }
2626  }
2627  }
2628  if (!count) {
2629  count = count1;
2630  } else if (count != count1) {
2631  av_log(avctx, AV_LOG_WARNING, "per-channel sample count mismatch\n");
2632  count = FFMAX(count, count1);
2633  }
2634 
2635  if (avctx.codec_id != AV_CODEC_ID_ADPCM_EA_R1) {
2636  c->status[channel].predictor = current_sample;
2637  c->status[channel].prev_sample = previous_sample;
2638  }
2639  }
2640 
2641  frame->nb_samples = count * 28;
2642  bytestream2_seek(&gb, 0, SEEK_END);
2643  return AV_OK;
2644  }
2645 };
2646 
2647 class DecoderADPCM_EA_R1 : public DecoderADPCM_EA_RX {
2648  public:
2649  DecoderADPCM_EA_R1() : DecoderADPCM_EA_RX(AV_CODEC_ID_ADPCM_EA_R1) {}
2650 };
2651 
2652 class DecoderADPCM_EA_R2 : public DecoderADPCM_EA_RX {
2653  public:
2654  DecoderADPCM_EA_R2() : DecoderADPCM_EA_RX(AV_CODEC_ID_ADPCM_EA_R2) {}
2655 };
2656 
2657 class DecoderADPCM_EA_R3 : public DecoderADPCM_EA_RX {
2658  public:
2659  DecoderADPCM_EA_R3() : DecoderADPCM_EA_RX(AV_CODEC_ID_ADPCM_EA_R3) {}
2660 };
2661 
2662 class ADPCMDecoderFactory {
2663  public:
2664  static ADPCMDecoder *create(AVCodecID id) {
2665  switch (id) {
2666 #if ENABLE_BROKEN_CODECS
2667  case AV_CODEC_ID_ADPCM_IMA_QT:
2668  return new DecoderADPCM_IMA_QT();
2669 #endif
2670  case AV_CODEC_ID_ADPCM_IMA_WAV:
2671  return new DecoderADPCM_IMA_WAV();
2672  case AV_CODEC_ID_ADPCM_IMA_DK3:
2673  return new DecoderADPCM_IMA_DK3();
2674  case AV_CODEC_ID_ADPCM_IMA_DK4:
2675  return new DecoderADPCM_IMA_DK4();
2676  case AV_CODEC_ID_ADPCM_IMA_WS:
2677  return new DecoderADPCM_IMA_WS();
2678  case AV_CODEC_ID_ADPCM_IMA_SMJPEG:
2679  return new DecoderADPCM_IMA_SMJPEG();
2680  case AV_CODEC_ID_ADPCM_MS:
2681  return new DecoderADPCM_MS();
2682  case AV_CODEC_ID_ADPCM_4XM:
2683  return new DecoderADPCM_4XM();
2684  case AV_CODEC_ID_ADPCM_XA:
2685  return new DecoderADPCM_XA();
2686  case AV_CODEC_ID_ADPCM_EA:
2687  return new DecoderADPCM_EA();
2688  case AV_CODEC_ID_ADPCM_CT:
2689  return new DecoderADPCM_CT();
2690  case AV_CODEC_ID_ADPCM_SWF:
2691  return new DecoderADPCM_SWF();
2692  case AV_CODEC_ID_ADPCM_YAMAHA:
2693  return new DecoderADPCM_YAMAHA();
2694  case AV_CODEC_ID_ADPCM_IMA_AMV:
2695  return new DecoderADPCM_IMA_AMV();
2696  case AV_CODEC_ID_ADPCM_IMA_EA_SEAD:
2697  return new DecoderADPCM_IMA_EA_SEAD();
2698  case AV_CODEC_ID_ADPCM_IMA_EA_EACS:
2699  return new DecoderADPCM_IMA_EA_EACS();
2700  case AV_CODEC_ID_ADPCM_EA_XAS:
2701  return new DecoderADPCM_EA_XAS();
2702  case AV_CODEC_ID_ADPCM_EA_MAXIS_XA:
2703  return new DecoderADPCM_EA_MAXIS_XA();
2704  case AV_CODEC_ID_ADPCM_IMA_ISS:
2705  return new DecoderADPCM_IMA_ISS();
2706  case AV_CODEC_ID_ADPCM_IMA_APC:
2707  return new DecoderADPCM_IMA_APC();
2708  case AV_CODEC_ID_ADPCM_AFC:
2709  return new DecoderADPCM_AFC();
2710  case AV_CODEC_ID_ADPCM_IMA_OKI:
2711  return new DecoderADPCM_IMA_OKI();
2712  case AV_CODEC_ID_ADPCM_DTK:
2713  return new DecoderADPCM_DTK();
2714  case AV_CODEC_ID_ADPCM_IMA_RAD:
2715  return new DecoderADPCM_IMA_RAD();
2716  case AV_CODEC_ID_ADPCM_PSX:
2717  return new DecoderADPCM_PSX();
2718  case AV_CODEC_ID_ADPCM_AICA:
2719  return new DecoderADPCM_AICA();
2720  case AV_CODEC_ID_ADPCM_IMA_DAT4:
2721  return new DecoderADPCM_IMA_DAT4();
2722  case AV_CODEC_ID_ADPCM_MTAF:
2723  return new DecoderADPCM_MTAF();
2724  case AV_CODEC_ID_ADPCM_AGM:
2725  return new DecoderADPCM_AGM();
2726  case AV_CODEC_ID_ADPCM_ARGO:
2727  return new DecoderADPCM_ARGO();
2728  case AV_CODEC_ID_ADPCM_IMA_SSI:
2729  return new DecoderADPCM_IMA_SSI();
2730  case AV_CODEC_ID_ADPCM_ZORK:
2731  return new DecoderADPCM_ZORK();
2732  case AV_CODEC_ID_ADPCM_IMA_APM:
2733  return new DecoderADPCM_IMA_APM();
2734  case AV_CODEC_ID_ADPCM_IMA_ALP:
2735  return new DecoderADPCM_IMA_ALP();
2736  case AV_CODEC_ID_ADPCM_IMA_MTF:
2737  return new DecoderADPCM_IMA_MTF();
2738  case AV_CODEC_ID_ADPCM_IMA_CUNNING:
2739  return new DecoderADPCM_IMA_CUNNING();
2740  case AV_CODEC_ID_ADPCM_IMA_MOFLEX:
2741  return new DecoderADPCM_IMA_MOFLEX();
2742  case AV_CODEC_ID_ADPCM_IMA_ACORN:
2743  return new DecoderADPCM_IMA_ACORN();
2744  case AV_CODEC_ID_ADPCM_XMD:
2745  return new DecoderADPCM_XMD();
2746  case AV_CODEC_ID_ADPCM_SBPRO_4:
2747  return new DecoderADPCM_SBPRO_4();
2748  case AV_CODEC_ID_ADPCM_SBPRO_3:
2749  return new DecoderADPCM_SBPRO_3();
2750  case AV_CODEC_ID_ADPCM_SBPRO_2:
2751  return new DecoderADPCM_SBPRO_2();
2752  case AV_CODEC_ID_ADPCM_THP:
2753  return new DecoderADPCM_THP();
2754  case AV_CODEC_ID_ADPCM_THP_LE:
2755  return new DecoderADPCM_THP_LE();
2756  case AV_CODEC_ID_ADPCM_EA_R1:
2757  return new DecoderADPCM_EA_R1();
2758  case AV_CODEC_ID_ADPCM_EA_R2:
2759  return new DecoderADPCM_EA_R2();
2760  case AV_CODEC_ID_ADPCM_EA_R3:
2761  return new DecoderADPCM_EA_R3();
2762  default:
2763  av_log(avctx, AV_LOG_ERROR, "ERROR: decoder [%d] not implemented\n",
2764  id);
2765  return nullptr;
2766  };
2767  }
2768 };
2769 
2770 } // namespace adpcm_ffmpeg
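A minimal usage sketch for the factory above: create a decoder for a codec ID, call begin() with the stream parameters, feed encoded blocks to decode(), and read the interleaved 16-bit PCM from the returned frame. Buffer and function names are illustrative, and it is assumed here that the caller owns and deletes the object returned by create().

#include <cstddef>
#include <cstdint>
#include "ADPCMDecoder.h"

using namespace adpcm_ffmpeg;

// Illustrative only: decode one ADPCM block; encoded/encoded_len stand in for
// data read from a file or stream.
void decode_block(uint8_t *encoded, size_t encoded_len) {
  ADPCMDecoder *decoder = ADPCMDecoderFactory::create(AV_CODEC_ID_ADPCM_MS);
  if (decoder == nullptr) return;                // codec not supported

  if (decoder->begin(44100, 2)) {                // sample rate, channels
    AVFrame &frame = decoder->decode(encoded, encoded_len);
    int16_t *pcm = (int16_t *)frame.data[0];     // interleaved 16-bit samples
    // frame.nb_samples samples per channel are now available in pcm[]
    (void)pcm;
    decoder->end();
  }
  delete decoder;
}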