172    copy = av_mallocz(4* entries);
180    for (i = 0; i < entries; i++)
181        energy += abs(buf[i]);
183    low_bits = bits_to_store(energy / (entries * 2));
187    put_bits(pb, 4, low_bits);
190    for (i = 0; i < entries; i++)
192        put_bits(pb, low_bits, abs(buf[i]));
193        copy[i] = abs(buf[i]) >> low_bits;
198    bits = av_mallocz(4* entries*max);
205    for (i = 0; i <= max; i++)
207        for (j = 0; j < entries; j++)
209            bits[x++] = copy[j] > i;
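The listing calls bits_to_store() (line 183) without showing it. A minimal sketch of such a helper, assuming it simply returns the number of bits needed to represent a non-negative value (the name comes from the call site; the exact behaviour is an assumption, not code from the file):

    // Hypothetical helper: count how many bits are needed to store x.
    // Assumption: returns 0 for x == 0.
    static inline int bits_to_store(unsigned int x)
    {
        int bits = 0;
        while (x) {
            bits++;
            x >>= 1;
        }
        return bits;
    }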
212    // store bitstream
215    int steplet = step >> 8;
217    if (pos + steplet > x)
220    for (i = 0; i < steplet; i++)
221        if (bits[i+pos] != dominant)
224    put_bits(pb, 1, any);
229    step += step / ADAPT_LEVEL;
235    while (((pos + interloper) < x) && (bits[pos + interloper] == dominant))
239    write_uint_max(pb, interloper, (step >> 8) - 1);
241    pos += interloper + 1;
242    step -= step / ADAPT_LEVEL;
248    dominant = !dominant;
253    for (i = 0; i < entries; i++)
255        put_bits(pb, 1, buf[i] < 0);
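write_uint_max() (line 239) is also not part of the excerpt. A plausible sketch, assuming it writes a value no larger than a known maximum using only as many bits as that maximum requires (this reading is inferred from the name and arguments; the real routine may trim the top bit more cleverly). It uses FFmpeg's PutBitContext API as the listing does, plus the bits_to_store() sketch above:

    // Hypothetical counterpart to the call at line 239: write `value`
    // (0 <= value <= max) with a fixed width derived from `max`, LSB first.
    static void write_uint_max(PutBitContext *pb, unsigned int value, unsigned int max)
    {
        if (!max)
            return;                       // only one possible value: store nothing
        int bits = bits_to_store(max);
        for (int i = 0; i < bits; i++)
            put_bits(pb, 1, (value >> i) & 1);
    }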
257    // av_free(bits);
258    // av_free(copy);
268    int *bits = av_mallocz(4* entries);
275    low_bits = get_bits(gb, 4);
278    for (i = 0; i < entries; i++)
279        buf[i] = get_bits(gb, low_bits);
282    // av_log(NULL, AV_LOG_INFO, "entries: %d, low bits: %d\n", entries, low_bits);
284    while (n_zeros < entries)
286    int steplet = step >> 8;
290    for (i = 0; i < steplet; i++)
291        bits[x++] = dominant;
296    step += step / ADAPT_LEVEL;
300    int actual_run = read_uint_max(gb, steplet-1);
302    // av_log(NULL, AV_LOG_INFO, "actual run: %d\n", actual_run);
304    for (i = 0; i < actual_run; i++)
305        bits[x++] = dominant;
307    bits[x++] = !dominant;
310    n_zeros += actual_run;
314    step -= step / ADAPT_LEVEL;
320    dominant = !dominant;
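read_uint_max() (line 300) mirrors write_uint_max() on the decode side and is likewise not shown. A sketch under the same assumptions as the writer above, using FFmpeg's GetBitContext API:

    // Hypothetical reader for the call at line 300: read back a value
    // written with the write_uint_max() sketch (fixed width, LSB first).
    static unsigned int read_uint_max(GetBitContext *gb, unsigned int max)
    {
        unsigned int value = 0;
        if (!max)
            return 0;
        int bits = bits_to_store(max);
        for (int i = 0; i < bits; i++)
            if (get_bits1(gb))
                value |= 1u << i;
        return value;
    }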
324    // reconstruct unsigned values
326    for (i = 0; n_zeros < entries; i++)
333    level += 1 << low_bits;
336    if (buf[pos] >= level)
343    buf[pos] += 1 << low_bits;
349    // av_free(bits);
352    for (i = 0; i < entries; i++)
353        if (buf[i] && get_bits1(gb))
356    // av_log(NULL, AV_LOG_INFO, "zeros: %d pos: %d\n", n_zeros, pos);
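Putting the decode passes together: the reader first pulls low_bits low-order bits per entry (lines 278-279), then rebuilds the high part from the run-coded bit planes (lines 324-343), and finally applies the sign bits (lines 352-353). An illustrative round trip of one value under that reading (purely for illustration, not code from the codec):

    #include <stdlib.h>   /* abs() */

    // Illustrative decomposition of one sample, assuming the split described
    // above: low bits stored verbatim, the remaining magnitude bit-plane
    // coded, and the sign sent last.
    static int split_and_rejoin(int value, int low_bits)
    {
        unsigned mag  = abs(value);
        unsigned low  = mag & ((1u << low_bits) - 1);  /* written at line 192    */
        unsigned high = mag >> low_bits;               /* run-coded via copy[]    */
        int decoded   = (int)((high << low_bits) | low);
        return value < 0 ? -decoded : decoded;         /* sign bit, lines 255/353 */
    }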
384    int *k_ptr = &(k[order-2]),
385        *state_ptr = &(state[order-2]);
386    for (i = order-2; i >= 0; i--, k_ptr--, state_ptr--)
388        int k_value = *k_ptr, state_value = *state_ptr;
389        x -= shift_down(k_value * state_value, LATTICE_SHIFT);
390        state_ptr[1] = state_value + shift_down(k_value * x, LATTICE_SHIFT);
393    for (i = order-2; i >= 0; i--)
395        x -= shift_down(k[i] * state[i], LATTICE_SHIFT);
396        state[i+1] = state[i] + shift_down(k[i] * x, LATTICE_SHIFT);
400    // don't drift too far, to avoid overflows
401    if (x > (SAMPLE_FACTOR<<16)) x = (SAMPLE_FACTOR<<16);
402    if (x < -(SAMPLE_FACTOR<<16)) x = -(SAMPLE_FACTOR<<16);
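shift_down() appears throughout the lattice code above (lines 389-396) but is defined elsewhere. Judging from its use on LATTICE_SHIFT fixed-point products, it is a rounding arithmetic right shift; one plausible definition (an assumption, not the file's actual macro):

    // Assumed rounding right shift for fixed-point products:
    // shifts a down by b bits, rounding to nearest.
    #define shift_down(a, b) (((a) >> (b)) + (((a) >> ((b) - 1)) & 1))

The decoder's prediction loop (lines 887-891) runs the same lattice, so whatever the exact definition, the rounding has to match on both sides.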
411    // actual whitened result as it goes.
413    static void modified_levinson_durbin(int *window, int window_entries,
414            int *out, int out_entries, int channels, int *tap_quant)
417    int *state = av_mallocz(4* window_entries);
419    memcpy(state, window, 4* window_entries);
421    for (i = 0; i < out_entries; i++)
423        int step = (i+1)*channels, k, j;
424        double xx = 0.0, xy = 0.0;
426        int *x_ptr = &(window[step]), *state_ptr = &(state[0]);
427        j = window_entries - step;
428        for (;j>=0;j--,x_ptr++,state_ptr++)
430            double x_value = *x_ptr, state_value = *state_ptr;
431            xx += state_value*state_value;
432            xy += x_value*state_value;
435        for (j = 0; j <= (window_entries - step); j++)
437            double stepval = window[step+j], stateval = window[j];
438            // xx += (double)window[j]*(double)window[j];
439            // xy += (double)window[step+j]*(double)window[j];
440            xx += stateval*stateval;
441            xy += stepval*stateval;
447        k = (int)(floor(-xy/xx * (double)LATTICE_FACTOR / (double)(tap_quant[i]) + 0.5));
449        if (k > (LATTICE_FACTOR/tap_quant[i]))
450            k = LATTICE_FACTOR/tap_quant[i];
451        if (-k > (LATTICE_FACTOR/tap_quant[i]))
452            k = -(LATTICE_FACTOR/tap_quant[i]);
458        x_ptr = &(window[step]);
459        state_ptr = &(state[0]);
460        j = window_entries - step;
461        for (;j>=0;j--,x_ptr++,state_ptr++)
463            int x_value = *x_ptr, state_value = *state_ptr;
464            *x_ptr = x_value + shift_down(k*state_value,LATTICE_SHIFT);
465            *state_ptr = state_value + shift_down(k*x_value, LATTICE_SHIFT);
468        for (j=0; j <= (window_entries - step); j++)
470            int stepval = window[step+j], stateval=state[j];
471            window[step+j] += shift_down(k * stateval, LATTICE_SHIFT);
472            state[j] += shift_down(k * stepval, LATTICE_SHIFT);
508    if (avctx->channels > MAX_CHANNELS)
510        av_log(avctx, AV_LOG_ERROR, "Only mono and stereo streams are supported by now\n");
511        return -1; /* only stereo or mono for now */
514    if (avctx->channels == 2)
515        s->decorrelation = MID_SIDE;
517    if (avctx->codec->id == CODEC_ID_SONIC_LS)
522        s->quantization = 0.0;
528        s->quantization = 1.0;
532    if ((s->num_taps < 32) || (s->num_taps > 1024) ||
533        ((s->num_taps>>5)<<5 != s->num_taps))
535        av_log(avctx, AV_LOG_ERROR, "Invalid number of taps\n");
540    s->tap_quant = av_mallocz(4* s->num_taps);
541    for (i = 0; i < s->num_taps; i++)
542        s->tap_quant[i] = (int)(sqrt(i+1));
544    s->channels = avctx->channels;
545    s->samplerate = avctx->sample_rate;
550    s->tail = av_mallocz(4* s->num_taps*s->channels);
553    s->tail_size = s->num_taps*s->channels;
555    s->predictor_k = av_mallocz(4 * s->num_taps);
556    if (!s->predictor_k)
559    for (i = 0; i < s->channels; i++)
561        s->coded_samples[i] = av_mallocz(4* s->block_align);
562        if (!s->coded_samples[i])
566    s->int_samples = av_mallocz(4* s->frame_size);
568    s->window_size = ((2*s->tail_size)+s->frame_size);
569    s->window = av_mallocz(4* s->window_size);
573    avctx->extradata = av_mallocz(16);
574    if (!avctx->extradata)
576    init_put_bits(&pb, avctx->extradata, 16*8);
578    put_bits(&pb, 2, version); // version
579    if (version == 1)
581        put_bits(&pb, 2, s->channels);
582        put_bits(&pb, 4, code_samplerate(s->samplerate));
584    put_bits(&pb, 1, s->lossless);
585    if (!s->lossless)
586        put_bits(&pb, 3, SAMPLE_SHIFT); // XXX FIXME: sample precision
587    put_bits(&pb, 2, s->decorrelation);
588    put_bits(&pb, 2, s->downsampling);
589    put_bits(&pb, 5, (s->num_taps >> 5)-1); // 32..1024
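code_samplerate() (line 582) and the samplerate_table[] the decoder indexes (line 776) are not included in the excerpt. A plausible pairing, assuming a small fixed list of supported rates written as a 4-bit index; the exact rates and their order here are assumptions:

    // Hypothetical table/encoder pair for the 4-bit sample-rate field.
    static const int samplerate_table[] =
        { 44100, 22050, 11025, 96000, 48000, 32000, 24000, 16000, 8000 };

    static int code_samplerate(int samplerate)
    {
        for (int i = 0; i < (int)(sizeof(samplerate_table)/sizeof(samplerate_table[0])); i++)
            if (samplerate_table[i] == samplerate)
                return i;
        return -1;   // unsupported rate
    }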
636    // short -> internal
637    for (i = 0; i < s->frame_size; i++)
638        s->int_samples[i] = samples[i];
640    if (!s->lossless)
641        for (i = 0; i < s->frame_size; i++)
642            s->int_samples[i] = s->int_samples[i] << SAMPLE_SHIFT;
644    switch(s->decorrelation)
647    for (i = 0; i < s->frame_size; i += s->channels)
649        s->int_samples[i] += s->int_samples[i+1];
650        s->int_samples[i+1] -= shift(s->int_samples[i], 1);
654    for (i = 0; i < s->frame_size; i += s->channels)
655        s->int_samples[i+1] -= s->int_samples[i];
658    for (i = 0; i < s->frame_size; i += s->channels)
659        s->int_samples[i] -= s->int_samples[i+1];
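The mid/side branch above halves the mid channel with shift(x, 1) (line 650), another helper that is not shown. Reading it as a rounding right shift gives a sketch like the following (again an assumption based on usage, not the file's actual macro):

    // Assumed rounding right shift used by the mid/side decorrelation.
    #define shift(a, b) (((a) + (1 << ((b) - 1))) >> (b))

The decoder undoes this at line 904 by adding shift(mid, 1) back, so both sides must round the same way.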
663    memset(s->window, 0, 4* s->window_size);
665    for (i = 0; i < s->tail_size; i++)
666        s->window[x++] = s->tail[i];
668    for (i = 0; i < s->frame_size; i++)
669        s->window[x++] = s->int_samples[i];
671    for (i = 0; i < s->tail_size; i++)
674    for (i = 0; i < s->tail_size; i++)
675        s->tail[i] = s->int_samples[s->frame_size - s->tail_size + i];
678    modified_levinson_durbin(s->window, s->window_size,
679            s->predictor_k, s->num_taps, s->channels, s->tap_quant);
680    if (intlist_write(&pb, s->predictor_k, s->num_taps, 0) < 0)
683    for (ch = 0; ch < s->channels; ch++)
686        for (i = 0; i < s->block_align; i++)
689            for (j = 0; j < s->downsampling; j++, x += s->channels)
691            s->coded_samples[ch][i] = sum;
695    // simple rate control code
696    if (!s->lossless)
698        double energy1 = 0.0, energy2 = 0.0;
699        for (ch = 0; ch < s->channels; ch++)
701            for (i = 0; i < s->block_align; i++)
703                double sample = s->coded_samples[ch][i];
704                energy2 += sample*sample;
705                energy1 += fabs(sample);
709        energy2 = sqrt(energy2/(s->channels*s->block_align));
710        energy1 = sqrt(2.0)*energy1/(s->channels*s->block_align);
712        // increase bitrate when samples are like a gaussian distribution
713        // reduce bitrate when samples are like a two-tailed exponential distribution
715        if (energy2 > energy1)
716            energy2 += (energy2-energy1)*RATE_VARIATION;
718        quant = (int)(BASE_QUANT*s->quantization*energy2/SAMPLE_FACTOR);
719        // av_log(avctx, AV_LOG_DEBUG, "quant: %d energy: %f / %f\n", quant, energy1, energy2);
726        set_ue_golomb(&pb, quant);
728        quant *= SAMPLE_FACTOR;
731    // write out coded samples
732    for (ch = 0; ch < s->channels; ch++)
735        for (i = 0; i < s->block_align; i++)
736            s->coded_samples[ch][i] = divide(s->coded_samples[ch][i], quant);
738        if (intlist_write(&pb, s->coded_samples[ch], s->block_align, 1) < 0)
742    // av_log(avctx, AV_LOG_DEBUG, "used bytes: %d\n", (put_bits_count(&pb)+7)/8);
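divide() (line 736) quantizes each residual by quant and is not shown in the excerpt. Since the decoder simply multiplies back by quant (line 891), a rounded integer division is the natural reading; a sketch under that assumption:

    // Assumed rounding integer division used for quantization at line 736.
    static inline int divide(int a, int b)
    {
        if (a < 0)
            return -((-a + b / 2) / b);   // round the magnitude to nearest
        return (a + b / 2) / b;
    }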
751    SonicContext *s = avctx->priv_data;
752    GetBitContext gb;
755    s->channels = avctx->channels;
756    s->samplerate = avctx->sample_rate;
758    if (!avctx->extradata)
760        av_log(avctx, AV_LOG_ERROR, "No mandatory headers present\n");
764    init_get_bits(&gb, avctx->extradata, avctx->extradata_size);
766    version = get_bits(&gb, 2);
769    av_log(avctx, AV_LOG_ERROR, "Unsupported Sonic version, please report\n");
773    if (version == 1)
775        s->channels = get_bits(&gb, 2);
776        s->samplerate = samplerate_table[get_bits(&gb, 4)];
777        av_log(avctx, AV_LOG_INFO, "Sonicv2 chans: %d samprate: %d\n",
778            s->channels, s->samplerate);
781    if (s->channels > MAX_CHANNELS)
783        av_log(avctx, AV_LOG_ERROR, "Only mono and stereo streams are supported by now\n");
787    s->lossless = get_bits1(&gb);
788    if (!s->lossless)
789        skip_bits(&gb, 3); // XXX FIXME
790    s->decorrelation = get_bits(&gb, 2);
792    s->downsampling = get_bits(&gb, 2);
793    s->num_taps = (get_bits(&gb, 5)+1)<<5;
794    if (get_bits1(&gb)) // XXX FIXME
795        av_log(avctx, AV_LOG_INFO, "Custom quant table\n");
797    s->block_align = (int)(2048.0*(s->samplerate/44100))/s->downsampling;
798    s->frame_size = s->channels*s->block_align*s->downsampling;
799    // avctx->frame_size = s->block_align;
801    av_log(avctx, AV_LOG_INFO, "Sonic: ver: %d ls: %d dr: %d taps: %d block: %d frame: %d downsamp: %d\n",
802        version, s->lossless, s->decorrelation, s->num_taps, s->block_align, s->frame_size, s->downsampling);
805    s->tap_quant = av_mallocz(4* s->num_taps);
806    for (i = 0; i < s->num_taps; i++)
807        s->tap_quant[i] = (int)(sqrt(i+1));
809    s->predictor_k = av_mallocz(4* s->num_taps);
811    for (i = 0; i < s->channels; i++)
813        s->predictor_state[i] = av_mallocz(4* s->num_taps);
814        if (!s->predictor_state[i])
818    for (i = 0; i < s->channels; i++)
820        s->coded_samples[i] = av_mallocz(4* s->block_align);
821        if (!s->coded_samples[i])
824    s->int_samples = av_mallocz(4* s->frame_size);
856    if (buf_size == 0) return 0;
858    // av_log(NULL, AV_LOG_INFO, "buf_size: %d\n", buf_size);
860    init_get_bits(&gb, buf, buf_size*8);
862    intlist_read(&gb, s->predictor_k, s->num_taps, 0);
865    for (i = 0; i < s->num_taps; i++)
866        s->predictor_k[i] *= s->tap_quant[i];
871    quant = get_ue_golomb(&gb) * SAMPLE_FACTOR;
873    // av_log(NULL, AV_LOG_INFO, "quant: %d\n", quant);
875    for (ch = 0; ch < s->channels; ch++)
879        predictor_init_state(s->predictor_k, s->predictor_state[ch], s->num_taps);
881        intlist_read(&gb, s->coded_samples[ch], s->block_align, 1);
883        for (i = 0; i < s->block_align; i++)
885            for (j = 0; j < s->downsampling - 1; j++)
887                s->int_samples[x] = predictor_calc_error(s->predictor_k, s->predictor_state[ch], s->num_taps, 0);
891            s->int_samples[x] = predictor_calc_error(s->predictor_k, s->predictor_state[ch], s->num_taps, s->coded_samples[ch][i] * quant);
895        for (i = 0; i < s->num_taps; i++)
896            s->predictor_state[ch][i] = s->int_samples[s->frame_size - s->channels + ch - i*s->channels];
899    switch(s->decorrelation)
902    for (i = 0; i < s->frame_size; i += s->channels)
904        s->int_samples[i+1] += shift(s->int_samples[i], 1);
905        s->int_samples[i] -= s->int_samples[i+1];
909    for (i = 0; i < s->frame_size; i += s->channels)
910        s->int_samples[i+1] += s->int_samples[i];
913    for (i = 0; i < s->frame_size; i += s->channels)
914        s->int_samples[i] += s->int_samples[i+1];
918    if (!s->lossless)
919        for (i = 0; i < s->frame_size; i++)
920            s->int_samples[i] = shift(s->int_samples[i], SAMPLE_SHIFT);
922    // internal -> short
923    for (i = 0; i < s->frame_size; i++)
925        if (s->int_samples[i] > 32767)
927        else if (s->int_samples[i] < -32768)
930        samples[i] = s->int_samples[i];
933    align_get_bits(&gb);