/* Copyright (C) 2002-2006 Jean-Marc Valin
   File: filters.c
   Various analysis/synthesis filters

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   - Neither the name of the Xiph.org Foundation nor the names of its
   contributors may be used to endorse or promote products derived from
   this software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
   CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include "filters.h"
#include "stack_alloc.h"
#include "arch.h"
#include "math_approx.h"
#include "os_support.h"

#ifdef _USE_SSE
#include "filters_sse.h"
#elif defined (ARM4_ASM) || defined(ARM5E_ASM)
#include "filters_arm4.h"
#elif defined (BFIN_ASM)
#include "filters_bfin.h"
#endif
54
void bw_lpc(spx_word16_t gamma, const spx_coef_t *lpc_in, spx_coef_t *lpc_out, int order)
57
spx_word16_t tmp=gamma;
60
lpc_out[i] = MULT16_16_P15(tmp,lpc_in[i]);
61
tmp = MULT16_16_P15(tmp, gamma);
65
/* Clamp every element of vec into [min_val, max_val]; any NaN is replaced by 0.
   The negated range test is deliberate: a NaN fails both comparisons, so it
   enters the fix-up branch and falls through to the final else. */
void sanitize_values32(spx_word32_t *vec, spx_word32_t min_val, spx_word32_t max_val, int len)
{
   int i;
   for (i=0;i<len;i++)
   {
      /* It's important we do the test that way so we can catch NaNs, which are neither greater nor smaller */
      if (!(vec[i]>=min_val && vec[i] <= max_val))
      {
         if (vec[i] < min_val)
            vec[i] = min_val;
         else if (vec[i] > max_val)
            vec[i] = max_val;
         else /* Has to be NaN */
            vec[i] = 0;
      }
   }
}
/* Second-order direct-form-II high-pass filter. filtID (clamped to 0..4)
   selects one of five fixed biquads: Pcoef holds the denominator (poles),
   Zcoef the numerator (zeros). mem[0..1] is the 2-tap filter state.
   Fixed-point tables are Q14; den[1]/den[2] are applied via
   MULT16_32_Q15(...)<<1 to recover the Q14 scale of vout. */
void highpass(const spx_word16_t *x, spx_word16_t *y, int len, int filtID, spx_mem_t *mem)
{
   int i;
#ifdef FIXED_POINT
   const spx_word16_t Pcoef[5][3] = {{16384, -31313, 14991}, {16384, -31569, 15249}, {16384, -31677, 15328}, {16384, -32313, 15947}, {16384, -22446, 6537}};
   const spx_word16_t Zcoef[5][3] = {{15672, -31344, 15672}, {15802, -31601, 15802}, {15847, -31694, 15847}, {16162, -32322, 16162}, {14418, -28836, 14418}};
#else
   const spx_word16_t Pcoef[5][3] = {{1.00000f, -1.91120f, 0.91498f}, {1.00000f, -1.92683f, 0.93071f}, {1.00000f, -1.93338f, 0.93553f}, {1.00000f, -1.97226f, 0.97332f}, {1.00000f, -1.37000f, 0.39900f}};
   const spx_word16_t Zcoef[5][3] = {{0.95654f, -1.91309f, 0.95654f}, {0.96446f, -1.92879f, 0.96446f}, {0.96723f, -1.93445f, 0.96723f}, {0.98645f, -1.97277f, 0.98645f}, {0.88000f, -1.76000f, 0.88000f}};
#endif
   const spx_word16_t *den, *num;
   if (filtID>4)
      filtID=4;

   den = Pcoef[filtID]; num = Zcoef[filtID];
   for (i=0;i<len;i++)
   {
      spx_word16_t yi;
      spx_word32_t vout = ADD32(MULT16_16(num[0], x[i]),mem[0]);
      yi = EXTRACT16(SATURATE(PSHR32(vout,14),32767));
      mem[0] = ADD32(MAC16_16(mem[1], num[1],x[i]), SHL32(MULT16_32_Q15(-den[1],vout),1));
      mem[1] = ADD32(MULT16_16(num[2],x[i]), SHL32(MULT16_32_Q15(-den[2],vout),1));
      y[i] = yi;
   }
}
#ifdef FIXED_POINT

/* FIXME: These functions are ugly and probably introduce too much error */
/* Scale a 32-bit signal by a Q14 factor: y[i] = scale * x[i].
   The input is pre-shifted down by 7 bits so the 16x32 multiply does not
   overflow, then shifted back up, trading precision for headroom. */
void signal_mul(const spx_sig_t *x, spx_sig_t *y, spx_word32_t scale, int len)
{
   int i;
   for (i=0;i<len;i++)
   {
      y[i] = SHL32(MULT16_32_Q14(EXTRACT16(SHR32(x[i],7)),scale),7);
   }
}

#endif
#ifdef FIXED_POINT

/* Divide a 16-bit signal by a (SIG_SHIFT-scaled) gain: y[i] ~= x[i]/scale.
   Three branches pick a shift arrangement based on the magnitude of scale
   so the reciprocal scale_1 keeps as much precision as possible without
   overflowing the 16-bit multiplies. */
void signal_div(const spx_word16_t *x, spx_word16_t *y, spx_word32_t scale, int len)
{
   int i;
   if (scale > SHL32(EXTEND32(SIG_SCALING), 8))
   {
      /* Large divisor: reciprocal fits a plain Q15 multiply */
      spx_word16_t scale_1;
      scale = PSHR32(scale, SIG_SHIFT);
      scale_1 = EXTRACT16(PDIV32_16(SHL32(EXTEND32(SIG_SCALING),7),scale));
      for (i=0;i<len;i++)
      {
         y[i] = MULT16_16_P15(scale_1, x[i]);
      }
   } else if (scale > SHR32(EXTEND32(SIG_SCALING), 2)) {
      spx_word16_t scale_1;
      scale = PSHR32(scale, SIG_SHIFT-5);
      scale_1 = DIV32_16(SHL32(EXTEND32(SIG_SCALING),3),scale);
      for (i=0;i<len;i++)
      {
         y[i] = PSHR32(MULT16_16(scale_1, SHL16(x[i],2)),8);
      }
   } else {
      /* Tiny divisor: clamp to avoid division blow-up */
      spx_word16_t scale_1;
      scale = PSHR32(scale, SIG_SHIFT-7);
      if (scale < 5)
         scale = 5;
      scale_1 = DIV32_16(SHL32(EXTEND32(SIG_SCALING),3),scale);
      for (i=0;i<len;i++)
      {
         y[i] = PSHR32(MULT16_16(scale_1, SHL16(x[i],2)),6);
      }
   }
}

#endif
#ifndef FIXED_POINT

/* Float build: plain element-wise scaling, y[i] = scale * x[i]. */
void signal_mul(const spx_sig_t *x, spx_sig_t *y, spx_word32_t scale, int len)
{
   int i;
   for (i=0;i<len;i++)
      y[i] = scale*x[i];
}

#endif
#ifndef FIXED_POINT

/* Float build: element-wise division implemented as one reciprocal plus
   per-sample multiplies (hoists the expensive divide out of the loop). */
void signal_div(const spx_sig_t *x, spx_sig_t *y, spx_word32_t scale, int len)
{
   int i;
   float scale_1 = 1/scale;
   for (i=0;i<len;i++)
      y[i] = scale_1*x[i];
}

#endif
#ifdef FIXED_POINT

/* Root-mean-square of a 32-bit signal. A first pass finds the peak magnitude,
   which picks sig_shift so samples fit in 16 bits; the squared sums are
   accumulated four at a time in a 32-bit partial (sum2) and folded into sum
   with a >>6 to avoid overflow. The shift is undone on the returned RMS.
   NOTE(review): len is assumed to be a multiple of 4 (unrolled loop). */
spx_word16_t compute_rms(const spx_sig_t *x, int len)
{
   int i;
   spx_word32_t sum=0;
   spx_sig_t max_val=1;
   int sig_shift;

   for (i=0;i<len;i++)
   {
      spx_sig_t tmp = x[i];
      if (tmp<0)
         tmp = -tmp;
      if (tmp > max_val)
         max_val = tmp;
   }

   sig_shift=0;
   while (max_val>16383)
   {
      sig_shift++;
      max_val >>= 1;
   }

   for (i=0;i<len;i+=4)
   {
      spx_word32_t sum2=0;
      spx_word16_t tmp;
      tmp = EXTRACT16(SHR32(x[i],sig_shift));
      sum2 = MAC16_16(sum2,tmp,tmp);
      tmp = EXTRACT16(SHR32(x[i+1],sig_shift));
      sum2 = MAC16_16(sum2,tmp,tmp);
      tmp = EXTRACT16(SHR32(x[i+2],sig_shift));
      sum2 = MAC16_16(sum2,tmp,tmp);
      tmp = EXTRACT16(SHR32(x[i+3],sig_shift));
      sum2 = MAC16_16(sum2,tmp,tmp);
      sum = ADD32(sum,SHR32(sum2,6));
   }

   return EXTRACT16(PSHR32(SHL32(EXTEND32(spx_sqrt(DIV32(sum,len))),(sig_shift+3)),SIG_SHIFT));
}

#endif
#ifdef FIXED_POINT

/* RMS of a 16-bit signal. Loud signals (peak > 16383) are pre-shifted down
   by 1 to make room for the squared accumulation; quiet signals are shifted
   up by up to 3 bits to retain precision. Either way the output compensates
   so the result stays on the same scale (net x8 relative to the raw sqrt).
   NOTE(review): len is assumed to be a multiple of 4 (unrolled loop). */
spx_word16_t compute_rms16(const spx_word16_t *x, int len)
{
   int i;
   spx_word16_t max_val=10;

   for (i=0;i<len;i++)
   {
      spx_sig_t tmp = x[i];
      if (tmp<0)
         tmp = -tmp;
      if (tmp > max_val)
         max_val = tmp;
   }
   if (max_val>16383)
   {
      spx_word32_t sum=0;
      for (i=0;i<len;i+=4)
      {
         spx_word32_t sum2=0;
         sum2 = MAC16_16(sum2,SHR16(x[i],1),SHR16(x[i],1));
         sum2 = MAC16_16(sum2,SHR16(x[i+1],1),SHR16(x[i+1],1));
         sum2 = MAC16_16(sum2,SHR16(x[i+2],1),SHR16(x[i+2],1));
         sum2 = MAC16_16(sum2,SHR16(x[i+3],1),SHR16(x[i+3],1));
         sum = ADD32(sum,SHR32(sum2,6));
      }
      return SHL16(spx_sqrt(DIV32(sum,len)),4);
   } else {
      spx_word32_t sum=0;
      int sig_shift=0;
      /* Boost quiet signals: pick the largest shift that keeps x<<shift < 16384 */
      if (max_val < 8192)
         sig_shift=1;
      if (max_val < 4096)
         sig_shift=2;
      if (max_val < 2048)
         sig_shift=3;
      for (i=0;i<len;i+=4)
      {
         spx_word32_t sum2=0;
         sum2 = MAC16_16(sum2,SHL16(x[i],sig_shift),SHL16(x[i],sig_shift));
         sum2 = MAC16_16(sum2,SHL16(x[i+1],sig_shift),SHL16(x[i+1],sig_shift));
         sum2 = MAC16_16(sum2,SHL16(x[i+2],sig_shift),SHL16(x[i+2],sig_shift));
         sum2 = MAC16_16(sum2,SHL16(x[i+3],sig_shift),SHL16(x[i+3],sig_shift));
         sum = ADD32(sum,SHR32(sum2,6));
      }
      return SHL16(spx_sqrt(DIV32(sum,len)),3-sig_shift);
   }
}

#endif
#ifdef FIXED_POINT
#ifndef OVERRIDE_NORMALIZE16
/* Narrow a 32-bit signal to 16 bits: finds the smallest right shift that
   brings the peak magnitude under max_scale, applies it to every sample,
   and returns that shift so the caller can undo the scaling later. */
int normalize16(const spx_sig_t *x, spx_word16_t *y, spx_sig_t max_scale, int len)
{
   int i;
   spx_sig_t max_val=1;
   int sig_shift;

   for (i=0;i<len;i++)
   {
      spx_sig_t tmp = x[i];
      if (tmp<0)
         tmp = -tmp;
      if (tmp > max_val)
         max_val = tmp;
   }

   sig_shift=0;
   while (max_val>max_scale)
   {
      sig_shift++;
      max_val >>= 1;
   }

   for (i=0;i<len;i++)
      y[i] = EXTRACT16(SHR32(x[i], sig_shift));

   return sig_shift;
}
#endif
#endif
#ifndef FIXED_POINT

/* Float build: straightforward RMS; the .1 bias keeps the sqrt argument
   strictly positive (and the result non-zero) for silent input. */
spx_word16_t compute_rms(const spx_sig_t *x, int len)
{
   int i;
   float sum=0;
   for (i=0;i<len;i++)
   {
      sum += x[i]*x[i];
   }
   return sqrt(.1+sum/len);
}

#endif
#ifndef FIXED_POINT

/* Float build: 16-bit and 32-bit samples share the same RMS computation. */
spx_word16_t compute_rms16(const spx_word16_t *x, int len)
{
   return compute_rms(x, len);
}

#endif
#ifndef OVERRIDE_FILTER_MEM16
319
void filter_mem16(const spx_word16_t *x, const spx_coef_t *num, const spx_coef_t *den, spx_word16_t *y, int N, int ord, spx_mem_t *mem, char *stack)
322
spx_word16_t xi,yi,nyi;
326
yi = EXTRACT16(SATURATE(ADD32(EXTEND32(x[i]),PSHR32(mem[0],LPC_SHIFT)),32767));
328
for (j=0;j<ord-1;j++)
330
mem[j] = MAC16_16(MAC16_16(mem[j+1], num[j],xi), den[j],nyi);
332
mem[ord-1] = ADD32(MULT16_16(num[ord-1],xi), MULT16_16(den[ord-1],nyi));
338
#ifndef OVERRIDE_IIR_MEM16
339
void iir_mem16(const spx_word16_t *x, const spx_coef_t *den, spx_word16_t *y, int N, int ord, spx_mem_t *mem, char *stack)
346
yi = EXTRACT16(SATURATE(ADD32(EXTEND32(x[i]),PSHR32(mem[0],LPC_SHIFT)),32767));
348
for (j=0;j<ord-1;j++)
350
mem[j] = MAC16_16(mem[j+1],den[j],nyi);
352
mem[ord-1] = MULT16_16(den[ord-1],nyi);
358
#ifndef OVERRIDE_FIR_MEM16
359
void fir_mem16(const spx_word16_t *x, const spx_coef_t *num, spx_word16_t *y, int N, int ord, spx_mem_t *mem, char *stack)
367
yi = EXTRACT16(SATURATE(ADD32(EXTEND32(x[i]),PSHR32(mem[0],LPC_SHIFT)),32767));
368
for (j=0;j<ord-1;j++)
370
mem[j] = MAC16_16(mem[j+1], num[j],xi);
372
mem[ord-1] = MULT16_16(num[ord-1],xi);
379
/* Zero-state synthesis through the perceptual weighting filter:
   runs 1/A(z) (iir_mem16 with ak) then W(z)=A(z/g1)/A(z/g2)
   (filter_mem16 with awk1/awk2), zeroing the filter memory before each
   stage so only the current frame contributes. y may alias xx. */
void syn_percep_zero16(const spx_word16_t *xx, const spx_coef_t *ak, const spx_coef_t *awk1, const spx_coef_t *awk2, spx_word16_t *y, int N, int ord, char *stack)
{
   int i;
   VARDECL(spx_mem_t *mem);
   ALLOC(mem, ord, spx_mem_t);
   for (i=0;i<ord;i++)
      mem[i]=0;
   iir_mem16(xx, ak, y, N, ord, mem, stack);
   for (i=0;i<ord;i++)
      mem[i]=0;
   filter_mem16(y, awk1, awk2, y, N, ord, mem, stack);
}
/* Zero-state residue (inverse of syn_percep_zero16): applies the pole-zero
   stage (ak/awk1) then the all-zero stage (awk2), zeroing filter memory
   before each stage. y may alias xx. */
void residue_percep_zero16(const spx_word16_t *xx, const spx_coef_t *ak, const spx_coef_t *awk1, const spx_coef_t *awk2, spx_word16_t *y, int N, int ord, char *stack)
{
   int i;
   VARDECL(spx_mem_t *mem);
   ALLOC(mem, ord, spx_mem_t);
   for (i=0;i<ord;i++)
      mem[i]=0;
   filter_mem16(xx, ak, awk1, y, N, ord, mem, stack);
   for (i=0;i<ord;i++)
      mem[i]=0;
   fir_mem16(y, awk2, y, N, ord, mem, stack);
}
#ifndef OVERRIDE_COMPUTE_IMPULSE_RESPONSE
406
void compute_impulse_response(const spx_coef_t *ak, const spx_coef_t *awk1, const spx_coef_t *awk2, spx_word16_t *y, int N, int ord, char *stack)
409
spx_word16_t y1, ny1i, ny2i;
410
VARDECL(spx_mem_t *mem1);
411
VARDECL(spx_mem_t *mem2);
412
ALLOC(mem1, ord, spx_mem_t);
413
ALLOC(mem2, ord, spx_mem_t);
422
mem1[i] = mem2[i] = 0;
425
y1 = ADD16(y[i], EXTRACT16(PSHR32(mem1[0],LPC_SHIFT)));
427
y[i] = PSHR32(ADD32(SHL32(EXTEND32(y1),LPC_SHIFT+1),mem2[0]),LPC_SHIFT);
429
for (j=0;j<ord-1;j++)
431
mem1[j] = MAC16_16(mem1[j+1], awk2[j],ny1i);
432
mem2[j] = MAC16_16(mem2[j+1], ak[j],ny2i);
434
mem1[ord-1] = MULT16_16(awk2[ord-1],ny1i);
435
mem2[ord-1] = MULT16_16(ak[ord-1],ny2i);
440
/* Decomposes a signal into low-band and high-band using a QMF */
441
void qmf_decomp(const spx_word16_t *xx, const spx_word16_t *aa, spx_word16_t *y1, spx_word16_t *y2, int N, int M, spx_word16_t *mem, char *stack)
444
VARDECL(spx_word16_t *a);
445
VARDECL(spx_word16_t *x);
448
ALLOC(a, M, spx_word16_t);
449
ALLOC(x, N+M-1, spx_word16_t);
457
x[i+M-1]=SHR16(xx[i],1);
459
mem[i]=SHR16(xx[N-i-1],1);
460
for (i=0,k=0;i<N;i+=2,k++)
462
spx_word32_t y1k=0, y2k=0;
465
y1k=ADD32(y1k,MULT16_16(a[j],ADD16(x[i+j],x2[i-j])));
466
y2k=SUB32(y2k,MULT16_16(a[j],SUB16(x[i+j],x2[i-j])));
468
y1k=ADD32(y1k,MULT16_16(a[j],ADD16(x[i+j],x2[i-j])));
469
y2k=ADD32(y2k,MULT16_16(a[j],SUB16(x[i+j],x2[i-j])));
471
y1[k] = EXTRACT16(SATURATE(PSHR32(y1k,15),32767));
472
y2[k] = EXTRACT16(SATURATE(PSHR32(y2k,15),32767));
476
/* Re-synthesised a signal from the QMF low-band and high-band signals */
477
void qmf_synth(const spx_word16_t *x1, const spx_word16_t *x2, const spx_word16_t *a, spx_word16_t *y, int N, int M, spx_word16_t *mem1, spx_word16_t *mem2, char *stack)
479
all odd x[i] are zero -- well, actually they are left out of the array now
480
N and M are multiples of 4 */
484
VARDECL(spx_word16_t *xx1);
485
VARDECL(spx_word16_t *xx2);
489
ALLOC(xx1, M2+N2, spx_word16_t);
490
ALLOC(xx2, M2+N2, spx_word16_t);
492
for (i = 0; i < N2; i++)
494
for (i = 0; i < M2; i++)
495
xx1[N2+i] = mem1[2*i+1];
496
for (i = 0; i < N2; i++)
498
for (i = 0; i < M2; i++)
499
xx2[N2+i] = mem2[2*i+1];
501
for (i = 0; i < N2; i += 2) {
502
spx_sig_t y0, y1, y2, y3;
503
spx_word16_t x10, x20;
505
y0 = y1 = y2 = y3 = 0;
509
for (j = 0; j < M2; j += 2) {
510
spx_word16_t x11, x21;
519
/* We multiply twice by the same coef to avoid overflows */
520
y0 = MAC16_16(MAC16_16(y0, a0, x11), NEG16(a0), x21);
521
y1 = MAC16_16(MAC16_16(y1, a1, x11), a1, x21);
522
y2 = MAC16_16(MAC16_16(y2, a0, x10), NEG16(a0), x20);
523
y3 = MAC16_16(MAC16_16(y3, a1, x10), a1, x20);
525
y0 = ADD32(y0,MULT16_16(a0, x11-x21));
526
y1 = ADD32(y1,MULT16_16(a1, x11+x21));
527
y2 = ADD32(y2,MULT16_16(a0, x10-x20));
528
y3 = ADD32(y3,MULT16_16(a1, x10+x20));
536
/* We multiply twice by the same coef to avoid overflows */
537
y0 = MAC16_16(MAC16_16(y0, a0, x10), NEG16(a0), x20);
538
y1 = MAC16_16(MAC16_16(y1, a1, x10), a1, x20);
539
y2 = MAC16_16(MAC16_16(y2, a0, x11), NEG16(a0), x21);
540
y3 = MAC16_16(MAC16_16(y3, a1, x11), a1, x21);
542
y0 = ADD32(y0,MULT16_16(a0, x10-x20));
543
y1 = ADD32(y1,MULT16_16(a1, x10+x20));
544
y2 = ADD32(y2,MULT16_16(a0, x11-x21));
545
y3 = ADD32(y3,MULT16_16(a1, x11+x21));
549
y[2*i] = EXTRACT16(SATURATE32(PSHR32(y0,15),32767));
550
y[2*i+1] = EXTRACT16(SATURATE32(PSHR32(y1,15),32767));
551
y[2*i+2] = EXTRACT16(SATURATE32(PSHR32(y2,15),32767));
552
y[2*i+3] = EXTRACT16(SATURATE32(PSHR32(y3,15),32767));
554
/* Normalize up explicitly if we're in float */
562
for (i = 0; i < M2; i++)
563
mem1[2*i+1] = xx1[i];
564
for (i = 0; i < M2; i++)
565
mem2[2*i+1] = xx2[i];
570
/* Fractional-delay interpolation filters (7 taps, Q15 in fixed point):
   row 0 is the half-sample shift, rows 1/2 are the +/- quarter-sample
   shifts. The #if 0 variants are an alternative (disabled) filter design. */
#ifdef FIXED_POINT
#if 0
const spx_word16_t shift_filt[3][7] = {{-33, 1043, -4551, 19959, 19959, -4551, 1043},
                                 {-98, 1133, -4425, 29179, 8895, -2328, 444},
                                 {444, -2328, 8895, 29179, -4425, 1133, -98}};
#else
const spx_word16_t shift_filt[3][7] = {{-390, 1540, -4993, 20123, 20123, -4993, 1540},
                                 {-1064, 2817, -6694, 31589, 6837, -990, -209},
                                 {-209, -990, 6837, 31589, -6694, 2817, -1064}};
#endif
#else
#if 0
const float shift_filt[3][7] = {{-9.9369e-04, 3.1831e-02, -1.3889e-01, 6.0910e-01, 6.0910e-01, -1.3889e-01, 3.1831e-02},
                          {-0.0029937, 0.0345613, -0.1350474, 0.8904793, 0.2714479, -0.0710304, 0.0135403},
                          {0.0135403, -0.0710304, 0.2714479, 0.8904793, -0.1350474, 0.0345613, -0.0029937}};
#else
const float shift_filt[3][7] = {{-0.011915f, 0.046995f, -0.152373f, 0.614108f, 0.614108f, -0.152373f, 0.046995f},
                          {-0.0324855f, 0.0859768f, -0.2042986f, 0.9640297f, 0.2086420f, -0.0302054f, -0.0063646f},
                          {-0.0063646f, -0.0302054f, 0.2086420f, 0.9640297f, -0.2042986f, 0.0859768f, -0.0324855f}};
#endif
#endif

/* Builds a pitch-delayed copy of the excitation with sub-sample resolution:
   correlates exc against itself at lags pitch-3..pitch+3, refines the
   correlations through the three fractional-shift filters, picks the
   (integer, fractional) lag pair with the highest correlation, and writes
   the correspondingly shifted excitation into interp. */
static void interp_pitch(
spx_word16_t *exc,          /*decoded excitation*/
spx_word16_t *interp,       /*decoded excitation*/
int pitch,               /*pitch period*/
int len
)
{
   int i,j,k;
   spx_word32_t corr[4][7];
   spx_word32_t maxcorr;
   int maxi, maxj;
   for (i=0;i<7;i++)
   {
      corr[0][i] = inner_prod(exc, exc-pitch-3+i, len);
   }
   for (i=0;i<3;i++)
   {
      for (j=0;j<7;j++)
      {
         int i1, i2;
         spx_word32_t tmp=0;
         /* Clip the convolution so corr[0][j+k-3] stays within 0..6 */
         i1 = 3-j;
         if (i1<0)
            i1 = 0;
         i2 = 10-j;
         if (i2>7)
            i2 = 7;
         for (k=i1;k<i2;k++)
            tmp += MULT16_32_Q15(shift_filt[i][k],corr[0][j+k-3]);
         corr[i+1][j] = tmp;
      }
   }
   maxi=maxj=0;
   maxcorr = corr[0][0];
   for (i=0;i<4;i++)
   {
      for (j=0;j<7;j++)
      {
         if (corr[i][j] > maxcorr)
         {
            maxcorr = corr[i][j];
            maxi=i;
            maxj=j;
         }
      }
   }
   for (i=0;i<len;i++)
   {
      spx_word32_t tmp = 0;
      if (maxi>0)
      {
         /* Fractional shift: 7-tap filter around the chosen integer lag */
         for (k=0;k<7;k++)
         {
            tmp += MULT16_16(exc[i-(pitch-maxj+3)+k-3],shift_filt[maxi-1][k]);
         }
      } else {
         /* Integer shift only */
         tmp = SHL32(exc[i-(pitch-maxj+3)],15);
      }
      interp[i] = PSHR32(tmp,15);
   }
}
spx_word16_t *exc, /*decoded excitation*/
655
spx_word16_t *new_exc, /*enhanced excitation*/
656
spx_coef_t *ak, /*LPC filter coefs*/
658
int nsf, /*sub-frame size*/
659
int pitch, /*pitch period*/
661
spx_word16_t comb_gain, /*gain of comb filter*/
666
VARDECL(spx_word16_t *iexc);
667
spx_word16_t old_ener, new_ener;
670
spx_word16_t iexc0_mag, iexc1_mag, exc_mag;
671
spx_word32_t corr0, corr1;
672
spx_word16_t gain0, gain1;
673
spx_word16_t pgain1, pgain2;
677
spx_word16_t gg1, gg2;
681
#if 0 /* Set to 1 to enable full pitch search */
683
spx_word16_t nol_pitch_coef[6];
684
spx_word16_t ol_pitch_coef;
685
open_loop_nbest_pitch(exc, 20, 120, nsf,
686
nol_pitch, nol_pitch_coef, 6, stack);
687
corr_pitch=nol_pitch[0];
688
ol_pitch_coef = nol_pitch_coef[0];
689
/*Try to remove pitch multiples*/
693
if ((nol_pitch_coef[i]>MULT16_16_Q15(nol_pitch_coef[0],19661)) &&
695
if ((nol_pitch_coef[i]>.6*nol_pitch_coef[0]) &&
697
(ABS(2*nol_pitch[i]-corr_pitch)<=2 || ABS(3*nol_pitch[i]-corr_pitch)<=3 ||
698
ABS(4*nol_pitch[i]-corr_pitch)<=4 || ABS(5*nol_pitch[i]-corr_pitch)<=5))
700
corr_pitch = nol_pitch[i];
707
ALLOC(iexc, 2*nsf, spx_word16_t);
709
interp_pitch(exc, iexc, corr_pitch, 80);
710
if (corr_pitch>max_pitch)
711
interp_pitch(exc, iexc+nsf, 2*corr_pitch, 80);
713
interp_pitch(exc, iexc+nsf, -corr_pitch, 80);
718
if (ABS16(exc[i])>16383)
727
exc[i] = SHR16(exc[i],1);
728
for (i=0;i<2*nsf;i++)
729
iexc[i] = SHR16(iexc[i],1);
732
/*interp_pitch(exc, iexc+2*nsf, 2*corr_pitch, 80);*/
734
/*printf ("%d %d %f\n", pitch, corr_pitch, max_corr*ener_1);*/
735
iexc0_mag = spx_sqrt(1000+inner_prod(iexc,iexc,nsf));
736
iexc1_mag = spx_sqrt(1000+inner_prod(iexc+nsf,iexc+nsf,nsf));
737
exc_mag = spx_sqrt(1+inner_prod(exc,exc,nsf));
738
corr0 = inner_prod(iexc,exc,nsf);
741
corr1 = inner_prod(iexc+nsf,exc,nsf);
745
/* Doesn't cost much to limit the ratio and it makes the rest easier */
746
if (SHL32(EXTEND32(iexc0_mag),6) < EXTEND32(exc_mag))
747
iexc0_mag = ADD16(1,PSHR16(exc_mag,6));
748
if (SHL32(EXTEND32(iexc1_mag),6) < EXTEND32(exc_mag))
749
iexc1_mag = ADD16(1,PSHR16(exc_mag,6));
751
if (corr0 > MULT16_16(iexc0_mag,exc_mag))
752
pgain1 = QCONST16(1., 14);
754
pgain1 = PDIV32_16(SHL32(PDIV32(corr0, exc_mag),14),iexc0_mag);
755
if (corr1 > MULT16_16(iexc1_mag,exc_mag))
756
pgain2 = QCONST16(1., 14);
758
pgain2 = PDIV32_16(SHL32(PDIV32(corr1, exc_mag),14),iexc1_mag);
759
gg1 = PDIV32_16(SHL32(EXTEND32(exc_mag),8), iexc0_mag);
760
gg2 = PDIV32_16(SHL32(EXTEND32(exc_mag),8), iexc1_mag);
764
c1 = (MULT16_16_Q15(QCONST16(.4,15),comb_gain)+QCONST16(.07,15));
765
c2 = QCONST16(.5,15)+MULT16_16_Q14(QCONST16(1.72,14),(c1-QCONST16(.07,15)));
767
c1 = .4*comb_gain+.07;
768
c2 = .5+1.72*(c1-.07);
775
g1 = 32767 - MULT16_16_Q13(MULT16_16_Q15(c2, pgain1),pgain1);
776
g2 = 32767 - MULT16_16_Q13(MULT16_16_Q15(c2, pgain2),pgain2);
778
g1 = 1-c2*pgain1*pgain1;
779
g2 = 1-c2*pgain2*pgain2;
785
g1 = (spx_word16_t)PDIV32_16(SHL32(EXTEND32(c1),14),(spx_word16_t)g1);
786
g2 = (spx_word16_t)PDIV32_16(SHL32(EXTEND32(c1),14),(spx_word16_t)g2);
787
if (corr_pitch>max_pitch)
789
gain0 = MULT16_16_Q15(QCONST16(.7,15),MULT16_16_Q14(g1,gg1));
790
gain1 = MULT16_16_Q15(QCONST16(.3,15),MULT16_16_Q14(g2,gg2));
792
gain0 = MULT16_16_Q15(QCONST16(.6,15),MULT16_16_Q14(g1,gg1));
793
gain1 = MULT16_16_Q15(QCONST16(.6,15),MULT16_16_Q14(g2,gg2));
796
new_exc[i] = ADD16(exc[i], EXTRACT16(PSHR32(ADD32(MULT16_16(gain0,iexc[i]), MULT16_16(gain1,iexc[i+nsf])),8)));
797
/* FIXME: compute_rms16 is currently not quite accurate enough (but close) */
798
new_ener = compute_rms16(new_exc, nsf);
799
old_ener = compute_rms16(exc, nsf);
805
if (old_ener > new_ener)
807
ngain = PDIV32_16(SHL32(EXTEND32(old_ener),14),new_ener);
810
new_exc[i] = MULT16_16_Q14(ngain, new_exc[i]);
815
exc[i] = SHL16(exc[i],1);
817
new_exc[i] = SHL16(SATURATE16(new_exc[i],16383),1);