2
/* audioopmodule - Module to detect peak values in arrays */
4
#define PY_SSIZE_T_CLEAN
10
#if defined(__CHAR_UNSIGNED__)
12
/* This module currently does not work on systems where only unsigned
13
characters are available. Take it out of Setup. Sorry. */
17
/* Per-sample-width limits, indexed by the sample width in bytes
 * (index 0 is unused padding so that e.g. maxvals[2] is the 16-bit max). */
static const int maxvals[] = {0, 0x7F, 0x7FFF, 0x7FFFFF, 0x7FFFFFFF};
/* -1 trick is needed on Windows to support -0x80000000 without a warning */
static const int minvals[] = {0, -0x80, -0x8000, -0x800000, -0x7FFFFFFF-1};
static const unsigned int masks[] = {0, 0xFF, 0xFFFF, 0xFFFFFF, 0xFFFFFFFF};
23
/* Clamp val into [minval, maxval].  Values below minval + 1 snap to
 * minval so that a subsequent floor() cannot undershoot the legal
 * minimum sample value.  Returns the (possibly clamped) double; callers
 * apply floor() and cast themselves. */
static double
fbound(double val, double minval, double maxval)
{
    if (val > maxval)
        val = maxval;
    else if (val < minval + 1)
        val = minval;
    return val;
}
33
/* Code shamelessly stolen from sox, 12.17.7, g711.c
34
** (c) Craig Reese, Joe Campbell and Jeff Poskanzer 1989 */
39
* Functions linear2alaw, linear2ulaw have been updated to correctly
40
* convert unquantized 16 bit values.
41
* Tables for direct u- to A-law and A- to u-law conversions have been
43
* Borge Lindberg, Center for PersonKommunikation, Aalborg University.
47
/* Constants for the G.711 u-law/A-law codecs below (from sox g711.c). */
#define BIAS 0x84   /* define the add-in bias for 16 bit samples */
/* NOTE(review): CLIP fell in a gap of this view; restored from the
 * canonical source — it is referenced by st_14linear2ulaw below. */
#define CLIP 8159
#define SIGN_BIT (0x80)  /* Sign bit for a A-law byte. */
#define QUANT_MASK (0xf) /* Quantization field mask. */
#define SEG_SHIFT (4)    /* Left shift for segment number. */
#define SEG_MASK (0x70)  /* Segment field mask. */
54
/* Segment end-point tables used by search() to find the G.711 segment
 * number for a linear sample magnitude (A-law and u-law variants). */
static PyInt16 seg_aend[8] = {0x1F, 0x3F, 0x7F, 0xFF,
                              0x1FF, 0x3FF, 0x7FF, 0xFFF};
static PyInt16 seg_uend[8] = {0x3F, 0x7F, 0xFF, 0x1FF,
                              0x3FF, 0x7FF, 0xFFF, 0x1FFF};
60
/* Return the index of the first table entry >= val (i.e. the G.711
 * segment number for magnitude val), or size if val exceeds them all. */
static PyInt16
search(PyInt16 val, PyInt16 *table, int size)
{
    int i;

    for (i = 0; i < size; i++) {
        if (val <= *table++)
            return (i);
    }
    return (size);
}
70
#define st_ulaw2linear16(uc) (_st_ulaw2linear16[uc])
71
#define st_alaw2linear16(uc) (_st_alaw2linear16[uc])
73
/* u-law byte -> 16-bit linear PCM lookup table (CCITT G.711).
 * NOTE(review): this view of the source was truncated after the
 * "..., 40, 32," row; the final four entries and the closing brace are
 * restored from the canonical G.711 table (sox g711.c). */
static PyInt16 _st_ulaw2linear16[256] = {
    -32124, -31100, -30076, -29052, -28028, -27004, -25980,
    -24956, -23932, -22908, -21884, -20860, -19836, -18812,
    -17788, -16764, -15996, -15484, -14972, -14460, -13948,
    -13436, -12924, -12412, -11900, -11388, -10876, -10364,
    -9852, -9340, -8828, -8316, -7932, -7676, -7420,
    -7164, -6908, -6652, -6396, -6140, -5884, -5628,
    -5372, -5116, -4860, -4604, -4348, -4092, -3900,
    -3772, -3644, -3516, -3388, -3260, -3132, -3004,
    -2876, -2748, -2620, -2492, -2364, -2236, -2108,
    -1980, -1884, -1820, -1756, -1692, -1628, -1564,
    -1500, -1436, -1372, -1308, -1244, -1180, -1116,
    -1052, -988, -924, -876, -844, -812, -780,
    -748, -716, -684, -652, -620, -588, -556,
    -524, -492, -460, -428, -396, -372, -356,
    -340, -324, -308, -292, -276, -260, -244,
    -228, -212, -196, -180, -164, -148, -132,
    -120, -112, -104, -96, -88, -80, -72,
    -64, -56, -48, -40, -32, -24, -16,
    -8, 0, 32124, 31100, 30076, 29052, 28028,
    27004, 25980, 24956, 23932, 22908, 21884, 20860,
    19836, 18812, 17788, 16764, 15996, 15484, 14972,
    14460, 13948, 13436, 12924, 12412, 11900, 11388,
    10876, 10364, 9852, 9340, 8828, 8316, 7932,
    7676, 7420, 7164, 6908, 6652, 6396, 6140,
    5884, 5628, 5372, 5116, 4860, 4604, 4348,
    4092, 3900, 3772, 3644, 3516, 3388, 3260,
    3132, 3004, 2876, 2748, 2620, 2492, 2364,
    2236, 2108, 1980, 1884, 1820, 1756, 1692,
    1628, 1564, 1500, 1436, 1372, 1308, 1244,
    1180, 1116, 1052, 988, 924, 876, 844,
    812, 780, 748, 716, 684, 652, 620,
    588, 556, 524, 492, 460, 428, 396,
    372, 356, 340, 324, 308, 292, 276,
    260, 244, 228, 212, 196, 180, 164,
    148, 132, 120, 112, 104, 96, 88,
    80, 72, 64, 56, 48, 40, 32,
    24, 16, 8, 0
};
114
* linear2ulaw() accepts a 14-bit signed integer and encodes it as u-law data
115
* stored in a unsigned char. This function should only be called with
116
* the data shifted such that it only contains information in the lower
119
* In order to simplify the encoding process, the original linear magnitude
120
* is biased by adding 33 which shifts the encoding range from (0 - 8158) to
121
* (33 - 8191). The result can be seen in the following encoding table:
123
* Biased Linear Input Code Compressed Code
124
* ------------------------ ---------------
125
* 00000001wxyza 000wxyz
126
* 0000001wxyzab 001wxyz
127
* 000001wxyzabc 010wxyz
128
* 00001wxyzabcd 011wxyz
129
* 0001wxyzabcde 100wxyz
130
* 001wxyzabcdef 101wxyz
131
* 01wxyzabcdefg 110wxyz
132
* 1wxyzabcdefgh 111wxyz
134
* Each biased linear code has a leading 1 which identifies the segment
135
* number. The value of the segment number is equal to 7 minus the number
136
* of leading 0's. The quantization interval is directly available as the
137
* four bits wxyz. * The trailing bits (a - h) are ignored.
139
* Ordinarily the complement of the resulting code word is used for
140
* transmission, and so the code word is complemented before it is returned.
142
* For further information see John C. Bellamy's Digital Telephony, 1982,
143
* John Wiley & Sons, pps 98-111 and 472-476.
146
/* Encode a 14-bit linear PCM sample as a u-law byte (CCITT G.711).
 * The magnitude is biased by BIAS>>2, the segment found via seg_uend,
 * and the resulting code word is complemented per G.711 transmission
 * convention.  NOTE(review): declarations, braces and the sign-handling
 * lines fell in gaps of this view and are restored from the canonical
 * sox g711.c implementation that every visible line here matches. */
static unsigned char
st_14linear2ulaw(PyInt16 pcm_val)       /* 2's complement (14-bit range) */
{
    PyInt16         mask;
    PyInt16         seg;
    unsigned char   uval;

    /* u-law inverts all bits */
    /* Get the sign and the magnitude of the value. */
    if (pcm_val < 0) {
        pcm_val = -pcm_val;
        mask = 0x7F;
    } else {
        mask = 0xFF;
    }
    if ( pcm_val > CLIP ) pcm_val = CLIP;           /* clip the magnitude */
    pcm_val += (BIAS >> 2);

    /* Convert the scaled magnitude to segment number. */
    seg = search(pcm_val, seg_uend, 8);

    /*
     * Combine the sign, segment, quantization bits;
     * and complement the code word.
     */
    if (seg >= 8)           /* out of range, return maximum value. */
        return (unsigned char) (0x7F ^ mask);
    else {
        uval = (unsigned char) (seg << 4) | ((pcm_val >> (seg + 1)) & 0xF);
        return (uval ^ mask);
    }
}
179
/* A-law byte -> 16-bit linear PCM lookup table (CCITT G.711).
 * NOTE(review): this view of the source was truncated after the
 * "..., 1008, 976," row; the final four entries and the closing brace
 * are restored from the canonical G.711 table (sox g711.c). */
static PyInt16 _st_alaw2linear16[256] = {
    -5504, -5248, -6016, -5760, -4480, -4224, -4992,
    -4736, -7552, -7296, -8064, -7808, -6528, -6272,
    -7040, -6784, -2752, -2624, -3008, -2880, -2240,
    -2112, -2496, -2368, -3776, -3648, -4032, -3904,
    -3264, -3136, -3520, -3392, -22016, -20992, -24064,
    -23040, -17920, -16896, -19968, -18944, -30208, -29184,
    -32256, -31232, -26112, -25088, -28160, -27136, -11008,
    -10496, -12032, -11520, -8960, -8448, -9984, -9472,
    -15104, -14592, -16128, -15616, -13056, -12544, -14080,
    -13568, -344, -328, -376, -360, -280, -264,
    -312, -296, -472, -456, -504, -488, -408,
    -392, -440, -424, -88, -72, -120, -104,
    -24, -8, -56, -40, -216, -200, -248,
    -232, -152, -136, -184, -168, -1376, -1312,
    -1504, -1440, -1120, -1056, -1248, -1184, -1888,
    -1824, -2016, -1952, -1632, -1568, -1760, -1696,
    -688, -656, -752, -720, -560, -528, -624,
    -592, -944, -912, -1008, -976, -816, -784,
    -880, -848, 5504, 5248, 6016, 5760, 4480,
    4224, 4992, 4736, 7552, 7296, 8064, 7808,
    6528, 6272, 7040, 6784, 2752, 2624, 3008,
    2880, 2240, 2112, 2496, 2368, 3776, 3648,
    4032, 3904, 3264, 3136, 3520, 3392, 22016,
    20992, 24064, 23040, 17920, 16896, 19968, 18944,
    30208, 29184, 32256, 31232, 26112, 25088, 28160,
    27136, 11008, 10496, 12032, 11520, 8960, 8448,
    9984, 9472, 15104, 14592, 16128, 15616, 13056,
    12544, 14080, 13568, 344, 328, 376, 360,
    280, 264, 312, 296, 472, 456, 504,
    488, 408, 392, 440, 424, 88, 72,
    120, 104, 24, 8, 56, 40, 216,
    200, 248, 232, 152, 136, 184, 168,
    1376, 1312, 1504, 1440, 1120, 1056, 1248,
    1184, 1888, 1824, 2016, 1952, 1632, 1568,
    1760, 1696, 688, 656, 752, 720, 560,
    528, 624, 592, 944, 912, 1008, 976,
    816, 784, 880, 848
};
220
* linear2alaw() accepts an 13-bit signed integer and encodes it as A-law data
221
* stored in a unsigned char. This function should only be called with
222
* the data shifted such that it only contains information in the lower
225
* Linear Input Code Compressed Code
226
* ------------------------ ---------------
227
* 0000000wxyza 000wxyz
228
* 0000001wxyza 001wxyz
229
* 000001wxyzab 010wxyz
230
* 00001wxyzabc 011wxyz
231
* 0001wxyzabcd 100wxyz
232
* 001wxyzabcde 101wxyz
233
* 01wxyzabcdef 110wxyz
234
* 1wxyzabcdefg 111wxyz
236
* For further information see John C. Bellamy's Digital Telephony, 1982,
237
* John Wiley & Sons, pps 98-111 and 472-476.
240
/* Encode a 13-bit linear PCM sample as an A-law byte (CCITT G.711).
 * A-law uses even-bit inversion (mask 0x55/0xD5) rather than full
 * complementing.  NOTE(review): declarations, braces and the seg < 2
 * branch line fell in gaps of this view and are restored from the
 * canonical sox g711.c implementation that every visible line matches. */
static unsigned char
st_linear2alaw(PyInt16 pcm_val)         /* 2's complement (13-bit range) */
{
    PyInt16         mask;
    short           seg;
    unsigned char   aval;

    /* A-law using even bit inversion */
    if (pcm_val >= 0) {
        mask = 0xD5;            /* sign (7th) bit = 1 */
    } else {
        mask = 0x55;            /* sign bit = 0 */
        pcm_val = -pcm_val - 1;
    }

    /* Convert the scaled magnitude to segment number. */
    seg = search(pcm_val, seg_aend, 8);

    /* Combine the sign, segment, and quantization bits. */

    if (seg >= 8)           /* out of range, return maximum value. */
        return (unsigned char) (0x7F ^ mask);
    else {
        aval = (unsigned char) seg << SEG_SHIFT;
        if (seg < 2)
            aval |= (pcm_val >> 1) & QUANT_MASK;
        else
            aval |= (pcm_val >> seg) & QUANT_MASK;
        return (aval ^ mask);
    }
}
270
/* End of code taken from sox */
272
/* Intel ADPCM step variation table */
273
/* IMA/Intel ADPCM step-index adjustment table: indexed by the 4-bit
 * ADPCM code, gives the delta applied to the step-size index.
 * (Closing brace restored; it fell in a gap of this view.) */
static int indexTable[16] = {
    -1, -1, -1, -1, 2, 4, 6, 8,
    -1, -1, -1, -1, 2, 4, 6, 8,
};
278
/* IMA/Intel ADPCM quantizer step sizes (89 entries), indexed by the
 * running step index maintained by the codec.
 * (Closing brace restored; it fell in a gap of this view.) */
static int stepsizeTable[89] = {
    7, 8, 9, 10, 11, 12, 13, 14, 16, 17,
    19, 21, 23, 25, 28, 31, 34, 37, 41, 45,
    50, 55, 60, 66, 73, 80, 88, 97, 107, 118,
    130, 143, 157, 173, 190, 209, 230, 253, 279, 307,
    337, 371, 408, 449, 494, 544, 598, 658, 724, 796,
    876, 963, 1060, 1166, 1282, 1411, 1552, 1707, 1878, 2066,
    2272, 2499, 2749, 3024, 3327, 3660, 4026, 4428, 4871, 5358,
    5894, 6484, 7132, 7845, 8630, 9493, 10442, 11487, 12635, 13899,
    15289, 16818, 18500, 20350, 22385, 24623, 27086, 29794, 32767
};
290
/* Read/write a sample of arbitrary C type T at byte offset i of buffer cp.
 * (SETINTX's "} while (0)" tail restored; it fell in a gap of this view.) */
#define GETINTX(T, cp, i)  (*(T *)((unsigned char *)(cp) + (i)))
#define SETINTX(T, cp, i, val)  do {                    \
        *(T *)((unsigned char *)(cp) + (i)) = (T)(val); \
    } while (0)
296
/* Typed readers for 1-, 2- and 4-byte samples (native byte order). */
#define GETINT8(cp, i)  GETINTX(signed char, (cp), (i))
#define GETINT16(cp, i)  GETINTX(short, (cp), (i))
#define GETINT32(cp, i)  GETINTX(PY_INT32_T, (cp), (i))
301
/* Read a signed 24-bit sample byte-by-byte (no native 3-byte type).
 * NOTE(review): the two definitions appeared back-to-back because the
 * endianness preprocessor guards fell in gaps of this view; restored —
 * first variant treats byte 0 as the MSB (big-endian), second treats
 * byte 2 as the MSB (little-endian). */
#ifdef WORDS_BIGENDIAN
#define GETINT24(cp, i)  (                          \
        ((unsigned char *)(cp) + (i))[2] +          \
        (((unsigned char *)(cp) + (i))[1] << 8) +   \
        (((signed char *)(cp) + (i))[0] << 16) )
#else
#define GETINT24(cp, i)  (                          \
        ((unsigned char *)(cp) + (i))[0] +          \
        (((unsigned char *)(cp) + (i))[1] << 8) +   \
        (((signed char *)(cp) + (i))[2] << 16) )
#endif
313
/* Typed writers for 1-, 2- and 4-byte samples (native byte order). */
#define SETINT8(cp, i, val)  SETINTX(signed char, (cp), (i), (val))
#define SETINT16(cp, i, val)  SETINTX(short, (cp), (i), (val))
#define SETINT32(cp, i, val)  SETINTX(PY_INT32_T, (cp), (i), (val))
318
/* Write a signed 24-bit sample byte-by-byte (no native 3-byte type).
 * NOTE(review): the endianness guards and the "} while (0)" tails fell
 * in gaps of this view; restored to match GETINT24 above. */
#ifdef WORDS_BIGENDIAN
#define SETINT24(cp, i, val)  do {                          \
        ((unsigned char *)(cp) + (i))[2] = (int)(val);      \
        ((unsigned char *)(cp) + (i))[1] = (int)(val) >> 8; \
        ((signed char *)(cp) + (i))[0] = (int)(val) >> 16;  \
    } while (0)
#else
#define SETINT24(cp, i, val)  do {                          \
        ((unsigned char *)(cp) + (i))[0] = (int)(val);      \
        ((unsigned char *)(cp) + (i))[1] = (int)(val) >> 8; \
        ((signed char *)(cp) + (i))[2] = (int)(val) >> 16;  \
    } while (0)
#endif
332
/* Read one sample of the given width (1-4 bytes) as a sign-extended int,
 * without scaling. */
#define GETRAWSAMPLE(size, cp, i)  (                    \
        (size == 1) ? (int)GETINT8((cp), (i)) :         \
        (size == 2) ? (int)GETINT16((cp), (i)) :        \
        (size == 3) ? (int)GETINT24((cp), (i)) :        \
                      (int)GETINT32((cp), (i)))
338
/* Write one sample of the given width (1-4 bytes), without scaling.
 * NOTE(review): the "if (size == 1)", final "else" and "} while(0)"
 * lines fell in gaps of this view; restored to mirror SETSAMPLE32. */
#define SETRAWSAMPLE(size, cp, i, val)  do {    \
        if (size == 1)                          \
            SETINT8((cp), (i), (val));          \
        else if (size == 2)                     \
            SETINT16((cp), (i), (val));         \
        else if (size == 3)                     \
            SETINT24((cp), (i), (val));         \
        else                                    \
            SETINT32((cp), (i), (val));         \
    } while(0)
350
/* Read one sample of the given width, left-justified into the full
 * 32-bit range (narrower widths are shifted up). */
#define GETSAMPLE32(size, cp, i)  (                     \
        (size == 1) ? (int)GETINT8((cp), (i)) << 24 :   \
        (size == 2) ? (int)GETINT16((cp), (i)) << 16 :  \
        (size == 3) ? (int)GETINT24((cp), (i)) << 8 :   \
                      (int)GETINT32((cp), (i)))
356
/* Write a 32-bit-range sample at the given width, shifting it down to
 * fit (inverse of GETSAMPLE32).  NOTE(review): the "if (size == 1)",
 * final "else" and "} while(0)" lines fell in gaps of this view;
 * restored to mirror the visible else-if ladder. */
#define SETSAMPLE32(size, cp, i, val)  do {     \
        if (size == 1)                          \
            SETINT8((cp), (i), (val) >> 24);    \
        else if (size == 2)                     \
            SETINT16((cp), (i), (val) >> 16);   \
        else if (size == 3)                     \
            SETINT24((cp), (i), (val) >> 8);    \
        else                                    \
            SETINT32((cp), (i), (val));         \
    } while(0)
368
static PyObject *AudioopError;
371
audioop_check_size(int size)
373
if (size < 1 || size > 4) {
374
PyErr_SetString(AudioopError, "Size should be 1, 2, 3 or 4");
382
/* Validate a (buffer length, sample width) pair: the width must be 1-4
 * and the length a whole number of samples.  Returns 1 on success,
 * otherwise sets AudioopError and returns 0. */
static int
audioop_check_parameters(Py_ssize_t len, int size)
{
    if (!audioop_check_size(size))
        return 0;
    if (len % size != 0) {
        PyErr_SetString(AudioopError, "not a whole number of frames");
        return 0;
    }
    return 1;
}
394
audioop_getsample(PyObject *self, PyObject *args)
401
if (!PyArg_ParseTuple(args, "y*in:getsample", &view, &size, &i))
403
if (!audioop_check_parameters(view.len, size))
405
if (i < 0 || i >= view.len/size) {
406
PyErr_SetString(AudioopError, "Index out of range");
409
val = GETRAWSAMPLE(size, view.buf, i*size);
410
PyBuffer_Release(&view);
411
return PyLong_FromLong(val);
414
PyBuffer_Release(&view);
419
audioop_max(PyObject *self, PyObject *args)
424
unsigned int absval, max = 0;
426
if (!PyArg_ParseTuple(args, "y*i:max", &view, &size))
428
if (!audioop_check_parameters(view.len, size)) {
429
PyBuffer_Release(&view);
432
for (i = 0; i < view.len; i += size) {
433
int val = GETRAWSAMPLE(size, view.buf, i);
434
if (val < 0) absval = (-val);
436
if (absval > max) max = absval;
438
PyBuffer_Release(&view);
439
return PyLong_FromUnsignedLong(max);
443
audioop_minmax(PyObject *self, PyObject *args)
448
/* -1 trick below is needed on Windows to support -0x80000000 without
450
int min = 0x7fffffff, max = -0x7FFFFFFF-1;
452
if (!PyArg_ParseTuple(args, "y*i:minmax", &view, &size))
454
if (!audioop_check_parameters(view.len, size)) {
455
PyBuffer_Release(&view);
458
for (i = 0; i < view.len; i += size) {
459
int val = GETRAWSAMPLE(size, view.buf, i);
460
if (val > max) max = val;
461
if (val < min) min = val;
463
PyBuffer_Release(&view);
464
return Py_BuildValue("(ii)", min, max);
468
audioop_avg(PyObject *self, PyObject *args)
475
if (!PyArg_ParseTuple(args, "y*i:avg", &view, &size))
477
if (!audioop_check_parameters(view.len, size)) {
478
PyBuffer_Release(&view);
481
for (i = 0; i < view.len; i += size)
482
sum += GETRAWSAMPLE(size, view.buf, i);
486
avg = (int)floor(sum / (double)(view.len/size));
487
PyBuffer_Release(&view);
488
return PyLong_FromLong(avg);
492
audioop_rms(PyObject *self, PyObject *args)
498
double sum_squares = 0.0;
500
if (!PyArg_ParseTuple(args, "y*i:rms", &view, &size))
502
if (!audioop_check_parameters(view.len, size)) {
503
PyBuffer_Release(&view);
506
for (i = 0; i < view.len; i += size) {
507
double val = GETRAWSAMPLE(size, view.buf, i);
508
sum_squares += val*val;
513
res = (unsigned int)sqrt(sum_squares / (double)(view.len/size));
514
PyBuffer_Release(&view);
515
return PyLong_FromUnsignedLong(res);
518
/* Return sum(a[i]*b[i]) over len 16-bit samples, accumulated in double
 * to avoid overflow; used by findfit/findfactor/findmax as the inner
 * product / energy primitive. */
static double _sum2(const short *a, const short *b, Py_ssize_t len)
{
    Py_ssize_t i;
    double sum = 0.0;

    for( i=0; i<len; i++) {
        sum = sum + (double)a[i]*(double)b[i];
    }
    return sum;
}
530
** Findfit tries to locate a sample within another sample. Its main use
531
** is in echo-cancellation (to find the feedback of the output signal in
532
** the input signal).
533
** The method used is as follows:
535
** let R be the reference signal (length n) and A the input signal (length N)
536
** with N > n, and let all sums be over i from 0 to n-1.
538
** Now, for each j in {0..N-n} we compute a factor fj so that -fj*R matches A
539
** as good as possible, i.e. sum( (A[j+i]+fj*R[i])^2 ) is minimal. This
540
** equation gives fj = sum( A[j+i]R[i] ) / sum(R[i]^2).
542
** Next, we compute the relative distance between the original signal and
543
** the modified signal and minimize that over j:
544
** vj = sum( (A[j+i]-fj*R[i])^2 ) / sum( A[j+i]^2 ) =>
545
** vj = ( sum(A[j+i]^2)*sum(R[i]^2) - sum(A[j+i]R[i])^2 ) / sum( A[j+i]^2 )
547
** In the code variables correspond as follows:
554
** sum_ri_2 sum(R[i]^2)
555
** sum_aij_2 sum(A[i+j]^2)
556
** sum_aij_ri sum(A[i+j]R[i])
558
** sum_ri is calculated once, sum_aij_2 is updated each step and sum_aij_ri
559
** is completely recalculated each step.
562
audioop_findfit(PyObject *self, PyObject *args)
566
const short *cp1, *cp2;
567
Py_ssize_t len1, len2;
568
Py_ssize_t j, best_j;
569
double aj_m1, aj_lm1;
570
double sum_ri_2, sum_aij_2, sum_aij_ri, result, best_result, factor;
572
if (!PyArg_ParseTuple(args, "y*y*:findfit", &view1, &view2))
574
if (view1.len & 1 || view2.len & 1) {
575
PyErr_SetString(AudioopError, "Strings should be even-sized");
578
cp1 = (const short *)view1.buf;
579
len1 = view1.len >> 1;
580
cp2 = (const short *)view2.buf;
581
len2 = view2.len >> 1;
584
PyErr_SetString(AudioopError, "First sample should be longer");
587
sum_ri_2 = _sum2(cp2, cp2, len2);
588
sum_aij_2 = _sum2(cp1, cp1, len2);
589
sum_aij_ri = _sum2(cp1, cp2, len2);
591
result = (sum_ri_2*sum_aij_2 - sum_aij_ri*sum_aij_ri) / sum_aij_2;
593
best_result = result;
596
for ( j=1; j<=len1-len2; j++) {
597
aj_m1 = (double)cp1[j-1];
598
aj_lm1 = (double)cp1[j+len2-1];
600
sum_aij_2 = sum_aij_2 + aj_lm1*aj_lm1 - aj_m1*aj_m1;
601
sum_aij_ri = _sum2(cp1+j, cp2, len2);
603
result = (sum_ri_2*sum_aij_2 - sum_aij_ri*sum_aij_ri)
606
if ( result < best_result ) {
607
best_result = result;
613
factor = _sum2(cp1+best_j, cp2, len2) / sum_ri_2;
615
PyBuffer_Release(&view1);
616
PyBuffer_Release(&view2);
617
return Py_BuildValue("(nf)", best_j, factor);
620
PyBuffer_Release(&view1);
621
PyBuffer_Release(&view2);
626
** findfactor finds a factor f so that the energy in A-fB is minimal.
627
** See the comment for findfit for details.
630
audioop_findfactor(PyObject *self, PyObject *args)
634
const short *cp1, *cp2;
636
double sum_ri_2, sum_aij_ri, result;
638
if (!PyArg_ParseTuple(args, "y*y*:findfactor", &view1, &view2))
640
if (view1.len & 1 || view2.len & 1) {
641
PyErr_SetString(AudioopError, "Strings should be even-sized");
644
if (view1.len != view2.len) {
645
PyErr_SetString(AudioopError, "Samples should be same size");
648
cp1 = (const short *)view1.buf;
649
cp2 = (const short *)view2.buf;
650
len = view1.len >> 1;
651
sum_ri_2 = _sum2(cp2, cp2, len);
652
sum_aij_ri = _sum2(cp1, cp2, len);
654
result = sum_aij_ri / sum_ri_2;
656
PyBuffer_Release(&view1);
657
PyBuffer_Release(&view2);
658
return PyFloat_FromDouble(result);
661
PyBuffer_Release(&view1);
662
PyBuffer_Release(&view2);
667
** findmax returns the index of the n-sized segment of the input sample
668
** that contains the most energy.
671
audioop_findmax(PyObject *self, PyObject *args)
675
Py_ssize_t len1, len2;
676
Py_ssize_t j, best_j;
677
double aj_m1, aj_lm1;
678
double result, best_result;
680
if (!PyArg_ParseTuple(args, "y*n:findmax", &view, &len2))
683
PyErr_SetString(AudioopError, "Strings should be even-sized");
686
cp1 = (const short *)view.buf;
687
len1 = view.len >> 1;
689
if (len2 < 0 || len1 < len2) {
690
PyErr_SetString(AudioopError, "Input sample should be longer");
694
result = _sum2(cp1, cp1, len2);
696
best_result = result;
699
for ( j=1; j<=len1-len2; j++) {
700
aj_m1 = (double)cp1[j-1];
701
aj_lm1 = (double)cp1[j+len2-1];
703
result = result + aj_lm1*aj_lm1 - aj_m1*aj_m1;
705
if ( result > best_result ) {
706
best_result = result;
712
PyBuffer_Release(&view);
713
return PyLong_FromSsize_t(best_j);
716
PyBuffer_Release(&view);
721
audioop_avgpp(PyObject *self, PyObject *args)
725
int size, prevval, prevextremevalid = 0,
729
int diff, prevdiff, nextreme = 0;
731
if (!PyArg_ParseTuple(args, "y*i:avgpp", &view, &size))
733
if (!audioop_check_parameters(view.len, size)) {
734
PyBuffer_Release(&view);
737
if (view.len <= size) {
738
PyBuffer_Release(&view);
739
return PyLong_FromLong(0);
741
prevval = GETRAWSAMPLE(size, view.buf, 0);
742
prevdiff = 17; /* Anything != 0, 1 */
743
for (i = size; i < view.len; i += size) {
744
int val = GETRAWSAMPLE(size, view.buf, i);
745
if (val != prevval) {
746
diff = val < prevval;
747
if (prevdiff == !diff) {
748
/* Derivative changed sign. Compute difference to last
749
** extreme value and remember.
751
if (prevextremevalid) {
752
if (prevval < prevextreme)
753
sum += (double)((unsigned int)prevextreme -
754
(unsigned int)prevval);
756
sum += (double)((unsigned int)prevval -
757
(unsigned int)prevextreme);
760
prevextremevalid = 1;
761
prevextreme = prevval;
770
avg = (unsigned int)(sum / (double)nextreme);
771
PyBuffer_Release(&view);
772
return PyLong_FromUnsignedLong(avg);
776
audioop_maxpp(PyObject *self, PyObject *args)
780
int size, prevval, prevextremevalid = 0,
782
unsigned int max = 0, extremediff;
785
if (!PyArg_ParseTuple(args, "y*i:maxpp", &view, &size))
787
if (!audioop_check_parameters(view.len, size)) {
788
PyBuffer_Release(&view);
791
if (view.len <= size) {
792
PyBuffer_Release(&view);
793
return PyLong_FromLong(0);
795
prevval = GETRAWSAMPLE(size, view.buf, 0);
796
prevdiff = 17; /* Anything != 0, 1 */
797
for (i = size; i < view.len; i += size) {
798
int val = GETRAWSAMPLE(size, view.buf, i);
799
if (val != prevval) {
800
diff = val < prevval;
801
if (prevdiff == !diff) {
802
/* Derivative changed sign. Compute difference to
803
** last extreme value and remember.
805
if (prevextremevalid) {
806
if (prevval < prevextreme)
807
extremediff = (unsigned int)prevextreme -
808
(unsigned int)prevval;
810
extremediff = (unsigned int)prevval -
811
(unsigned int)prevextreme;
812
if ( extremediff > max )
815
prevextremevalid = 1;
816
prevextreme = prevval;
822
PyBuffer_Release(&view);
823
return PyLong_FromUnsignedLong(max);
827
audioop_cross(PyObject *self, PyObject *args)
835
if (!PyArg_ParseTuple(args, "y*i:cross", &view, &size))
837
if (!audioop_check_parameters(view.len, size)) {
838
PyBuffer_Release(&view);
842
prevval = 17; /* Anything <> 0,1 */
843
for (i = 0; i < view.len; i += size) {
844
int val = GETRAWSAMPLE(size, view.buf, i) < 0;
845
if (val != prevval) ncross++;
848
PyBuffer_Release(&view);
849
return PyLong_FromSsize_t(ncross);
853
audioop_mul(PyObject *self, PyObject *args)
859
double factor, maxval, minval;
862
if (!PyArg_ParseTuple(args, "y*id:mul", &view, &size, &factor))
864
if (!audioop_check_parameters(view.len, size))
867
maxval = (double) maxvals[size];
868
minval = (double) minvals[size];
870
rv = PyBytes_FromStringAndSize(NULL, view.len);
873
ncp = (signed char *)PyBytes_AsString(rv);
875
for (i = 0; i < view.len; i += size) {
876
double val = GETRAWSAMPLE(size, view.buf, i);
878
val = floor(fbound(val, minval, maxval));
879
SETRAWSAMPLE(size, ncp, i, (int)val);
882
PyBuffer_Release(&view);
887
audioop_tomono(PyObject *self, PyObject *args)
890
signed char *cp, *ncp;
893
double fac1, fac2, maxval, minval;
896
if (!PyArg_ParseTuple(args, "y*idd:tomono",
897
&pcp, &size, &fac1, &fac2))
901
if (!audioop_check_parameters(len, size))
903
if (((len / size) & 1) != 0) {
904
PyErr_SetString(AudioopError, "not a whole number of frames");
908
maxval = (double) maxvals[size];
909
minval = (double) minvals[size];
911
rv = PyBytes_FromStringAndSize(NULL, len/2);
914
ncp = (signed char *)PyBytes_AsString(rv);
916
for (i = 0; i < len; i += size*2) {
917
double val1 = GETRAWSAMPLE(size, cp, i);
918
double val2 = GETRAWSAMPLE(size, cp, i + size);
919
double val = val1*fac1 + val2*fac2;
920
val = floor(fbound(val, minval, maxval));
921
SETRAWSAMPLE(size, ncp, i/2, val);
924
PyBuffer_Release(&pcp);
929
audioop_tostereo(PyObject *self, PyObject *args)
935
double fac1, fac2, maxval, minval;
938
if (!PyArg_ParseTuple(args, "y*idd:tostereo",
939
&view, &size, &fac1, &fac2))
941
if (!audioop_check_parameters(view.len, size))
944
maxval = (double) maxvals[size];
945
minval = (double) minvals[size];
947
if (view.len > PY_SSIZE_T_MAX/2) {
948
PyErr_SetString(PyExc_MemoryError,
949
"not enough memory for output buffer");
953
rv = PyBytes_FromStringAndSize(NULL, view.len*2);
956
ncp = (signed char *)PyBytes_AsString(rv);
958
for (i = 0; i < view.len; i += size) {
959
double val = GETRAWSAMPLE(size, view.buf, i);
960
int val1 = (int)floor(fbound(val*fac1, minval, maxval));
961
int val2 = (int)floor(fbound(val*fac2, minval, maxval));
962
SETRAWSAMPLE(size, ncp, i*2, val1);
963
SETRAWSAMPLE(size, ncp, i*2 + size, val2);
966
PyBuffer_Release(&view);
971
audioop_add(PyObject *self, PyObject *args)
977
int size, minval, maxval, newval;
980
if (!PyArg_ParseTuple(args, "y*y*i:add",
981
&view1, &view2, &size))
983
if (!audioop_check_parameters(view1.len, size))
985
if (view1.len != view2.len) {
986
PyErr_SetString(AudioopError, "Lengths should be the same");
990
maxval = maxvals[size];
991
minval = minvals[size];
993
rv = PyBytes_FromStringAndSize(NULL, view1.len);
996
ncp = (signed char *)PyBytes_AsString(rv);
998
for (i = 0; i < view1.len; i += size) {
999
int val1 = GETRAWSAMPLE(size, view1.buf, i);
1000
int val2 = GETRAWSAMPLE(size, view2.buf, i);
1003
newval = val1 + val2;
1004
/* truncate in case of overflow */
1005
if (newval > maxval)
1007
else if (newval < minval)
1011
double fval = (double)val1 + (double)val2;
1012
/* truncate in case of overflow */
1013
newval = (int)floor(fbound(fval, minval, maxval));
1016
SETRAWSAMPLE(size, ncp, i, newval);
1019
PyBuffer_Release(&view1);
1020
PyBuffer_Release(&view2);
1025
audioop_bias(PyObject *self, PyObject *args)
1031
unsigned int val = 0, mask;
1032
PyObject *rv = NULL;
1034
if (!PyArg_ParseTuple(args, "y*ii:bias",
1035
&view, &size, &bias))
1038
if (!audioop_check_parameters(view.len, size))
1041
rv = PyBytes_FromStringAndSize(NULL, view.len);
1044
ncp = (signed char *)PyBytes_AsString(rv);
1048
for (i = 0; i < view.len; i += size) {
1050
val = GETINTX(unsigned char, view.buf, i);
1052
val = GETINTX(unsigned short, view.buf, i);
1054
val = ((unsigned int)GETINT24(view.buf, i)) & 0xffffffu;
1057
val = GETINTX(PY_UINT32_T, view.buf, i);
1060
val += (unsigned int)bias;
1061
/* wrap around in case of overflow */
1065
SETINTX(unsigned char, ncp, i, val);
1067
SETINTX(unsigned short, ncp, i, val);
1069
SETINT24(ncp, i, (int)val);
1072
SETINTX(PY_UINT32_T, ncp, i, val);
1076
PyBuffer_Release(&view);
1081
audioop_reverse(PyObject *self, PyObject *args)
1087
PyObject *rv = NULL;
1089
if (!PyArg_ParseTuple(args, "y*i:reverse",
1093
if (!audioop_check_parameters(view.len, size))
1096
rv = PyBytes_FromStringAndSize(NULL, view.len);
1099
ncp = (unsigned char *)PyBytes_AsString(rv);
1101
for (i = 0; i < view.len; i += size) {
1102
int val = GETRAWSAMPLE(size, view.buf, i);
1103
SETRAWSAMPLE(size, ncp, view.len - i - size, val);
1106
PyBuffer_Release(&view);
1111
audioop_byteswap(PyObject *self, PyObject *args)
1117
PyObject *rv = NULL;
1119
if (!PyArg_ParseTuple(args, "y*i:swapbytes",
1123
if (!audioop_check_parameters(view.len, size))
1126
rv = PyBytes_FromStringAndSize(NULL, view.len);
1129
ncp = (unsigned char *)PyBytes_AsString(rv);
1131
for (i = 0; i < view.len; i += size) {
1133
for (j = 0; j < size; j++)
1134
ncp[i + size - 1 - j] = ((unsigned char *)view.buf)[i + j];
1137
PyBuffer_Release(&view);
1142
audioop_lin2lin(PyObject *self, PyObject *args)
1148
PyObject *rv = NULL;
1150
if (!PyArg_ParseTuple(args, "y*ii:lin2lin",
1151
&view, &size, &size2))
1154
if (!audioop_check_parameters(view.len, size))
1156
if (!audioop_check_size(size2))
1159
if (view.len/size > PY_SSIZE_T_MAX/size2) {
1160
PyErr_SetString(PyExc_MemoryError,
1161
"not enough memory for output buffer");
1164
rv = PyBytes_FromStringAndSize(NULL, (view.len/size)*size2);
1167
ncp = (unsigned char *)PyBytes_AsString(rv);
1169
for (i = j = 0; i < view.len; i += size, j += size2) {
1170
int val = GETSAMPLE32(size, view.buf, i);
1171
SETSAMPLE32(size2, ncp, j, val);
1174
PyBuffer_Release(&view);
1190
audioop_ratecv(PyObject *self, PyObject *args)
1195
int size, nchannels, inrate, outrate, weightA, weightB;
1196
int chan, d, *prev_i, *cur_i, cur_o;
1197
PyObject *state, *samps, *str, *rv = NULL;
1198
int bytes_per_frame;
1202
if (!PyArg_ParseTuple(args, "y*iiiiO|ii:ratecv", &view, &size,
1203
&nchannels, &inrate, &outrate, &state,
1204
&weightA, &weightB))
1206
if (!audioop_check_size(size))
1208
if (nchannels < 1) {
1209
PyErr_SetString(AudioopError, "# of channels should be >= 1");
1212
if (size > INT_MAX / nchannels) {
1213
/* This overflow test is rigorously correct because
1214
both multiplicands are >= 1. Use the argument names
1215
from the docs for the error msg. */
1216
PyErr_SetString(PyExc_OverflowError,
1217
"width * nchannels too big for a C int");
1220
bytes_per_frame = size * nchannels;
1221
if (weightA < 1 || weightB < 0) {
1222
PyErr_SetString(AudioopError,
1223
"weightA should be >= 1, weightB should be >= 0");
1226
if (view.len % bytes_per_frame != 0) {
1227
PyErr_SetString(AudioopError, "not a whole number of frames");
1230
if (inrate <= 0 || outrate <= 0) {
1231
PyErr_SetString(AudioopError, "sampling rate not > 0");
1234
/* divide inrate and outrate by their greatest common divisor */
1235
d = gcd(inrate, outrate);
1238
/* divide weightA and weightB by their greatest common divisor */
1239
d = gcd(weightA, weightB);
1243
if ((size_t)nchannels > PY_SIZE_MAX/sizeof(int)) {
1244
PyErr_SetString(PyExc_MemoryError,
1245
"not enough memory for output buffer");
1248
prev_i = (int *) PyMem_Malloc(nchannels * sizeof(int));
1249
cur_i = (int *) PyMem_Malloc(nchannels * sizeof(int));
1250
if (prev_i == NULL || cur_i == NULL) {
1251
(void) PyErr_NoMemory();
1255
len = view.len / bytes_per_frame; /* # of frames */
1257
if (state == Py_None) {
1259
for (chan = 0; chan < nchannels; chan++)
1260
prev_i[chan] = cur_i[chan] = 0;
1263
if (!PyArg_ParseTuple(state,
1264
"iO!;audioop.ratecv: illegal state argument",
1265
&d, &PyTuple_Type, &samps))
1267
if (PyTuple_Size(samps) != nchannels) {
1268
PyErr_SetString(AudioopError,
1269
"illegal state argument");
1272
for (chan = 0; chan < nchannels; chan++) {
1273
if (!PyArg_ParseTuple(PyTuple_GetItem(samps, chan),
1274
"ii:ratecv", &prev_i[chan],
1280
/* str <- Space for the output buffer. */
1282
str = PyBytes_FromStringAndSize(NULL, 0);
1284
/* There are len input frames, so we need (mathematically)
1285
ceiling(len*outrate/inrate) output frames, and each frame
1286
requires bytes_per_frame bytes. Computing this
1287
without spurious overflow is the challenge; we can
1288
settle for a reasonable upper bound, though, in this
1289
case ceiling(len/inrate) * outrate. */
1291
/* compute ceiling(len/inrate) without overflow */
1292
Py_ssize_t q = len > 0 ? 1 + (len - 1) / inrate : 0;
1293
if (outrate > PY_SSIZE_T_MAX / q / bytes_per_frame)
1296
str = PyBytes_FromStringAndSize(NULL,
1297
q * outrate * bytes_per_frame);
1300
PyErr_SetString(PyExc_MemoryError,
1301
"not enough memory for output buffer");
1304
ncp = PyBytes_AsString(str);
1310
samps = PyTuple_New(nchannels);
1313
for (chan = 0; chan < nchannels; chan++)
1314
PyTuple_SetItem(samps, chan,
1315
Py_BuildValue("(ii)",
1318
if (PyErr_Occurred())
1320
/* We have checked before that the length
1321
* of the string fits into int. */
1322
len = (Py_ssize_t)(ncp - PyBytes_AsString(str));
1323
rv = PyBytes_FromStringAndSize
1324
(PyBytes_AsString(str), len);
1329
rv = Py_BuildValue("(O(iO))", str, d, samps);
1332
goto exit; /* return rv */
1334
for (chan = 0; chan < nchannels; chan++) {
1335
prev_i[chan] = cur_i[chan];
1336
cur_i[chan] = GETSAMPLE32(size, cp, 0);
1338
/* implements a simple digital filter */
1339
cur_i[chan] = (int)(
1340
((double)weightA * (double)cur_i[chan] +
1341
(double)weightB * (double)prev_i[chan]) /
1342
((double)weightA + (double)weightB));
1348
for (chan = 0; chan < nchannels; chan++) {
1349
cur_o = (int)(((double)prev_i[chan] * (double)d +
1350
(double)cur_i[chan] * (double)(outrate - d)) /
1352
SETSAMPLE32(size, ncp, 0, cur_o);
1362
PyBuffer_Release(&view);
1367
audioop_lin2ulaw(PyObject *self, PyObject *args)
1373
PyObject *rv = NULL;
1375
if (!PyArg_ParseTuple(args, "y*i:lin2ulaw",
1379
if (!audioop_check_parameters(view.len, size))
1382
rv = PyBytes_FromStringAndSize(NULL, view.len/size);
1385
ncp = (unsigned char *)PyBytes_AsString(rv);
1387
for (i = 0; i < view.len; i += size) {
1388
int val = GETSAMPLE32(size, view.buf, i);
1389
*ncp++ = st_14linear2ulaw(val >> 18);
1392
PyBuffer_Release(&view);
1397
audioop_ulaw2lin(PyObject *self, PyObject *args)
1404
PyObject *rv = NULL;
1406
if (!PyArg_ParseTuple(args, "y*i:ulaw2lin",
1410
if (!audioop_check_size(size))
1413
if (view.len > PY_SSIZE_T_MAX/size) {
1414
PyErr_SetString(PyExc_MemoryError,
1415
"not enough memory for output buffer");
1418
rv = PyBytes_FromStringAndSize(NULL, view.len*size);
1421
ncp = (signed char *)PyBytes_AsString(rv);
1424
for (i = 0; i < view.len*size; i += size) {
1425
int val = st_ulaw2linear16(*cp++) << 16;
1426
SETSAMPLE32(size, ncp, i, val);
1429
PyBuffer_Release(&view);
1434
audioop_lin2alaw(PyObject *self, PyObject *args)
1440
PyObject *rv = NULL;
1442
if (!PyArg_ParseTuple(args, "y*i:lin2alaw",
1446
if (!audioop_check_parameters(view.len, size))
1449
rv = PyBytes_FromStringAndSize(NULL, view.len/size);
1452
ncp = (unsigned char *)PyBytes_AsString(rv);
1454
for (i = 0; i < view.len; i += size) {
1455
int val = GETSAMPLE32(size, view.buf, i);
1456
*ncp++ = st_linear2alaw(val >> 19);
1459
PyBuffer_Release(&view);
1464
audioop_alaw2lin(PyObject *self, PyObject *args)
1471
PyObject *rv = NULL;
1473
if (!PyArg_ParseTuple(args, "y*i:alaw2lin",
1477
if (!audioop_check_size(size))
1480
if (view.len > PY_SSIZE_T_MAX/size) {
1481
PyErr_SetString(PyExc_MemoryError,
1482
"not enough memory for output buffer");
1485
rv = PyBytes_FromStringAndSize(NULL, view.len*size);
1488
ncp = (signed char *)PyBytes_AsString(rv);
1491
for (i = 0; i < view.len*size; i += size) {
1492
val = st_alaw2linear16(*cp++) << 16;
1493
SETSAMPLE32(size, ncp, i, val);
1496
PyBuffer_Release(&view);
1501
audioop_lin2adpcm(PyObject *self, PyObject *args)
1506
int size, step, valpred, delta,
1507
index, sign, vpdiff, diff;
1508
PyObject *rv = NULL, *state, *str;
1509
int outputbuffer = 0, bufferstep;
1511
if (!PyArg_ParseTuple(args, "y*iO:lin2adpcm",
1512
&view, &size, &state))
1515
if (!audioop_check_parameters(view.len, size))
1518
str = PyBytes_FromStringAndSize(NULL, view.len/(size*2));
1521
ncp = (signed char *)PyBytes_AsString(str);
1523
/* Decode state, should have (value, step) */
1524
if ( state == Py_None ) {
1525
/* First time, it seems. Set defaults */
1528
} else if (!PyArg_ParseTuple(state, "ii", &valpred, &index))
1531
step = stepsizeTable[index];
1534
for (i = 0; i < view.len; i += size) {
1535
int val = GETSAMPLE32(size, view.buf, i) >> 16;
1537
/* Step 1 - compute difference with previous value */
1538
if (val < valpred) {
1539
diff = valpred - val;
1543
diff = val - valpred;
1547
/* Step 2 - Divide and clamp */
1549
** This code *approximately* computes:
1550
** delta = diff*4/step;
1551
** vpdiff = (delta+0.5)*step/4;
1552
** but in shift step bits are dropped. The net result of this
1553
** is that even if you have fast mul/div hardware you cannot
1554
** put it to good use since the fixup would be too expensive.
1557
vpdiff = (step >> 3);
1559
if ( diff >= step ) {
1565
if ( diff >= step ) {
1571
if ( diff >= step ) {
1576
/* Step 3 - Update previous value */
1582
/* Step 4 - Clamp previous value to 16 bits */
1583
if ( valpred > 32767 )
1585
else if ( valpred < -32768 )
1588
/* Step 5 - Assemble value, update index and step values */
1591
index += indexTable[delta];
1592
if ( index < 0 ) index = 0;
1593
if ( index > 88 ) index = 88;
1594
step = stepsizeTable[index];
1596
/* Step 6 - Output value */
1598
outputbuffer = (delta << 4) & 0xf0;
1600
*ncp++ = (delta & 0x0f) | outputbuffer;
1602
bufferstep = !bufferstep;
1604
rv = Py_BuildValue("(O(ii))", str, valpred, index);
1607
PyBuffer_Release(&view);
1612
audioop_adpcm2lin(PyObject *self, PyObject *args)
1617
Py_ssize_t i, outlen;
1618
int size, valpred, step, delta, index, sign, vpdiff;
1619
PyObject *rv = NULL, *str, *state;
1620
int inputbuffer = 0, bufferstep;
1622
if (!PyArg_ParseTuple(args, "y*iO:adpcm2lin",
1623
&view, &size, &state))
1626
if (!audioop_check_size(size))
1629
/* Decode state, should have (value, step) */
1630
if ( state == Py_None ) {
1631
/* First time, it seems. Set defaults */
1634
} else if (!PyArg_ParseTuple(state, "ii", &valpred, &index))
1637
if (view.len > (PY_SSIZE_T_MAX/2)/size) {
1638
PyErr_SetString(PyExc_MemoryError,
1639
"not enough memory for output buffer");
1642
outlen = view.len*size*2;
1643
str = PyBytes_FromStringAndSize(NULL, outlen);
1646
ncp = (signed char *)PyBytes_AsString(str);
1649
step = stepsizeTable[index];
1652
for (i = 0; i < outlen; i += size) {
1653
/* Step 1 - get the delta value and compute next index */
1655
delta = inputbuffer & 0xf;
1657
inputbuffer = *cp++;
1658
delta = (inputbuffer >> 4) & 0xf;
1661
bufferstep = !bufferstep;
1663
/* Step 2 - Find new index value (for later) */
1664
index += indexTable[delta];
1665
if ( index < 0 ) index = 0;
1666
if ( index > 88 ) index = 88;
1668
/* Step 3 - Separate sign and magnitude */
1672
/* Step 4 - Compute difference and new predicted value */
1674
** Computes 'vpdiff = (delta+0.5)*step/4', but see comment
1678
if ( delta & 4 ) vpdiff += step;
1679
if ( delta & 2 ) vpdiff += step>>1;
1680
if ( delta & 1 ) vpdiff += step>>2;
1687
/* Step 5 - clamp output value */
1688
if ( valpred > 32767 )
1690
else if ( valpred < -32768 )
1693
/* Step 6 - Update step value */
1694
step = stepsizeTable[index];
1696
/* Step 6 - Output value */
1697
SETSAMPLE32(size, ncp, i, valpred << 16);
1700
rv = Py_BuildValue("(O(ii))", str, valpred, index);
1703
PyBuffer_Release(&view);
1707
static PyMethodDef audioop_methods[] = {
1708
{ "max", audioop_max, METH_VARARGS },
1709
{ "minmax", audioop_minmax, METH_VARARGS },
1710
{ "avg", audioop_avg, METH_VARARGS },
1711
{ "maxpp", audioop_maxpp, METH_VARARGS },
1712
{ "avgpp", audioop_avgpp, METH_VARARGS },
1713
{ "rms", audioop_rms, METH_VARARGS },
1714
{ "findfit", audioop_findfit, METH_VARARGS },
1715
{ "findmax", audioop_findmax, METH_VARARGS },
1716
{ "findfactor", audioop_findfactor, METH_VARARGS },
1717
{ "cross", audioop_cross, METH_VARARGS },
1718
{ "mul", audioop_mul, METH_VARARGS },
1719
{ "add", audioop_add, METH_VARARGS },
1720
{ "bias", audioop_bias, METH_VARARGS },
1721
{ "ulaw2lin", audioop_ulaw2lin, METH_VARARGS },
1722
{ "lin2ulaw", audioop_lin2ulaw, METH_VARARGS },
1723
{ "alaw2lin", audioop_alaw2lin, METH_VARARGS },
1724
{ "lin2alaw", audioop_lin2alaw, METH_VARARGS },
1725
{ "lin2lin", audioop_lin2lin, METH_VARARGS },
1726
{ "adpcm2lin", audioop_adpcm2lin, METH_VARARGS },
1727
{ "lin2adpcm", audioop_lin2adpcm, METH_VARARGS },
1728
{ "tomono", audioop_tomono, METH_VARARGS },
1729
{ "tostereo", audioop_tostereo, METH_VARARGS },
1730
{ "getsample", audioop_getsample, METH_VARARGS },
1731
{ "reverse", audioop_reverse, METH_VARARGS },
1732
{ "byteswap", audioop_byteswap, METH_VARARGS },
1733
{ "ratecv", audioop_ratecv, METH_VARARGS },
1738
static struct PyModuleDef audioopmodule = {
1739
PyModuleDef_HEAD_INIT,
1751
PyInit_audioop(void)
1754
m = PyModule_Create(&audioopmodule);
1757
d = PyModule_GetDict(m);
1760
AudioopError = PyErr_NewException("audioop.error", NULL, NULL);
1761
if (AudioopError != NULL)
1762
PyDict_SetItemString(d,"error",AudioopError);