1
/* ffmpeg/libavcodec/ppc/fdct_altivec.c, this file is part of the
2
* AltiVec optimized library for the FFMPEG Multimedia System
3
* Copyright (C) 2003 James Klicman <james@klicman.org>
5
* This file is part of FFmpeg.
7
* FFmpeg is free software; you can redistribute it and/or
8
* modify it under the terms of the GNU Lesser General Public
9
* License as published by the Free Software Foundation; either
10
* version 2.1 of the License, or (at your option) any later version.
12
* FFmpeg is distributed in the hope that it will be useful,
13
* but WITHOUT ANY WARRANTY; without even the implied warranty of
14
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15
* Lesser General Public License for more details.
17
* You should have received a copy of the GNU Lesser General Public
18
* License along with FFmpeg; if not, write to the Free Software
19
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
24
#include "../dsputil.h"
25
#include "dsputil_altivec.h"
26
#include "gcc_fixes.h"
29
#define vs16(v) ((vector signed short)(v))
30
#define vs32(v) ((vector signed int)(v))
31
#define vu8(v) ((vector unsigned char)(v))
32
#define vu16(v) ((vector unsigned short)(v))
33
#define vu32(v) ((vector unsigned int)(v))
36
#define C1 0.98078525066375732421875000 /* cos(1*PI/16) */
37
#define C2 0.92387950420379638671875000 /* cos(2*PI/16) */
38
#define C3 0.83146959543228149414062500 /* cos(3*PI/16) */
39
#define C4 0.70710676908493041992187500 /* cos(4*PI/16) */
40
#define C5 0.55557024478912353515625000 /* cos(5*PI/16) */
41
#define C6 0.38268342614173889160156250 /* cos(6*PI/16) */
42
#define C7 0.19509032368659973144531250 /* cos(7*PI/16) */
43
#define SQRT_2 1.41421353816986083984375000 /* sqrt(2) */
48
#define W2 (SQRT_2 * C6)
49
#define W3 (SQRT_2 * C3)
50
#define W4 (SQRT_2 * (-C1 + C3 + C5 - C7))
51
#define W5 (SQRT_2 * ( C1 + C3 - C5 + C7))
52
#define W6 (SQRT_2 * ( C1 + C3 + C5 - C7))
53
#define W7 (SQRT_2 * ( C1 + C3 - C5 - C7))
54
#define W8 (SQRT_2 * ( C7 - C3))
55
#define W9 (SQRT_2 * (-C1 - C3))
56
#define WA (SQRT_2 * (-C3 - C5))
57
#define WB (SQRT_2 * ( C5 - C3))
60
static vector float fdctconsts[3] = {
61
(vector float)AVV( W0, W1, W2, W3 ),
62
(vector float)AVV( W4, W5, W6, W7 ),
63
(vector float)AVV( W8, W9, WA, WB )
66
#define LD_W0 vec_splat(cnsts0, 0)
67
#define LD_W1 vec_splat(cnsts0, 1)
68
#define LD_W2 vec_splat(cnsts0, 2)
69
#define LD_W3 vec_splat(cnsts0, 3)
70
#define LD_W4 vec_splat(cnsts1, 0)
71
#define LD_W5 vec_splat(cnsts1, 1)
72
#define LD_W6 vec_splat(cnsts1, 2)
73
#define LD_W7 vec_splat(cnsts1, 3)
74
#define LD_W8 vec_splat(cnsts2, 0)
75
#define LD_W9 vec_splat(cnsts2, 1)
76
#define LD_WA vec_splat(cnsts2, 2)
77
#define LD_WB vec_splat(cnsts2, 3)
80
#define FDCTROW(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */ \
81
x0 = vec_add(b0, b7); /* x0 = b0 + b7; */ \
82
x7 = vec_sub(b0, b7); /* x7 = b0 - b7; */ \
83
x1 = vec_add(b1, b6); /* x1 = b1 + b6; */ \
84
x6 = vec_sub(b1, b6); /* x6 = b1 - b6; */ \
85
x2 = vec_add(b2, b5); /* x2 = b2 + b5; */ \
86
x5 = vec_sub(b2, b5); /* x5 = b2 - b5; */ \
87
x3 = vec_add(b3, b4); /* x3 = b3 + b4; */ \
88
x4 = vec_sub(b3, b4); /* x4 = b3 - b4; */ \
90
b7 = vec_add(x0, x3); /* b7 = x0 + x3; */ \
91
b1 = vec_add(x1, x2); /* b1 = x1 + x2; */ \
92
b0 = vec_add(b7, b1); /* b0 = b7 + b1; */ \
93
b4 = vec_sub(b7, b1); /* b4 = b7 - b1; */ \
95
b2 = vec_sub(x0, x3); /* b2 = x0 - x3; */ \
96
b6 = vec_sub(x1, x2); /* b6 = x1 - x2; */ \
97
b5 = vec_add(b6, b2); /* b5 = b6 + b2; */ \
99
b5 = vec_madd(cnst, b5, mzero); /* b5 = b5 * W2; */ \
101
b2 = vec_madd(cnst, b2, b5); /* b2 = b5 + b2 * W1; */ \
103
b6 = vec_madd(cnst, b6, b5); /* b6 = b5 + b6 * W0; */ \
105
x0 = vec_add(x4, x7); /* x0 = x4 + x7; */ \
106
x1 = vec_add(x5, x6); /* x1 = x5 + x6; */ \
107
x2 = vec_add(x4, x6); /* x2 = x4 + x6; */ \
108
x3 = vec_add(x5, x7); /* x3 = x5 + x7; */ \
109
x8 = vec_add(x2, x3); /* x8 = x2 + x3; */ \
111
x8 = vec_madd(cnst, x8, mzero); /* x8 = x8 * W3; */ \
114
x0 = vec_madd(cnst, x0, mzero); /* x0 *= W8; */ \
116
x1 = vec_madd(cnst, x1, mzero); /* x1 *= W9; */ \
118
x2 = vec_madd(cnst, x2, x8); /* x2 = x2 * WA + x8; */ \
120
x3 = vec_madd(cnst, x3, x8); /* x3 = x3 * WB + x8; */ \
123
b7 = vec_madd(cnst, x4, x0); /* b7 = x4 * W4 + x0; */ \
125
b5 = vec_madd(cnst, x5, x1); /* b5 = x5 * W5 + x1; */ \
127
b3 = vec_madd(cnst, x6, x1); /* b3 = x6 * W6 + x1; */ \
129
b1 = vec_madd(cnst, x7, x0); /* b1 = x7 * W7 + x0; */ \
131
b7 = vec_add(b7, x2); /* b7 = b7 + x2; */ \
132
b5 = vec_add(b5, x3); /* b5 = b5 + x3; */ \
133
b3 = vec_add(b3, x2); /* b3 = b3 + x2; */ \
134
b1 = vec_add(b1, x3); /* b1 = b1 + x3; */ \
137
#define FDCTCOL(b0,b1,b2,b3,b4,b5,b6,b7) /* {{{ */ \
138
x0 = vec_add(b0, b7); /* x0 = b0 + b7; */ \
139
x7 = vec_sub(b0, b7); /* x7 = b0 - b7; */ \
140
x1 = vec_add(b1, b6); /* x1 = b1 + b6; */ \
141
x6 = vec_sub(b1, b6); /* x6 = b1 - b6; */ \
142
x2 = vec_add(b2, b5); /* x2 = b2 + b5; */ \
143
x5 = vec_sub(b2, b5); /* x5 = b2 - b5; */ \
144
x3 = vec_add(b3, b4); /* x3 = b3 + b4; */ \
145
x4 = vec_sub(b3, b4); /* x4 = b3 - b4; */ \
147
b7 = vec_add(x0, x3); /* b7 = x0 + x3; */ \
148
b1 = vec_add(x1, x2); /* b1 = x1 + x2; */ \
149
b0 = vec_add(b7, b1); /* b0 = b7 + b1; */ \
150
b4 = vec_sub(b7, b1); /* b4 = b7 - b1; */ \
152
b2 = vec_sub(x0, x3); /* b2 = x0 - x3; */ \
153
b6 = vec_sub(x1, x2); /* b6 = x1 - x2; */ \
154
b5 = vec_add(b6, b2); /* b5 = b6 + b2; */ \
156
b5 = vec_madd(cnst, b5, mzero); /* b5 = b5 * W2; */ \
158
b2 = vec_madd(cnst, b2, b5); /* b2 = b5 + b2 * W1; */ \
160
b6 = vec_madd(cnst, b6, b5); /* b6 = b5 + b6 * W0; */ \
162
x0 = vec_add(x4, x7); /* x0 = x4 + x7; */ \
163
x1 = vec_add(x5, x6); /* x1 = x5 + x6; */ \
164
x2 = vec_add(x4, x6); /* x2 = x4 + x6; */ \
165
x3 = vec_add(x5, x7); /* x3 = x5 + x7; */ \
166
x8 = vec_add(x2, x3); /* x8 = x2 + x3; */ \
168
x8 = vec_madd(cnst, x8, mzero); /* x8 = x8 * W3; */ \
171
x0 = vec_madd(cnst, x0, mzero); /* x0 *= W8; */ \
173
x1 = vec_madd(cnst, x1, mzero); /* x1 *= W9; */ \
175
x2 = vec_madd(cnst, x2, x8); /* x2 = x2 * WA + x8; */ \
177
x3 = vec_madd(cnst, x3, x8); /* x3 = x3 * WB + x8; */ \
180
b7 = vec_madd(cnst, x4, x0); /* b7 = x4 * W4 + x0; */ \
182
b5 = vec_madd(cnst, x5, x1); /* b5 = x5 * W5 + x1; */ \
184
b3 = vec_madd(cnst, x6, x1); /* b3 = x6 * W6 + x1; */ \
186
b1 = vec_madd(cnst, x7, x0); /* b1 = x7 * W7 + x0; */ \
188
b7 = vec_add(b7, x2); /* b7 += x2; */ \
189
b5 = vec_add(b5, x3); /* b5 += x3; */ \
190
b3 = vec_add(b3, x2); /* b3 += x2; */ \
191
b1 = vec_add(b1, x3); /* b1 += x3; */ \
196
/* two dimensional discrete cosine transform */
198
void fdct_altivec(int16_t *block)
200
POWERPC_PERF_DECLARE(altivec_fdct, 1);
201
vector signed short *bp;
203
vector float b00, b10, b20, b30, b40, b50, b60, b70;
204
vector float b01, b11, b21, b31, b41, b51, b61, b71;
205
vector float mzero, cnst, cnsts0, cnsts1, cnsts2;
206
vector float x0, x1, x2, x3, x4, x5, x6, x7, x8;
208
POWERPC_PERF_START_COUNT(altivec_fdct, 1);
211
/* setup constants {{{ */
213
mzero = ((vector float)vec_splat_u32(-1));
214
mzero = ((vector float)vec_sl(vu32(mzero), vu32(mzero)));
216
cnsts0 = vec_ld(0, cp); cp++;
217
cnsts1 = vec_ld(0, cp); cp++;
218
cnsts2 = vec_ld(0, cp);
222
/* 8x8 matrix transpose (vector short[8]) {{{ */
223
#define MERGE_S16(hl,a,b) vec_merge##hl(vs16(a), vs16(b))
225
bp = (vector signed short*)block;
226
b00 = ((vector float)vec_ld(0, bp));
227
b40 = ((vector float)vec_ld(16*4, bp));
228
b01 = ((vector float)MERGE_S16(h, b00, b40));
229
b11 = ((vector float)MERGE_S16(l, b00, b40));
231
b10 = ((vector float)vec_ld(0, bp));
232
b50 = ((vector float)vec_ld(16*4, bp));
233
b21 = ((vector float)MERGE_S16(h, b10, b50));
234
b31 = ((vector float)MERGE_S16(l, b10, b50));
236
b20 = ((vector float)vec_ld(0, bp));
237
b60 = ((vector float)vec_ld(16*4, bp));
238
b41 = ((vector float)MERGE_S16(h, b20, b60));
239
b51 = ((vector float)MERGE_S16(l, b20, b60));
241
b30 = ((vector float)vec_ld(0, bp));
242
b70 = ((vector float)vec_ld(16*4, bp));
243
b61 = ((vector float)MERGE_S16(h, b30, b70));
244
b71 = ((vector float)MERGE_S16(l, b30, b70));
246
x0 = ((vector float)MERGE_S16(h, b01, b41));
247
x1 = ((vector float)MERGE_S16(l, b01, b41));
248
x2 = ((vector float)MERGE_S16(h, b11, b51));
249
x3 = ((vector float)MERGE_S16(l, b11, b51));
250
x4 = ((vector float)MERGE_S16(h, b21, b61));
251
x5 = ((vector float)MERGE_S16(l, b21, b61));
252
x6 = ((vector float)MERGE_S16(h, b31, b71));
253
x7 = ((vector float)MERGE_S16(l, b31, b71));
255
b00 = ((vector float)MERGE_S16(h, x0, x4));
256
b10 = ((vector float)MERGE_S16(l, x0, x4));
257
b20 = ((vector float)MERGE_S16(h, x1, x5));
258
b30 = ((vector float)MERGE_S16(l, x1, x5));
259
b40 = ((vector float)MERGE_S16(h, x2, x6));
260
b50 = ((vector float)MERGE_S16(l, x2, x6));
261
b60 = ((vector float)MERGE_S16(h, x3, x7));
262
b70 = ((vector float)MERGE_S16(l, x3, x7));
268
/* Some of the initial calculations can be done as vector short before
269
* conversion to vector float. The following code section takes advantage
274
x0 = ((vector float)vec_add(vs16(b00), vs16(b70)));
275
x7 = ((vector float)vec_sub(vs16(b00), vs16(b70)));
276
x1 = ((vector float)vec_add(vs16(b10), vs16(b60)));
277
x6 = ((vector float)vec_sub(vs16(b10), vs16(b60)));
278
x2 = ((vector float)vec_add(vs16(b20), vs16(b50)));
279
x5 = ((vector float)vec_sub(vs16(b20), vs16(b50)));
280
x3 = ((vector float)vec_add(vs16(b30), vs16(b40)));
281
x4 = ((vector float)vec_sub(vs16(b30), vs16(b40)));
283
b70 = ((vector float)vec_add(vs16(x0), vs16(x3)));
284
b10 = ((vector float)vec_add(vs16(x1), vs16(x2)));
286
b00 = ((vector float)vec_add(vs16(b70), vs16(b10)));
287
b40 = ((vector float)vec_sub(vs16(b70), vs16(b10)));
290
b##n##1 = ((vector float)vec_unpackl(vs16(b##n##0))); \
291
b##n##0 = ((vector float)vec_unpackh(vs16(b##n##0))); \
292
b##n##1 = vec_ctf(vs32(b##n##1), 0); \
293
b##n##0 = vec_ctf(vs32(b##n##0), 0);
298
b20 = ((vector float)vec_sub(vs16(x0), vs16(x3)));
299
b60 = ((vector float)vec_sub(vs16(x1), vs16(x2)));
306
x0 = vec_add(b60, b20);
307
x1 = vec_add(b61, b21);
310
x0 = vec_madd(cnst, x0, mzero);
311
x1 = vec_madd(cnst, x1, mzero);
313
b20 = vec_madd(cnst, b20, x0);
314
b21 = vec_madd(cnst, b21, x1);
316
b60 = vec_madd(cnst, b60, x0);
317
b61 = vec_madd(cnst, b61, x1);
320
b##0 = ((vector float)vec_unpackh(vs16(x))); \
321
b##1 = ((vector float)vec_unpackl(vs16(x))); \
322
b##0 = vec_ctf(vs32(b##0), 0); \
323
b##1 = vec_ctf(vs32(b##1), 0); \
333
x0 = vec_add(b70, b10);
334
x1 = vec_add(b50, b30);
335
x2 = vec_add(b70, b30);
336
x3 = vec_add(b50, b10);
337
x8 = vec_add(x2, x3);
339
x8 = vec_madd(cnst, x8, mzero);
342
x0 = vec_madd(cnst, x0, mzero);
344
x1 = vec_madd(cnst, x1, mzero);
346
x2 = vec_madd(cnst, x2, x8);
348
x3 = vec_madd(cnst, x3, x8);
351
b70 = vec_madd(cnst, b70, x0);
353
b50 = vec_madd(cnst, b50, x1);
355
b30 = vec_madd(cnst, b30, x1);
357
b10 = vec_madd(cnst, b10, x0);
359
b70 = vec_add(b70, x2);
360
b50 = vec_add(b50, x3);
361
b30 = vec_add(b30, x2);
362
b10 = vec_add(b10, x3);
365
x0 = vec_add(b71, b11);
366
x1 = vec_add(b51, b31);
367
x2 = vec_add(b71, b31);
368
x3 = vec_add(b51, b11);
369
x8 = vec_add(x2, x3);
371
x8 = vec_madd(cnst, x8, mzero);
374
x0 = vec_madd(cnst, x0, mzero);
376
x1 = vec_madd(cnst, x1, mzero);
378
x2 = vec_madd(cnst, x2, x8);
380
x3 = vec_madd(cnst, x3, x8);
383
b71 = vec_madd(cnst, b71, x0);
385
b51 = vec_madd(cnst, b51, x1);
387
b31 = vec_madd(cnst, b31, x1);
389
b11 = vec_madd(cnst, b11, x0);
391
b71 = vec_add(b71, x2);
392
b51 = vec_add(b51, x3);
393
b31 = vec_add(b31, x2);
394
b11 = vec_add(b11, x3);
397
/* convert to float {{{ */
399
vs32(b##n##1) = vec_unpackl(vs16(b##n##0)); \
400
vs32(b##n##0) = vec_unpackh(vs16(b##n##0)); \
401
b##n##1 = vec_ctf(vs32(b##n##1), 0); \
402
b##n##0 = vec_ctf(vs32(b##n##0), 0); \
416
FDCTROW(b00, b10, b20, b30, b40, b50, b60, b70);
417
FDCTROW(b01, b11, b21, b31, b41, b51, b61, b71);
421
/* 8x8 matrix transpose (vector float[8][2]) {{{ */
422
x0 = vec_mergel(b00, b20);
423
x1 = vec_mergeh(b00, b20);
424
x2 = vec_mergel(b10, b30);
425
x3 = vec_mergeh(b10, b30);
427
b00 = vec_mergeh(x1, x3);
428
b10 = vec_mergel(x1, x3);
429
b20 = vec_mergeh(x0, x2);
430
b30 = vec_mergel(x0, x2);
432
x4 = vec_mergel(b41, b61);
433
x5 = vec_mergeh(b41, b61);
434
x6 = vec_mergel(b51, b71);
435
x7 = vec_mergeh(b51, b71);
437
b41 = vec_mergeh(x5, x7);
438
b51 = vec_mergel(x5, x7);
439
b61 = vec_mergeh(x4, x6);
440
b71 = vec_mergel(x4, x6);
442
x0 = vec_mergel(b01, b21);
443
x1 = vec_mergeh(b01, b21);
444
x2 = vec_mergel(b11, b31);
445
x3 = vec_mergeh(b11, b31);
447
x4 = vec_mergel(b40, b60);
448
x5 = vec_mergeh(b40, b60);
449
x6 = vec_mergel(b50, b70);
450
x7 = vec_mergeh(b50, b70);
452
b40 = vec_mergeh(x1, x3);
453
b50 = vec_mergel(x1, x3);
454
b60 = vec_mergeh(x0, x2);
455
b70 = vec_mergel(x0, x2);
457
b01 = vec_mergeh(x5, x7);
458
b11 = vec_mergel(x5, x7);
459
b21 = vec_mergeh(x4, x6);
460
b31 = vec_mergel(x4, x6);
464
FDCTCOL(b00, b10, b20, b30, b40, b50, b60, b70);
465
FDCTCOL(b01, b11, b21, b31, b41, b51, b61, b71);
468
/* round, convert back to short {{{ */
470
b##n##0 = vec_round(b##n##0); \
471
b##n##1 = vec_round(b##n##1); \
472
b##n##0 = ((vector float)vec_cts(b##n##0, 0)); \
473
b##n##1 = ((vector float)vec_cts(b##n##1, 0)); \
474
b##n##0 = ((vector float)vec_pack(vs32(b##n##0), vs32(b##n##1))); \
475
vec_st(vs16(b##n##0), 0, bp);
477
bp = (vector signed short*)block;
490
POWERPC_PERF_STOP_COUNT(altivec_fdct, 1);
493
/* vim:set foldmethod=marker foldlevel=0: */