#include "dsputil_altivec.h"

#define vector_s16_t vector signed short
#define const_vector_s16_t const_vector signed short
#define vector_u16_t vector unsigned short
#define vector_s8_t vector signed char
#define vector_u8_t vector unsigned char
#define vector_s32_t vector signed int
#define vector_u32_t vector unsigned int
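
/* IDCT_HALF: one 1-D 8-point IDCT pass, kept entirely in registers.
 * vec_mradds(a, b, c) computes saturate(((a * b + 0x4000) >> 15) + c),
 * which is what the 1.15 fixed-point coefficients below are scaled for. */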
#define IDCT_HALF \
    /* 1st stage */ \
    t1 = vec_mradds (a1, vx7, vx1 ); \
    t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7)); \
    t7 = vec_mradds (a2, vx5, vx3); \
    t3 = vec_mradds (ma2, vx3, vx5); \
\
    /* 2nd stage */ \
    t5 = vec_adds (vx0, vx4); \
    t0 = vec_subs (vx0, vx4); \
    t2 = vec_mradds (a0, vx6, vx2); \
    t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6)); \
    t6 = vec_adds (t8, t3); \
    t3 = vec_subs (t8, t3); \
    t8 = vec_subs (t1, t7); \
    t1 = vec_adds (t1, t7); \
\
    /* 3rd stage */ \
    t7 = vec_adds (t5, t2); \
    t2 = vec_subs (t5, t2); \
    t5 = vec_adds (t0, t4); \
    t0 = vec_subs (t0, t4); \
    t4 = vec_subs (t8, t3); \
    t3 = vec_adds (t8, t3); \
\
    /* 4th stage */ \
    vy0 = vec_adds (t7, t1); \
    vy7 = vec_subs (t7, t1); \
    vy1 = vec_mradds (c4, t3, t5); \
    vy6 = vec_mradds (mc4, t3, t5); \
    vy2 = vec_mradds (c4, t4, t0); \
    vy5 = vec_mradds (mc4, t4, t0); \
    vy3 = vec_adds (t2, t6); \
    vy4 = vec_subs (t2, t6);
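
/* IDCT: full 2-D 8x8 IDCT. The input rows are pre-scaled (shifted left by 4,
 * then vec_mradds with per-row constants), run through IDCT_HALF, transposed
 * with three passes of vec_mergeh/vec_mergel, run through IDCT_HALF again,
 * and finally shifted right by 6. The bias added to the first transposed
 * row effectively pre-rounds that final shift. */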
#define IDCT \
    vector_s16_t vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7; \
    vector_s16_t vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7; \
    vector_s16_t a0, a1, a2, ma2, c4, mc4, zero, bias; \
    vector_s16_t t0, t1, t2, t3, t4, t5, t6, t7, t8; \
    vector_u16_t shift; \
\
    c4 = vec_splat (constants[0], 0); \
    a0 = vec_splat (constants[0], 1); \
    a1 = vec_splat (constants[0], 2); \
    a2 = vec_splat (constants[0], 3); \
    mc4 = vec_splat (constants[0], 4); \
    ma2 = vec_splat (constants[0], 5); \
    bias = (vector_s16_t)vec_splat ((vector_s32_t)constants[0], 3); \
\
    zero = vec_splat_s16 (0); \
    shift = vec_splat_u16 (4); \
\
    vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero); \
    vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero); \
    vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero); \
    vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero); \
    vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero); \
    vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero); \
    vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero); \
    vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero); \
\
    IDCT_HALF \
\
    vx0 = vec_mergeh (vy0, vy4); \
    vx1 = vec_mergel (vy0, vy4); \
    vx2 = vec_mergeh (vy1, vy5); \
    vx3 = vec_mergel (vy1, vy5); \
    vx4 = vec_mergeh (vy2, vy6); \
    vx5 = vec_mergel (vy2, vy6); \
    vx6 = vec_mergeh (vy3, vy7); \
    vx7 = vec_mergel (vy3, vy7); \
\
    vy0 = vec_mergeh (vx0, vx4); \
    vy1 = vec_mergel (vx0, vx4); \
    vy2 = vec_mergeh (vx1, vx5); \
    vy3 = vec_mergel (vx1, vx5); \
    vy4 = vec_mergeh (vx2, vx6); \
    vy5 = vec_mergel (vx2, vx6); \
    vy6 = vec_mergeh (vx3, vx7); \
    vy7 = vec_mergel (vx3, vx7); \
\
    vx0 = vec_adds (vec_mergeh (vy0, vy4), bias); \
    vx1 = vec_mergel (vy0, vy4); \
    vx2 = vec_mergeh (vy1, vy5); \
    vx3 = vec_mergel (vy1, vy5); \
    vx4 = vec_mergeh (vy2, vy6); \
    vx5 = vec_mergel (vy2, vy6); \
    vx6 = vec_mergeh (vy3, vy7); \
    vx7 = vec_mergel (vy3, vy7); \
\
    IDCT_HALF \
\
    shift = vec_splat_u16 (6); \
    vx0 = vec_sra (vy0, shift); \
    vx1 = vec_sra (vy1, shift); \
    vx2 = vec_sra (vy2, shift); \
    vx3 = vec_sra (vy3, shift); \
    vx4 = vec_sra (vy4, shift); \
    vx5 = vec_sra (vy5, shift); \
    vx6 = vec_sra (vy6, shift); \
    vx7 = vec_sra (vy7, shift);
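
/* 1.15 fixed-point tables: constants[0] packs c4 = cos(4*pi/16) and the
 * a0/a1/a2 butterfly coefficients (plus their negations and the rounding
 * bias); constants[1..4] hold the pre-scale factors applied to each input
 * row before the first IDCT_HALF pass. */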
static const_vector_s16_t constants[5] = {
    (vector_s16_t) AVV(23170, 13573, 6518, 21895, -23170, -21895, 32, 31),
    (vector_s16_t) AVV(16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725),
    (vector_s16_t) AVV(22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521),
    (vector_s16_t) AVV(21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692),
    (vector_s16_t) AVV(19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722)
};
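
/* Compute the IDCT of block and store the clamped unsigned 8-bit result
 * into dest, advancing one stride per row. */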
void idct_put_altivec(uint8_t* dest, int stride, vector_s16_t* block)
{
POWERPC_PERF_DECLARE(altivec_idct_put_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
POWERPC_PERF_START_COUNT(altivec_idct_put_num, 1);
    void simple_idct_put(uint8_t *dest, int line_size, int16_t *block);
    simple_idct_put(dest, stride, (int16_t*)block);
POWERPC_PERF_STOP_COUNT(altivec_idct_put_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    vector_u8_t tmp;

#ifdef CONFIG_POWERPC_PERF
POWERPC_PERF_START_COUNT(altivec_idct_put_num, 1);
#endif

    IDCT
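
/* COPY saturates one row of 16-bit coefficients down to 8 unsigned bytes
 * and writes them with two 32-bit element stores. */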
#define COPY(dest,src) \
    tmp = vec_packsu (src, src); \
    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \
    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);

    COPY (dest, vx0)    dest += stride;
    COPY (dest, vx1)    dest += stride;
    COPY (dest, vx2)    dest += stride;
    COPY (dest, vx3)    dest += stride;
    COPY (dest, vx4)    dest += stride;
    COPY (dest, vx5)    dest += stride;
    COPY (dest, vx6)    dest += stride;
    COPY (dest, vx7)

POWERPC_PERF_STOP_COUNT(altivec_idct_put_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}
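
/* Compute the IDCT of block and add the result to the 8-bit pixels
 * already present in dest (saturating), advancing one stride per row. */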
void idct_add_altivec(uint8_t* dest, int stride, vector_s16_t* block)
{
POWERPC_PERF_DECLARE(altivec_idct_add_num, 1);
#ifdef ALTIVEC_USE_REFERENCE_C_CODE
POWERPC_PERF_START_COUNT(altivec_idct_add_num, 1);
    void simple_idct_add(uint8_t *dest, int line_size, int16_t *block);
    simple_idct_add(dest, stride, (int16_t*)block);
POWERPC_PERF_STOP_COUNT(altivec_idct_add_num, 1);
#else /* ALTIVEC_USE_REFERENCE_C_CODE */
    vector_u8_t tmp;
    vector_s16_t tmp2, tmp3;
    vector_u8_t perm0;
    vector_u8_t perm1;
    vector_u8_t p0, p1, p;

#ifdef CONFIG_POWERPC_PERF
POWERPC_PERF_START_COUNT(altivec_idct_add_num, 1);
#endif

    IDCT
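
/* Build permute masks that zero-extend 8 possibly unaligned bytes of dest
 * into 16-bit lanes: vec_lvsl yields the alignment-dependent byte indices,
 * and the 0xff bytes from vec_splat_u8(-1) select zeros in vec_perm. */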
    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);
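
/* ADD loads 8 destination pixels, widens them to 16 bits with the permute
 * masks above, adds the IDCT output with saturation, and packs the sums
 * back to unsigned bytes. */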
#define ADD(dest,src,perm) \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \
    tmp = vec_ld (0, dest); \
    tmp2 = (vector_s16_t)vec_perm (tmp, (vector_u8_t)zero, perm); \
    tmp3 = vec_adds (tmp2, src); \
    tmp = vec_packsu (tmp3, tmp3); \
    vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \
    vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);

    ADD (dest, vx0, perm0)    dest += stride;
    ADD (dest, vx1, perm1)    dest += stride;
    ADD (dest, vx2, perm0)    dest += stride;
    ADD (dest, vx3, perm1)    dest += stride;
    ADD (dest, vx4, perm0)    dest += stride;
    ADD (dest, vx5, perm1)    dest += stride;
    ADD (dest, vx6, perm0)    dest += stride;
    ADD (dest, vx7, perm1)

POWERPC_PERF_STOP_COUNT(altivec_idct_add_num, 1);
#endif /* ALTIVEC_USE_REFERENCE_C_CODE */
}