 static inline void mmxext_row_head (int16_t * const row, const int offset,
                                     const int16_t * const table)
 {
-    movq_m2r (*(row+offset), mm2);      /* mm2 = x6 x4 x2 x0 */
-    movq_m2r (*(row+offset+4), mm5);    /* mm5 = x7 x5 x3 x1 */
-    movq_r2r (mm2, mm0);                /* mm0 = x6 x4 x2 x0 */
-    movq_m2r (*table, mm3);             /* mm3 = -C2 -C4 C2 C4 */
-    movq_r2r (mm5, mm6);                /* mm6 = x7 x5 x3 x1 */
-    movq_m2r (*(table+4), mm4);         /* mm4 = C6 C4 C6 C4 */
-    pmaddwd_r2r (mm0, mm3);             /* mm3 = -C4*x4-C2*x6 C4*x0+C2*x2 */
-    pshufw_r2r (mm2, mm2, 0x4e);        /* mm2 = x2 x0 x6 x4 */
+    __asm__ volatile (
+        "movq     (%0), %%mm2        \n\t" /* mm2 = x6 x4 x2 x0 */
+        "movq    8(%0), %%mm5        \n\t" /* mm5 = x7 x5 x3 x1 */
+        "movq    %%mm2, %%mm0        \n\t" /* mm0 = x6 x4 x2 x0 */
+        "movq     (%1), %%mm3        \n\t" /* mm3 = -C2 -C4 C2 C4 */
+        "movq    %%mm5, %%mm6        \n\t" /* mm6 = x7 x5 x3 x1 */
+        "movq    8(%1), %%mm4        \n\t" /* mm4 = C6 C4 C6 C4 */
+        "pmaddwd %%mm0, %%mm3        \n\t" /* mm3 = -C4*x4-C2*x6 C4*x0+C2*x2 */
+        "pshufw  $0x4e, %%mm2, %%mm2 \n\t" /* mm2 = x2 x0 x6 x4 */
+        :: "r" ((row+offset)), "r" (table));
 }
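The head stage leans entirely on pmaddwd: each coefficient table interleaves two cosine pairs so that a single pmaddwd produces two 2-term dot products, one per 32-bit lane, as the register comments above trace. A minimal scalar model of that instruction (illustrative, not part of the original file):

#include <stdint.h>

/* Scalar model of pmaddwd as used in the head stage: each 32-bit lane of
 * the result is a dot product of two int16 pairs.  With b = {C4, C2, -C4,
 * -C2} and a = {x0, x2, x4, x6}, lane 0 is C4*x0+C2*x2 and lane 1 is
 * -C4*x4-C2*x6, matching the mm3 comment above. */
static void pmaddwd_model (const int16_t a[4], const int16_t b[4],
                           int32_t out[2])
{
    out[0] = (int32_t) a[0] * b[0] + (int32_t) a[1] * b[1];
    out[1] = (int32_t) a[2] * b[2] + (int32_t) a[3] * b[3];
}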
 static inline void mmxext_row (const int16_t * const table,
                                const int32_t * const rounder)
 {
-    movq_m2r (*(table+8), mm1);         /* mm1 = -C5 -C1 C3 C1 */
-    pmaddwd_r2r (mm2, mm4);             /* mm4 = C4*x0+C6*x2 C4*x4+C6*x6 */
-    pmaddwd_m2r (*(table+16), mm0);     /* mm0 = C4*x4-C6*x6 C4*x0-C6*x2 */
-    pshufw_r2r (mm6, mm6, 0x4e);        /* mm6 = x3 x1 x7 x5 */
-    movq_m2r (*(table+12), mm7);        /* mm7 = -C7 C3 C7 C5 */
-    pmaddwd_r2r (mm5, mm1);             /* mm1 = -C1*x5-C5*x7 C1*x1+C3*x3 */
-    paddd_m2r (*rounder, mm3);          /* mm3 += rounder */
-    pmaddwd_r2r (mm6, mm7);             /* mm7 = C3*x1-C7*x3 C5*x5+C7*x7 */
-    pmaddwd_m2r (*(table+20), mm2);     /* mm2 = C4*x0-C2*x2 -C4*x4+C2*x6 */
-    paddd_r2r (mm4, mm3);               /* mm3 = a1 a0 + rounder */
-    pmaddwd_m2r (*(table+24), mm5);     /* mm5 = C3*x5-C1*x7 C5*x1-C1*x3 */
-    movq_r2r (mm3, mm4);                /* mm4 = a1 a0 + rounder */
-    pmaddwd_m2r (*(table+28), mm6);     /* mm6 = C7*x1-C5*x3 C7*x5+C3*x7 */
-    paddd_r2r (mm7, mm1);               /* mm1 = b1 b0 */
-    paddd_m2r (*rounder, mm0);          /* mm0 += rounder */
-    psubd_r2r (mm1, mm3);               /* mm3 = a1-b1 a0-b0 + rounder */
-    psrad_i2r (ROW_SHIFT, mm3);         /* mm3 = y6 y7 */
-    paddd_r2r (mm4, mm1);               /* mm1 = a1+b1 a0+b0 + rounder */
-    paddd_r2r (mm2, mm0);               /* mm0 = a3 a2 + rounder */
-    psrad_i2r (ROW_SHIFT, mm1);         /* mm1 = y1 y0 */
-    paddd_r2r (mm6, mm5);               /* mm5 = b3 b2 */
-    movq_r2r (mm0, mm4);                /* mm4 = a3 a2 + rounder */
-    paddd_r2r (mm5, mm0);               /* mm0 = a3+b3 a2+b2 + rounder */
-    psubd_r2r (mm5, mm4);               /* mm4 = a3-b3 a2-b2 + rounder */
+    __asm__ volatile (
+        "movq    16(%0), %%mm1       \n\t" /* mm1 = -C5 -C1 C3 C1 */
+        "pmaddwd %%mm2, %%mm4        \n\t" /* mm4 = C4*x0+C6*x2 C4*x4+C6*x6 */
+        "pmaddwd 32(%0), %%mm0       \n\t" /* mm0 = C4*x4-C6*x6 C4*x0-C6*x2 */
+        "pshufw  $0x4e, %%mm6, %%mm6 \n\t" /* mm6 = x3 x1 x7 x5 */
+        "movq    24(%0), %%mm7       \n\t" /* mm7 = -C7 C3 C7 C5 */
+        "pmaddwd %%mm5, %%mm1        \n\t" /* mm1 = -C1*x5-C5*x7 C1*x1+C3*x3 */
+        "paddd    (%1), %%mm3        \n\t" /* mm3 += rounder */
+        "pmaddwd %%mm6, %%mm7        \n\t" /* mm7 = C3*x1-C7*x3 C5*x5+C7*x7 */
+        "pmaddwd 40(%0), %%mm2       \n\t" /* mm2 = C4*x0-C2*x2 -C4*x4+C2*x6 */
+        "paddd   %%mm4, %%mm3        \n\t" /* mm3 = a1 a0 + rounder */
+        "pmaddwd 48(%0), %%mm5       \n\t" /* mm5 = C3*x5-C1*x7 C5*x1-C1*x3 */
+        "movq    %%mm3, %%mm4        \n\t" /* mm4 = a1 a0 + rounder */
+        "pmaddwd 56(%0), %%mm6       \n\t" /* mm6 = C7*x1-C5*x3 C7*x5+C3*x7 */
+        "paddd   %%mm7, %%mm1        \n\t" /* mm1 = b1 b0 */
+        "paddd    (%1), %%mm0        \n\t" /* mm0 += rounder */
+        "psubd   %%mm1, %%mm3        \n\t" /* mm3 = a1-b1 a0-b0 + rounder */
+        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm3 \n\t" /* mm3 = y6 y7 */
+        "paddd   %%mm4, %%mm1        \n\t" /* mm1 = a1+b1 a0+b0 + rounder */
+        "paddd   %%mm2, %%mm0        \n\t" /* mm0 = a3 a2 + rounder */
+        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm1 \n\t" /* mm1 = y1 y0 */
+        "paddd   %%mm6, %%mm5        \n\t" /* mm5 = b3 b2 */
+        "movq    %%mm0, %%mm4        \n\t" /* mm4 = a3 a2 + rounder */
+        "paddd   %%mm5, %%mm0        \n\t" /* mm0 = a3+b3 a2+b2 + rounder */
+        "psubd   %%mm5, %%mm4        \n\t" /* mm4 = a3-b3 a2-b2 + rounder */
+        : : "r" (table), "r" (rounder));
 }
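Taken together, head plus row compute the even/odd butterfly of an 8-point row IDCT: the a accumulators gather the even inputs x0,x2,x4,x6, the b accumulators the odd inputs x1,x3,x5,x7, and each output is a sum or difference shifted down by ROW_SHIFT. A plain-C restatement, reconstructed from the register comments above (the rounder is folded into the a terms, exactly as the code does it; illustrative only):

/* Row butterfly as traced by the comments: a[i], b[i] are the 32-bit
 * even/odd accumulators, rounder is already included in a[i] here. */
static void row_butterfly_model (const int32_t a[4], const int32_t b[4],
                                 int16_t y[8])
{
    y[0] = (int16_t) ((a[0] + b[0]) >> ROW_SHIFT);
    y[1] = (int16_t) ((a[1] + b[1]) >> ROW_SHIFT);
    y[2] = (int16_t) ((a[2] + b[2]) >> ROW_SHIFT);
    y[3] = (int16_t) ((a[3] + b[3]) >> ROW_SHIFT);
    y[4] = (int16_t) ((a[3] - b[3]) >> ROW_SHIFT);
    y[5] = (int16_t) ((a[2] - b[2]) >> ROW_SHIFT);
    y[6] = (int16_t) ((a[1] - b[1]) >> ROW_SHIFT);
    y[7] = (int16_t) ((a[0] - b[0]) >> ROW_SHIFT);
}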
 static inline void mmxext_row_tail (int16_t * const row, const int store)
 {
-    psrad_i2r (ROW_SHIFT, mm0);         /* mm0 = y3 y2 */
-    psrad_i2r (ROW_SHIFT, mm4);         /* mm4 = y4 y5 */
-    packssdw_r2r (mm0, mm1);            /* mm1 = y3 y2 y1 y0 */
-    packssdw_r2r (mm3, mm4);            /* mm4 = y6 y7 y4 y5 */
-    movq_r2m (mm1, *(row+store));       /* save y3 y2 y1 y0 */
-    pshufw_r2r (mm4, mm4, 0xb1);        /* mm4 = y7 y6 y5 y4 */
-    movq_r2m (mm4, *(row+store+4));     /* save y7 y6 y5 y4 */
+    __asm__ volatile (
+        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm0 \n\t" /* mm0 = y3 y2 */
+        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm4 \n\t" /* mm4 = y4 y5 */
+        "packssdw %%mm0, %%mm1       \n\t" /* mm1 = y3 y2 y1 y0 */
+        "packssdw %%mm3, %%mm4       \n\t" /* mm4 = y6 y7 y4 y5 */
+        "movq    %%mm1, (%0)         \n\t" /* save y3 y2 y1 y0 */
+        "pshufw  $0xb1, %%mm4, %%mm4 \n\t" /* mm4 = y7 y6 y5 y4 */
+        "movq    %%mm4, 8(%0)        \n\t" /* save y7 y6 y5 y4 */
+        :: "r" (row+store));
 }
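The tail uses pshufw to put the high quad back in natural order: immediate 0xb1 swaps the words within each dword (y6 y7 y4 y5 becomes y7 y6 y5 y4), while the 0x4e used in the head swaps the two dword halves. A scalar model of the instruction (illustrative):

/* pshufw model: word i of the result is word ((imm >> 2*i) & 3) of the
 * source.  imm 0x4e = 1,0,3,2 swaps dword halves; 0xb1 = 2,3,0,1 swaps
 * the words inside each dword. */
static void pshufw_model (const int16_t src[4], int imm, int16_t dst[4])
{
    for (int i = 0; i < 4; i++)
        dst[i] = src[(imm >> (2 * i)) & 3];
}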
 static inline void mmxext_row_mid (int16_t * const row, const int store,
                                    const int offset,
                                    const int16_t * const table)
 {
-    movq_m2r (*(row+offset), mm2);      /* mm2 = x6 x4 x2 x0 */
-    psrad_i2r (ROW_SHIFT, mm0);         /* mm0 = y3 y2 */
-    movq_m2r (*(row+offset+4), mm5);    /* mm5 = x7 x5 x3 x1 */
-    psrad_i2r (ROW_SHIFT, mm4);         /* mm4 = y4 y5 */
-    packssdw_r2r (mm0, mm1);            /* mm1 = y3 y2 y1 y0 */
-    movq_r2r (mm5, mm6);                /* mm6 = x7 x5 x3 x1 */
-    packssdw_r2r (mm3, mm4);            /* mm4 = y6 y7 y4 y5 */
-    movq_r2r (mm2, mm0);                /* mm0 = x6 x4 x2 x0 */
-    movq_r2m (mm1, *(row+store));       /* save y3 y2 y1 y0 */
-    pshufw_r2r (mm4, mm4, 0xb1);        /* mm4 = y7 y6 y5 y4 */
-    movq_m2r (*table, mm3);             /* mm3 = -C2 -C4 C2 C4 */
-    movq_r2m (mm4, *(row+store+4));     /* save y7 y6 y5 y4 */
-    pmaddwd_r2r (mm0, mm3);             /* mm3 = -C4*x4-C2*x6 C4*x0+C2*x2 */
-    movq_m2r (*(table+4), mm4);         /* mm4 = C6 C4 C6 C4 */
-    pshufw_r2r (mm2, mm2, 0x4e);        /* mm2 = x2 x0 x6 x4 */
+    __asm__ volatile (
+        "movq    (%0,%1), %%mm2      \n\t" /* mm2 = x6 x4 x2 x0 */
+        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm0 \n\t" /* mm0 = y3 y2 */
+        "movq    8(%0,%1), %%mm5     \n\t" /* mm5 = x7 x5 x3 x1 */
+        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm4 \n\t" /* mm4 = y4 y5 */
+        "packssdw %%mm0, %%mm1       \n\t" /* mm1 = y3 y2 y1 y0 */
+        "movq    %%mm5, %%mm6        \n\t" /* mm6 = x7 x5 x3 x1 */
+        "packssdw %%mm3, %%mm4       \n\t" /* mm4 = y6 y7 y4 y5 */
+        "movq    %%mm2, %%mm0        \n\t" /* mm0 = x6 x4 x2 x0 */
+        "movq    %%mm1, (%0,%2)      \n\t" /* save y3 y2 y1 y0 */
+        "pshufw  $0xb1, %%mm4, %%mm4 \n\t" /* mm4 = y7 y6 y5 y4 */
+        "movq    (%3), %%mm3         \n\t" /* mm3 = -C2 -C4 C2 C4 */
+        "movq    %%mm4, 8(%0,%2)     \n\t" /* save y7 y6 y5 y4 */
+        "pmaddwd %%mm0, %%mm3        \n\t" /* mm3 = -C4*x4-C2*x6 C4*x0+C2*x2 */
+        "movq    8(%3), %%mm4        \n\t" /* mm4 = C6 C4 C6 C4 */
+        "pshufw  $0x4e, %%mm2, %%mm2 \n\t" /* mm2 = x2 x0 x6 x4 */
+        :: "r" (row), "r" ((x86_reg) (2*offset)), "r" ((x86_reg) (2*store)),
+           "r" (table));
 }
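The mid variant fuses a tail and a head: it stores the y values of the row just finished while already loading the x values of the next one, which keeps the pipeline busy across row boundaries. The real driver, with its per-row coefficient tables and rounding constants, sits elsewhere in the file; a hedged sketch of the chaining pattern (table_for_row() and rounder_for_row() are hypothetical stand-ins):

/* Hypothetical driver loop showing how head/row/mid/tail chain across the
 * eight rows of a block; the actual tables and rounders are elided. */
static void idct_rows_sketch (int16_t *block)
{
    mmxext_row_head (block, 0*8, table_for_row (0));
    mmxext_row (table_for_row (0), rounder_for_row (0));
    for (int r = 1; r < 8; r++) {
        /* store row r-1, load row r */
        mmxext_row_mid (block, (r-1)*8, r*8, table_for_row (r));
        mmxext_row (table_for_row (r), rounder_for_row (r));
    }
    mmxext_row_tail (block, 7*8);
}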
 static inline void mmx_row_head (int16_t * const row, const int offset,
                                  const int16_t * const table)
 {
-    movq_m2r (*(row+offset), mm2);      /* mm2 = x6 x4 x2 x0 */
-    movq_m2r (*(row+offset+4), mm5);    /* mm5 = x7 x5 x3 x1 */
-    movq_r2r (mm2, mm0);                /* mm0 = x6 x4 x2 x0 */
-    movq_m2r (*table, mm3);             /* mm3 = C6 C4 C2 C4 */
-    movq_r2r (mm5, mm6);                /* mm6 = x7 x5 x3 x1 */
-    punpckldq_r2r (mm0, mm0);           /* mm0 = x2 x0 x2 x0 */
-    movq_m2r (*(table+4), mm4);         /* mm4 = -C2 -C4 C6 C4 */
-    pmaddwd_r2r (mm0, mm3);             /* mm3 = C4*x0+C6*x2 C4*x0+C2*x2 */
-    movq_m2r (*(table+8), mm1);         /* mm1 = -C7 C3 C3 C1 */
-    punpckhdq_r2r (mm2, mm2);           /* mm2 = x6 x4 x6 x4 */
+    __asm__ volatile (
+        "movq     (%0), %%mm2        \n\t" /* mm2 = x6 x4 x2 x0 */
+        "movq    8(%0), %%mm5        \n\t" /* mm5 = x7 x5 x3 x1 */
+        "movq    %%mm2, %%mm0        \n\t" /* mm0 = x6 x4 x2 x0 */
+        "movq     (%1), %%mm3        \n\t" /* mm3 = C6 C4 C2 C4 */
+        "movq    %%mm5, %%mm6        \n\t" /* mm6 = x7 x5 x3 x1 */
+        "punpckldq %%mm0, %%mm0      \n\t" /* mm0 = x2 x0 x2 x0 */
+        "movq    8(%1), %%mm4        \n\t" /* mm4 = -C2 -C4 C6 C4 */
+        "pmaddwd %%mm0, %%mm3        \n\t" /* mm3 = C4*x0+C6*x2 C4*x0+C2*x2 */
+        "movq   16(%1), %%mm1        \n\t" /* mm1 = -C7 C3 C3 C1 */
+        "punpckhdq %%mm2, %%mm2      \n\t" /* mm2 = x6 x4 x6 x4 */
+        :: "r" ((row+offset)), "r" (table));
 }
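Plain MMX lacks pshufw, so this head duplicates dword halves with punpckldq/punpckhdq instead, and the coefficient table is interleaved differently (C6 C4 C2 C4 rather than -C2 -C4 C2 C4) to match the duplicated operand order. A scalar model of the two unpacks when a register is unpacked with itself (illustrative):

/* punpckldq reg,reg duplicates the low dword; punpckhdq the high one.
 * Arrays are low word first, so x6 x4 x2 x0 is {x0, x2, x4, x6}. */
static void punpckldq_self (int16_t v[4]) { v[2] = v[0]; v[3] = v[1]; }
static void punpckhdq_self (int16_t v[4]) { v[0] = v[2]; v[1] = v[3]; }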
 static inline void mmx_row (const int16_t * const table,
                             const int32_t * const rounder)
 {
-    pmaddwd_r2r (mm2, mm4);             /* mm4 = -C4*x4-C2*x6 C4*x4+C6*x6 */
-    punpckldq_r2r (mm5, mm5);           /* mm5 = x3 x1 x3 x1 */
-    pmaddwd_m2r (*(table+16), mm0);     /* mm0 = C4*x0-C2*x2 C4*x0-C6*x2 */
-    punpckhdq_r2r (mm6, mm6);           /* mm6 = x7 x5 x7 x5 */
-    movq_m2r (*(table+12), mm7);        /* mm7 = -C5 -C1 C7 C5 */
-    pmaddwd_r2r (mm5, mm1);             /* mm1 = C3*x1-C7*x3 C1*x1+C3*x3 */
-    paddd_m2r (*rounder, mm3);          /* mm3 += rounder */
-    pmaddwd_r2r (mm6, mm7);             /* mm7 = -C1*x5-C5*x7 C5*x5+C7*x7 */
-    pmaddwd_m2r (*(table+20), mm2);     /* mm2 = C4*x4-C6*x6 -C4*x4+C2*x6 */
-    paddd_r2r (mm4, mm3);               /* mm3 = a1 a0 + rounder */
-    pmaddwd_m2r (*(table+24), mm5);     /* mm5 = C7*x1-C5*x3 C5*x1-C1*x3 */
-    movq_r2r (mm3, mm4);                /* mm4 = a1 a0 + rounder */
-    pmaddwd_m2r (*(table+28), mm6);     /* mm6 = C3*x5-C1*x7 C7*x5+C3*x7 */
-    paddd_r2r (mm7, mm1);               /* mm1 = b1 b0 */
-    paddd_m2r (*rounder, mm0);          /* mm0 += rounder */
-    psubd_r2r (mm1, mm3);               /* mm3 = a1-b1 a0-b0 + rounder */
-    psrad_i2r (ROW_SHIFT, mm3);         /* mm3 = y6 y7 */
-    paddd_r2r (mm4, mm1);               /* mm1 = a1+b1 a0+b0 + rounder */
-    paddd_r2r (mm2, mm0);               /* mm0 = a3 a2 + rounder */
-    psrad_i2r (ROW_SHIFT, mm1);         /* mm1 = y1 y0 */
-    paddd_r2r (mm6, mm5);               /* mm5 = b3 b2 */
-    movq_r2r (mm0, mm7);                /* mm7 = a3 a2 + rounder */
-    paddd_r2r (mm5, mm0);               /* mm0 = a3+b3 a2+b2 + rounder */
-    psubd_r2r (mm5, mm7);               /* mm7 = a3-b3 a2-b2 + rounder */
+    __asm__ volatile (
+        "pmaddwd %%mm2, %%mm4        \n\t" /* mm4 = -C4*x4-C2*x6 C4*x4+C6*x6 */
+        "punpckldq %%mm5, %%mm5      \n\t" /* mm5 = x3 x1 x3 x1 */
+        "pmaddwd 32(%0), %%mm0       \n\t" /* mm0 = C4*x0-C2*x2 C4*x0-C6*x2 */
+        "punpckhdq %%mm6, %%mm6      \n\t" /* mm6 = x7 x5 x7 x5 */
+        "movq    24(%0), %%mm7       \n\t" /* mm7 = -C5 -C1 C7 C5 */
+        "pmaddwd %%mm5, %%mm1        \n\t" /* mm1 = C3*x1-C7*x3 C1*x1+C3*x3 */
+        "paddd    (%1), %%mm3        \n\t" /* mm3 += rounder */
+        "pmaddwd %%mm6, %%mm7        \n\t" /* mm7 = -C1*x5-C5*x7 C5*x5+C7*x7 */
+        "pmaddwd 40(%0), %%mm2       \n\t" /* mm2 = C4*x4-C6*x6 -C4*x4+C2*x6 */
+        "paddd   %%mm4, %%mm3        \n\t" /* mm3 = a1 a0 + rounder */
+        "pmaddwd 48(%0), %%mm5       \n\t" /* mm5 = C7*x1-C5*x3 C5*x1-C1*x3 */
+        "movq    %%mm3, %%mm4        \n\t" /* mm4 = a1 a0 + rounder */
+        "pmaddwd 56(%0), %%mm6       \n\t" /* mm6 = C3*x5-C1*x7 C7*x5+C3*x7 */
+        "paddd   %%mm7, %%mm1        \n\t" /* mm1 = b1 b0 */
+        "paddd    (%1), %%mm0        \n\t" /* mm0 += rounder */
+        "psubd   %%mm1, %%mm3        \n\t" /* mm3 = a1-b1 a0-b0 + rounder */
+        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm3 \n\t" /* mm3 = y6 y7 */
+        "paddd   %%mm4, %%mm1        \n\t" /* mm1 = a1+b1 a0+b0 + rounder */
+        "paddd   %%mm2, %%mm0        \n\t" /* mm0 = a3 a2 + rounder */
+        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm1 \n\t" /* mm1 = y1 y0 */
+        "paddd   %%mm6, %%mm5        \n\t" /* mm5 = b3 b2 */
+        "movq    %%mm0, %%mm7        \n\t" /* mm7 = a3 a2 + rounder */
+        "paddd   %%mm5, %%mm0        \n\t" /* mm0 = a3+b3 a2+b2 + rounder */
+        "psubd   %%mm5, %%mm7        \n\t" /* mm7 = a3-b3 a2-b2 + rounder */
+        :: "r" (table), "r" (rounder));
 }
 static inline void mmx_row_tail (int16_t * const row, const int store)
 {
-    psrad_i2r (ROW_SHIFT, mm0);         /* mm0 = y3 y2 */
-    psrad_i2r (ROW_SHIFT, mm7);         /* mm7 = y4 y5 */
-    packssdw_r2r (mm0, mm1);            /* mm1 = y3 y2 y1 y0 */
-    packssdw_r2r (mm3, mm7);            /* mm7 = y6 y7 y4 y5 */
-    movq_r2m (mm1, *(row+store));       /* save y3 y2 y1 y0 */
-    movq_r2r (mm7, mm4);                /* mm4 = y6 y7 y4 y5 */
-    pslld_i2r (16, mm7);                /* mm7 = y7 0 y5 0 */
-    psrld_i2r (16, mm4);                /* mm4 = 0 y6 0 y4 */
-    por_r2r (mm4, mm7);                 /* mm7 = y7 y6 y5 y4 */
-    movq_r2m (mm7, *(row+store+4));     /* save y7 y6 y5 y4 */
+    __asm__ volatile (
+        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm0 \n\t" /* mm0 = y3 y2 */
+        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm7 \n\t" /* mm7 = y4 y5 */
+        "packssdw %%mm0, %%mm1       \n\t" /* mm1 = y3 y2 y1 y0 */
+        "packssdw %%mm3, %%mm7       \n\t" /* mm7 = y6 y7 y4 y5 */
+        "movq    %%mm1, (%0)         \n\t" /* save y3 y2 y1 y0 */
+        "movq    %%mm7, %%mm4        \n\t" /* mm4 = y6 y7 y4 y5 */
+        "pslld   $16, %%mm7          \n\t" /* mm7 = y7 0 y5 0 */
+        "psrld   $16, %%mm4          \n\t" /* mm4 = 0 y6 0 y4 */
+        "por     %%mm4, %%mm7        \n\t" /* mm7 = y7 y6 y5 y4 */
+        "movq    %%mm7, 8(%0)        \n\t" /* save y7 y6 y5 y4 */
+        :: "r" (row+store));
 }
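Here the word swap that mmxext does with one pshufw costs three instructions: shift left, shift right, or together. The effect on each 32-bit lane is a rotate by 16 bits; a one-line scalar model (illustrative):

/* pslld/psrld/por word swap: rotate each 32-bit lane by 16, turning the
 * word pair "y6 y7" into "y7 y6". */
static uint32_t swap_words (uint32_t lane)
{
    return (lane << 16) | (lane >> 16);
}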
 static inline void mmx_row_mid (int16_t * const row, const int store,
                                 const int offset, const int16_t * const table)
 {
-    movq_m2r (*(row+offset), mm2);      /* mm2 = x6 x4 x2 x0 */
-    psrad_i2r (ROW_SHIFT, mm0);         /* mm0 = y3 y2 */
-    movq_m2r (*(row+offset+4), mm5);    /* mm5 = x7 x5 x3 x1 */
-    psrad_i2r (ROW_SHIFT, mm7);         /* mm7 = y4 y5 */
-    packssdw_r2r (mm0, mm1);            /* mm1 = y3 y2 y1 y0 */
-    movq_r2r (mm5, mm6);                /* mm6 = x7 x5 x3 x1 */
-    packssdw_r2r (mm3, mm7);            /* mm7 = y6 y7 y4 y5 */
-    movq_r2r (mm2, mm0);                /* mm0 = x6 x4 x2 x0 */
-    movq_r2m (mm1, *(row+store));       /* save y3 y2 y1 y0 */
-    movq_r2r (mm7, mm1);                /* mm1 = y6 y7 y4 y5 */
-    punpckldq_r2r (mm0, mm0);           /* mm0 = x2 x0 x2 x0 */
-    psrld_i2r (16, mm7);                /* mm7 = 0 y6 0 y4 */
-    movq_m2r (*table, mm3);             /* mm3 = C6 C4 C2 C4 */
-    pslld_i2r (16, mm1);                /* mm1 = y7 0 y5 0 */
-    movq_m2r (*(table+4), mm4);         /* mm4 = -C2 -C4 C6 C4 */
-    por_r2r (mm1, mm7);                 /* mm7 = y7 y6 y5 y4 */
-    movq_m2r (*(table+8), mm1);         /* mm1 = -C7 C3 C3 C1 */
-    punpckhdq_r2r (mm2, mm2);           /* mm2 = x6 x4 x6 x4 */
-    movq_r2m (mm7, *(row+store+4));     /* save y7 y6 y5 y4 */
-    pmaddwd_r2r (mm0, mm3);             /* mm3 = C4*x0+C6*x2 C4*x0+C2*x2 */
+    __asm__ volatile (
+        "movq    (%0,%1), %%mm2      \n\t" /* mm2 = x6 x4 x2 x0 */
+        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm0 \n\t" /* mm0 = y3 y2 */
+        "movq    8(%0,%1), %%mm5     \n\t" /* mm5 = x7 x5 x3 x1 */
+        "psrad $" AV_STRINGIFY(ROW_SHIFT) ", %%mm7 \n\t" /* mm7 = y4 y5 */
+        "packssdw %%mm0, %%mm1       \n\t" /* mm1 = y3 y2 y1 y0 */
+        "movq    %%mm5, %%mm6        \n\t" /* mm6 = x7 x5 x3 x1 */
+        "packssdw %%mm3, %%mm7       \n\t" /* mm7 = y6 y7 y4 y5 */
+        "movq    %%mm2, %%mm0        \n\t" /* mm0 = x6 x4 x2 x0 */
+        "movq    %%mm1, (%0,%2)      \n\t" /* save y3 y2 y1 y0 */
+        "movq    %%mm7, %%mm1        \n\t" /* mm1 = y6 y7 y4 y5 */
+        "punpckldq %%mm0, %%mm0      \n\t" /* mm0 = x2 x0 x2 x0 */
+        "psrld   $16, %%mm7          \n\t" /* mm7 = 0 y6 0 y4 */
+        "movq    (%3), %%mm3         \n\t" /* mm3 = C6 C4 C2 C4 */
+        "pslld   $16, %%mm1          \n\t" /* mm1 = y7 0 y5 0 */
+        "movq    8(%3), %%mm4        \n\t" /* mm4 = -C2 -C4 C6 C4 */
+        "por     %%mm1, %%mm7        \n\t" /* mm7 = y7 y6 y5 y4 */
+        "movq   16(%3), %%mm1        \n\t" /* mm1 = -C7 C3 C3 C1 */
+        "punpckhdq %%mm2, %%mm2      \n\t" /* mm2 = x6 x4 x6 x4 */
+        "movq    %%mm7, 8(%0,%2)     \n\t" /* save y7 y6 y5 y4 */
+        "pmaddwd %%mm0, %%mm3        \n\t" /* mm3 = C4*x0+C6*x2 C4*x0+C2*x2 */
+        : : "r" (row), "r" ((x86_reg) (2*offset)), "r" ((x86_reg) (2*store)),
+            "r" (table));
 }
 static inline void idct_col (int16_t * const col, const int offset)
 {
     DECLARE_ALIGNED(8, static const short, t1_vector)[] = {T1, T1, T1, T1};
     DECLARE_ALIGNED(8, static const short, t2_vector)[] = {T2, T2, T2, T2};
     DECLARE_ALIGNED(8, static const short, t3_vector)[] = {T3, T3, T3, T3};
     DECLARE_ALIGNED(8, static const short, c4_vector)[] = {C4, C4, C4, C4};
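The T1/T2/T3/C4 macros are defined earlier in the file and are not part of this excerpt; in the libmpeg2 original they are the Q16 constants 13036, 27146, 43790 and 23170, i.e. tan(pi/16), tan(2*pi/16), tan(3*pi/16) and cos(pi/4)/2. Note also that the asm below addresses all four tables off a single base register (8(%0), 16(%0), 24(%0)), which relies on the four 8-byte vectors being laid out consecutively, as declared above. A scalar model of the pmulhw multiply both versions use (illustrative):

/* pmulhw keeps the high 16 bits of a signed 16x16 product, i.e. a Q16
 * multiply by k/65536.  Since T3 exceeds 0x7fff, as a signed word it is
 * T3-65536, so pmulhw by T3 gives (tan(3*pi/16)-1)*x; the code then adds
 * x back, which is why the comments read "(T3-1)*x3" and "(T3-1)*x5". */
static int16_t pmulhw_model (int16_t x, int16_t k)
{
    return (int16_t) (((int32_t) x * k) >> 16);
}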
     /* column code adapted from Peter Gubanov */
     /* http://www.elecard.com/peter/idct.shtml */
-    movq_m2r (*t1_vector, mm0);         /* mm0 = T1 */
-    movq_m2r (*(col+offset+1*8), mm1);  /* mm1 = x1 */
-    movq_r2r (mm0, mm2);                /* mm2 = T1 */
-    movq_m2r (*(col+offset+7*8), mm4);  /* mm4 = x7 */
-    pmulhw_r2r (mm1, mm0);              /* mm0 = T1*x1 */
-    movq_m2r (*t3_vector, mm5);         /* mm5 = T3 */
-    pmulhw_r2r (mm4, mm2);              /* mm2 = T1*x7 */
-    movq_m2r (*(col+offset+5*8), mm6);  /* mm6 = x5 */
-    movq_r2r (mm5, mm7);                /* mm7 = T3-1 */
-    movq_m2r (*(col+offset+3*8), mm3);  /* mm3 = x3 */
-    psubsw_r2r (mm4, mm0);              /* mm0 = v17 */
-    movq_m2r (*t2_vector, mm4);         /* mm4 = T2 */
-    pmulhw_r2r (mm3, mm5);              /* mm5 = (T3-1)*x3 */
-    paddsw_r2r (mm2, mm1);              /* mm1 = u17 */
-    pmulhw_r2r (mm6, mm7);              /* mm7 = (T3-1)*x5 */
-    movq_r2r (mm4, mm2);                /* mm2 = T2 */
-    paddsw_r2r (mm3, mm5);              /* mm5 = T3*x3 */
-    pmulhw_m2r (*(col+offset+2*8), mm4); /* mm4 = T2*x2 */
-    paddsw_r2r (mm6, mm7);              /* mm7 = T3*x5 */
-    psubsw_r2r (mm6, mm5);              /* mm5 = v35 */
-    paddsw_r2r (mm3, mm7);              /* mm7 = u35 */
-    movq_m2r (*(col+offset+6*8), mm3);  /* mm3 = x6 */
-    movq_r2r (mm0, mm6);                /* mm6 = v17 */
-    pmulhw_r2r (mm3, mm2);              /* mm2 = T2*x6 */
-    psubsw_r2r (mm5, mm0);              /* mm0 = b3 */
-    psubsw_r2r (mm3, mm4);              /* mm4 = v26 */
-    paddsw_r2r (mm6, mm5);              /* mm5 = v12 */
-    movq_r2m (mm0, *(col+offset+3*8)); /* save b3 in scratch0 */
-    movq_r2r (mm1, mm6);                /* mm6 = u17 */
-    paddsw_m2r (*(col+offset+2*8), mm2); /* mm2 = u26 */
-    paddsw_r2r (mm7, mm6);              /* mm6 = b0 */
-    psubsw_r2r (mm7, mm1);              /* mm1 = u12 */
-    movq_r2r (mm1, mm7);                /* mm7 = u12 */
-    movq_m2r (*(col+offset+0*8), mm3);  /* mm3 = x0 */
-    paddsw_r2r (mm5, mm1);              /* mm1 = u12+v12 */
-    movq_m2r (*c4_vector, mm0);         /* mm0 = C4/2 */
-    psubsw_r2r (mm5, mm7);              /* mm7 = u12-v12 */
-    movq_r2m (mm6, *(col+offset+5*8)); /* save b0 in scratch1 */
-    pmulhw_r2r (mm0, mm1);              /* mm1 = b1/2 */
-    movq_r2r (mm4, mm6);                /* mm6 = v26 */
-    pmulhw_r2r (mm0, mm7);              /* mm7 = b2/2 */
-    movq_m2r (*(col+offset+4*8), mm5);  /* mm5 = x4 */
-    movq_r2r (mm3, mm0);                /* mm0 = x0 */
-    psubsw_r2r (mm5, mm3);              /* mm3 = v04 */
-    paddsw_r2r (mm5, mm0);              /* mm0 = u04 */
-    paddsw_r2r (mm3, mm4);              /* mm4 = a1 */
-    movq_r2r (mm0, mm5);                /* mm5 = u04 */
-    psubsw_r2r (mm6, mm3);              /* mm3 = a2 */
-    paddsw_r2r (mm2, mm5);              /* mm5 = a0 */
-    paddsw_r2r (mm1, mm1);              /* mm1 = b1 */
-    psubsw_r2r (mm2, mm0);              /* mm0 = a3 */
-    paddsw_r2r (mm7, mm7);              /* mm7 = b2 */
-    movq_r2r (mm3, mm2);                /* mm2 = a2 */
-    movq_r2r (mm4, mm6);                /* mm6 = a1 */
-    paddsw_r2r (mm7, mm3);              /* mm3 = a2+b2 */
-    psraw_i2r (COL_SHIFT, mm3);         /* mm3 = y2 */
-    paddsw_r2r (mm1, mm4);              /* mm4 = a1+b1 */
-    psraw_i2r (COL_SHIFT, mm4);         /* mm4 = y1 */
-    psubsw_r2r (mm1, mm6);              /* mm6 = a1-b1 */
-    movq_m2r (*(col+offset+5*8), mm1);  /* mm1 = b0 */
-    psubsw_r2r (mm7, mm2);              /* mm2 = a2-b2 */
-    psraw_i2r (COL_SHIFT, mm6);         /* mm6 = y6 */
-    movq_r2r (mm5, mm7);                /* mm7 = a0 */
-    movq_r2m (mm4, *(col+offset+1*8)); /* save y1 */
-    psraw_i2r (COL_SHIFT, mm2);         /* mm2 = y5 */
-    movq_r2m (mm3, *(col+offset+2*8)); /* save y2 */
-    paddsw_r2r (mm1, mm5);              /* mm5 = a0+b0 */
-    movq_m2r (*(col+offset+3*8), mm4);  /* mm4 = b3 */
-    psubsw_r2r (mm1, mm7);              /* mm7 = a0-b0 */
-    psraw_i2r (COL_SHIFT, mm5);         /* mm5 = y0 */
-    movq_r2r (mm0, mm3);                /* mm3 = a3 */
-    movq_r2m (mm2, *(col+offset+5*8)); /* save y5 */
-    psubsw_r2r (mm4, mm3);              /* mm3 = a3-b3 */
-    psraw_i2r (COL_SHIFT, mm7);         /* mm7 = y7 */
-    paddsw_r2r (mm0, mm4);              /* mm4 = a3+b3 */
-    movq_r2m (mm5, *(col+offset+0*8)); /* save y0 */
-    psraw_i2r (COL_SHIFT, mm3);         /* mm3 = y4 */
-    movq_r2m (mm6, *(col+offset+6*8)); /* save y6 */
-    psraw_i2r (COL_SHIFT, mm4);         /* mm4 = y3 */
-    movq_r2m (mm7, *(col+offset+7*8)); /* save y7 */
-    movq_r2m (mm3, *(col+offset+4*8)); /* save y4 */
-    movq_r2m (mm4, *(col+offset+3*8)); /* save y3 */
+    __asm__ volatile (
+        "movq     (%0), %%mm0        \n\t" /* mm0 = T1 */
+        "movq    2*8(%1), %%mm1      \n\t" /* mm1 = x1 */
+        "movq    %%mm0, %%mm2        \n\t" /* mm2 = T1 */
+        "movq    7*2*8(%1), %%mm4    \n\t" /* mm4 = x7 */
+        "pmulhw  %%mm1, %%mm0        \n\t" /* mm0 = T1*x1 */
+        "movq    16(%0), %%mm5       \n\t" /* mm5 = T3 */
+        "pmulhw  %%mm4, %%mm2        \n\t" /* mm2 = T1*x7 */
+        "movq    2*5*8(%1), %%mm6    \n\t" /* mm6 = x5 */
+        "movq    %%mm5, %%mm7        \n\t" /* mm7 = T3-1 */
+        "movq    3*8*2(%1), %%mm3    \n\t" /* mm3 = x3 */
+        "psubsw  %%mm4, %%mm0        \n\t" /* mm0 = v17 */
+        "movq    8(%0), %%mm4        \n\t" /* mm4 = T2 */
+        "pmulhw  %%mm3, %%mm5        \n\t" /* mm5 = (T3-1)*x3 */
+        "paddsw  %%mm2, %%mm1        \n\t" /* mm1 = u17 */
+        "pmulhw  %%mm6, %%mm7        \n\t" /* mm7 = (T3-1)*x5 */
+        "movq    %%mm4, %%mm2        \n\t" /* mm2 = T2 */
+        "paddsw  %%mm3, %%mm5        \n\t" /* mm5 = T3*x3 */
+        "pmulhw  2*8*2(%1), %%mm4    \n\t" /* mm4 = T2*x2 */
+        "paddsw  %%mm6, %%mm7        \n\t" /* mm7 = T3*x5 */
+        "psubsw  %%mm6, %%mm5        \n\t" /* mm5 = v35 */
+        "paddsw  %%mm3, %%mm7        \n\t" /* mm7 = u35 */
+        "movq    6*8*2(%1), %%mm3    \n\t" /* mm3 = x6 */
+        "movq    %%mm0, %%mm6        \n\t" /* mm6 = v17 */
+        "pmulhw  %%mm3, %%mm2        \n\t" /* mm2 = T2*x6 */
+        "psubsw  %%mm5, %%mm0        \n\t" /* mm0 = b3 */
+        "psubsw  %%mm3, %%mm4        \n\t" /* mm4 = v26 */
+        "paddsw  %%mm6, %%mm5        \n\t" /* mm5 = v12 */
+        "movq    %%mm0, 3*8*2(%1)    \n\t" /* save b3 in scratch0 */
+        "movq    %%mm1, %%mm6        \n\t" /* mm6 = u17 */
+        "paddsw  2*8*2(%1), %%mm2    \n\t" /* mm2 = u26 */
+        "paddsw  %%mm7, %%mm6        \n\t" /* mm6 = b0 */
+        "psubsw  %%mm7, %%mm1        \n\t" /* mm1 = u12 */
+        "movq    %%mm1, %%mm7        \n\t" /* mm7 = u12 */
+        "movq    0*8(%1), %%mm3      \n\t" /* mm3 = x0 */
+        "paddsw  %%mm5, %%mm1        \n\t" /* mm1 = u12+v12 */
+        "movq    24(%0), %%mm0       \n\t" /* mm0 = C4/2 */
+        "psubsw  %%mm5, %%mm7        \n\t" /* mm7 = u12-v12 */
+        "movq    %%mm6, 5*8*2(%1)    \n\t" /* save b0 in scratch1 */
+        "pmulhw  %%mm0, %%mm1        \n\t" /* mm1 = b1/2 */
+        "movq    %%mm4, %%mm6        \n\t" /* mm6 = v26 */
+        "pmulhw  %%mm0, %%mm7        \n\t" /* mm7 = b2/2 */
+        "movq    4*8*2(%1), %%mm5    \n\t" /* mm5 = x4 */
+        "movq    %%mm3, %%mm0        \n\t" /* mm0 = x0 */
+        "psubsw  %%mm5, %%mm3        \n\t" /* mm3 = v04 */
+        "paddsw  %%mm5, %%mm0        \n\t" /* mm0 = u04 */
+        "paddsw  %%mm3, %%mm4        \n\t" /* mm4 = a1 */
+        "movq    %%mm0, %%mm5        \n\t" /* mm5 = u04 */
+        "psubsw  %%mm6, %%mm3        \n\t" /* mm3 = a2 */
+        "paddsw  %%mm2, %%mm5        \n\t" /* mm5 = a0 */
+        "paddsw  %%mm1, %%mm1        \n\t" /* mm1 = b1 */
+        "psubsw  %%mm2, %%mm0        \n\t" /* mm0 = a3 */
+        "paddsw  %%mm7, %%mm7        \n\t" /* mm7 = b2 */
+        "movq    %%mm3, %%mm2        \n\t" /* mm2 = a2 */
+        "movq    %%mm4, %%mm6        \n\t" /* mm6 = a1 */
+        "paddsw  %%mm7, %%mm3        \n\t" /* mm3 = a2+b2 */
+        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm3 \n\t" /* mm3 = y2 */
+        "paddsw  %%mm1, %%mm4        \n\t" /* mm4 = a1+b1 */
+        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm4 \n\t" /* mm4 = y1 */
+        "psubsw  %%mm1, %%mm6        \n\t" /* mm6 = a1-b1 */
+        "movq    5*8*2(%1), %%mm1    \n\t" /* mm1 = b0 */
+        "psubsw  %%mm7, %%mm2        \n\t" /* mm2 = a2-b2 */
+        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm6 \n\t" /* mm6 = y6 */
+        "movq    %%mm5, %%mm7        \n\t" /* mm7 = a0 */
+        "movq    %%mm4, 1*8*2(%1)    \n\t" /* save y1 */
+        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm2 \n\t" /* mm2 = y5 */
+        "movq    %%mm3, 2*8*2(%1)    \n\t" /* save y2 */
+        "paddsw  %%mm1, %%mm5        \n\t" /* mm5 = a0+b0 */
+        "movq    3*8*2(%1), %%mm4    \n\t" /* mm4 = b3 */
+        "psubsw  %%mm1, %%mm7        \n\t" /* mm7 = a0-b0 */
+        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm5 \n\t" /* mm5 = y0 */
+        "movq    %%mm0, %%mm3        \n\t" /* mm3 = a3 */
+        "movq    %%mm2, 5*8*2(%1)    \n\t" /* save y5 */
+        "psubsw  %%mm4, %%mm3        \n\t" /* mm3 = a3-b3 */
+        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm7 \n\t" /* mm7 = y7 */
+        "paddsw  %%mm0, %%mm4        \n\t" /* mm4 = a3+b3 */
+        "movq    %%mm5, 0*8*2(%1)    \n\t" /* save y0 */
+        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm3 \n\t" /* mm3 = y4 */
+        "movq    %%mm6, 6*8*2(%1)    \n\t" /* save y6 */
+        "psraw $" AV_STRINGIFY(COL_SHIFT) ", %%mm4 \n\t" /* mm4 = y3 */
+        "movq    %%mm7, 7*8*2(%1)    \n\t" /* save y7 */
+        "movq    %%mm3, 4*8*2(%1)    \n\t" /* save y4 */
+        "movq    %%mm4, 3*8*2(%1)    \n\t" /* save y3 */
+        :: "r" (t1_vector), "r" (col+offset));
 }
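The register comments above pin down the whole column butterfly; a plain-C restatement (reconstructed from those comments, with saturation from paddsw/psubsw ignored for clarity; q16_mul models pmulhw, and the t1/t2/t3/c4h parameter names are illustrative stand-ins for the vector contents):

/* Q16 fixed-point multiply, modelling pmulhw with an unwrapped constant. */
static int16_t q16_mul (int32_t k_q16, int16_t x)
{
    return (int16_t) ((k_q16 * x) >> 16);
}

static void idct_col_model (const int16_t x[8], int16_t y[8],
                            int32_t t1, int32_t t2, int32_t t3, int32_t c4h)
{
    int u17 = x[1] + q16_mul (t1, x[7]), v17 = q16_mul (t1, x[1]) - x[7];
    int u35 = q16_mul (t3, x[5]) + x[3], v35 = q16_mul (t3, x[3]) - x[5];
    int u26 = q16_mul (t2, x[6]) + x[2], v26 = q16_mul (t2, x[2]) - x[6];
    int u04 = x[0] + x[4],               v04 = x[0] - x[4];

    int u12 = u17 - u35, v12 = v17 + v35;
    int a0 = u04 + u26,  a3 = u04 - u26;
    int a1 = v04 + v26,  a2 = v04 - v26;
    int b0 = u17 + u35,  b3 = v17 - v35;
    int b1 = 2 * q16_mul (c4h, u12 + v12);  /* c4_vector holds C4/2 */
    int b2 = 2 * q16_mul (c4h, u12 - v12);

    y[0] = (int16_t) ((a0 + b0) >> COL_SHIFT);
    y[1] = (int16_t) ((a1 + b1) >> COL_SHIFT);
    y[2] = (int16_t) ((a2 + b2) >> COL_SHIFT);
    y[3] = (int16_t) ((a3 + b3) >> COL_SHIFT);
    y[4] = (int16_t) ((a3 - b3) >> COL_SHIFT);
    y[5] = (int16_t) ((a2 - b2) >> COL_SHIFT);
    y[6] = (int16_t) ((a1 - b1) >> COL_SHIFT);
    y[7] = (int16_t) ((a0 - b0) >> COL_SHIFT);
}

Since idct_col works on four columns at a time, the full driver (outside this excerpt) runs the column pass twice per block, once per half, along the lines of idct_col (block, 0); idct_col (block, 4);.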