/*
 * MMX optimized motion estimation
 * Copyright (c) 2001 Fabrice Bellard.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * mostly by Michael Niedermayer <michaelni@gmx.at>
 */
#include "../dsputil.h"

/* per-word rounding biases: 0 = truncate, 1 = bias for >>1, 2 = bias for >>2 */
static const __attribute__ ((aligned(8))) uint64_t round_tab[3]={
    0x0000000000000000LL,
    0x0001000100010001LL,
    0x0002000200020002LL,
};

static __attribute__ ((aligned(8), unused)) uint64_t bone= 0x0101010101010101LL;
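/*
 * All kernels below share the same register conventions:
 *   %mm7 is preloaded with zero (used by punpcklbw/punpckhbw to widen bytes),
 *   %mm6 accumulates the running SAD across kernel calls,
 *   %mm5 holds a rounding bias from round_tab[] (half-pel variants only).
 *
 * Illustrative scalar reference for what the 8-wide SAD kernels compute.
 * This helper is not part of the original file and is never called; it only
 * documents the intended result (the sum of absolute differences over an
 * 8-pixel-wide, (1<<h)-row block).
 */
static inline __attribute__((unused)) int sad8_c_ref(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    int x, y, d, sum = 0;
    for (y = 0; y < (1 << h); y++) {   /* h is log2 of the row count, as in the asm kernels */
        for (x = 0; x < 8; x++) {
            d = blk1[x] - blk2[x];
            sum += d < 0 ? -d : d;     /* |blk1[x] - blk2[x]| */
        }
        blk1 += stride;
        blk2 += stride;
    }
    return sum;
}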
static inline void sad8_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm2        \n\t"
        "movq (%2, %%eax), %%mm4        \n\t"
        "addl %3, %%eax                 \n\t"
        "psubusb %%mm0, %%mm2           \n\t"
        "psubusb %%mm4, %%mm0           \n\t"
        "movq (%1, %%eax), %%mm1        \n\t"
        "movq (%2, %%eax), %%mm3        \n\t"
        "movq (%2, %%eax), %%mm5        \n\t"
        "psubusb %%mm1, %%mm3           \n\t"
        "psubusb %%mm5, %%mm1           \n\t"
        "por %%mm2, %%mm0               \n\t"
        "por %%mm1, %%mm3               \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "movq %%mm3, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm3, %%mm2             \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "addl %3, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" (stride)
    );
}

static inline void sad8_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm2        \n\t"
        "psadbw %%mm2, %%mm0            \n\t"
        "addl %3, %%eax                 \n\t"
        "movq (%1, %%eax), %%mm1        \n\t"
        "movq (%2, %%eax), %%mm3        \n\t"
        "psadbw %%mm1, %%mm3            \n\t"
        "paddw %%mm3, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "addl %3, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk2 - len), "r" (stride)
    );
}

static inline void sad8_2_mmx2(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm2        \n\t"
        "pavgb %%mm2, %%mm0             \n\t"
        "movq (%3, %%eax), %%mm2        \n\t"
        "psadbw %%mm2, %%mm0            \n\t"
        "addl %4, %%eax                 \n\t"
        "movq (%1, %%eax), %%mm1        \n\t"
        "movq (%2, %%eax), %%mm3        \n\t"
        "pavgb %%mm1, %%mm3             \n\t"
        "movq (%3, %%eax), %%mm1        \n\t"
        "psadbw %%mm1, %%mm3            \n\t"
        "paddw %%mm3, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "addl %4, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len), "r" (stride)
    );
}

static inline void sad8_4_mmx2(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "movq "MANGLE(bone)", %%mm5     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm2        \n\t"
        "movq 1(%1, %%eax), %%mm1       \n\t"
        "movq 1(%2, %%eax), %%mm3       \n\t"
        "pavgb %%mm2, %%mm0             \n\t"
        "pavgb %%mm1, %%mm3             \n\t"
        "psubusb %%mm5, %%mm3           \n\t"
        "pavgb %%mm3, %%mm0             \n\t"
        "movq (%3, %%eax), %%mm2        \n\t"
        "psadbw %%mm2, %%mm0            \n\t"
        "addl %4, %%eax                 \n\t"
        "movq (%1, %%eax), %%mm1        \n\t"
        "movq (%2, %%eax), %%mm3        \n\t"
        "movq 1(%1, %%eax), %%mm2       \n\t"
        "movq 1(%2, %%eax), %%mm4       \n\t"
        "pavgb %%mm3, %%mm1             \n\t"
        "pavgb %%mm4, %%mm2             \n\t"
        "psubusb %%mm5, %%mm2           \n\t"
        "pavgb %%mm1, %%mm2             \n\t"
        "movq (%3, %%eax), %%mm1        \n\t"
        "psadbw %%mm1, %%mm2            \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "addl %4, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len), "r" (stride)
    );
}

static inline void sad8_2_mmx(uint8_t *blk1a, uint8_t *blk1b, uint8_t *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm1        \n\t"
        "movq (%1, %%eax), %%mm2        \n\t"
        "movq (%2, %%eax), %%mm3        \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm3         \n\t"
        "paddw %%mm0, %%mm1             \n\t"
        "paddw %%mm2, %%mm3             \n\t"
        "movq (%3, %%eax), %%mm4        \n\t"
        "movq (%3, %%eax), %%mm2        \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "paddw %%mm5, %%mm3             \n\t"
        "psrlw $1, %%mm1                \n\t"
        "psrlw $1, %%mm3                \n\t"
        "packuswb %%mm3, %%mm1          \n\t"
        "psubusb %%mm1, %%mm4           \n\t"
        "psubusb %%mm2, %%mm1           \n\t"
        "por %%mm4, %%mm1               \n\t"
        "movq %%mm1, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "addl %4, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1a - len), "r" (blk1b - len), "r" (blk2 - len), "r" (stride)
    );
}

static inline void sad8_4_mmx(uint8_t *blk1, uint8_t *blk2, int stride, int h)
{
    int len= -(stride<<h);
    asm volatile(
        ".balign 16                     \n\t"
        "1:                             \n\t"
        "movq (%1, %%eax), %%mm0        \n\t"
        "movq (%2, %%eax), %%mm1        \n\t"
        "movq %%mm0, %%mm4              \n\t"
        "movq %%mm1, %%mm2              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpcklbw %%mm7, %%mm1         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm1, %%mm0             \n\t"
        "paddw %%mm2, %%mm4             \n\t"
        "movq 1(%1, %%eax), %%mm2       \n\t"
        "movq 1(%2, %%eax), %%mm3       \n\t"
        "movq %%mm2, %%mm1              \n\t"
        "punpcklbw %%mm7, %%mm2         \n\t"
        "punpckhbw %%mm7, %%mm1         \n\t"
        "paddw %%mm0, %%mm2             \n\t"
        "paddw %%mm4, %%mm1             \n\t"
        "movq %%mm3, %%mm4              \n\t"
        "punpcklbw %%mm7, %%mm3         \n\t"
        "punpckhbw %%mm7, %%mm4         \n\t"
        "paddw %%mm3, %%mm2             \n\t"
        "paddw %%mm4, %%mm1             \n\t"
        "movq (%3, %%eax), %%mm3        \n\t"
        "movq (%3, %%eax), %%mm4        \n\t"
        "paddw %%mm5, %%mm2             \n\t"
        "paddw %%mm5, %%mm1             \n\t"
        "psrlw $2, %%mm2                \n\t"
        "psrlw $2, %%mm1                \n\t"
        "packuswb %%mm1, %%mm2          \n\t"
        "psubusb %%mm2, %%mm3           \n\t"
        "psubusb %%mm4, %%mm2           \n\t"
        "por %%mm3, %%mm2               \n\t"
        "movq %%mm2, %%mm0              \n\t"
        "punpcklbw %%mm7, %%mm0         \n\t"
        "punpckhbw %%mm7, %%mm2         \n\t"
        "paddw %%mm2, %%mm0             \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "addl %4, %%eax                 \n\t"
        " js 1b                         \n\t"
        : "+a" (len)
        : "r" (blk1 - len), "r" (blk1 - len + stride), "r" (blk2 - len), "r" (stride)
    );
}

static inline int sum_mmx(void)
{
    int ret;
    asm volatile(
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $32, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movq %%mm6, %%mm0              \n\t"
        "psrlq $16, %%mm6               \n\t"
        "paddw %%mm0, %%mm6             \n\t"
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret&0xFFFF;
}

static inline int sum_mmx2(void)
{
    int ret;
    asm volatile(
        "movd %%mm6, %0                 \n\t"
        : "=r" (ret)
    );
    return ret;
}

#define PIX_SAD(suf)\
static int pix_abs8x8_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_ ## suf(blk1, blk2, stride, 3);\
\
    return sum_ ## suf();\
}\
static int sad8x8_ ## suf(void *s, uint8_t *blk2, uint8_t *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_ ## suf(blk1, blk2, stride, 3);\
\
    return sum_ ## suf();\
}\
static int pix_abs8x8_x2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_2_ ## suf(blk1, blk1+1, blk2, stride, 3);\
\
    return sum_ ## suf();\
}\
static int pix_abs8x8_y2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_2_ ## suf(blk1, blk1+stride, blk2, stride, 3);\
\
    return sum_ ## suf();\
}\
static int pix_abs8x8_xy2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[2]) \
                 );\
\
    sad8_4_ ## suf(blk1, blk2, stride, 3);\
\
    return sum_ ## suf();\
}\
static int pix_abs16x16_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_ ## suf(blk1  , blk2  , stride, 4);\
    sad8_ ## suf(blk1+8, blk2+8, stride, 4);\
\
    return sum_ ## suf();\
}\
static int sad16x16_ ## suf(void *s, uint8_t *blk2, uint8_t *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t":);\
\
    sad8_ ## suf(blk1  , blk2  , stride, 4);\
    sad8_ ## suf(blk1+8, blk2+8, stride, 4);\
\
    return sum_ ## suf();\
}\
static int pix_abs16x16_x2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_2_ ## suf(blk1  , blk1+1, blk2  , stride, 4);\
    sad8_2_ ## suf(blk1+8, blk1+9, blk2+8, stride, 4);\
\
    return sum_ ## suf();\
}\
static int pix_abs16x16_y2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[1]) \
                 );\
\
    sad8_2_ ## suf(blk1  , blk1+stride,   blk2  , stride, 4);\
    sad8_2_ ## suf(blk1+8, blk1+stride+8, blk2+8, stride, 4);\
\
    return sum_ ## suf();\
}\
static int pix_abs16x16_xy2_ ## suf(uint8_t *blk2, uint8_t *blk1, int stride)\
{\
    asm volatile("pxor %%mm7, %%mm7     \n\t"\
                 "pxor %%mm6, %%mm6     \n\t"\
                 "movq %0, %%mm5        \n\t"\
                 :: "m"(round_tab[2]) \
                 );\
\
    sad8_4_ ## suf(blk1  , blk2  , stride, 4);\
    sad8_4_ ## suf(blk1+8, blk2+8, stride, 4);\
\
    return sum_ ## suf();\
}

PIX_SAD(mmx)
PIX_SAD(mmx2)

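/*
 * Runtime dispatch: install the MMX implementations when MMX is available,
 * then override with the MMX2 versions where they exist; the half-pel MMX2
 * kernels are only installed when bit-exact output is not requested.
 */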
void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx)
{
    if (mm_flags & MM_MMX) {
        c->pix_abs16x16     = pix_abs16x16_mmx;
        c->pix_abs16x16_x2  = pix_abs16x16_x2_mmx;
        c->pix_abs16x16_y2  = pix_abs16x16_y2_mmx;
        c->pix_abs16x16_xy2 = pix_abs16x16_xy2_mmx;
        c->pix_abs8x8       = pix_abs8x8_mmx;
        c->pix_abs8x8_x2    = pix_abs8x8_x2_mmx;
        c->pix_abs8x8_y2    = pix_abs8x8_y2_mmx;
        c->pix_abs8x8_xy2   = pix_abs8x8_xy2_mmx;

        c->sad[0]= sad16x16_mmx;
        c->sad[1]= sad8x8_mmx;
    }
    if (mm_flags & MM_MMXEXT) {
        c->pix_abs16x16 = pix_abs16x16_mmx2;
        c->pix_abs8x8   = pix_abs8x8_mmx2;

        c->sad[0]= sad16x16_mmx2;
        c->sad[1]= sad8x8_mmx2;

        if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
            c->pix_abs16x16_x2  = pix_abs16x16_x2_mmx2;
            c->pix_abs16x16_y2  = pix_abs16x16_y2_mmx2;
            c->pix_abs16x16_xy2 = pix_abs16x16_xy2_mmx2;
            c->pix_abs8x8_x2    = pix_abs8x8_x2_mmx2;
            c->pix_abs8x8_y2    = pix_abs8x8_y2_mmx2;
            c->pix_abs8x8_xy2   = pix_abs8x8_xy2_mmx2;