/*
 * MMX optimized DSP utils
 * Copyright (c) 2000, 2001 Fabrice Bellard.
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
 */
#include "../dsputil.h"
#include "../simple_idct.h"
#include "../mpegvideo.h"

extern const uint8_t ff_h263_loop_filter_strength[32];
extern void ff_idct_xvid_mmx(short *block);
extern void ff_idct_xvid_mmx2(short *block);

int mm_flags; /* multimedia extension flags */
/* pixel operations */
static const uint64_t mm_bone attribute_used __attribute__ ((aligned(8))) = 0x0101010101010101ULL;
static const uint64_t mm_wone attribute_used __attribute__ ((aligned(8))) = 0x0001000100010001ULL;
static const uint64_t mm_wtwo attribute_used __attribute__ ((aligned(8))) = 0x0002000200020002ULL;

static const uint64_t ff_pw_20 attribute_used __attribute__ ((aligned(8))) = 0x0014001400140014ULL;
static const uint64_t ff_pw_3 attribute_used __attribute__ ((aligned(8))) = 0x0003000300030003ULL;
static const uint64_t ff_pw_4 attribute_used __attribute__ ((aligned(8))) = 0x0004000400040004ULL;
static const uint64_t ff_pw_5 attribute_used __attribute__ ((aligned(8))) = 0x0005000500050005ULL;
static const uint64_t ff_pw_8 attribute_used __attribute__ ((aligned(8))) = 0x0008000800080008ULL;
static const uint64_t ff_pw_16 attribute_used __attribute__ ((aligned(8))) = 0x0010001000100010ULL;
static const uint64_t ff_pw_32 attribute_used __attribute__ ((aligned(8))) = 0x0020002000200020ULL;
static const uint64_t ff_pw_64 attribute_used __attribute__ ((aligned(8))) = 0x0040004000400040ULL;
static const uint64_t ff_pw_15 attribute_used __attribute__ ((aligned(8))) = 0x000F000F000F000FULL;

static const uint64_t ff_pb_3F attribute_used __attribute__ ((aligned(8))) = 0x3F3F3F3F3F3F3F3FULL;
static const uint64_t ff_pb_FC attribute_used __attribute__ ((aligned(8))) = 0xFCFCFCFCFCFCFCFCULL;

#define JUMPALIGN() __asm __volatile (".balign 8"::)
#define MOVQ_ZERO(regd) __asm __volatile ("pxor %%" #regd ", %%" #regd ::)

#define MOVQ_WONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd ::)

#define MOVQ_BFE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
    "paddb %%" #regd ", %%" #regd " \n\t" ::)

#ifndef PIC
#define MOVQ_BONE(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_bone))
#define MOVQ_WTWO(regd) __asm __volatile ("movq %0, %%" #regd " \n\t" ::"m"(mm_wtwo))
#else
// for shared libraries it is better to access the constants this way,
// generating them in a register instead of loading them from memory
#define MOVQ_BONE(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "packuswb %%" #regd ", %%" #regd " \n\t" ::)

#define MOVQ_WTWO(regd) \
    __asm __volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd " \n\t" \
    "psllw $1, %%" #regd " \n\t"::)
#endif

// using regr as temporary and for the output result
// first argument is unmodified and second is trashed
// regfe is supposed to contain 0xfefefefefefefefe
92
#define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
93
"movq " #rega ", " #regr " \n\t"\
94
"pand " #regb ", " #regr " \n\t"\
95
"pxor " #rega ", " #regb " \n\t"\
96
"pand " #regfe "," #regb " \n\t"\
97
"psrlq $1, " #regb " \n\t"\
98
"paddb " #regb ", " #regr " \n\t"
100
#define PAVGB_MMX(rega, regb, regr, regfe) \
101
"movq " #rega ", " #regr " \n\t"\
102
"por " #regb ", " #regr " \n\t"\
103
"pxor " #rega ", " #regb " \n\t"\
104
"pand " #regfe "," #regb " \n\t"\
105
"psrlq $1, " #regb " \n\t"\
106
"psubb " #regb ", " #regr " \n\t"
108
// mm6 is supposed to contain 0xfefefefefefefefe
109
#define PAVGBP_MMX_NO_RND(rega, regb, regr, regc, regd, regp) \
110
"movq " #rega ", " #regr " \n\t"\
111
"movq " #regc ", " #regp " \n\t"\
112
"pand " #regb ", " #regr " \n\t"\
113
"pand " #regd ", " #regp " \n\t"\
114
"pxor " #rega ", " #regb " \n\t"\
115
"pxor " #regc ", " #regd " \n\t"\
116
"pand %%mm6, " #regb " \n\t"\
117
"pand %%mm6, " #regd " \n\t"\
118
"psrlq $1, " #regb " \n\t"\
119
"psrlq $1, " #regd " \n\t"\
120
"paddb " #regb ", " #regr " \n\t"\
121
"paddb " #regd ", " #regp " \n\t"
123
#define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
124
"movq " #rega ", " #regr " \n\t"\
125
"movq " #regc ", " #regp " \n\t"\
126
"por " #regb ", " #regr " \n\t"\
127
"por " #regd ", " #regp " \n\t"\
128
"pxor " #rega ", " #regb " \n\t"\
129
"pxor " #regc ", " #regd " \n\t"\
130
"pand %%mm6, " #regb " \n\t"\
131
"pand %%mm6, " #regd " \n\t"\
132
"psrlq $1, " #regd " \n\t"\
133
"psrlq $1, " #regb " \n\t"\
134
"psubb " #regb ", " #regr " \n\t"\
135
"psubb " #regd ", " #regp " \n\t"
137
/***********************************/
138
/* MMX no rounding */
139
#define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
140
#define SET_RND MOVQ_WONE
141
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
142
#define PAVGB(a, b, c, e) PAVGB_MMX_NO_RND(a, b, c, e)
144
#include "dsputil_mmx_rnd.h"
150
/***********************************/
153
#define DEF(x, y) x ## _ ## y ##_mmx
154
#define SET_RND MOVQ_WTWO
155
#define PAVGBP(a, b, c, d, e, f) PAVGBP_MMX(a, b, c, d, e, f)
156
#define PAVGB(a, b, c, e) PAVGB_MMX(a, b, c, e)
158
#include "dsputil_mmx_rnd.h"
165
/***********************************/
168
#define DEF(x) x ## _3dnow
169
/* for Athlons PAVGUSB is preferred */
170
#define PAVGB "pavgusb"
172
#include "dsputil_mmx_avg.h"
177
/***********************************/
180
#define DEF(x) x ## _mmx2
182
/* Introduced only in MMX2 set */
183
#define PAVGB "pavgb"
185
#include "dsputil_mmx_avg.h"
190
#define SBUTTERFLY(a,b,t,n)\
191
"movq " #a ", " #t " \n\t" /* abcd */\
192
"punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
193
"punpckh" #n " " #b ", " #t " \n\t" /* cgdh */\
195
/***********************************/
198
#ifdef CONFIG_ENCODERS
199
static void get_pixels_mmx(DCTELEM *block, const uint8_t *pixels, int line_size)
202
"mov $-128, %%"REG_a" \n\t"
203
"pxor %%mm7, %%mm7 \n\t"
206
"movq (%0), %%mm0 \n\t"
207
"movq (%0, %2), %%mm2 \n\t"
208
"movq %%mm0, %%mm1 \n\t"
209
"movq %%mm2, %%mm3 \n\t"
210
"punpcklbw %%mm7, %%mm0 \n\t"
211
"punpckhbw %%mm7, %%mm1 \n\t"
212
"punpcklbw %%mm7, %%mm2 \n\t"
213
"punpckhbw %%mm7, %%mm3 \n\t"
214
"movq %%mm0, (%1, %%"REG_a") \n\t"
215
"movq %%mm1, 8(%1, %%"REG_a") \n\t"
216
"movq %%mm2, 16(%1, %%"REG_a") \n\t"
217
"movq %%mm3, 24(%1, %%"REG_a") \n\t"
219
"add $32, %%"REG_a" \n\t"
222
: "r" (block+64), "r" ((long)line_size), "r" ((long)line_size*2)
227
static inline void diff_pixels_mmx(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride)
230
"pxor %%mm7, %%mm7 \n\t"
231
"mov $-128, %%"REG_a" \n\t"
234
"movq (%0), %%mm0 \n\t"
235
"movq (%1), %%mm2 \n\t"
236
"movq %%mm0, %%mm1 \n\t"
237
"movq %%mm2, %%mm3 \n\t"
238
"punpcklbw %%mm7, %%mm0 \n\t"
239
"punpckhbw %%mm7, %%mm1 \n\t"
240
"punpcklbw %%mm7, %%mm2 \n\t"
241
"punpckhbw %%mm7, %%mm3 \n\t"
242
"psubw %%mm2, %%mm0 \n\t"
243
"psubw %%mm3, %%mm1 \n\t"
244
"movq %%mm0, (%2, %%"REG_a") \n\t"
245
"movq %%mm1, 8(%2, %%"REG_a") \n\t"
248
"add $16, %%"REG_a" \n\t"
250
: "+r" (s1), "+r" (s2)
251
: "r" (block+64), "r" ((long)stride)
255
#endif //CONFIG_ENCODERS
257
void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
262
/* read the pixels */
267
"movq %3, %%mm0 \n\t"
268
"movq 8%3, %%mm1 \n\t"
269
"movq 16%3, %%mm2 \n\t"
270
"movq 24%3, %%mm3 \n\t"
271
"movq 32%3, %%mm4 \n\t"
272
"movq 40%3, %%mm5 \n\t"
273
"movq 48%3, %%mm6 \n\t"
274
"movq 56%3, %%mm7 \n\t"
275
"packuswb %%mm1, %%mm0 \n\t"
276
"packuswb %%mm3, %%mm2 \n\t"
277
"packuswb %%mm5, %%mm4 \n\t"
278
"packuswb %%mm7, %%mm6 \n\t"
279
"movq %%mm0, (%0) \n\t"
280
"movq %%mm2, (%0, %1) \n\t"
281
"movq %%mm4, (%0, %1, 2) \n\t"
282
"movq %%mm6, (%0, %2) \n\t"
283
::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "m"(*p)
288
// if this were an exact copy of the code above, the compiler
// would generate some very strange code
292
"movq (%3), %%mm0 \n\t"
293
"movq 8(%3), %%mm1 \n\t"
294
"movq 16(%3), %%mm2 \n\t"
295
"movq 24(%3), %%mm3 \n\t"
296
"movq 32(%3), %%mm4 \n\t"
297
"movq 40(%3), %%mm5 \n\t"
298
"movq 48(%3), %%mm6 \n\t"
299
"movq 56(%3), %%mm7 \n\t"
300
"packuswb %%mm1, %%mm0 \n\t"
301
"packuswb %%mm3, %%mm2 \n\t"
302
"packuswb %%mm5, %%mm4 \n\t"
303
"packuswb %%mm7, %%mm6 \n\t"
304
"movq %%mm0, (%0) \n\t"
305
"movq %%mm2, (%0, %1) \n\t"
306
"movq %%mm4, (%0, %1, 2) \n\t"
307
"movq %%mm6, (%0, %2) \n\t"
308
::"r" (pix), "r" ((long)line_size), "r" ((long)line_size*3), "r"(p)
312
static DECLARE_ALIGNED_8(const unsigned char, vector128[8]) =
313
{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };
315
void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
319
movq_m2r(*vector128, mm1);
320
for (i = 0; i < 8; i++) {
321
movq_m2r(*(block), mm0);
322
packsswb_m2r(*(block + 4), mm0);
325
movq_r2m(mm0, *pixels);
330
void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
336
/* read the pixels */
343
"movq (%2), %%mm0 \n\t"
344
"movq 8(%2), %%mm1 \n\t"
345
"movq 16(%2), %%mm2 \n\t"
346
"movq 24(%2), %%mm3 \n\t"
347
"movq %0, %%mm4 \n\t"
348
"movq %1, %%mm6 \n\t"
349
"movq %%mm4, %%mm5 \n\t"
350
"punpcklbw %%mm7, %%mm4 \n\t"
351
"punpckhbw %%mm7, %%mm5 \n\t"
352
"paddsw %%mm4, %%mm0 \n\t"
353
"paddsw %%mm5, %%mm1 \n\t"
354
"movq %%mm6, %%mm5 \n\t"
355
"punpcklbw %%mm7, %%mm6 \n\t"
356
"punpckhbw %%mm7, %%mm5 \n\t"
357
"paddsw %%mm6, %%mm2 \n\t"
358
"paddsw %%mm5, %%mm3 \n\t"
359
"packuswb %%mm1, %%mm0 \n\t"
360
"packuswb %%mm3, %%mm2 \n\t"
361
"movq %%mm0, %0 \n\t"
362
"movq %%mm2, %1 \n\t"
363
:"+m"(*pix), "+m"(*(pix+line_size))
371
static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
374
"lea (%3, %3), %%"REG_a" \n\t"
377
"movd (%1), %%mm0 \n\t"
378
"movd (%1, %3), %%mm1 \n\t"
379
"movd %%mm0, (%2) \n\t"
380
"movd %%mm1, (%2, %3) \n\t"
381
"add %%"REG_a", %1 \n\t"
382
"add %%"REG_a", %2 \n\t"
383
"movd (%1), %%mm0 \n\t"
384
"movd (%1, %3), %%mm1 \n\t"
385
"movd %%mm0, (%2) \n\t"
386
"movd %%mm1, (%2, %3) \n\t"
387
"add %%"REG_a", %1 \n\t"
388
"add %%"REG_a", %2 \n\t"
391
: "+g"(h), "+r" (pixels), "+r" (block)
392
: "r"((long)line_size)
397
static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
400
"lea (%3, %3), %%"REG_a" \n\t"
403
"movq (%1), %%mm0 \n\t"
404
"movq (%1, %3), %%mm1 \n\t"
405
"movq %%mm0, (%2) \n\t"
406
"movq %%mm1, (%2, %3) \n\t"
407
"add %%"REG_a", %1 \n\t"
408
"add %%"REG_a", %2 \n\t"
409
"movq (%1), %%mm0 \n\t"
410
"movq (%1, %3), %%mm1 \n\t"
411
"movq %%mm0, (%2) \n\t"
412
"movq %%mm1, (%2, %3) \n\t"
413
"add %%"REG_a", %1 \n\t"
414
"add %%"REG_a", %2 \n\t"
417
: "+g"(h), "+r" (pixels), "+r" (block)
418
: "r"((long)line_size)
423
static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
426
"lea (%3, %3), %%"REG_a" \n\t"
429
"movq (%1), %%mm0 \n\t"
430
"movq 8(%1), %%mm4 \n\t"
431
"movq (%1, %3), %%mm1 \n\t"
432
"movq 8(%1, %3), %%mm5 \n\t"
433
"movq %%mm0, (%2) \n\t"
434
"movq %%mm4, 8(%2) \n\t"
435
"movq %%mm1, (%2, %3) \n\t"
436
"movq %%mm5, 8(%2, %3) \n\t"
437
"add %%"REG_a", %1 \n\t"
438
"add %%"REG_a", %2 \n\t"
439
"movq (%1), %%mm0 \n\t"
440
"movq 8(%1), %%mm4 \n\t"
441
"movq (%1, %3), %%mm1 \n\t"
442
"movq 8(%1, %3), %%mm5 \n\t"
443
"movq %%mm0, (%2) \n\t"
444
"movq %%mm4, 8(%2) \n\t"
445
"movq %%mm1, (%2, %3) \n\t"
446
"movq %%mm5, 8(%2, %3) \n\t"
447
"add %%"REG_a", %1 \n\t"
448
"add %%"REG_a", %2 \n\t"
451
: "+g"(h), "+r" (pixels), "+r" (block)
452
: "r"((long)line_size)
457
static void clear_blocks_mmx(DCTELEM *blocks)
460
"pxor %%mm7, %%mm7 \n\t"
461
"mov $-128*6, %%"REG_a" \n\t"
463
"movq %%mm7, (%0, %%"REG_a") \n\t"
464
"movq %%mm7, 8(%0, %%"REG_a") \n\t"
465
"movq %%mm7, 16(%0, %%"REG_a") \n\t"
466
"movq %%mm7, 24(%0, %%"REG_a") \n\t"
467
"add $32, %%"REG_a" \n\t"
469
: : "r" (((uint8_t *)blocks)+128*6)
474
#ifdef CONFIG_ENCODERS
475
static int pix_sum16_mmx(uint8_t * pix, int line_size){
478
long index= -line_size*h;
481
"pxor %%mm7, %%mm7 \n\t"
482
"pxor %%mm6, %%mm6 \n\t"
484
"movq (%2, %1), %%mm0 \n\t"
485
"movq (%2, %1), %%mm1 \n\t"
486
"movq 8(%2, %1), %%mm2 \n\t"
487
"movq 8(%2, %1), %%mm3 \n\t"
488
"punpcklbw %%mm7, %%mm0 \n\t"
489
"punpckhbw %%mm7, %%mm1 \n\t"
490
"punpcklbw %%mm7, %%mm2 \n\t"
491
"punpckhbw %%mm7, %%mm3 \n\t"
492
"paddw %%mm0, %%mm1 \n\t"
493
"paddw %%mm2, %%mm3 \n\t"
494
"paddw %%mm1, %%mm3 \n\t"
495
"paddw %%mm3, %%mm6 \n\t"
498
"movq %%mm6, %%mm5 \n\t"
499
"psrlq $32, %%mm6 \n\t"
500
"paddw %%mm5, %%mm6 \n\t"
501
"movq %%mm6, %%mm5 \n\t"
502
"psrlq $16, %%mm6 \n\t"
503
"paddw %%mm5, %%mm6 \n\t"
504
"movd %%mm6, %0 \n\t"
505
"andl $0xFFFF, %0 \n\t"
506
: "=&r" (sum), "+r" (index)
507
: "r" (pix - index), "r" ((long)line_size)
512
#endif //CONFIG_ENCODERS
514
static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
518
"movq (%1, %0), %%mm0 \n\t"
519
"movq (%2, %0), %%mm1 \n\t"
520
"paddb %%mm0, %%mm1 \n\t"
521
"movq %%mm1, (%2, %0) \n\t"
522
"movq 8(%1, %0), %%mm0 \n\t"
523
"movq 8(%2, %0), %%mm1 \n\t"
524
"paddb %%mm0, %%mm1 \n\t"
525
"movq %%mm1, 8(%2, %0) \n\t"
530
: "r"(src), "r"(dst), "r"((long)w-15)
533
dst[i+0] += src[i+0];
536
#define H263_LOOP_FILTER \
537
"pxor %%mm7, %%mm7 \n\t"\
538
"movq %0, %%mm0 \n\t"\
539
"movq %0, %%mm1 \n\t"\
540
"movq %3, %%mm2 \n\t"\
541
"movq %3, %%mm3 \n\t"\
542
"punpcklbw %%mm7, %%mm0 \n\t"\
543
"punpckhbw %%mm7, %%mm1 \n\t"\
544
"punpcklbw %%mm7, %%mm2 \n\t"\
545
"punpckhbw %%mm7, %%mm3 \n\t"\
546
"psubw %%mm2, %%mm0 \n\t"\
547
"psubw %%mm3, %%mm1 \n\t"\
548
"movq %1, %%mm2 \n\t"\
549
"movq %1, %%mm3 \n\t"\
550
"movq %2, %%mm4 \n\t"\
551
"movq %2, %%mm5 \n\t"\
552
"punpcklbw %%mm7, %%mm2 \n\t"\
553
"punpckhbw %%mm7, %%mm3 \n\t"\
554
"punpcklbw %%mm7, %%mm4 \n\t"\
555
"punpckhbw %%mm7, %%mm5 \n\t"\
556
"psubw %%mm2, %%mm4 \n\t"\
557
"psubw %%mm3, %%mm5 \n\t"\
558
"psllw $2, %%mm4 \n\t"\
559
"psllw $2, %%mm5 \n\t"\
560
"paddw %%mm0, %%mm4 \n\t"\
561
"paddw %%mm1, %%mm5 \n\t"\
562
"pxor %%mm6, %%mm6 \n\t"\
563
"pcmpgtw %%mm4, %%mm6 \n\t"\
564
"pcmpgtw %%mm5, %%mm7 \n\t"\
565
"pxor %%mm6, %%mm4 \n\t"\
566
"pxor %%mm7, %%mm5 \n\t"\
567
"psubw %%mm6, %%mm4 \n\t"\
568
"psubw %%mm7, %%mm5 \n\t"\
569
"psrlw $3, %%mm4 \n\t"\
570
"psrlw $3, %%mm5 \n\t"\
571
"packuswb %%mm5, %%mm4 \n\t"\
572
"packsswb %%mm7, %%mm6 \n\t"\
573
"pxor %%mm7, %%mm7 \n\t"\
574
"movd %4, %%mm2 \n\t"\
575
"punpcklbw %%mm2, %%mm2 \n\t"\
576
"punpcklbw %%mm2, %%mm2 \n\t"\
577
"punpcklbw %%mm2, %%mm2 \n\t"\
578
"psubusb %%mm4, %%mm2 \n\t"\
579
"movq %%mm2, %%mm3 \n\t"\
580
"psubusb %%mm4, %%mm3 \n\t"\
581
"psubb %%mm3, %%mm2 \n\t"\
582
"movq %1, %%mm3 \n\t"\
583
"movq %2, %%mm4 \n\t"\
584
"pxor %%mm6, %%mm3 \n\t"\
585
"pxor %%mm6, %%mm4 \n\t"\
586
"paddusb %%mm2, %%mm3 \n\t"\
587
"psubusb %%mm2, %%mm4 \n\t"\
588
"pxor %%mm6, %%mm3 \n\t"\
589
"pxor %%mm6, %%mm4 \n\t"\
590
"paddusb %%mm2, %%mm2 \n\t"\
591
"packsswb %%mm1, %%mm0 \n\t"\
592
"pcmpgtb %%mm0, %%mm7 \n\t"\
593
"pxor %%mm7, %%mm0 \n\t"\
594
"psubb %%mm7, %%mm0 \n\t"\
595
"movq %%mm0, %%mm1 \n\t"\
596
"psubusb %%mm2, %%mm0 \n\t"\
597
"psubb %%mm0, %%mm1 \n\t"\
598
"pand %5, %%mm1 \n\t"\
599
"psrlw $2, %%mm1 \n\t"\
600
"pxor %%mm7, %%mm1 \n\t"\
601
"psubb %%mm7, %%mm1 \n\t"\
602
"movq %0, %%mm5 \n\t"\
603
"movq %3, %%mm6 \n\t"\
604
"psubb %%mm1, %%mm5 \n\t"\
605
"paddb %%mm1, %%mm6 \n\t"
607
static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
608
const int strength= ff_h263_loop_filter_strength[qscale];
614
"movq %%mm3, %1 \n\t"
615
"movq %%mm4, %2 \n\t"
616
"movq %%mm5, %0 \n\t"
617
"movq %%mm6, %3 \n\t"
618
: "+m" (*(uint64_t*)(src - 2*stride)),
619
"+m" (*(uint64_t*)(src - 1*stride)),
620
"+m" (*(uint64_t*)(src + 0*stride)),
621
"+m" (*(uint64_t*)(src + 1*stride))
622
: "g" (2*strength), "m"(ff_pb_FC)
626
static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
627
asm volatile( //FIXME could save 1 instruction if done as 8x4 ...
628
"movd %4, %%mm0 \n\t"
629
"movd %5, %%mm1 \n\t"
630
"movd %6, %%mm2 \n\t"
631
"movd %7, %%mm3 \n\t"
632
"punpcklbw %%mm1, %%mm0 \n\t"
633
"punpcklbw %%mm3, %%mm2 \n\t"
634
"movq %%mm0, %%mm1 \n\t"
635
"punpcklwd %%mm2, %%mm0 \n\t"
636
"punpckhwd %%mm2, %%mm1 \n\t"
637
"movd %%mm0, %0 \n\t"
638
"punpckhdq %%mm0, %%mm0 \n\t"
639
"movd %%mm0, %1 \n\t"
640
"movd %%mm1, %2 \n\t"
641
"punpckhdq %%mm1, %%mm1 \n\t"
642
"movd %%mm1, %3 \n\t"
644
: "=m" (*(uint32_t*)(dst + 0*dst_stride)),
645
"=m" (*(uint32_t*)(dst + 1*dst_stride)),
646
"=m" (*(uint32_t*)(dst + 2*dst_stride)),
647
"=m" (*(uint32_t*)(dst + 3*dst_stride))
648
: "m" (*(uint32_t*)(src + 0*src_stride)),
649
"m" (*(uint32_t*)(src + 1*src_stride)),
650
"m" (*(uint32_t*)(src + 2*src_stride)),
651
"m" (*(uint32_t*)(src + 3*src_stride))
655
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
656
const int strength= ff_h263_loop_filter_strength[qscale];
657
uint64_t temp[4] __attribute__ ((aligned(8)));
658
uint8_t *btemp= (uint8_t*)temp;
662
transpose4x4(btemp , src , 8, stride);
663
transpose4x4(btemp+4, src + 4*stride, 8, stride);
665
H263_LOOP_FILTER // 5 3 4 6
671
: "g" (2*strength), "m"(ff_pb_FC)
675
"movq %%mm5, %%mm1 \n\t"
676
"movq %%mm4, %%mm0 \n\t"
677
"punpcklbw %%mm3, %%mm5 \n\t"
678
"punpcklbw %%mm6, %%mm4 \n\t"
679
"punpckhbw %%mm3, %%mm1 \n\t"
680
"punpckhbw %%mm6, %%mm0 \n\t"
681
"movq %%mm5, %%mm3 \n\t"
682
"movq %%mm1, %%mm6 \n\t"
683
"punpcklwd %%mm4, %%mm5 \n\t"
684
"punpcklwd %%mm0, %%mm1 \n\t"
685
"punpckhwd %%mm4, %%mm3 \n\t"
686
"punpckhwd %%mm0, %%mm6 \n\t"
687
"movd %%mm5, (%0) \n\t"
688
"punpckhdq %%mm5, %%mm5 \n\t"
689
"movd %%mm5, (%0,%2) \n\t"
690
"movd %%mm3, (%0,%2,2) \n\t"
691
"punpckhdq %%mm3, %%mm3 \n\t"
692
"movd %%mm3, (%0,%3) \n\t"
693
"movd %%mm1, (%1) \n\t"
694
"punpckhdq %%mm1, %%mm1 \n\t"
695
"movd %%mm1, (%1,%2) \n\t"
696
"movd %%mm6, (%1,%2,2) \n\t"
697
"punpckhdq %%mm6, %%mm6 \n\t"
698
"movd %%mm6, (%1,%3) \n\t"
700
"r" (src + 4*stride),
701
"r" ((long) stride ),
702
"r" ((long)(3*stride))
706
#ifdef CONFIG_ENCODERS
707
static int pix_norm1_mmx(uint8_t *pix, int line_size) {
714
"movq (%0),%%mm2\n" /* mm2 = pix[0-7] */
715
"movq 8(%0),%%mm3\n" /* mm3 = pix[8-15] */
717
"movq %%mm2,%%mm1\n" /* mm1 = mm2 = pix[0-7] */
719
"punpckhbw %%mm0,%%mm1\n" /* mm1 = [pix4-7] */
720
"punpcklbw %%mm0,%%mm2\n" /* mm2 = [pix0-3] */
722
"movq %%mm3,%%mm4\n" /* mm4 = mm3 = pix[8-15] */
723
"punpckhbw %%mm0,%%mm3\n" /* mm3 = [pix12-15] */
724
"punpcklbw %%mm0,%%mm4\n" /* mm4 = [pix8-11] */
726
"pmaddwd %%mm1,%%mm1\n" /* mm1 = (pix0^2+pix1^2,pix2^2+pix3^2) */
727
"pmaddwd %%mm2,%%mm2\n" /* mm2 = (pix4^2+pix5^2,pix6^2+pix7^2) */
729
"pmaddwd %%mm3,%%mm3\n"
730
"pmaddwd %%mm4,%%mm4\n"
732
"paddd %%mm1,%%mm2\n" /* mm2 = (pix0^2+pix1^2+pix4^2+pix5^2,
733
pix2^2+pix3^2+pix6^2+pix7^2) */
734
"paddd %%mm3,%%mm4\n"
735
"paddd %%mm2,%%mm7\n"
738
"paddd %%mm4,%%mm7\n"
743
"psrlq $32, %%mm7\n" /* shift hi dword to lo */
744
"paddd %%mm7,%%mm1\n"
746
: "+r" (pix), "=r"(tmp) : "r" ((long)line_size) : "%ecx" );
750
static int sse8_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
755
"pxor %%mm0,%%mm0\n" /* mm0 = 0 */
756
"pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
758
"movq (%0),%%mm1\n" /* mm1 = pix1[0][0-7] */
759
"movq (%1),%%mm2\n" /* mm2 = pix2[0][0-7] */
760
"movq (%0,%3),%%mm3\n" /* mm3 = pix1[1][0-7] */
761
"movq (%1,%3),%%mm4\n" /* mm4 = pix2[1][0-7] */
763
/* todo: mm1-mm2, mm3-mm4 */
764
/* algo: subtract mm1 from mm2 with saturation and vice versa */
765
/* OR the results to get absolute difference */
768
"psubusb %%mm2,%%mm1\n"
769
"psubusb %%mm4,%%mm3\n"
770
"psubusb %%mm5,%%mm2\n"
771
"psubusb %%mm6,%%mm4\n"
776
/* now convert to 16-bit vectors so we can square them */
780
"punpckhbw %%mm0,%%mm2\n"
781
"punpckhbw %%mm0,%%mm4\n"
782
"punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
783
"punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
785
"pmaddwd %%mm2,%%mm2\n"
786
"pmaddwd %%mm4,%%mm4\n"
787
"pmaddwd %%mm1,%%mm1\n"
788
"pmaddwd %%mm3,%%mm3\n"
790
"lea (%0,%3,2), %0\n" /* pix1 += 2*line_size */
791
"lea (%1,%3,2), %1\n" /* pix2 += 2*line_size */
793
"paddd %%mm2,%%mm1\n"
794
"paddd %%mm4,%%mm3\n"
795
"paddd %%mm1,%%mm7\n"
796
"paddd %%mm3,%%mm7\n"
802
"psrlq $32, %%mm7\n" /* shift hi dword to lo */
803
"paddd %%mm7,%%mm1\n"
805
: "+r" (pix1), "+r" (pix2), "=r"(tmp)
806
: "r" ((long)line_size) , "m" (h)
811
static int sse16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
815
"pxor %%mm0,%%mm0\n" /* mm0 = 0 */
816
"pxor %%mm7,%%mm7\n" /* mm7 holds the sum */
818
"movq (%0),%%mm1\n" /* mm1 = pix1[0-7] */
819
"movq (%1),%%mm2\n" /* mm2 = pix2[0-7] */
820
"movq 8(%0),%%mm3\n" /* mm3 = pix1[8-15] */
821
"movq 8(%1),%%mm4\n" /* mm4 = pix2[8-15] */
823
/* todo: mm1-mm2, mm3-mm4 */
824
/* algo: subtract mm1 from mm2 with saturation and vice versa */
825
/* OR the results to get absolute difference */
828
"psubusb %%mm2,%%mm1\n"
829
"psubusb %%mm4,%%mm3\n"
830
"psubusb %%mm5,%%mm2\n"
831
"psubusb %%mm6,%%mm4\n"
836
/* now convert to 16-bit vectors so we can square them */
840
"punpckhbw %%mm0,%%mm2\n"
841
"punpckhbw %%mm0,%%mm4\n"
842
"punpcklbw %%mm0,%%mm1\n" /* mm1 now spread over (mm1,mm2) */
843
"punpcklbw %%mm0,%%mm3\n" /* mm4 now spread over (mm3,mm4) */
845
"pmaddwd %%mm2,%%mm2\n"
846
"pmaddwd %%mm4,%%mm4\n"
847
"pmaddwd %%mm1,%%mm1\n"
848
"pmaddwd %%mm3,%%mm3\n"
853
"paddd %%mm2,%%mm1\n"
854
"paddd %%mm4,%%mm3\n"
855
"paddd %%mm1,%%mm7\n"
856
"paddd %%mm3,%%mm7\n"
862
"psrlq $32, %%mm7\n" /* shift hi dword to lo */
863
"paddd %%mm7,%%mm1\n"
865
: "+r" (pix1), "+r" (pix2), "=r"(tmp)
866
: "r" ((long)line_size) , "m" (h)
871
static int sse16_sse2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
875
"pxor %%xmm0,%%xmm0\n" /* mm0 = 0 */
876
"pxor %%xmm7,%%xmm7\n" /* mm7 holds the sum */
878
"movdqu (%0),%%xmm1\n" /* mm1 = pix1[0][0-15] */
879
"movdqu (%1),%%xmm2\n" /* mm2 = pix2[0][0-15] */
880
"movdqu (%0,%4),%%xmm3\n" /* mm3 = pix1[1][0-15] */
881
"movdqu (%1,%4),%%xmm4\n" /* mm4 = pix2[1][0-15] */
883
/* todo: mm1-mm2, mm3-mm4 */
884
/* algo: subtract mm1 from mm2 with saturation and vice versa */
885
/* OR the results to get absolute difference */
886
"movdqa %%xmm1,%%xmm5\n"
887
"movdqa %%xmm3,%%xmm6\n"
888
"psubusb %%xmm2,%%xmm1\n"
889
"psubusb %%xmm4,%%xmm3\n"
890
"psubusb %%xmm5,%%xmm2\n"
891
"psubusb %%xmm6,%%xmm4\n"
893
"por %%xmm1,%%xmm2\n"
894
"por %%xmm3,%%xmm4\n"
896
/* now convert to 16-bit vectors so we can square them */
897
"movdqa %%xmm2,%%xmm1\n"
898
"movdqa %%xmm4,%%xmm3\n"
900
"punpckhbw %%xmm0,%%xmm2\n"
901
"punpckhbw %%xmm0,%%xmm4\n"
902
"punpcklbw %%xmm0,%%xmm1\n" /* mm1 now spread over (mm1,mm2) */
903
"punpcklbw %%xmm0,%%xmm3\n" /* mm4 now spread over (mm3,mm4) */
905
"pmaddwd %%xmm2,%%xmm2\n"
906
"pmaddwd %%xmm4,%%xmm4\n"
907
"pmaddwd %%xmm1,%%xmm1\n"
908
"pmaddwd %%xmm3,%%xmm3\n"
910
"lea (%0,%4,2), %0\n" /* pix1 += 2*line_size */
911
"lea (%1,%4,2), %1\n" /* pix2 += 2*line_size */
913
"paddd %%xmm2,%%xmm1\n"
914
"paddd %%xmm4,%%xmm3\n"
915
"paddd %%xmm1,%%xmm7\n"
916
"paddd %%xmm3,%%xmm7\n"
921
"movdqa %%xmm7,%%xmm1\n"
922
"psrldq $8, %%xmm7\n" /* shift hi qword to lo */
923
"paddd %%xmm1,%%xmm7\n"
924
"movdqa %%xmm7,%%xmm1\n"
925
"psrldq $4, %%xmm7\n" /* shift hi dword to lo */
926
"paddd %%xmm1,%%xmm7\n"
928
: "+r" (pix1), "+r" (pix2), "+r"(h), "=r"(tmp)
929
: "r" ((long)line_size));
933
static int hf_noise8_mmx(uint8_t * pix1, int line_size, int h) {
941
"movq %%mm0, %%mm1\n"
945
"movq %%mm0, %%mm2\n"
946
"movq %%mm1, %%mm3\n"
947
"punpcklbw %%mm7,%%mm0\n"
948
"punpcklbw %%mm7,%%mm1\n"
949
"punpckhbw %%mm7,%%mm2\n"
950
"punpckhbw %%mm7,%%mm3\n"
951
"psubw %%mm1, %%mm0\n"
952
"psubw %%mm3, %%mm2\n"
957
"movq %%mm4, %%mm1\n"
961
"movq %%mm4, %%mm5\n"
962
"movq %%mm1, %%mm3\n"
963
"punpcklbw %%mm7,%%mm4\n"
964
"punpcklbw %%mm7,%%mm1\n"
965
"punpckhbw %%mm7,%%mm5\n"
966
"punpckhbw %%mm7,%%mm3\n"
967
"psubw %%mm1, %%mm4\n"
968
"psubw %%mm3, %%mm5\n"
969
"psubw %%mm4, %%mm0\n"
970
"psubw %%mm5, %%mm2\n"
971
"pxor %%mm3, %%mm3\n"
972
"pxor %%mm1, %%mm1\n"
973
"pcmpgtw %%mm0, %%mm3\n\t"
974
"pcmpgtw %%mm2, %%mm1\n\t"
975
"pxor %%mm3, %%mm0\n"
976
"pxor %%mm1, %%mm2\n"
977
"psubw %%mm3, %%mm0\n"
978
"psubw %%mm1, %%mm2\n"
979
"paddw %%mm0, %%mm2\n"
980
"paddw %%mm2, %%mm6\n"
986
"movq %%mm0, %%mm1\n"
990
"movq %%mm0, %%mm2\n"
991
"movq %%mm1, %%mm3\n"
992
"punpcklbw %%mm7,%%mm0\n"
993
"punpcklbw %%mm7,%%mm1\n"
994
"punpckhbw %%mm7,%%mm2\n"
995
"punpckhbw %%mm7,%%mm3\n"
996
"psubw %%mm1, %%mm0\n"
997
"psubw %%mm3, %%mm2\n"
998
"psubw %%mm0, %%mm4\n"
999
"psubw %%mm2, %%mm5\n"
1000
"pxor %%mm3, %%mm3\n"
1001
"pxor %%mm1, %%mm1\n"
1002
"pcmpgtw %%mm4, %%mm3\n\t"
1003
"pcmpgtw %%mm5, %%mm1\n\t"
1004
"pxor %%mm3, %%mm4\n"
1005
"pxor %%mm1, %%mm5\n"
1006
"psubw %%mm3, %%mm4\n"
1007
"psubw %%mm1, %%mm5\n"
1008
"paddw %%mm4, %%mm5\n"
1009
"paddw %%mm5, %%mm6\n"
1014
"movq %%mm4, %%mm1\n"
1018
"movq %%mm4, %%mm5\n"
1019
"movq %%mm1, %%mm3\n"
1020
"punpcklbw %%mm7,%%mm4\n"
1021
"punpcklbw %%mm7,%%mm1\n"
1022
"punpckhbw %%mm7,%%mm5\n"
1023
"punpckhbw %%mm7,%%mm3\n"
1024
"psubw %%mm1, %%mm4\n"
1025
"psubw %%mm3, %%mm5\n"
1026
"psubw %%mm4, %%mm0\n"
1027
"psubw %%mm5, %%mm2\n"
1028
"pxor %%mm3, %%mm3\n"
1029
"pxor %%mm1, %%mm1\n"
1030
"pcmpgtw %%mm0, %%mm3\n\t"
1031
"pcmpgtw %%mm2, %%mm1\n\t"
1032
"pxor %%mm3, %%mm0\n"
1033
"pxor %%mm1, %%mm2\n"
1034
"psubw %%mm3, %%mm0\n"
1035
"psubw %%mm1, %%mm2\n"
1036
"paddw %%mm0, %%mm2\n"
1037
"paddw %%mm2, %%mm6\n"
1043
"movq %%mm6, %%mm0\n"
1044
"punpcklwd %%mm7,%%mm0\n"
1045
"punpckhwd %%mm7,%%mm6\n"
1046
"paddd %%mm0, %%mm6\n"
1048
"movq %%mm6,%%mm0\n"
1049
"psrlq $32, %%mm6\n"
1050
"paddd %%mm6,%%mm0\n"
1052
: "+r" (pix1), "=r"(tmp)
1053
: "r" ((long)line_size) , "g" (h-2)
1058
static int hf_noise16_mmx(uint8_t * pix1, int line_size, int h) {
1060
uint8_t * pix= pix1;
1063
"pxor %%mm7,%%mm7\n"
1064
"pxor %%mm6,%%mm6\n"
1067
"movq 1(%0),%%mm1\n"
1068
"movq %%mm0, %%mm2\n"
1069
"movq %%mm1, %%mm3\n"
1070
"punpcklbw %%mm7,%%mm0\n"
1071
"punpcklbw %%mm7,%%mm1\n"
1072
"punpckhbw %%mm7,%%mm2\n"
1073
"punpckhbw %%mm7,%%mm3\n"
1074
"psubw %%mm1, %%mm0\n"
1075
"psubw %%mm3, %%mm2\n"
1080
"movq 1(%0),%%mm1\n"
1081
"movq %%mm4, %%mm5\n"
1082
"movq %%mm1, %%mm3\n"
1083
"punpcklbw %%mm7,%%mm4\n"
1084
"punpcklbw %%mm7,%%mm1\n"
1085
"punpckhbw %%mm7,%%mm5\n"
1086
"punpckhbw %%mm7,%%mm3\n"
1087
"psubw %%mm1, %%mm4\n"
1088
"psubw %%mm3, %%mm5\n"
1089
"psubw %%mm4, %%mm0\n"
1090
"psubw %%mm5, %%mm2\n"
1091
"pxor %%mm3, %%mm3\n"
1092
"pxor %%mm1, %%mm1\n"
1093
"pcmpgtw %%mm0, %%mm3\n\t"
1094
"pcmpgtw %%mm2, %%mm1\n\t"
1095
"pxor %%mm3, %%mm0\n"
1096
"pxor %%mm1, %%mm2\n"
1097
"psubw %%mm3, %%mm0\n"
1098
"psubw %%mm1, %%mm2\n"
1099
"paddw %%mm0, %%mm2\n"
1100
"paddw %%mm2, %%mm6\n"
1106
"movq 1(%0),%%mm1\n"
1107
"movq %%mm0, %%mm2\n"
1108
"movq %%mm1, %%mm3\n"
1109
"punpcklbw %%mm7,%%mm0\n"
1110
"punpcklbw %%mm7,%%mm1\n"
1111
"punpckhbw %%mm7,%%mm2\n"
1112
"punpckhbw %%mm7,%%mm3\n"
1113
"psubw %%mm1, %%mm0\n"
1114
"psubw %%mm3, %%mm2\n"
1115
"psubw %%mm0, %%mm4\n"
1116
"psubw %%mm2, %%mm5\n"
1117
"pxor %%mm3, %%mm3\n"
1118
"pxor %%mm1, %%mm1\n"
1119
"pcmpgtw %%mm4, %%mm3\n\t"
1120
"pcmpgtw %%mm5, %%mm1\n\t"
1121
"pxor %%mm3, %%mm4\n"
1122
"pxor %%mm1, %%mm5\n"
1123
"psubw %%mm3, %%mm4\n"
1124
"psubw %%mm1, %%mm5\n"
1125
"paddw %%mm4, %%mm5\n"
1126
"paddw %%mm5, %%mm6\n"
1131
"movq 1(%0),%%mm1\n"
1132
"movq %%mm4, %%mm5\n"
1133
"movq %%mm1, %%mm3\n"
1134
"punpcklbw %%mm7,%%mm4\n"
1135
"punpcklbw %%mm7,%%mm1\n"
1136
"punpckhbw %%mm7,%%mm5\n"
1137
"punpckhbw %%mm7,%%mm3\n"
1138
"psubw %%mm1, %%mm4\n"
1139
"psubw %%mm3, %%mm5\n"
1140
"psubw %%mm4, %%mm0\n"
1141
"psubw %%mm5, %%mm2\n"
1142
"pxor %%mm3, %%mm3\n"
1143
"pxor %%mm1, %%mm1\n"
1144
"pcmpgtw %%mm0, %%mm3\n\t"
1145
"pcmpgtw %%mm2, %%mm1\n\t"
1146
"pxor %%mm3, %%mm0\n"
1147
"pxor %%mm1, %%mm2\n"
1148
"psubw %%mm3, %%mm0\n"
1149
"psubw %%mm1, %%mm2\n"
1150
"paddw %%mm0, %%mm2\n"
1151
"paddw %%mm2, %%mm6\n"
1157
"movq %%mm6, %%mm0\n"
1158
"punpcklwd %%mm7,%%mm0\n"
1159
"punpckhwd %%mm7,%%mm6\n"
1160
"paddd %%mm0, %%mm6\n"
1162
"movq %%mm6,%%mm0\n"
1163
"psrlq $32, %%mm6\n"
1164
"paddd %%mm6,%%mm0\n"
1166
: "+r" (pix1), "=r"(tmp)
1167
: "r" ((long)line_size) , "g" (h-2)
1169
return tmp + hf_noise8_mmx(pix+8, line_size, h);
1172
static int nsse16_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
1173
MpegEncContext *c = p;
1176
if(c) score1 = c->dsp.sse[0](c, pix1, pix2, line_size, h);
1177
else score1 = sse16_mmx(c, pix1, pix2, line_size, h);
1178
score2= hf_noise16_mmx(pix1, line_size, h) - hf_noise16_mmx(pix2, line_size, h);
1180
if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
1181
else return score1 + ABS(score2)*8;
1184
static int nsse8_mmx(void *p, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
1185
MpegEncContext *c = p;
1186
int score1= sse8_mmx(c, pix1, pix2, line_size, h);
1187
int score2= hf_noise8_mmx(pix1, line_size, h) - hf_noise8_mmx(pix2, line_size, h);
1189
if(c) return score1 + ABS(score2)*c->avctx->nsse_weight;
1190
else return score1 + ABS(score2)*8;
1193
static int vsad_intra16_mmx(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
1196
assert( (((int)pix) & 7) == 0);
1197
assert((line_size &7) ==0);
1199
#define SUM(in0, in1, out0, out1) \
1200
"movq (%0), %%mm2\n"\
1201
"movq 8(%0), %%mm3\n"\
1203
"movq %%mm2, " #out0 "\n"\
1204
"movq %%mm3, " #out1 "\n"\
1205
"psubusb " #in0 ", %%mm2\n"\
1206
"psubusb " #in1 ", %%mm3\n"\
1207
"psubusb " #out0 ", " #in0 "\n"\
1208
"psubusb " #out1 ", " #in1 "\n"\
1209
"por %%mm2, " #in0 "\n"\
1210
"por %%mm3, " #in1 "\n"\
1211
"movq " #in0 ", %%mm2\n"\
1212
"movq " #in1 ", %%mm3\n"\
1213
"punpcklbw %%mm7, " #in0 "\n"\
1214
"punpcklbw %%mm7, " #in1 "\n"\
1215
"punpckhbw %%mm7, %%mm2\n"\
1216
"punpckhbw %%mm7, %%mm3\n"\
1217
"paddw " #in1 ", " #in0 "\n"\
1218
"paddw %%mm3, %%mm2\n"\
1219
"paddw %%mm2, " #in0 "\n"\
1220
"paddw " #in0 ", %%mm6\n"
1225
"pxor %%mm6,%%mm6\n"
1226
"pxor %%mm7,%%mm7\n"
1228
"movq 8(%0),%%mm1\n"
1231
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1234
SUM(%%mm4, %%mm5, %%mm0, %%mm1)
1236
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1241
"movq %%mm6,%%mm0\n"
1242
"psrlq $32, %%mm6\n"
1243
"paddw %%mm6,%%mm0\n"
1244
"movq %%mm0,%%mm6\n"
1245
"psrlq $16, %%mm0\n"
1246
"paddw %%mm6,%%mm0\n"
1248
: "+r" (pix), "=r"(tmp)
1249
: "r" ((long)line_size) , "m" (h)
1251
return tmp & 0xFFFF;
1255
static int vsad_intra16_mmx2(void *v, uint8_t * pix, uint8_t * dummy, int line_size, int h) {
1258
assert( (((int)pix) & 7) == 0);
1259
assert((line_size &7) ==0);
1261
#define SUM(in0, in1, out0, out1) \
1262
"movq (%0), " #out0 "\n"\
1263
"movq 8(%0), " #out1 "\n"\
1265
"psadbw " #out0 ", " #in0 "\n"\
1266
"psadbw " #out1 ", " #in1 "\n"\
1267
"paddw " #in1 ", " #in0 "\n"\
1268
"paddw " #in0 ", %%mm6\n"
1272
"pxor %%mm6,%%mm6\n"
1273
"pxor %%mm7,%%mm7\n"
1275
"movq 8(%0),%%mm1\n"
1278
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1281
SUM(%%mm4, %%mm5, %%mm0, %%mm1)
1283
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1289
: "+r" (pix), "=r"(tmp)
1290
: "r" ((long)line_size) , "m" (h)
1296
static int vsad16_mmx(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
1299
assert( (((int)pix1) & 7) == 0);
1300
assert( (((int)pix2) & 7) == 0);
1301
assert((line_size &7) ==0);
1303
#define SUM(in0, in1, out0, out1) \
1304
"movq (%0),%%mm2\n"\
1305
"movq (%1)," #out0 "\n"\
1306
"movq 8(%0),%%mm3\n"\
1307
"movq 8(%1)," #out1 "\n"\
1310
"psubb " #out0 ", %%mm2\n"\
1311
"psubb " #out1 ", %%mm3\n"\
1312
"pxor %%mm7, %%mm2\n"\
1313
"pxor %%mm7, %%mm3\n"\
1314
"movq %%mm2, " #out0 "\n"\
1315
"movq %%mm3, " #out1 "\n"\
1316
"psubusb " #in0 ", %%mm2\n"\
1317
"psubusb " #in1 ", %%mm3\n"\
1318
"psubusb " #out0 ", " #in0 "\n"\
1319
"psubusb " #out1 ", " #in1 "\n"\
1320
"por %%mm2, " #in0 "\n"\
1321
"por %%mm3, " #in1 "\n"\
1322
"movq " #in0 ", %%mm2\n"\
1323
"movq " #in1 ", %%mm3\n"\
1324
"punpcklbw %%mm7, " #in0 "\n"\
1325
"punpcklbw %%mm7, " #in1 "\n"\
1326
"punpckhbw %%mm7, %%mm2\n"\
1327
"punpckhbw %%mm7, %%mm3\n"\
1328
"paddw " #in1 ", " #in0 "\n"\
1329
"paddw %%mm3, %%mm2\n"\
1330
"paddw %%mm2, " #in0 "\n"\
1331
"paddw " #in0 ", %%mm6\n"
1336
"pxor %%mm6,%%mm6\n"
1337
"pcmpeqw %%mm7,%%mm7\n"
1338
"psllw $15, %%mm7\n"
1339
"packsswb %%mm7, %%mm7\n"
1342
"movq 8(%0),%%mm1\n"
1343
"movq 8(%1),%%mm3\n"
1347
"psubb %%mm2, %%mm0\n"
1348
"psubb %%mm3, %%mm1\n"
1349
"pxor %%mm7, %%mm0\n"
1350
"pxor %%mm7, %%mm1\n"
1351
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1354
SUM(%%mm4, %%mm5, %%mm0, %%mm1)
1356
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1361
"movq %%mm6,%%mm0\n"
1362
"psrlq $32, %%mm6\n"
1363
"paddw %%mm6,%%mm0\n"
1364
"movq %%mm0,%%mm6\n"
1365
"psrlq $16, %%mm0\n"
1366
"paddw %%mm6,%%mm0\n"
1368
: "+r" (pix1), "+r" (pix2), "=r"(tmp)
1369
: "r" ((long)line_size) , "m" (h)
1371
return tmp & 0x7FFF;
1375
static int vsad16_mmx2(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) {
1378
assert( (((int)pix1) & 7) == 0);
1379
assert( (((int)pix2) & 7) == 0);
1380
assert((line_size &7) ==0);
1382
#define SUM(in0, in1, out0, out1) \
1383
"movq (%0)," #out0 "\n"\
1384
"movq (%1),%%mm2\n"\
1385
"movq 8(%0)," #out1 "\n"\
1386
"movq 8(%1),%%mm3\n"\
1389
"psubb %%mm2, " #out0 "\n"\
1390
"psubb %%mm3, " #out1 "\n"\
1391
"pxor %%mm7, " #out0 "\n"\
1392
"pxor %%mm7, " #out1 "\n"\
1393
"psadbw " #out0 ", " #in0 "\n"\
1394
"psadbw " #out1 ", " #in1 "\n"\
1395
"paddw " #in1 ", " #in0 "\n"\
1396
"paddw " #in0 ", %%mm6\n"
1400
"pxor %%mm6,%%mm6\n"
1401
"pcmpeqw %%mm7,%%mm7\n"
1402
"psllw $15, %%mm7\n"
1403
"packsswb %%mm7, %%mm7\n"
1406
"movq 8(%0),%%mm1\n"
1407
"movq 8(%1),%%mm3\n"
1411
"psubb %%mm2, %%mm0\n"
1412
"psubb %%mm3, %%mm1\n"
1413
"pxor %%mm7, %%mm0\n"
1414
"pxor %%mm7, %%mm1\n"
1415
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1418
SUM(%%mm4, %%mm5, %%mm0, %%mm1)
1420
SUM(%%mm0, %%mm1, %%mm4, %%mm5)
1426
: "+r" (pix1), "+r" (pix2), "=r"(tmp)
1427
: "r" ((long)line_size) , "m" (h)
1433
static void diff_bytes_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
1437
"movq (%2, %0), %%mm0 \n\t"
1438
"movq (%1, %0), %%mm1 \n\t"
1439
"psubb %%mm0, %%mm1 \n\t"
1440
"movq %%mm1, (%3, %0) \n\t"
1441
"movq 8(%2, %0), %%mm0 \n\t"
1442
"movq 8(%1, %0), %%mm1 \n\t"
1443
"psubb %%mm0, %%mm1 \n\t"
1444
"movq %%mm1, 8(%3, %0) \n\t"
1449
: "r"(src1), "r"(src2), "r"(dst), "r"((long)w-15)
1452
dst[i+0] = src1[i+0]-src2[i+0];
1455
static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top){
1461
"movq -1(%1, %0), %%mm0 \n\t" // LT
1462
"movq (%1, %0), %%mm1 \n\t" // T
1463
"movq -1(%2, %0), %%mm2 \n\t" // L
1464
"movq (%2, %0), %%mm3 \n\t" // X
1465
"movq %%mm2, %%mm4 \n\t" // L
1466
"psubb %%mm0, %%mm2 \n\t"
1467
"paddb %%mm1, %%mm2 \n\t" // L + T - LT
1468
"movq %%mm4, %%mm5 \n\t" // L
1469
"pmaxub %%mm1, %%mm4 \n\t" // max(T, L)
1470
"pminub %%mm5, %%mm1 \n\t" // min(T, L)
1471
"pminub %%mm2, %%mm4 \n\t"
1472
"pmaxub %%mm1, %%mm4 \n\t"
1473
"psubb %%mm4, %%mm3 \n\t" // dst - pred
1474
"movq %%mm3, (%3, %0) \n\t"
1479
: "r"(src1), "r"(src2), "r"(dst), "r"((long)w)
1485
dst[0]= src2[0] - mid_pred(l, src1[0], (l + src1[0] - lt)&0xFF);
1487
*left_top= src1[w-1];
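
/* Scalar sketch (illustrative, mirrors mid_pred from the C code path) of the
 * median predictor that the pmaxub/pminub sequence above evaluates per byte:
 * pred = median(L, T, L + T - LT), computed as
 * max(min(max(L, T), L + T - LT), min(L, T)).
 */
static inline uint8_t median_pred_sketch(uint8_t l, uint8_t t, uint8_t lt)
{
    uint8_t g  = (uint8_t)(l + t - lt);      /* gradient, wraps mod 256 like paddb/psubb */
    uint8_t mx = l > t ? l : t;
    uint8_t mn = l < t ? l : t;
    uint8_t m  = mx < g ? mx : g;            /* min(max(L,T), g)      */
    return m > mn ? m : mn;                  /* max(..., min(L,T))    */
}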
1491
#define LBUTTERFLY2(a1,b1,a2,b2)\
1492
"paddw " #b1 ", " #a1 " \n\t"\
1493
"paddw " #b2 ", " #a2 " \n\t"\
1494
"paddw " #b1 ", " #b1 " \n\t"\
1495
"paddw " #b2 ", " #b2 " \n\t"\
1496
"psubw " #a1 ", " #b1 " \n\t"\
1497
"psubw " #a2 ", " #b2 " \n\t"
1500
LBUTTERFLY2(%%mm0, %%mm1, %%mm2, %%mm3)\
1501
LBUTTERFLY2(%%mm4, %%mm5, %%mm6, %%mm7)\
1502
LBUTTERFLY2(%%mm0, %%mm2, %%mm1, %%mm3)\
1503
LBUTTERFLY2(%%mm4, %%mm6, %%mm5, %%mm7)\
1504
LBUTTERFLY2(%%mm0, %%mm4, %%mm1, %%mm5)\
1505
LBUTTERFLY2(%%mm2, %%mm6, %%mm3, %%mm7)\
1508
"pxor " #z ", " #z " \n\t"\
1509
"pcmpgtw " #a ", " #z " \n\t"\
1510
"pxor " #z ", " #a " \n\t"\
1511
"psubw " #z ", " #a " \n\t"
1513
#define MMABS_SUM(a,z, sum)\
1514
"pxor " #z ", " #z " \n\t"\
1515
"pcmpgtw " #a ", " #z " \n\t"\
1516
"pxor " #z ", " #a " \n\t"\
1517
"psubw " #z ", " #a " \n\t"\
1518
"paddusw " #a ", " #sum " \n\t"
1520
#define MMABS_MMX2(a,z)\
1521
"pxor " #z ", " #z " \n\t"\
1522
"psubw " #a ", " #z " \n\t"\
1523
"pmaxsw " #z ", " #a " \n\t"
1525
#define MMABS_SUM_MMX2(a,z, sum)\
1526
"pxor " #z ", " #z " \n\t"\
1527
"psubw " #a ", " #z " \n\t"\
1528
"pmaxsw " #z ", " #a " \n\t"\
1529
"paddusw " #a ", " #sum " \n\t"
1531
#define TRANSPOSE4(a,b,c,d,t)\
1532
SBUTTERFLY(a,b,t,wd) /* a=aebf t=cgdh */\
1533
SBUTTERFLY(c,d,b,wd) /* c=imjn b=kolp */\
1534
SBUTTERFLY(a,c,d,dq) /* a=aeim d=bfjn */\
1535
SBUTTERFLY(t,b,c,dq) /* t=cgko c=dhlp */
1537
#define LOAD4(o, a, b, c, d)\
1538
"movq "#o"(%1), " #a " \n\t"\
1539
"movq "#o"+16(%1), " #b " \n\t"\
1540
"movq "#o"+32(%1), " #c " \n\t"\
1541
"movq "#o"+48(%1), " #d " \n\t"
1543
#define STORE4(o, a, b, c, d)\
1544
"movq "#a", "#o"(%1) \n\t"\
1545
"movq "#b", "#o"+16(%1) \n\t"\
1546
"movq "#c", "#o"+32(%1) \n\t"\
1547
"movq "#d", "#o"+48(%1) \n\t"\
1549
static int hadamard8_diff_mmx(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
1550
DECLARE_ALIGNED_8(uint64_t, temp[16]);
1555
diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);
1558
LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
1559
LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)
1563
"movq %%mm7, 112(%1) \n\t"
1565
TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
1566
STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)
1568
"movq 112(%1), %%mm7 \n\t"
1569
TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
1570
STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)
1572
LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
1573
LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
1577
"movq %%mm7, 120(%1) \n\t"
1579
TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
1580
STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)
1582
"movq 120(%1), %%mm7 \n\t"
1583
TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
1584
"movq %%mm7, %%mm5 \n\t"//FIXME remove
1585
"movq %%mm6, %%mm7 \n\t"
1586
"movq %%mm0, %%mm6 \n\t"
1587
// STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove
1589
LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
1590
// LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
1593
"movq %%mm7, 64(%1) \n\t"
1595
MMABS_SUM(%%mm1, %%mm7, %%mm0)
1596
MMABS_SUM(%%mm2, %%mm7, %%mm0)
1597
MMABS_SUM(%%mm3, %%mm7, %%mm0)
1598
MMABS_SUM(%%mm4, %%mm7, %%mm0)
1599
MMABS_SUM(%%mm5, %%mm7, %%mm0)
1600
MMABS_SUM(%%mm6, %%mm7, %%mm0)
1601
"movq 64(%1), %%mm1 \n\t"
1602
MMABS_SUM(%%mm1, %%mm7, %%mm0)
1603
"movq %%mm0, 64(%1) \n\t"
1605
LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
1606
LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)
1609
"movq %%mm7, (%1) \n\t"
1611
MMABS_SUM(%%mm1, %%mm7, %%mm0)
1612
MMABS_SUM(%%mm2, %%mm7, %%mm0)
1613
MMABS_SUM(%%mm3, %%mm7, %%mm0)
1614
MMABS_SUM(%%mm4, %%mm7, %%mm0)
1615
MMABS_SUM(%%mm5, %%mm7, %%mm0)
1616
MMABS_SUM(%%mm6, %%mm7, %%mm0)
1617
"movq (%1), %%mm1 \n\t"
1618
MMABS_SUM(%%mm1, %%mm7, %%mm0)
1619
"movq 64(%1), %%mm1 \n\t"
1620
MMABS_SUM(%%mm1, %%mm7, %%mm0)
1622
"movq %%mm0, %%mm1 \n\t"
1623
"psrlq $32, %%mm0 \n\t"
1624
"paddusw %%mm1, %%mm0 \n\t"
1625
"movq %%mm0, %%mm1 \n\t"
1626
"psrlq $16, %%mm0 \n\t"
1627
"paddusw %%mm1, %%mm0 \n\t"
1628
"movd %%mm0, %0 \n\t"
1636
static int hadamard8_diff_mmx2(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){
1637
DECLARE_ALIGNED_8(uint64_t, temp[16]);
1642
diff_pixels_mmx((DCTELEM*)temp, src1, src2, stride);
1645
LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
1646
LOAD4(64, %%mm4, %%mm5, %%mm6, %%mm7)
1650
"movq %%mm7, 112(%1) \n\t"
1652
TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
1653
STORE4(0 , %%mm0, %%mm3, %%mm7, %%mm2)
1655
"movq 112(%1), %%mm7 \n\t"
1656
TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
1657
STORE4(64, %%mm4, %%mm7, %%mm0, %%mm6)
1659
LOAD4(8 , %%mm0, %%mm1, %%mm2, %%mm3)
1660
LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
1664
"movq %%mm7, 120(%1) \n\t"
1666
TRANSPOSE4(%%mm0, %%mm1, %%mm2, %%mm3, %%mm7)
1667
STORE4(8 , %%mm0, %%mm3, %%mm7, %%mm2)
1669
"movq 120(%1), %%mm7 \n\t"
1670
TRANSPOSE4(%%mm4, %%mm5, %%mm6, %%mm7, %%mm0)
1671
"movq %%mm7, %%mm5 \n\t"//FIXME remove
1672
"movq %%mm6, %%mm7 \n\t"
1673
"movq %%mm0, %%mm6 \n\t"
1674
// STORE4(72, %%mm4, %%mm7, %%mm0, %%mm6) //FIXME remove
1676
LOAD4(64, %%mm0, %%mm1, %%mm2, %%mm3)
1677
// LOAD4(72, %%mm4, %%mm5, %%mm6, %%mm7)
1680
"movq %%mm7, 64(%1) \n\t"
1681
MMABS_MMX2(%%mm0, %%mm7)
1682
MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
1683
MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
1684
MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
1685
MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
1686
MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
1687
MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
1688
"movq 64(%1), %%mm1 \n\t"
1689
MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
1690
"movq %%mm0, 64(%1) \n\t"
1692
LOAD4(0 , %%mm0, %%mm1, %%mm2, %%mm3)
1693
LOAD4(8 , %%mm4, %%mm5, %%mm6, %%mm7)
1696
"movq %%mm7, (%1) \n\t"
1697
MMABS_MMX2(%%mm0, %%mm7)
1698
MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
1699
MMABS_SUM_MMX2(%%mm2, %%mm7, %%mm0)
1700
MMABS_SUM_MMX2(%%mm3, %%mm7, %%mm0)
1701
MMABS_SUM_MMX2(%%mm4, %%mm7, %%mm0)
1702
MMABS_SUM_MMX2(%%mm5, %%mm7, %%mm0)
1703
MMABS_SUM_MMX2(%%mm6, %%mm7, %%mm0)
1704
"movq (%1), %%mm1 \n\t"
1705
MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
1706
"movq 64(%1), %%mm1 \n\t"
1707
MMABS_SUM_MMX2(%%mm1, %%mm7, %%mm0)
1709
"pshufw $0x0E, %%mm0, %%mm1 \n\t"
1710
"paddusw %%mm1, %%mm0 \n\t"
1711
"pshufw $0x01, %%mm0, %%mm1 \n\t"
1712
"paddusw %%mm1, %%mm0 \n\t"
1713
"movd %%mm0, %0 \n\t"
1722
WARPER8_16_SQ(hadamard8_diff_mmx, hadamard8_diff16_mmx)
1723
WARPER8_16_SQ(hadamard8_diff_mmx2, hadamard8_diff16_mmx2)
1724
#endif //CONFIG_ENCODERS
1726
#define put_no_rnd_pixels8_mmx(a,b,c,d) put_pixels8_mmx(a,b,c,d)
1727
#define put_no_rnd_pixels16_mmx(a,b,c,d) put_pixels16_mmx(a,b,c,d)
1729
#define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
1730
"paddw " #m4 ", " #m3 " \n\t" /* x1 */\
1731
"movq "MANGLE(ff_pw_20)", %%mm4 \n\t" /* 20 */\
1732
"pmullw " #m3 ", %%mm4 \n\t" /* 20x1 */\
1733
"movq "#in7", " #m3 " \n\t" /* d */\
1734
"movq "#in0", %%mm5 \n\t" /* D */\
1735
"paddw " #m3 ", %%mm5 \n\t" /* x4 */\
1736
"psubw %%mm5, %%mm4 \n\t" /* 20x1 - x4 */\
1737
"movq "#in1", %%mm5 \n\t" /* C */\
1738
"movq "#in2", %%mm6 \n\t" /* B */\
1739
"paddw " #m6 ", %%mm5 \n\t" /* x3 */\
1740
"paddw " #m5 ", %%mm6 \n\t" /* x2 */\
1741
"paddw %%mm6, %%mm6 \n\t" /* 2x2 */\
1742
"psubw %%mm6, %%mm5 \n\t" /* -2x2 + x3 */\
1743
"pmullw "MANGLE(ff_pw_3)", %%mm5 \n\t" /* -6x2 + 3x3 */\
1744
"paddw " #rnd ", %%mm4 \n\t" /* x2 */\
1745
"paddw %%mm4, %%mm5 \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
1746
"psraw $5, %%mm5 \n\t"\
1747
"packuswb %%mm5, %%mm5 \n\t"\
1748
OP(%%mm5, out, %%mm7, d)
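
/* Scalar form (a sketch, not part of the original) of the value QPEL_V_LOW
 * produces per output pixel for the MPEG-4 qpel (-1,3,-6,20,20,-6,3,-1)/32
 * filter, where x1..x4 are the sums of the symmetric tap pairs:
 *   out = clip_uint8((20*x1 - 6*x2 + 3*x3 - x4 + rnd) >> 5)
 */
static inline uint8_t qpel_v_low_sketch(int x1, int x2, int x3, int x4, int rnd)
{
    int v = (20 * x1 - 6 * x2 + 3 * x3 - x4 + rnd) >> 5;
    return (uint8_t)(v < 0 ? 0 : v > 255 ? 255 : v);
}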
1750
#define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
1751
static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1755
"pxor %%mm7, %%mm7 \n\t"\
1757
"movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
1758
"movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
1759
"movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
1760
"punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
1761
"punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
1762
"pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
1763
"pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
1764
"movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
1765
"movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
1766
"psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
1767
"psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
1768
"psllq $24, %%mm4 \n\t" /* 000ABCDE */\
1769
"punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
1770
"punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
1771
"punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
1772
"paddw %%mm3, %%mm5 \n\t" /* b */\
1773
"paddw %%mm2, %%mm6 \n\t" /* c */\
1774
"paddw %%mm5, %%mm5 \n\t" /* 2b */\
1775
"psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
1776
"pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
1777
"pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
1778
"paddw %%mm4, %%mm0 \n\t" /* a */\
1779
"paddw %%mm1, %%mm5 \n\t" /* d */\
1780
"pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
1781
"psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
1782
"paddw %6, %%mm6 \n\t"\
1783
"paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
1784
"psraw $5, %%mm0 \n\t"\
1785
"movq %%mm0, %5 \n\t"\
1786
/* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
1788
"movq 5(%0), %%mm0 \n\t" /* FGHIJKLM */\
1789
"movq %%mm0, %%mm5 \n\t" /* FGHIJKLM */\
1790
"movq %%mm0, %%mm6 \n\t" /* FGHIJKLM */\
1791
"psrlq $8, %%mm0 \n\t" /* GHIJKLM0 */\
1792
"psrlq $16, %%mm5 \n\t" /* HIJKLM00 */\
1793
"punpcklbw %%mm7, %%mm0 \n\t" /* 0G0H0I0J */\
1794
"punpcklbw %%mm7, %%mm5 \n\t" /* 0H0I0J0K */\
1795
"paddw %%mm0, %%mm2 \n\t" /* b */\
1796
"paddw %%mm5, %%mm3 \n\t" /* c */\
1797
"paddw %%mm2, %%mm2 \n\t" /* 2b */\
1798
"psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
1799
"movq %%mm6, %%mm2 \n\t" /* FGHIJKLM */\
1800
"psrlq $24, %%mm6 \n\t" /* IJKLM000 */\
1801
"punpcklbw %%mm7, %%mm2 \n\t" /* 0F0G0H0I */\
1802
"punpcklbw %%mm7, %%mm6 \n\t" /* 0I0J0K0L */\
1803
"pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
1804
"paddw %%mm2, %%mm1 \n\t" /* a */\
1805
"paddw %%mm6, %%mm4 \n\t" /* d */\
1806
"pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
1807
"psubw %%mm4, %%mm3 \n\t" /* - 6b +3c - d */\
1808
"paddw %6, %%mm1 \n\t"\
1809
"paddw %%mm1, %%mm3 \n\t" /* 20a - 6b +3c - d */\
1810
"psraw $5, %%mm3 \n\t"\
1811
"movq %5, %%mm1 \n\t"\
1812
"packuswb %%mm3, %%mm1 \n\t"\
1813
OP_MMX2(%%mm1, (%1),%%mm4, q)\
1814
/* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
1816
"movq 9(%0), %%mm1 \n\t" /* JKLMNOPQ */\
1817
"movq %%mm1, %%mm4 \n\t" /* JKLMNOPQ */\
1818
"movq %%mm1, %%mm3 \n\t" /* JKLMNOPQ */\
1819
"psrlq $8, %%mm1 \n\t" /* KLMNOPQ0 */\
1820
"psrlq $16, %%mm4 \n\t" /* LMNOPQ00 */\
1821
"punpcklbw %%mm7, %%mm1 \n\t" /* 0K0L0M0N */\
1822
"punpcklbw %%mm7, %%mm4 \n\t" /* 0L0M0N0O */\
1823
"paddw %%mm1, %%mm5 \n\t" /* b */\
1824
"paddw %%mm4, %%mm0 \n\t" /* c */\
1825
"paddw %%mm5, %%mm5 \n\t" /* 2b */\
1826
"psubw %%mm5, %%mm0 \n\t" /* c - 2b */\
1827
"movq %%mm3, %%mm5 \n\t" /* JKLMNOPQ */\
1828
"psrlq $24, %%mm3 \n\t" /* MNOPQ000 */\
1829
"pmullw "MANGLE(ff_pw_3)", %%mm0 \n\t" /* 3c - 6b */\
1830
"punpcklbw %%mm7, %%mm3 \n\t" /* 0M0N0O0P */\
1831
"paddw %%mm3, %%mm2 \n\t" /* d */\
1832
"psubw %%mm2, %%mm0 \n\t" /* -6b + 3c - d */\
1833
"movq %%mm5, %%mm2 \n\t" /* JKLMNOPQ */\
1834
"punpcklbw %%mm7, %%mm2 \n\t" /* 0J0K0L0M */\
1835
"punpckhbw %%mm7, %%mm5 \n\t" /* 0N0O0P0Q */\
1836
"paddw %%mm2, %%mm6 \n\t" /* a */\
1837
"pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
1838
"paddw %6, %%mm0 \n\t"\
1839
"paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
1840
"psraw $5, %%mm0 \n\t"\
1841
/* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
1843
"paddw %%mm5, %%mm3 \n\t" /* a */\
1844
"pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0O0P0Q0Q */\
1845
"paddw %%mm4, %%mm6 \n\t" /* b */\
1846
"pshufw $0xBE, %%mm5, %%mm4 \n\t" /* 0P0Q0Q0P */\
1847
"pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0Q0Q0P0O */\
1848
"paddw %%mm1, %%mm4 \n\t" /* c */\
1849
"paddw %%mm2, %%mm5 \n\t" /* d */\
1850
"paddw %%mm6, %%mm6 \n\t" /* 2b */\
1851
"psubw %%mm6, %%mm4 \n\t" /* c - 2b */\
1852
"pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
1853
"pmullw "MANGLE(ff_pw_3)", %%mm4 \n\t" /* 3c - 6b */\
1854
"psubw %%mm5, %%mm3 \n\t" /* -6b + 3c - d */\
1855
"paddw %6, %%mm4 \n\t"\
1856
"paddw %%mm3, %%mm4 \n\t" /* 20a - 6b + 3c - d */\
1857
"psraw $5, %%mm4 \n\t"\
1858
"packuswb %%mm4, %%mm0 \n\t"\
1859
OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
1865
: "+a"(src), "+c"(dst), "+m"(h)\
1866
: "d"((long)srcStride), "S"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
1871
static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1874
/* quick HACK, XXX FIXME MUST be optimized */\
1877
temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
1878
temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
1879
temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
1880
temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
1881
temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
1882
temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
1883
temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
1884
temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
1885
temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
1886
temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
1887
temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
1888
temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
1889
temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
1890
temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
1891
temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
1892
temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
1894
"movq (%0), %%mm0 \n\t"\
1895
"movq 8(%0), %%mm1 \n\t"\
1896
"paddw %2, %%mm0 \n\t"\
1897
"paddw %2, %%mm1 \n\t"\
1898
"psraw $5, %%mm0 \n\t"\
1899
"psraw $5, %%mm1 \n\t"\
1900
"packuswb %%mm1, %%mm0 \n\t"\
1901
OP_3DNOW(%%mm0, (%1), %%mm1, q)\
1902
"movq 16(%0), %%mm0 \n\t"\
1903
"movq 24(%0), %%mm1 \n\t"\
1904
"paddw %2, %%mm0 \n\t"\
1905
"paddw %2, %%mm1 \n\t"\
1906
"psraw $5, %%mm0 \n\t"\
1907
"psraw $5, %%mm1 \n\t"\
1908
"packuswb %%mm1, %%mm0 \n\t"\
1909
OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
1910
:: "r"(temp), "r"(dst), "m"(ROUNDER)\
1918
static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1922
"pxor %%mm7, %%mm7 \n\t"\
1924
"movq (%0), %%mm0 \n\t" /* ABCDEFGH */\
1925
"movq %%mm0, %%mm1 \n\t" /* ABCDEFGH */\
1926
"movq %%mm0, %%mm2 \n\t" /* ABCDEFGH */\
1927
"punpcklbw %%mm7, %%mm0 \n\t" /* 0A0B0C0D */\
1928
"punpckhbw %%mm7, %%mm1 \n\t" /* 0E0F0G0H */\
1929
"pshufw $0x90, %%mm0, %%mm5 \n\t" /* 0A0A0B0C */\
1930
"pshufw $0x41, %%mm0, %%mm6 \n\t" /* 0B0A0A0B */\
1931
"movq %%mm2, %%mm3 \n\t" /* ABCDEFGH */\
1932
"movq %%mm2, %%mm4 \n\t" /* ABCDEFGH */\
1933
"psllq $8, %%mm2 \n\t" /* 0ABCDEFG */\
1934
"psllq $16, %%mm3 \n\t" /* 00ABCDEF */\
1935
"psllq $24, %%mm4 \n\t" /* 000ABCDE */\
1936
"punpckhbw %%mm7, %%mm2 \n\t" /* 0D0E0F0G */\
1937
"punpckhbw %%mm7, %%mm3 \n\t" /* 0C0D0E0F */\
1938
"punpckhbw %%mm7, %%mm4 \n\t" /* 0B0C0D0E */\
1939
"paddw %%mm3, %%mm5 \n\t" /* b */\
1940
"paddw %%mm2, %%mm6 \n\t" /* c */\
1941
"paddw %%mm5, %%mm5 \n\t" /* 2b */\
1942
"psubw %%mm5, %%mm6 \n\t" /* c - 2b */\
1943
"pshufw $0x06, %%mm0, %%mm5 \n\t" /* 0C0B0A0A */\
1944
"pmullw "MANGLE(ff_pw_3)", %%mm6 \n\t" /* 3c - 6b */\
1945
"paddw %%mm4, %%mm0 \n\t" /* a */\
1946
"paddw %%mm1, %%mm5 \n\t" /* d */\
1947
"pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
1948
"psubw %%mm5, %%mm0 \n\t" /* 20a - d */\
1949
"paddw %6, %%mm6 \n\t"\
1950
"paddw %%mm6, %%mm0 \n\t" /* 20a - 6b + 3c - d */\
1951
"psraw $5, %%mm0 \n\t"\
1952
/* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
1954
"movd 5(%0), %%mm5 \n\t" /* FGHI */\
1955
"punpcklbw %%mm7, %%mm5 \n\t" /* 0F0G0H0I */\
1956
"pshufw $0xF9, %%mm5, %%mm6 \n\t" /* 0G0H0I0I */\
1957
"paddw %%mm5, %%mm1 \n\t" /* a */\
1958
"paddw %%mm6, %%mm2 \n\t" /* b */\
1959
"pshufw $0xBE, %%mm5, %%mm6 \n\t" /* 0H0I0I0H */\
1960
"pshufw $0x6F, %%mm5, %%mm5 \n\t" /* 0I0I0H0G */\
1961
"paddw %%mm6, %%mm3 \n\t" /* c */\
1962
"paddw %%mm5, %%mm4 \n\t" /* d */\
1963
"paddw %%mm2, %%mm2 \n\t" /* 2b */\
1964
"psubw %%mm2, %%mm3 \n\t" /* c - 2b */\
1965
"pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
1966
"pmullw "MANGLE(ff_pw_3)", %%mm3 \n\t" /* 3c - 6b */\
1967
"psubw %%mm4, %%mm3 \n\t" /* -6b + 3c - d */\
1968
"paddw %6, %%mm1 \n\t"\
1969
"paddw %%mm1, %%mm3 \n\t" /* 20a - 6b + 3c - d */\
1970
"psraw $5, %%mm3 \n\t"\
1971
"packuswb %%mm3, %%mm0 \n\t"\
1972
OP_MMX2(%%mm0, (%1), %%mm4, q)\
1978
: "+a"(src), "+c"(dst), "+m"(h)\
1979
: "S"((long)srcStride), "D"((long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
1984
static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
1987
/* quick HACK, XXX FIXME MUST be optimized */\
1990
temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
1991
temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
1992
temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
1993
temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
1994
temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
1995
temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
1996
temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
1997
temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
1999
"movq (%0), %%mm0 \n\t"\
2000
"movq 8(%0), %%mm1 \n\t"\
2001
"paddw %2, %%mm0 \n\t"\
2002
"paddw %2, %%mm1 \n\t"\
2003
"psraw $5, %%mm0 \n\t"\
2004
"psraw $5, %%mm1 \n\t"\
2005
"packuswb %%mm1, %%mm0 \n\t"\
2006
OP_3DNOW(%%mm0, (%1), %%mm1, q)\
2007
:: "r"(temp), "r"(dst), "m"(ROUNDER)\
2015
#define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
2017
static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2018
uint64_t temp[17*4];\
2019
uint64_t *temp_ptr= temp;\
2024
"pxor %%mm7, %%mm7 \n\t"\
2026
"movq (%0), %%mm0 \n\t"\
2027
"movq (%0), %%mm1 \n\t"\
2028
"movq 8(%0), %%mm2 \n\t"\
2029
"movq 8(%0), %%mm3 \n\t"\
2030
"punpcklbw %%mm7, %%mm0 \n\t"\
2031
"punpckhbw %%mm7, %%mm1 \n\t"\
2032
"punpcklbw %%mm7, %%mm2 \n\t"\
2033
"punpckhbw %%mm7, %%mm3 \n\t"\
2034
"movq %%mm0, (%1) \n\t"\
2035
"movq %%mm1, 17*8(%1) \n\t"\
2036
"movq %%mm2, 2*17*8(%1) \n\t"\
2037
"movq %%mm3, 3*17*8(%1) \n\t"\
2042
: "+r" (src), "+r" (temp_ptr), "+r"(count)\
2043
: "r" ((long)srcStride)\
2050
/*FIXME reorder for speed */\
2052
/*"pxor %%mm7, %%mm7 \n\t"*/\
2054
"movq (%0), %%mm0 \n\t"\
2055
"movq 8(%0), %%mm1 \n\t"\
2056
"movq 16(%0), %%mm2 \n\t"\
2057
"movq 24(%0), %%mm3 \n\t"\
2058
QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
2059
QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
2061
QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
2063
QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
2065
QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
2066
QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
2068
QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
2069
QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
2071
QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
2072
QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
2074
QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
2075
QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
2077
QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
2079
QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
2081
QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
2082
QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
2084
"add $136, %0 \n\t"\
2089
: "+r"(temp_ptr), "+r"(dst), "+g"(count)\
2090
: "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(long)dstStride)\
2095
static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
2096
uint64_t temp[9*2];\
2097
uint64_t *temp_ptr= temp;\
2102
"pxor %%mm7, %%mm7 \n\t"\
2104
"movq (%0), %%mm0 \n\t"\
2105
"movq (%0), %%mm1 \n\t"\
2106
"punpcklbw %%mm7, %%mm0 \n\t"\
2107
"punpckhbw %%mm7, %%mm1 \n\t"\
2108
"movq %%mm0, (%1) \n\t"\
2109
"movq %%mm1, 9*8(%1) \n\t"\
2114
: "+r" (src), "+r" (temp_ptr), "+r"(count)\
2115
: "r" ((long)srcStride)\
2122
/*FIXME reorder for speed */\
2124
/*"pxor %%mm7, %%mm7 \n\t"*/\
2126
"movq (%0), %%mm0 \n\t"\
2127
"movq 8(%0), %%mm1 \n\t"\
2128
"movq 16(%0), %%mm2 \n\t"\
2129
"movq 24(%0), %%mm3 \n\t"\
2130
QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0), 8(%0), (%0), 32(%0), (%1), OP)\
2131
QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 8(%0), (%0), (%0), 40(%0), (%1, %3), OP)\
2133
QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, (%0), (%0), 8(%0), 48(%0), (%1), OP)\
2135
QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, (%0), 8(%0), 16(%0), 56(%0), (%1, %3), OP)\
2137
QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
2139
QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
2141
QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
2142
QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
2149
: "+r"(temp_ptr), "+r"(dst), "+g"(count)\
2150
: "r"((long)dstStride), "r"(2*(long)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(long)dstStride)\
2155
static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
2156
OPNAME ## pixels8_mmx(dst, src, stride, 8);\
2159
static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2161
uint8_t * const half= (uint8_t*)temp;\
2162
put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
2163
OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
2166
static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2167
OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
2170
static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2172
uint8_t * const half= (uint8_t*)temp;\
2173
put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
2174
OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
2177
static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2179
uint8_t * const half= (uint8_t*)temp;\
2180
put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
2181
OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
2184
static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2185
OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
2188
static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2190
uint8_t * const half= (uint8_t*)temp;\
2191
put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
2192
OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
2194
static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2195
uint64_t half[8 + 9];\
2196
uint8_t * const halfH= ((uint8_t*)half) + 64;\
2197
uint8_t * const halfHV= ((uint8_t*)half);\
2198
put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2199
put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
2200
put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2201
OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
2203
static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2204
uint64_t half[8 + 9];\
2205
uint8_t * const halfH= ((uint8_t*)half) + 64;\
2206
uint8_t * const halfHV= ((uint8_t*)half);\
2207
put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2208
put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
2209
put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2210
OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
2212
static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2213
uint64_t half[8 + 9];\
2214
uint8_t * const halfH= ((uint8_t*)half) + 64;\
2215
uint8_t * const halfHV= ((uint8_t*)half);\
2216
put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2217
put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
2218
put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2219
OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
2221
static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2222
uint64_t half[8 + 9];\
2223
uint8_t * const halfH= ((uint8_t*)half) + 64;\
2224
uint8_t * const halfHV= ((uint8_t*)half);\
2225
put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2226
put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
2227
put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2228
OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
2230
static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2231
uint64_t half[8 + 9];\
2232
uint8_t * const halfH= ((uint8_t*)half) + 64;\
2233
uint8_t * const halfHV= ((uint8_t*)half);\
2234
put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2235
put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2236
OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
2238
static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2239
uint64_t half[8 + 9];\
2240
uint8_t * const halfH= ((uint8_t*)half) + 64;\
2241
uint8_t * const halfHV= ((uint8_t*)half);\
2242
put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2243
put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
2244
OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
2246
static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2247
uint64_t half[8 + 9];\
2248
uint8_t * const halfH= ((uint8_t*)half);\
2249
put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2250
put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
2251
OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
2253
static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2254
uint64_t half[8 + 9];\
2255
uint8_t * const halfH= ((uint8_t*)half);\
2256
put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2257
put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
2258
OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
2260
static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2262
uint8_t * const halfH= ((uint8_t*)half);\
2263
put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
2264
OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
2266
static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
2267
OPNAME ## pixels16_mmx(dst, src, stride, 16);\
2270
static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2272
uint8_t * const half= (uint8_t*)temp;\
2273
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
2274
OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
2277
static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2278
OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
2281
static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2283
uint8_t * const half= (uint8_t*)temp;\
2284
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
2285
OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
2288
static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2290
uint8_t * const half= (uint8_t*)temp;\
2291
put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
2292
OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
2295
static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2296
OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
2299
static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2301
uint8_t * const half= (uint8_t*)temp;\
2302
put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
2303
OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
2305
static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2306
uint64_t half[16*2 + 17*2];\
2307
uint8_t * const halfH= ((uint8_t*)half) + 256;\
2308
uint8_t * const halfHV= ((uint8_t*)half);\
2309
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2310
put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
2311
put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2312
OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
2314
static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2315
uint64_t half[16*2 + 17*2];\
2316
uint8_t * const halfH= ((uint8_t*)half) + 256;\
2317
uint8_t * const halfHV= ((uint8_t*)half);\
2318
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2319
put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
2320
put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2321
OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
2323
static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2324
uint64_t half[16*2 + 17*2];\
2325
uint8_t * const halfH= ((uint8_t*)half) + 256;\
2326
uint8_t * const halfHV= ((uint8_t*)half);\
2327
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2328
put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
2329
put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2330
OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
2332
static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2333
uint64_t half[16*2 + 17*2];\
2334
uint8_t * const halfH= ((uint8_t*)half) + 256;\
2335
uint8_t * const halfHV= ((uint8_t*)half);\
2336
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2337
put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
2338
put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2339
OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
2341
static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2342
uint64_t half[16*2 + 17*2];\
2343
uint8_t * const halfH= ((uint8_t*)half) + 256;\
2344
uint8_t * const halfHV= ((uint8_t*)half);\
2345
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2346
put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2347
OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
2349
static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2350
uint64_t half[16*2 + 17*2];\
2351
uint8_t * const halfH= ((uint8_t*)half) + 256;\
2352
uint8_t * const halfHV= ((uint8_t*)half);\
2353
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2354
put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
2355
OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
2357
static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2358
uint64_t half[17*2];\
2359
uint8_t * const halfH= ((uint8_t*)half);\
2360
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2361
put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
2362
OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
2364
static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2365
uint64_t half[17*2];\
2366
uint8_t * const halfH= ((uint8_t*)half);\
2367
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2368
put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
2369
OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
2371
static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
2372
uint64_t half[17*2];\
2373
uint8_t * const halfH= ((uint8_t*)half);\
2374
put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
2375
OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
2378
#define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b " \n\t"
2379
#define AVG_3DNOW_OP(a,b,temp, size) \
2380
"mov" #size " " #b ", " #temp " \n\t"\
2381
"pavgusb " #temp ", " #a " \n\t"\
2382
"mov" #size " " #a ", " #b " \n\t"
2383
#define AVG_MMX2_OP(a,b,temp, size) \
2384
"mov" #size " " #b ", " #temp " \n\t"\
2385
"pavgb " #temp ", " #a " \n\t"\
2386
"mov" #size " " #a ", " #b " \n\t"
2388
QPEL_BASE(put_ , ff_pw_16, _ , PUT_OP, PUT_OP)
2389
QPEL_BASE(avg_ , ff_pw_16, _ , AVG_MMX2_OP, AVG_3DNOW_OP)
2390
QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
2391
QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, 3dnow)
2392
QPEL_OP(avg_ , ff_pw_16, _ , AVG_3DNOW_OP, 3dnow)
2393
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
2394
QPEL_OP(put_ , ff_pw_16, _ , PUT_OP, mmx2)
2395
QPEL_OP(avg_ , ff_pw_16, _ , AVG_MMX2_OP, mmx2)
2396
QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
2399
static void just_return() { return; }
2402
#define SET_QPEL_FUNC(postfix1, postfix2) \
2403
c->put_ ## postfix1 = put_ ## postfix2;\
2404
c->put_no_rnd_ ## postfix1 = put_no_rnd_ ## postfix2;\
2405
c->avg_ ## postfix1 = avg_ ## postfix2;
2407
static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
2408
int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
2410
const int ix = ox>>(16+shift);
2411
const int iy = oy>>(16+shift);
2412
const int oxs = ox>>4;
2413
const int oys = oy>>4;
2414
const int dxxs = dxx>>4;
2415
const int dxys = dxy>>4;
2416
const int dyxs = dyx>>4;
2417
const int dyys = dyy>>4;
2418
const uint16_t r4[4] = {r,r,r,r};
2419
const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
2420
const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
2421
const uint64_t shift2 = 2*shift;
2422
uint8_t edge_buf[(h+1)*stride];
2425
const int dxw = (dxx-(1<<(16+shift)))*(w-1);
2426
const int dyh = (dyy-(1<<(16+shift)))*(h-1);
2427
const int dxh = dxy*(h-1);
2428
const int dyw = dyx*(w-1);
2429
if( // non-constant fullpel offset (3% of blocks)
2430
(ox^(ox+dxw) | ox^(ox+dxh) | ox^(ox+dxw+dxh) |
2431
oy^(oy+dyw) | oy^(oy+dyh) | oy^(oy+dyw+dyh)) >> (16+shift)
2432
// uses more than 16 bits of subpel mv (only at huge resolution)
2433
|| (dxx|dxy|dyx|dyy)&15 )
2435
//FIXME could still use mmx for some of the rows
2436
ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
2440
src += ix + iy*stride;
2441
if( (unsigned)ix >= width-w ||
2442
(unsigned)iy >= height-h )
2444
ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
2449
"movd %0, %%mm6 \n\t"
2450
"pxor %%mm7, %%mm7 \n\t"
2451
"punpcklwd %%mm6, %%mm6 \n\t"
2452
"punpcklwd %%mm6, %%mm6 \n\t"
2456
for(x=0; x<w; x+=4){
2457
uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
2458
oxs - dxys + dxxs*(x+1),
2459
oxs - dxys + dxxs*(x+2),
2460
oxs - dxys + dxxs*(x+3) };
2461
uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
2462
oys - dyys + dyxs*(x+1),
2463
oys - dyys + dyxs*(x+2),
2464
oys - dyys + dyxs*(x+3) };
2468
"movq %0, %%mm4 \n\t"
2469
"movq %1, %%mm5 \n\t"
2470
"paddw %2, %%mm4 \n\t"
2471
"paddw %3, %%mm5 \n\t"
2472
"movq %%mm4, %0 \n\t"
2473
"movq %%mm5, %1 \n\t"
2474
"psrlw $12, %%mm4 \n\t"
2475
"psrlw $12, %%mm5 \n\t"
2476
: "+m"(*dx4), "+m"(*dy4)
2477
: "m"(*dxy4), "m"(*dyy4)
2481
"movq %%mm6, %%mm2 \n\t"
2482
"movq %%mm6, %%mm1 \n\t"
2483
"psubw %%mm4, %%mm2 \n\t"
2484
"psubw %%mm5, %%mm1 \n\t"
2485
"movq %%mm2, %%mm0 \n\t"
2486
"movq %%mm4, %%mm3 \n\t"
2487
"pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
2488
"pmullw %%mm5, %%mm3 \n\t" // dx*dy
2489
"pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
2490
"pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
2492
"movd %4, %%mm5 \n\t"
2493
"movd %3, %%mm4 \n\t"
2494
"punpcklbw %%mm7, %%mm5 \n\t"
2495
"punpcklbw %%mm7, %%mm4 \n\t"
2496
"pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
2497
"pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
2499
"movd %2, %%mm5 \n\t"
2500
"movd %1, %%mm4 \n\t"
2501
"punpcklbw %%mm7, %%mm5 \n\t"
2502
"punpcklbw %%mm7, %%mm4 \n\t"
2503
"pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
2504
"pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
2505
"paddw %5, %%mm1 \n\t"
2506
"paddw %%mm3, %%mm2 \n\t"
2507
"paddw %%mm1, %%mm0 \n\t"
2508
"paddw %%mm2, %%mm0 \n\t"
2510
"psrlw %6, %%mm0 \n\t"
2511
"packuswb %%mm0, %%mm0 \n\t"
2512
"movd %%mm0, %0 \n\t"
2514
: "=m"(dst[x+y*stride])
2515
: "m"(src[0]), "m"(src[1]),
2516
"m"(src[stride]), "m"(src[stride+1]),
2517
"m"(*r4), "m"(shift2)
2525
static int try_8x8basis_mmx(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale){
2528
assert(ABS(scale) < 256);
2529
scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
2532
"pcmpeqw %%mm6, %%mm6 \n\t" // -1w
2533
"psrlw $15, %%mm6 \n\t" // 1w
2534
"pxor %%mm7, %%mm7 \n\t"
2535
"movd %4, %%mm5 \n\t"
2536
"punpcklwd %%mm5, %%mm5 \n\t"
2537
"punpcklwd %%mm5, %%mm5 \n\t"
2539
"movq (%1, %0), %%mm0 \n\t"
2540
"movq 8(%1, %0), %%mm1 \n\t"
2541
"pmulhw %%mm5, %%mm0 \n\t"
2542
"pmulhw %%mm5, %%mm1 \n\t"
2543
"paddw %%mm6, %%mm0 \n\t"
2544
"paddw %%mm6, %%mm1 \n\t"
2545
"psraw $1, %%mm0 \n\t"
2546
"psraw $1, %%mm1 \n\t"
2547
"paddw (%2, %0), %%mm0 \n\t"
2548
"paddw 8(%2, %0), %%mm1 \n\t"
2549
"psraw $6, %%mm0 \n\t"
2550
"psraw $6, %%mm1 \n\t"
2551
"pmullw (%3, %0), %%mm0 \n\t"
2552
"pmullw 8(%3, %0), %%mm1 \n\t"
2553
"pmaddwd %%mm0, %%mm0 \n\t"
2554
"pmaddwd %%mm1, %%mm1 \n\t"
2555
"paddd %%mm1, %%mm0 \n\t"
2556
"psrld $4, %%mm0 \n\t"
2557
"paddd %%mm0, %%mm7 \n\t"
2559
"cmp $128, %0 \n\t" //FIXME optimize & bench
2561
"movq %%mm7, %%mm6 \n\t"
2562
"psrlq $32, %%mm7 \n\t"
2563
"paddd %%mm6, %%mm7 \n\t"
2564
"psrld $2, %%mm7 \n\t"
2565
"movd %%mm7, %0 \n\t"
2568
: "r"(basis), "r"(rem), "r"(weight), "g"(scale)
2573
static void add_8x8basis_mmx(int16_t rem[64], int16_t basis[64], int scale){
2576
if(ABS(scale) < 256){
2577
scale<<= 16 + 1 - BASIS_SHIFT + RECON_SHIFT;
2579
"pcmpeqw %%mm6, %%mm6 \n\t" // -1w
2580
"psrlw $15, %%mm6 \n\t" // 1w
2581
"movd %3, %%mm5 \n\t"
2582
"punpcklwd %%mm5, %%mm5 \n\t"
2583
"punpcklwd %%mm5, %%mm5 \n\t"
2585
"movq (%1, %0), %%mm0 \n\t"
2586
"movq 8(%1, %0), %%mm1 \n\t"
2587
"pmulhw %%mm5, %%mm0 \n\t"
2588
"pmulhw %%mm5, %%mm1 \n\t"
2589
"paddw %%mm6, %%mm0 \n\t"
2590
"paddw %%mm6, %%mm1 \n\t"
2591
"psraw $1, %%mm0 \n\t"
2592
"psraw $1, %%mm1 \n\t"
2593
"paddw (%2, %0), %%mm0 \n\t"
2594
"paddw 8(%2, %0), %%mm1 \n\t"
2595
"movq %%mm0, (%2, %0) \n\t"
2596
"movq %%mm1, 8(%2, %0) \n\t"
2598
"cmp $128, %0 \n\t" //FIXME optimize & bench
2602
: "r"(basis), "r"(rem), "g"(scale)
2605
for(i=0; i<8*8; i++){
2606
rem[i] += (basis[i]*scale + (1<<(BASIS_SHIFT - RECON_SHIFT-1)))>>(BASIS_SHIFT - RECON_SHIFT);
2611
#define PREFETCH(name, op) \
2612
void name(void *mem, int stride, int h){\
2613
const uint8_t *p= mem;\
2615
asm volatile(#op" %0" :: "m"(*p));\
2619
PREFETCH(prefetch_mmx2, prefetcht0)
2620
PREFETCH(prefetch_3dnow, prefetch)
2623
#include "h264dsp_mmx.c"
2626
void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx);
2628
void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
2629
put_pixels8_mmx(dst, src, stride, 8);
2631
void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
2632
avg_pixels8_mmx(dst, src, stride, 8);
2634
void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
2635
put_pixels16_mmx(dst, src, stride, 16);
2637
void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
2638
avg_pixels16_mmx(dst, src, stride, 16);
2641
/* external functions, from idct_mmx.c */
2642
void ff_mmx_idct(DCTELEM *block);
2643
void ff_mmxext_idct(DCTELEM *block);
2645
void ff_vp3_idct_sse2(int16_t *input_data);
2646
void ff_vp3_idct_mmx(int16_t *data);
2647
void ff_vp3_dsp_init_mmx(void);
2649
/* XXX: those functions should be suppressed ASAP when all IDCTs are
2651
static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
2653
ff_mmx_idct (block);
2654
put_pixels_clamped_mmx(block, dest, line_size);
2656
static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
2658
ff_mmx_idct (block);
2659
add_pixels_clamped_mmx(block, dest, line_size);
2661
static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
2663
ff_mmxext_idct (block);
2664
put_pixels_clamped_mmx(block, dest, line_size);
2666
static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
2668
ff_mmxext_idct (block);
2669
add_pixels_clamped_mmx(block, dest, line_size);
2671
static void ff_vp3_idct_put_sse2(uint8_t *dest, int line_size, DCTELEM *block)
2673
ff_vp3_idct_sse2(block);
2674
put_signed_pixels_clamped_mmx(block, dest, line_size);
2676
static void ff_vp3_idct_add_sse2(uint8_t *dest, int line_size, DCTELEM *block)
2678
ff_vp3_idct_sse2(block);
2679
add_pixels_clamped_mmx(block, dest, line_size);
2681
static void ff_vp3_idct_put_mmx(uint8_t *dest, int line_size, DCTELEM *block)
2683
ff_vp3_idct_mmx(block);
2684
put_signed_pixels_clamped_mmx(block, dest, line_size);
2686
static void ff_vp3_idct_add_mmx(uint8_t *dest, int line_size, DCTELEM *block)
2688
ff_vp3_idct_mmx(block);
2689
add_pixels_clamped_mmx(block, dest, line_size);
2692
static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
2694
ff_idct_xvid_mmx (block);
2695
put_pixels_clamped_mmx(block, dest, line_size);
2697
static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
2699
ff_idct_xvid_mmx (block);
2700
add_pixels_clamped_mmx(block, dest, line_size);
2702
static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
2704
ff_idct_xvid_mmx2 (block);
2705
put_pixels_clamped_mmx(block, dest, line_size);
2707
static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
2709
ff_idct_xvid_mmx2 (block);
2710
add_pixels_clamped_mmx(block, dest, line_size);
2714
static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
2717
asm volatile("pxor %%mm7, %%mm7":);
2718
for(i=0; i<blocksize; i+=2) {
2720
"movq %0, %%mm0 \n\t"
2721
"movq %1, %%mm1 \n\t"
2722
"movq %%mm0, %%mm2 \n\t"
2723
"movq %%mm1, %%mm3 \n\t"
2724
"pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
2725
"pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
2726
"pslld $31, %%mm2 \n\t" // keep only the sign bit
2727
"pxor %%mm2, %%mm1 \n\t"
2728
"movq %%mm3, %%mm4 \n\t"
2729
"pand %%mm1, %%mm3 \n\t"
2730
"pandn %%mm1, %%mm4 \n\t"
2731
"pfadd %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
2732
"pfsub %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
2733
"movq %%mm3, %1 \n\t"
2734
"movq %%mm0, %0 \n\t"
2735
:"+m"(mag[i]), "+m"(ang[i])
2739
asm volatile("emms");
2741
static void vorbis_inverse_coupling_sse2(float *mag, float *ang, int blocksize)
2744
for(i=0; i<blocksize; i+=4) {
2746
"movaps %0, %%xmm0 \n\t"
2747
"movaps %1, %%xmm1 \n\t"
2748
"pxor %%xmm2, %%xmm2 \n\t"
2749
"pxor %%xmm3, %%xmm3 \n\t"
2750
"cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
2751
"cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
2752
"pslld $31, %%xmm2 \n\t" // keep only the sign bit
2753
"pxor %%xmm2, %%xmm1 \n\t"
2754
"movaps %%xmm3, %%xmm4 \n\t"
2755
"pand %%xmm1, %%xmm3 \n\t"
2756
"pandn %%xmm1, %%xmm4 \n\t"
2757
"addps %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
2758
"subps %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
2759
"movaps %%xmm3, %1 \n\t"
2760
"movaps %%xmm0, %0 \n\t"
2761
:"+m"(mag[i]), "+m"(ang[i])
2767
#ifdef CONFIG_SNOW_ENCODER
2768
extern void ff_snow_horizontal_compose97i_sse2(DWTELEM *b, int width);
2769
extern void ff_snow_horizontal_compose97i_mmx(DWTELEM *b, int width);
2770
extern void ff_snow_vertical_compose97i_sse2(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width);
2771
extern void ff_snow_vertical_compose97i_mmx(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width);
2772
extern void ff_snow_inner_add_yblock_sse2(uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
2773
int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
2774
extern void ff_snow_inner_add_yblock_mmx(uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
2775
int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8);
2778
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
2780
mm_flags = mm_support();
2782
if (avctx->dsp_mask) {
2783
if (avctx->dsp_mask & FF_MM_FORCE)
2784
mm_flags |= (avctx->dsp_mask & 0xffff);
2786
mm_flags &= ~(avctx->dsp_mask & 0xffff);
2790
av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
2791
if (mm_flags & MM_MMX)
2792
av_log(avctx, AV_LOG_INFO, " mmx");
2793
if (mm_flags & MM_MMXEXT)
2794
av_log(avctx, AV_LOG_INFO, " mmxext");
2795
if (mm_flags & MM_3DNOW)
2796
av_log(avctx, AV_LOG_INFO, " 3dnow");
2797
if (mm_flags & MM_SSE)
2798
av_log(avctx, AV_LOG_INFO, " sse");
2799
if (mm_flags & MM_SSE2)
2800
av_log(avctx, AV_LOG_INFO, " sse2");
2801
av_log(avctx, AV_LOG_INFO, "\n");
2804
if (mm_flags & MM_MMX) {
2805
const int idct_algo= avctx->idct_algo;
2807
#ifdef CONFIG_ENCODERS
2808
const int dct_algo = avctx->dct_algo;
2809
if(dct_algo==FF_DCT_AUTO || dct_algo==FF_DCT_MMX){
2810
if(mm_flags & MM_SSE2){
2811
c->fdct = ff_fdct_sse2;
2812
}else if(mm_flags & MM_MMXEXT){
2813
c->fdct = ff_fdct_mmx2;
2815
c->fdct = ff_fdct_mmx;
2818
#endif //CONFIG_ENCODERS
2819
if(avctx->lowres==0){
2820
if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
2821
c->idct_put= ff_simple_idct_put_mmx;
2822
c->idct_add= ff_simple_idct_add_mmx;
2823
c->idct = ff_simple_idct_mmx;
2824
c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
2825
}else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
2826
if(mm_flags & MM_MMXEXT){
2827
c->idct_put= ff_libmpeg2mmx2_idct_put;
2828
c->idct_add= ff_libmpeg2mmx2_idct_add;
2829
c->idct = ff_mmxext_idct;
2831
c->idct_put= ff_libmpeg2mmx_idct_put;
2832
c->idct_add= ff_libmpeg2mmx_idct_add;
2833
c->idct = ff_mmx_idct;
2835
c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
2837
}else if(idct_algo==FF_IDCT_VP3){
2838
if(mm_flags & MM_SSE2){
2839
c->idct_put= ff_vp3_idct_put_sse2;
2840
c->idct_add= ff_vp3_idct_add_sse2;
2841
c->idct = ff_vp3_idct_sse2;
2842
c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
2844
ff_vp3_dsp_init_mmx();
2845
c->idct_put= ff_vp3_idct_put_mmx;
2846
c->idct_add= ff_vp3_idct_add_mmx;
2847
c->idct = ff_vp3_idct_mmx;
2848
c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
2851
}else if(idct_algo==FF_IDCT_CAVS){
2852
c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
2854
}else if(idct_algo==FF_IDCT_XVIDMMX){
2855
if(mm_flags & MM_MMXEXT){
2856
c->idct_put= ff_idct_xvid_mmx2_put;
2857
c->idct_add= ff_idct_xvid_mmx2_add;
2858
c->idct = ff_idct_xvid_mmx2;
2860
c->idct_put= ff_idct_xvid_mmx_put;
2861
c->idct_add= ff_idct_xvid_mmx_add;
2862
c->idct = ff_idct_xvid_mmx;
2868
#ifdef CONFIG_ENCODERS
2869
c->get_pixels = get_pixels_mmx;
2870
c->diff_pixels = diff_pixels_mmx;
2871
#endif //CONFIG_ENCODERS
2872
c->put_pixels_clamped = put_pixels_clamped_mmx;
2873
c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
2874
c->add_pixels_clamped = add_pixels_clamped_mmx;
2875
c->clear_blocks = clear_blocks_mmx;
2876
#ifdef CONFIG_ENCODERS
2877
c->pix_sum = pix_sum16_mmx;
2878
#endif //CONFIG_ENCODERS
2880
c->put_pixels_tab[0][0] = put_pixels16_mmx;
2881
c->put_pixels_tab[0][1] = put_pixels16_x2_mmx;
2882
c->put_pixels_tab[0][2] = put_pixels16_y2_mmx;
2883
c->put_pixels_tab[0][3] = put_pixels16_xy2_mmx;
2885
c->put_no_rnd_pixels_tab[0][0] = put_pixels16_mmx;
2886
c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx;
2887
c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx;
2888
c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_mmx;
2890
c->avg_pixels_tab[0][0] = avg_pixels16_mmx;
2891
c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx;
2892
c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx;
2893
c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx;
2895
c->avg_no_rnd_pixels_tab[0][0] = avg_no_rnd_pixels16_mmx;
2896
c->avg_no_rnd_pixels_tab[0][1] = avg_no_rnd_pixels16_x2_mmx;
2897
c->avg_no_rnd_pixels_tab[0][2] = avg_no_rnd_pixels16_y2_mmx;
2898
c->avg_no_rnd_pixels_tab[0][3] = avg_no_rnd_pixels16_xy2_mmx;
2900
c->put_pixels_tab[1][0] = put_pixels8_mmx;
2901
c->put_pixels_tab[1][1] = put_pixels8_x2_mmx;
2902
c->put_pixels_tab[1][2] = put_pixels8_y2_mmx;
2903
c->put_pixels_tab[1][3] = put_pixels8_xy2_mmx;
2905
c->put_no_rnd_pixels_tab[1][0] = put_pixels8_mmx;
2906
c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx;
2907
c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx;
2908
c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_mmx;
2910
c->avg_pixels_tab[1][0] = avg_pixels8_mmx;
2911
c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx;
2912
c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx;
2913
c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx;
2915
c->avg_no_rnd_pixels_tab[1][0] = avg_no_rnd_pixels8_mmx;
2916
c->avg_no_rnd_pixels_tab[1][1] = avg_no_rnd_pixels8_x2_mmx;
2917
c->avg_no_rnd_pixels_tab[1][2] = avg_no_rnd_pixels8_y2_mmx;
2918
c->avg_no_rnd_pixels_tab[1][3] = avg_no_rnd_pixels8_xy2_mmx;
2922
c->add_bytes= add_bytes_mmx;
2923
#ifdef CONFIG_ENCODERS
2924
c->diff_bytes= diff_bytes_mmx;
2926
c->hadamard8_diff[0]= hadamard8_diff16_mmx;
2927
c->hadamard8_diff[1]= hadamard8_diff_mmx;
2929
c->pix_norm1 = pix_norm1_mmx;
2930
c->sse[0] = (mm_flags & MM_SSE2) ? sse16_sse2 : sse16_mmx;
2931
c->sse[1] = sse8_mmx;
2932
c->vsad[4]= vsad_intra16_mmx;
2934
c->nsse[0] = nsse16_mmx;
2935
c->nsse[1] = nsse8_mmx;
2936
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
2937
c->vsad[0] = vsad16_mmx;
2940
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
2941
c->try_8x8basis= try_8x8basis_mmx;
2943
c->add_8x8basis= add_8x8basis_mmx;
2945
#endif //CONFIG_ENCODERS
2947
c->h263_v_loop_filter= h263_v_loop_filter_mmx;
2948
c->h263_h_loop_filter= h263_h_loop_filter_mmx;
2949
c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx;
2950
c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
2952
c->h264_idct_dc_add=
2953
c->h264_idct_add= ff_h264_idct_add_mmx;
2954
c->h264_idct8_dc_add=
2955
c->h264_idct8_add= ff_h264_idct8_add_mmx;
2957
if (mm_flags & MM_MMXEXT) {
2958
c->prefetch = prefetch_mmx2;
2960
c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
2961
c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
2963
c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
2964
c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
2965
c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
2967
c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
2968
c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
2970
c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
2971
c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
2972
c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
2974
#ifdef CONFIG_ENCODERS
2975
c->hadamard8_diff[0]= hadamard8_diff16_mmx2;
2976
c->hadamard8_diff[1]= hadamard8_diff_mmx2;
2977
c->vsad[4]= vsad_intra16_mmx2;
2978
#endif //CONFIG_ENCODERS
2980
c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
2981
c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
2983
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
2984
c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
2985
c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
2986
c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
2987
c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
2988
c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
2989
c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
2990
#ifdef CONFIG_ENCODERS
2991
c->vsad[0] = vsad16_mmx2;
2992
#endif //CONFIG_ENCODERS
2996
SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_mmx2)
2997
SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_mmx2)
2998
SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_mmx2)
2999
SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_mmx2)
3000
SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_mmx2)
3001
SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_mmx2)
3002
SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_mmx2)
3003
SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_mmx2)
3004
SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_mmx2)
3005
SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_mmx2)
3006
SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_mmx2)
3007
SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_mmx2)
3008
SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_mmx2)
3009
SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_mmx2)
3010
SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_mmx2)
3011
SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_mmx2)
3012
SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_mmx2)
3013
SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_mmx2)
3014
SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_mmx2)
3015
SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_mmx2)
3016
SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_mmx2)
3017
SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_mmx2)
3018
SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_mmx2)
3019
SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_mmx2)
3020
SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_mmx2)
3021
SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_mmx2)
3022
SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_mmx2)
3023
SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_mmx2)
3024
SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_mmx2)
3025
SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_mmx2)
3026
SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_mmx2)
3027
SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_mmx2)
3031
#define dspfunc(PFX, IDX, NUM) \
3032
c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_mmx2; \
3033
c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_mmx2; \
3034
c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_mmx2; \
3035
c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_mmx2; \
3036
c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_mmx2; \
3037
c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_mmx2; \
3038
c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_mmx2; \
3039
c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_mmx2; \
3040
c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_mmx2; \
3041
c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_mmx2; \
3042
c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_mmx2; \
3043
c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_mmx2; \
3044
c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_mmx2; \
3045
c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_mmx2; \
3046
c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_mmx2; \
3047
c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_mmx2
3049
dspfunc(put_h264_qpel, 0, 16);
3050
dspfunc(put_h264_qpel, 1, 8);
3051
dspfunc(put_h264_qpel, 2, 4);
3052
dspfunc(avg_h264_qpel, 0, 16);
3053
dspfunc(avg_h264_qpel, 1, 8);
3054
dspfunc(avg_h264_qpel, 2, 4);
3057
c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2;
3058
c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
3059
c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
3060
c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;
3061
c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
3062
c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
3063
c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
3064
c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
3065
c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
3066
c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
3068
c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
3069
c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
3070
c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
3071
c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
3072
c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
3073
c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
3074
c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
3075
c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;
3077
c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
3078
c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
3079
c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
3080
c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
3081
c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
3082
c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
3083
c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
3084
c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;
3086
#ifdef CONFIG_CAVS_DECODER
3087
ff_cavsdsp_init_mmx2(c, avctx);
3090
#ifdef CONFIG_ENCODERS
3091
c->sub_hfyu_median_prediction= sub_hfyu_median_prediction_mmx2;
3092
#endif //CONFIG_ENCODERS
3093
} else if (mm_flags & MM_3DNOW) {
3094
c->prefetch = prefetch_3dnow;
3096
c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
3097
c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
3099
c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
3100
c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
3101
c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
3103
c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
3104
c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
3106
c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
3107
c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
3108
c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
3110
if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
3111
c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
3112
c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
3113
c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
3114
c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
3115
c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
3116
c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
3119
SET_QPEL_FUNC(qpel_pixels_tab[0][ 0], qpel16_mc00_3dnow)
3120
SET_QPEL_FUNC(qpel_pixels_tab[0][ 1], qpel16_mc10_3dnow)
3121
SET_QPEL_FUNC(qpel_pixels_tab[0][ 2], qpel16_mc20_3dnow)
3122
SET_QPEL_FUNC(qpel_pixels_tab[0][ 3], qpel16_mc30_3dnow)
3123
SET_QPEL_FUNC(qpel_pixels_tab[0][ 4], qpel16_mc01_3dnow)
3124
SET_QPEL_FUNC(qpel_pixels_tab[0][ 5], qpel16_mc11_3dnow)
3125
SET_QPEL_FUNC(qpel_pixels_tab[0][ 6], qpel16_mc21_3dnow)
3126
SET_QPEL_FUNC(qpel_pixels_tab[0][ 7], qpel16_mc31_3dnow)
3127
SET_QPEL_FUNC(qpel_pixels_tab[0][ 8], qpel16_mc02_3dnow)
3128
SET_QPEL_FUNC(qpel_pixels_tab[0][ 9], qpel16_mc12_3dnow)
3129
SET_QPEL_FUNC(qpel_pixels_tab[0][10], qpel16_mc22_3dnow)
3130
SET_QPEL_FUNC(qpel_pixels_tab[0][11], qpel16_mc32_3dnow)
3131
SET_QPEL_FUNC(qpel_pixels_tab[0][12], qpel16_mc03_3dnow)
3132
SET_QPEL_FUNC(qpel_pixels_tab[0][13], qpel16_mc13_3dnow)
3133
SET_QPEL_FUNC(qpel_pixels_tab[0][14], qpel16_mc23_3dnow)
3134
SET_QPEL_FUNC(qpel_pixels_tab[0][15], qpel16_mc33_3dnow)
3135
SET_QPEL_FUNC(qpel_pixels_tab[1][ 0], qpel8_mc00_3dnow)
3136
SET_QPEL_FUNC(qpel_pixels_tab[1][ 1], qpel8_mc10_3dnow)
3137
SET_QPEL_FUNC(qpel_pixels_tab[1][ 2], qpel8_mc20_3dnow)
3138
SET_QPEL_FUNC(qpel_pixels_tab[1][ 3], qpel8_mc30_3dnow)
3139
SET_QPEL_FUNC(qpel_pixels_tab[1][ 4], qpel8_mc01_3dnow)
3140
SET_QPEL_FUNC(qpel_pixels_tab[1][ 5], qpel8_mc11_3dnow)
3141
SET_QPEL_FUNC(qpel_pixels_tab[1][ 6], qpel8_mc21_3dnow)
3142
SET_QPEL_FUNC(qpel_pixels_tab[1][ 7], qpel8_mc31_3dnow)
3143
SET_QPEL_FUNC(qpel_pixels_tab[1][ 8], qpel8_mc02_3dnow)
3144
SET_QPEL_FUNC(qpel_pixels_tab[1][ 9], qpel8_mc12_3dnow)
3145
SET_QPEL_FUNC(qpel_pixels_tab[1][10], qpel8_mc22_3dnow)
3146
SET_QPEL_FUNC(qpel_pixels_tab[1][11], qpel8_mc32_3dnow)
3147
SET_QPEL_FUNC(qpel_pixels_tab[1][12], qpel8_mc03_3dnow)
3148
SET_QPEL_FUNC(qpel_pixels_tab[1][13], qpel8_mc13_3dnow)
3149
SET_QPEL_FUNC(qpel_pixels_tab[1][14], qpel8_mc23_3dnow)
3150
SET_QPEL_FUNC(qpel_pixels_tab[1][15], qpel8_mc33_3dnow)
3152
#define dspfunc(PFX, IDX, NUM) \
3153
c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_3dnow; \
3154
c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_3dnow; \
3155
c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_3dnow; \
3156
c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_3dnow; \
3157
c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_3dnow; \
3158
c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_3dnow; \
3159
c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_3dnow; \
3160
c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_3dnow; \
3161
c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_3dnow; \
3162
c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_3dnow; \
3163
c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_3dnow; \
3164
c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_3dnow; \
3165
c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_3dnow; \
3166
c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_3dnow; \
3167
c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_3dnow; \
3168
c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_3dnow
3170
dspfunc(put_h264_qpel, 0, 16);
3171
dspfunc(put_h264_qpel, 1, 8);
3172
dspfunc(put_h264_qpel, 2, 4);
3173
dspfunc(avg_h264_qpel, 0, 16);
3174
dspfunc(avg_h264_qpel, 1, 8);
3175
dspfunc(avg_h264_qpel, 2, 4);
3177
c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow;
3178
c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
3181
#ifdef CONFIG_SNOW_ENCODER
3182
if(mm_flags & MM_SSE2){
3183
c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2;
3184
c->vertical_compose97i = ff_snow_vertical_compose97i_sse2;
3185
c->inner_add_yblock = ff_snow_inner_add_yblock_sse2;
3188
c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx;
3189
c->vertical_compose97i = ff_snow_vertical_compose97i_mmx;
3190
c->inner_add_yblock = ff_snow_inner_add_yblock_mmx;
3194
if(mm_flags & MM_SSE2)
3195
c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse2;
3196
else if(mm_flags & MM_3DNOW)
3197
c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
3200
#ifdef CONFIG_ENCODERS
3201
dsputil_init_pix_mmx(c, avctx);
3202
#endif //CONFIG_ENCODERS
3204
// for speed testing
3205
get_pixels = just_return;
3206
put_pixels_clamped = just_return;
3207
add_pixels_clamped = just_return;
3209
pix_abs16x16 = just_return;
3210
pix_abs16x16_x2 = just_return;
3211
pix_abs16x16_y2 = just_return;
3212
pix_abs16x16_xy2 = just_return;
3214
put_pixels_tab[0] = just_return;
3215
put_pixels_tab[1] = just_return;
3216
put_pixels_tab[2] = just_return;
3217
put_pixels_tab[3] = just_return;
3219
put_no_rnd_pixels_tab[0] = just_return;
3220
put_no_rnd_pixels_tab[1] = just_return;
3221
put_no_rnd_pixels_tab[2] = just_return;
3222
put_no_rnd_pixels_tab[3] = just_return;
3224
avg_pixels_tab[0] = just_return;
3225
avg_pixels_tab[1] = just_return;
3226
avg_pixels_tab[2] = just_return;
3227
avg_pixels_tab[3] = just_return;
3229
avg_no_rnd_pixels_tab[0] = just_return;
3230
avg_no_rnd_pixels_tab[1] = just_return;
3231
avg_no_rnd_pixels_tab[2] = just_return;
3232
avg_no_rnd_pixels_tab[3] = just_return;
3234
//av_fdct = just_return;
3235
//ff_idct = just_return;