/*
 * VC-1 and WMV3 - DSP functions MMX-optimized
 * Copyright (c) 2007 Christophe GISQUET <christophe.gisquet@free.fr>
 *
 * Permission is hereby granted, free of charge, to any person
 * obtaining a copy of this software and associated documentation
 * files (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use,
 * copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following
 * conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
 * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
 * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "libavutil/x86_cpu.h"
#include "libavcodec/dsputil.h"
#include "dsputil_mmx.h"

#define OP_PUT(S,D)
#define OP_AVG(S,D) "pavgb " #S ", " #D " \n\t"
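
/* OP_PUT expands to nothing, so the TRANSFER_* macros below perform a plain
 * store; OP_AVG first averages the packed result with the bytes already at
 * the destination (pavgb rounds as (a+b+1)>>1), which is what the avg_
 * motion compensation variants need. */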

/** Add the rounder from mm7 to mm3/mm4 and shift the results down by SHIFT */
#define NORMALIZE_MMX(SHIFT)                                    \
     "paddw     %%mm7, %%mm3           \n\t" /* +bias-r */      \
     "paddw     %%mm7, %%mm4           \n\t" /* +bias-r */      \
     "psraw     "SHIFT", %%mm3         \n\t"                    \
     "psraw     "SHIFT", %%mm4         \n\t"

#define TRANSFER_DO_PACK(OP)                    \
     "packuswb  %%mm4, %%mm3           \n\t"    \
     OP((%2), %%mm3)                            \
     "movq      %%mm3, (%2)            \n\t"

#define TRANSFER_DONT_PACK(OP)                  \
     OP(0(%2), %%mm3)                           \
     OP(8(%2), %%mm4)                           \
     "movq      %%mm3, 0(%2)           \n\t"    \
     "movq      %%mm4, 8(%2)           \n\t"

/** @see MSPEL_FILTER13_CORE for use as UNPACK macro */
#define DO_UNPACK(reg)  "punpcklbw %%mm0, " reg "\n\t"
#define DONT_UNPACK(reg)

/** Load the rounder (32-r or 8-r) and broadcast it to all four words of mm7 */
#define LOAD_ROUNDER_MMX(ROUND)                 \
     "movd      "ROUND", %%mm7         \n\t"    \
     "punpcklwd %%mm7, %%mm7           \n\t"    \
     "punpckldq %%mm7, %%mm7           \n\t"

#define SHIFT2_LINE(OFF, R0,R1,R2,R3)           \
    "paddw     %%mm"#R2", %%mm"#R1"    \n\t"    \
    "movd      (%0,%3), %%mm"#R0"      \n\t"    \
    "pmullw    %%mm6, %%mm"#R1"        \n\t"    \
    "punpcklbw %%mm0, %%mm"#R0"        \n\t"    \
    "movd      (%0,%2), %%mm"#R3"      \n\t"    \
    "psubw     %%mm"#R0", %%mm"#R1"    \n\t"    \
    "punpcklbw %%mm0, %%mm"#R3"        \n\t"    \
    "paddw     %%mm7, %%mm"#R1"        \n\t"    \
    "psubw     %%mm"#R3", %%mm"#R1"    \n\t"    \
    "psraw     %4, %%mm"#R1"           \n\t"    \
    "movq      %%mm"#R1", "#OFF"(%1)   \n\t"    \
    "add       %2, %0                  \n\t"

DECLARE_ALIGNED(16, const uint64_t, ff_pw_9) = 0x0009000900090009ULL;

/** Sacrificing mm6 makes it possible to pipeline loads from src */
static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
                                       const uint8_t *src, x86_reg stride,
                                       int rnd, int64_t shift)
{
    __asm__ volatile(
        "mov       $3, %%"REG_c"           \n\t"
        LOAD_ROUNDER_MMX("%5")
        "movq      "MANGLE(ff_pw_9)", %%mm6 \n\t"
        "1:                                \n\t"
        "movd      (%0), %%mm2             \n\t"
        "add       %2, %0                  \n\t"
        "movd      (%0), %%mm3             \n\t"
        "punpcklbw %%mm0, %%mm2            \n\t"
        "punpcklbw %%mm0, %%mm3            \n\t"
        SHIFT2_LINE(  0, 1, 2, 3, 4)
        SHIFT2_LINE( 24, 2, 3, 4, 1)
        SHIFT2_LINE( 48, 3, 4, 1, 2)
        SHIFT2_LINE( 72, 4, 1, 2, 3)
        SHIFT2_LINE( 96, 1, 2, 3, 4)
        SHIFT2_LINE(120, 2, 3, 4, 1)
        SHIFT2_LINE(144, 3, 4, 1, 2)
        SHIFT2_LINE(168, 4, 1, 2, 3)
        "sub       %6, %0                  \n\t"
        "add       $8, %1                  \n\t"
        "dec       %%"REG_c"               \n\t"
        "jnz 1b                            \n\t"
        : "+r"(src), "+r"(dst)
        : "r"(stride), "r"(-2*stride),
          "m"(shift), "m"(rnd), "r"(9*stride-4)
        : "%"REG_c, "memory"
    );
}

/**
 * Data is already unpacked, so some operations can directly be made from
 * memory.
 */
#define VC1_HOR_16b_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_hor_16b_shift2_mmx(uint8_t *dst, x86_reg stride,\
                                             const int16_t *src, int rnd)\
{\
    int h = 8;\
\
    src -= 1;\
    rnd -= (-1+9+9-1)*1024; /* Add -1024 bias */\
    __asm__ volatile(\
        LOAD_ROUNDER_MMX("%4")\
        "movq      "MANGLE(ff_pw_128)", %%mm6\n\t"\
        "movq      "MANGLE(ff_pw_9)", %%mm5 \n\t"\
        "1:                                \n\t"\
        "movq      2*0+0(%1), %%mm1        \n\t"\
        "movq      2*0+8(%1), %%mm2        \n\t"\
        "movq      2*1+0(%1), %%mm3        \n\t"\
        "movq      2*1+8(%1), %%mm4        \n\t"\
        "paddw     2*3+0(%1), %%mm1        \n\t"\
        "paddw     2*3+8(%1), %%mm2        \n\t"\
        "paddw     2*2+0(%1), %%mm3        \n\t"\
        "paddw     2*2+8(%1), %%mm4        \n\t"\
        "pmullw    %%mm5, %%mm3            \n\t"\
        "pmullw    %%mm5, %%mm4            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t"\
        "psubw     %%mm2, %%mm4            \n\t"\
        NORMALIZE_MMX("$7")\
        /* Remove bias */\
        "paddw     %%mm6, %%mm3            \n\t"\
        "paddw     %%mm6, %%mm4            \n\t"\
        TRANSFER_DO_PACK(OP)\
        "add       $24, %1                 \n\t"\
        "add       %3, %2                  \n\t"\
        "dec       %0                      \n\t"\
        "jnz 1b                            \n\t"\
        : "+r"(h), "+r" (src), "+r" (dst)\
        : "r"(stride), "m"(rnd)\
        : "memory"\
    );\
}

VC1_HOR_16b_SHIFT2(OP_PUT, put_)
VC1_HOR_16b_SHIFT2(OP_AVG, avg_)
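
/* Scalar sketch of the horizontal pass above, applied to the 16-bit
 * intermediate t[] produced by a vertical pass (taps (-1,9,9,-1), sum 16):
 *
 *     dst[x] = av_clip_uint8((9*(t[x] + t[x+1]) - (t[x-1] + t[x+2])
 *                             + rnd) >> 7);
 *
 * modulo the bias bookkeeping done through rnd and ff_pw_128. */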

/**
 * Purely vertical or horizontal 1/2 shift interpolation.
 * Sacrifice mm6 for the *9 factor.
 */
#define VC1_SHIFT2(OP, OPNAME)\
static void OPNAME ## vc1_shift2_mmx(uint8_t *dst, const uint8_t *src,\
                                     x86_reg stride, int rnd, x86_reg offset)\
{\
    rnd = 8-rnd;\
    __asm__ volatile(\
        "mov       $8, %%"REG_c"           \n\t"\
        LOAD_ROUNDER_MMX("%5")\
        "movq      "MANGLE(ff_pw_9)", %%mm6\n\t"\
        "1:                                \n\t"\
        "movd      0(%0   ), %%mm3         \n\t"\
        "movd      4(%0   ), %%mm4         \n\t"\
        "movd      0(%0,%2), %%mm1         \n\t"\
        "movd      4(%0,%2), %%mm2         \n\t"\
        "add       %2, %0                  \n\t"\
        "punpcklbw %%mm0, %%mm3            \n\t"\
        "punpcklbw %%mm0, %%mm4            \n\t"\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "paddw     %%mm1, %%mm3            \n\t"\
        "paddw     %%mm2, %%mm4            \n\t"\
        "movd      0(%0,%3), %%mm1         \n\t"\
        "movd      4(%0,%3), %%mm2         \n\t"\
        "pmullw    %%mm6, %%mm3            \n\t" /* 0,9,9,0 */\
        "pmullw    %%mm6, %%mm4            \n\t" /* 0,9,9,0 */\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t" /* -1,9,9,0 */\
        "psubw     %%mm2, %%mm4            \n\t" /* -1,9,9,0 */\
        "movd      0(%0,%2), %%mm1         \n\t"\
        "movd      4(%0,%2), %%mm2         \n\t"\
        "punpcklbw %%mm0, %%mm1            \n\t"\
        "punpcklbw %%mm0, %%mm2            \n\t"\
        "psubw     %%mm1, %%mm3            \n\t" /* -1,9,9,-1 */\
        "psubw     %%mm2, %%mm4            \n\t" /* -1,9,9,-1 */\
        NORMALIZE_MMX("$4")\
        "packuswb  %%mm4, %%mm3            \n\t"\
        OP((%1), %%mm3)\
        "movq      %%mm3, (%1)             \n\t"\
        "add       %6, %0                  \n\t"\
        "add       %4, %1                  \n\t"\
        "dec       %%"REG_c"               \n\t"\
        "jnz 1b                            \n\t"\
        : "+r"(src), "+r"(dst)\
        : "r"(offset), "r"(-2*offset), "g"(stride), "m"(rnd),\
          "r"(stride-offset)\
        : "%"REG_c, "memory"\
    );\
}

VC1_SHIFT2(OP_PUT, put_)
VC1_SHIFT2(OP_AVG, avg_)
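
/* Both instantiations apply the same (-1,9,9,-1)/16 half-pel filter; the
 * direction is chosen by 'offset' (1 = horizontal, stride = vertical).
 * Per output pixel, roughly:
 *
 *     dst[x] = av_clip_uint8((-src[x-o] + 9*src[x] + 9*src[x+o]
 *                             - src[x+2*o] + 8 - rnd) >> 4);
 */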

/**
 * Filter coefficients made global to allow access by all 1 or 3 quarter shift
 * interpolation functions.
 */
DECLARE_ASM_CONST(16, uint64_t, ff_pw_53) = 0x0035003500350035ULL;
DECLARE_ASM_CONST(16, uint64_t, ff_pw_18) = 0x0012001200120012ULL;
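
/* Together with the *3 (pmullw ff_pw_3) and *4 (psllw $2) factors computed
 * inline, these constants form the two VC-1 bicubic tap sets used below:
 * (-4, 53, 18, -3)/64 for the 1/4 shift and the mirrored (-3, 18, 53, -4)/64
 * for the 3/4 shift. */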

/**
 * Core of the 1/4 and 3/4 shift bicubic interpolation.
 *
 * @param UNPACK  Macro unpacking arguments from 8 to 16 bits (can be empty).
 * @param MOVQ    "movd 1" (packed 8-bit) or "movq 2" (already unpacked
 *                16-bit); the trailing figure scales the tap offsets.
 * @param A1      Address of 1st tap (beware of unpacked/packed).
 * @param A2      Address of 2nd tap
 * @param A3      Address of 3rd tap
 * @param A4      Address of 4th tap
 */
#define MSPEL_FILTER13_CORE(UNPACK, MOVQ, A1, A2, A3, A4)       \
     MOVQ "*0+"A1", %%mm1       \n\t"                           \
     MOVQ "*4+"A1", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    "MANGLE(ff_pw_3)", %%mm1\n\t"                   \
     "pmullw    "MANGLE(ff_pw_3)", %%mm2\n\t"                   \
     MOVQ "*0+"A2", %%mm3       \n\t"                           \
     MOVQ "*4+"A2", %%mm4       \n\t"                           \
     UNPACK("%%mm3")                                            \
     UNPACK("%%mm4")                                            \
     "pmullw    %%mm6, %%mm3    \n\t" /* *18 */                 \
     "pmullw    %%mm6, %%mm4    \n\t" /* *18 */                 \
     "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */               \
     "psubw     %%mm2, %%mm4    \n\t" /* 18,-3 */               \
     MOVQ "*0+"A4", %%mm1       \n\t"                           \
     MOVQ "*4+"A4", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "psllw     $2, %%mm1       \n\t" /* 4* */                  \
     "psllw     $2, %%mm2       \n\t" /* 4* */                  \
     "psubw     %%mm1, %%mm3    \n\t" /* -4,18,-3 */            \
     "psubw     %%mm2, %%mm4    \n\t" /* -4,18,-3 */            \
     MOVQ "*0+"A3", %%mm1       \n\t"                           \
     MOVQ "*4+"A3", %%mm2       \n\t"                           \
     UNPACK("%%mm1")                                            \
     UNPACK("%%mm2")                                            \
     "pmullw    %%mm5, %%mm1    \n\t" /* *53 */                 \
     "pmullw    %%mm5, %%mm2    \n\t" /* *53 */                 \
     "paddw     %%mm1, %%mm3    \n\t" /* -4,53,18,-3 */         \
     "paddw     %%mm2, %%mm4    \n\t" /* -4,53,18,-3 */

/**
 * Macro to build the vertical 16-bit version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (src_stride) and %4 (3*src_stride).
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_VER_16B(NAME, A1, A2, A3, A4)            \
static void                                                     \
vc1_put_ver_16b_ ## NAME ## _mmx(int16_t *dst, const uint8_t *src, \
                                 x86_reg src_stride,            \
                                 int rnd, int64_t shift)        \
{                                                               \
    int h = 8;                                                  \
    src -= src_stride;                                          \
    __asm__ volatile(                                           \
        LOAD_ROUNDER_MMX("%5")                                  \
        "movq      "MANGLE(ff_pw_53)", %%mm5\n\t"               \
        "movq      "MANGLE(ff_pw_18)", %%mm6\n\t"               \
        "1:                        \n\t"                        \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd  1", A1, A2, A3, A4)\
        NORMALIZE_MMX("%6")                                     \
        TRANSFER_DONT_PACK(OP_PUT)                              \
        /* Last 3 (in fact 4) bytes on the line */              \
        "movd      8+"A1", %%mm1   \n\t"                        \
        DO_UNPACK("%%mm1")                                      \
        "movq      %%mm1, %%mm3    \n\t"                        \
        "paddw     %%mm1, %%mm1    \n\t"                        \
        "paddw     %%mm3, %%mm1    \n\t" /* 3* */               \
        "movd      8+"A2", %%mm3   \n\t"                        \
        DO_UNPACK("%%mm3")                                      \
        "pmullw    %%mm6, %%mm3    \n\t" /* *18 */              \
        "psubw     %%mm1, %%mm3    \n\t" /* 18,-3 */            \
        "movd      8+"A3", %%mm1   \n\t"                        \
        DO_UNPACK("%%mm1")                                      \
        "pmullw    %%mm5, %%mm1    \n\t" /* *53 */              \
        "paddw     %%mm1, %%mm3    \n\t" /* 53,18,-3 */         \
        "movd      8+"A4", %%mm1   \n\t"                        \
        DO_UNPACK("%%mm1")                                      \
        "psllw     $2, %%mm1       \n\t" /* 4* */               \
        "psubw     %%mm1, %%mm3    \n\t"                        \
        "paddw     %%mm7, %%mm3    \n\t"                        \
        "psraw     %6, %%mm3       \n\t"                        \
        "movq      %%mm3, 16(%2)   \n\t"                        \
        "add       %3, %1          \n\t"                        \
        "add       $24, %2         \n\t"                        \
        "dec       %0              \n\t"                        \
        "jnz 1b                    \n\t"                        \
        : "+r"(h), "+r" (src), "+r" (dst)                       \
        : "r"(src_stride), "r"(3*src_stride),                   \
          "m"(rnd), "m"(shift)                                  \
        : "memory"                                              \
    );                                                          \
}

/**
 * Macro to build the horizontal 16-bit version of vc1_put_shift[13].
 * Here the offsets are in 16-bit words, so parameters passed A1 to A4
 * should be simple.
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_HOR_16B(NAME, A1, A2, A3, A4, OP, OPNAME)\
static void                                                     \
OPNAME ## vc1_hor_16b_ ## NAME ## _mmx(uint8_t *dst, x86_reg stride, \
                                       const int16_t *src, int rnd) \
{                                                               \
    int h = 8;                                                  \
    src -= 1;                                                   \
    rnd -= (-4+53+18-3)*256; /* Add -256 bias */                \
    __asm__ volatile(                                           \
        LOAD_ROUNDER_MMX("%4")                                  \
        "movq      "MANGLE(ff_pw_18)", %%mm6   \n\t"            \
        "movq      "MANGLE(ff_pw_53)", %%mm5   \n\t"            \
        "1:                        \n\t"                        \
        MSPEL_FILTER13_CORE(DONT_UNPACK, "movq 2", A1, A2, A3, A4)\
        NORMALIZE_MMX("$7")                                     \
        /* Remove bias */                                       \
        "paddw     "MANGLE(ff_pw_128)", %%mm3  \n\t"            \
        "paddw     "MANGLE(ff_pw_128)", %%mm4  \n\t"            \
        TRANSFER_DO_PACK(OP)                                    \
        "add       $24, %1         \n\t"                        \
        "add       %3, %2          \n\t"                        \
        "dec       %0              \n\t"                        \
        "jnz 1b                    \n\t"                        \
        : "+r"(h), "+r" (src), "+r" (dst)                       \
        : "r"(stride), "m"(rnd)                                 \
        : "memory"                                              \
    );                                                          \
}

/**
 * Macro to build the 8-bit, any direction, version of vc1_put_shift[13].
 * Here, offset=src_stride. Parameters passed A1 to A4 must use
 * %3 (offset) and %4 (3*offset).
 *
 * @param  NAME   Either 1 or 3
 * @see MSPEL_FILTER13_CORE for information on A1->A4
 */
#define MSPEL_FILTER13_8B(NAME, A1, A2, A3, A4, OP, OPNAME)     \
static void                                                     \
OPNAME ## vc1_## NAME ## _mmx(uint8_t *dst, const uint8_t *src, \
                              x86_reg stride, int rnd, x86_reg offset) \
{                                                               \
    int h = 8;                                                  \
    src -= offset;                                              \
    rnd = 32-rnd;                                               \
    __asm__ volatile (                                          \
        LOAD_ROUNDER_MMX("%6")                                  \
        "movq      "MANGLE(ff_pw_53)", %%mm5   \n\t"            \
        "movq      "MANGLE(ff_pw_18)", %%mm6   \n\t"            \
        "1:                        \n\t"                        \
        MSPEL_FILTER13_CORE(DO_UNPACK, "movd  1", A1, A2, A3, A4)\
        NORMALIZE_MMX("$6")                                     \
        TRANSFER_DO_PACK(OP)                                    \
        "add       %5, %1          \n\t"                        \
        "add       %5, %2          \n\t"                        \
        "dec       %0              \n\t"                        \
        "jnz 1b                    \n\t"                        \
        : "+r"(h), "+r" (src), "+r" (dst)                       \
        : "r"(offset), "r"(3*offset), "g"(stride), "m"(rnd)     \
        : "memory"                                              \
    );                                                          \
}

/** 1/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift1, "0(%1,%4  )", "0(%1,%3,2)", "0(%1,%3  )", "0(%1     )")
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift1, "2*3(%1)", "2*2(%1)", "2*1(%1)", "2*0(%1)", OP_AVG, avg_)

/** 3/4 shift bicubic interpolation */
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_PUT, put_)
MSPEL_FILTER13_8B     (shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )", OP_AVG, avg_)
MSPEL_FILTER13_VER_16B(shift3, "0(%1     )", "0(%1,%3  )", "0(%1,%3,2)", "0(%1,%4  )")
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_PUT, put_)
MSPEL_FILTER13_HOR_16B(shift3, "2*0(%1)", "2*1(%1)", "2*2(%1)", "2*3(%1)", OP_AVG, avg_)
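
/* shift1 feeds the core its taps in reverse order (A1 = +3, A4 = +0), so the
 * -3*A1 + 18*A2 + 53*A3 - 4*A4 sum becomes the (-4,53,18,-3) quarter-pel
 * filter; shift3 feeds them forward and gets the mirrored (-3,18,53,-4). */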

typedef void (*vc1_mspel_mc_filter_ver_16bits)(int16_t *dst, const uint8_t *src, x86_reg src_stride, int rnd, int64_t shift);
typedef void (*vc1_mspel_mc_filter_hor_16bits)(uint8_t *dst, x86_reg dst_stride, const int16_t *src, int rnd);
typedef void (*vc1_mspel_mc_filter_8bits)(uint8_t *dst, const uint8_t *src, x86_reg stride, int rnd, x86_reg offset);

/**
 * Interpolates fractional pel values by applying proper vertical then
 * horizontal filter.
 *
 * @param  dst     Destination buffer for interpolated pels.
 * @param  src     Source buffer.
 * @param  stride  Stride for both src and dst buffers.
 * @param  hmode   Horizontal filter (expressed in quarter pixels shift).
 * @param  vmode   Vertical filter (expressed in quarter pixels shift).
 * @param  rnd     Rounding bias.
 */
#define VC1_MSPEL_MC(OP)\
static void OP ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,\
                               int hmode, int vmode, int rnd)\
{\
    static const vc1_mspel_mc_filter_ver_16bits vc1_put_shift_ver_16bits[] =\
         { NULL, vc1_put_ver_16b_shift1_mmx, vc1_put_ver_16b_shift2_mmx, vc1_put_ver_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_hor_16bits vc1_put_shift_hor_16bits[] =\
         { NULL, OP ## vc1_hor_16b_shift1_mmx, OP ## vc1_hor_16b_shift2_mmx, OP ## vc1_hor_16b_shift3_mmx };\
    static const vc1_mspel_mc_filter_8bits vc1_put_shift_8bits[] =\
         { NULL, OP ## vc1_shift1_mmx, OP ## vc1_shift2_mmx, OP ## vc1_shift3_mmx };\
\
    __asm__ volatile(\
        "pxor %%mm0, %%mm0                 \n\t"\
        ::: "memory"\
    );\
\
    if (vmode) { /* Vertical filter to apply */\
        if (hmode) { /* Horizontal filter to apply, output to tmp */\
            static const int shift_value[] = { 0, 5, 1, 5 };\
            int              shift = (shift_value[hmode]+shift_value[vmode])>>1;\
            int              r;\
            DECLARE_ALIGNED(16, int16_t, tmp)[12*8];\
\
            r = (1<<(shift-1)) + rnd-1;\
            vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);\
\
            vc1_put_shift_hor_16bits[hmode](dst, stride, tmp+1, 64-rnd);\
            return;\
        }\
        else { /* No horizontal filter, output 8 lines to dst */\
            vc1_put_shift_8bits[vmode](dst, src, stride, 1-rnd, stride);\
            return;\
        }\
    }\
\
    /* Horizontal mode with no vertical mode */\
    vc1_put_shift_8bits[hmode](dst, src, stride, rnd, 1);\
}

VC1_MSPEL_MC(put_)
VC1_MSPEL_MC(avg_)
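
/* Usage sketch: a VC-1 motion vector with quarter-pel components (hmode=1,
 * vmode=2) resolves to put_vc1_mspel_mc(dst, src, stride, 1, 2, rnd), which
 * runs the 1/2-pel vertical filter into the 12x8-word tmp buffer and then
 * the 1/4-pel horizontal filter from tmp into dst. The pxor above parks an
 * all-zero mm0 that every helper relies on for punpcklbw. */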

/** Macro to ease bicubic filter interpolation functions declarations */
#define DECLARE_FUNCTION(a, b) \
static void put_vc1_mspel_mc ## a ## b ## _mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
     put_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
}\
static void avg_vc1_mspel_mc ## a ## b ## _mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) { \
     avg_vc1_mspel_mc(dst, src, stride, a, b, rnd); \
}

DECLARE_FUNCTION(0, 1)
DECLARE_FUNCTION(0, 2)
DECLARE_FUNCTION(0, 3)

DECLARE_FUNCTION(1, 0)
DECLARE_FUNCTION(1, 1)
DECLARE_FUNCTION(1, 2)
DECLARE_FUNCTION(1, 3)

DECLARE_FUNCTION(2, 0)
DECLARE_FUNCTION(2, 1)
DECLARE_FUNCTION(2, 2)
DECLARE_FUNCTION(2, 3)

DECLARE_FUNCTION(3, 0)
DECLARE_FUNCTION(3, 1)
DECLARE_FUNCTION(3, 2)
DECLARE_FUNCTION(3, 3)

static void vc1_inv_trans_4x4_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    dc = (17 * dc +  4) >> 3;
    dc = (17 * dc + 64) >> 7;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0    \n\t"
        "movd       %%mm3, %1    \n\t"
        "movd       %%mm4, %2    \n\t"
        "movd       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}

static void vc1_inv_trans_4x8_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    dc = (17 * dc +  4) >> 3;
    dc = (12 * dc + 64) >> 7;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0    \n\t"
        "movd       %%mm3, %1    \n\t"
        "movd       %%mm4, %2    \n\t"
        "movd       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
    dest += 4*linesize;
    __asm__ volatile(
        "movd          %0, %%mm2 \n\t"
        "movd          %1, %%mm3 \n\t"
        "movd          %2, %%mm4 \n\t"
        "movd          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movd       %%mm2, %0    \n\t"
        "movd       %%mm3, %1    \n\t"
        "movd       %%mm4, %2    \n\t"
        "movd       %%mm5, %3    \n\t"
        :"+m"(*(uint32_t*)(dest+0*linesize)),
         "+m"(*(uint32_t*)(dest+1*linesize)),
         "+m"(*(uint32_t*)(dest+2*linesize)),
         "+m"(*(uint32_t*)(dest+3*linesize))
    );
}

static void vc1_inv_trans_8x4_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    dc = ( 3 * dc +  1) >> 1;
    dc = (17 * dc + 64) >> 7;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        :"+m"(*(uint64_t*)(dest+0*linesize)),
         "+m"(*(uint64_t*)(dest+1*linesize)),
         "+m"(*(uint64_t*)(dest+2*linesize)),
         "+m"(*(uint64_t*)(dest+3*linesize))
    );
}

static void vc1_inv_trans_8x8_dc_mmx2(uint8_t *dest, int linesize, DCTELEM *block)
{
    int dc = block[0];
    dc = (3 * dc +  1) >> 1;
    dc = (3 * dc + 16) >> 5;
    __asm__ volatile(
        "movd          %0, %%mm0 \n\t"
        "pshufw $0, %%mm0, %%mm0 \n\t"
        "pxor       %%mm1, %%mm1 \n\t"
        "psubw      %%mm0, %%mm1 \n\t"
        "packuswb   %%mm0, %%mm0 \n\t"
        "packuswb   %%mm1, %%mm1 \n\t"
        ::"r"(dc)
    );
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        :"+m"(*(uint64_t*)(dest+0*linesize)),
         "+m"(*(uint64_t*)(dest+1*linesize)),
         "+m"(*(uint64_t*)(dest+2*linesize)),
         "+m"(*(uint64_t*)(dest+3*linesize))
    );
    dest += 4*linesize;
    __asm__ volatile(
        "movq          %0, %%mm2 \n\t"
        "movq          %1, %%mm3 \n\t"
        "movq          %2, %%mm4 \n\t"
        "movq          %3, %%mm5 \n\t"
        "paddusb    %%mm0, %%mm2 \n\t"
        "paddusb    %%mm0, %%mm3 \n\t"
        "paddusb    %%mm0, %%mm4 \n\t"
        "paddusb    %%mm0, %%mm5 \n\t"
        "psubusb    %%mm1, %%mm2 \n\t"
        "psubusb    %%mm1, %%mm3 \n\t"
        "psubusb    %%mm1, %%mm4 \n\t"
        "psubusb    %%mm1, %%mm5 \n\t"
        "movq       %%mm2, %0    \n\t"
        "movq       %%mm3, %1    \n\t"
        "movq       %%mm4, %2    \n\t"
        "movq       %%mm5, %3    \n\t"
        :"+m"(*(uint64_t*)(dest+0*linesize)),
         "+m"(*(uint64_t*)(dest+1*linesize)),
         "+m"(*(uint64_t*)(dest+2*linesize)),
         "+m"(*(uint64_t*)(dest+3*linesize))
    );
}

void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx) {
    int mm_flags = mm_support();

    dsp->put_vc1_mspel_pixels_tab[ 0] = ff_put_vc1_mspel_mc00_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 4] = put_vc1_mspel_mc01_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 8] = put_vc1_mspel_mc02_mmx;
    dsp->put_vc1_mspel_pixels_tab[12] = put_vc1_mspel_mc03_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 1] = put_vc1_mspel_mc10_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 5] = put_vc1_mspel_mc11_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 9] = put_vc1_mspel_mc12_mmx;
    dsp->put_vc1_mspel_pixels_tab[13] = put_vc1_mspel_mc13_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 2] = put_vc1_mspel_mc20_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 6] = put_vc1_mspel_mc21_mmx;
    dsp->put_vc1_mspel_pixels_tab[10] = put_vc1_mspel_mc22_mmx;
    dsp->put_vc1_mspel_pixels_tab[14] = put_vc1_mspel_mc23_mmx;

    dsp->put_vc1_mspel_pixels_tab[ 3] = put_vc1_mspel_mc30_mmx;
    dsp->put_vc1_mspel_pixels_tab[ 7] = put_vc1_mspel_mc31_mmx;
    dsp->put_vc1_mspel_pixels_tab[11] = put_vc1_mspel_mc32_mmx;
    dsp->put_vc1_mspel_pixels_tab[15] = put_vc1_mspel_mc33_mmx;

    if (mm_flags & FF_MM_MMX2){
        dsp->avg_vc1_mspel_pixels_tab[ 0] = ff_avg_vc1_mspel_mc00_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 4] = avg_vc1_mspel_mc01_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 8] = avg_vc1_mspel_mc02_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[12] = avg_vc1_mspel_mc03_mmx2;

        dsp->avg_vc1_mspel_pixels_tab[ 1] = avg_vc1_mspel_mc10_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 5] = avg_vc1_mspel_mc11_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 9] = avg_vc1_mspel_mc12_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[13] = avg_vc1_mspel_mc13_mmx2;

        dsp->avg_vc1_mspel_pixels_tab[ 2] = avg_vc1_mspel_mc20_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 6] = avg_vc1_mspel_mc21_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[10] = avg_vc1_mspel_mc22_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[14] = avg_vc1_mspel_mc23_mmx2;

        dsp->avg_vc1_mspel_pixels_tab[ 3] = avg_vc1_mspel_mc30_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[ 7] = avg_vc1_mspel_mc31_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[11] = avg_vc1_mspel_mc32_mmx2;
        dsp->avg_vc1_mspel_pixels_tab[15] = avg_vc1_mspel_mc33_mmx2;

        dsp->vc1_inv_trans_8x8_dc = vc1_inv_trans_8x8_dc_mmx2;
        dsp->vc1_inv_trans_4x8_dc = vc1_inv_trans_4x8_dc_mmx2;
        dsp->vc1_inv_trans_8x4_dc = vc1_inv_trans_8x4_dc_mmx2;
        dsp->vc1_inv_trans_4x4_dc = vc1_inv_trans_4x4_dc_mmx2;
    }
}