/*
 * MMX and SSE2 optimized snow DSP utils
 * Copyright (c) 2005-2006 Robert Edele <yartrebo@earthlink.net>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "../avcodec.h"
#include "../snow.h" /* DWTELEM, slice_buffer, the W_* lifting constants and scalar helpers */
#include "x86_cpu.h" /* REG_a/REG_d register-name macros; exact header name assumed */
void ff_snow_horizontal_compose97i_sse2(DWTELEM *b, int width){
    const int w2= (width+1)>>1;
    // SSE2 code runs faster with pointers aligned on a 16-byte boundary.
    DWTELEM temp_buf[(width>>1) + 4];
    DWTELEM * const temp = temp_buf + 4 - (((long)temp_buf & 0xF) >> 2);
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;

    { // Lift 0
        DWTELEM * const ref = b + w2 - 1;
        DWTELEM b_0 = b[0]; // By allowing the first entry in b[0] to be calculated twice
        // (the first time erroneously), we allow the SSE2 code to run an extra pass.
        // The savings in code and time are well worth having to store this value and
        // calculate b[0] correctly afterwards.
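        /* Vectorized W_D lift: b[i] -= (3*(ref[i] + ref[i+1]) + 4) >> 3,
         * eight coefficients per pass. xmm7 is built once as the rounding
         * constant 4 in every dword lane (all-ones, then the shifts leave
         * only bit 2 set) and is reused by the later lifts. */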
        i = 0;
        asm volatile(
            "pcmpeqd %%xmm7, %%xmm7 \n\t"
            "pslld $31, %%xmm7 \n\t"
            "psrld $29, %%xmm7 \n\t"
        ::);
        for(; i<w_l-7; i+=8){
            asm volatile(
                "movdqu (%1), %%xmm1 \n\t"
                "movdqu 16(%1), %%xmm5 \n\t"
                "movdqu 4(%1), %%xmm2 \n\t"
                "movdqu 20(%1), %%xmm6 \n\t"
                "paddd %%xmm1, %%xmm2 \n\t"
                "paddd %%xmm5, %%xmm6 \n\t"
                "movdqa %%xmm2, %%xmm0 \n\t"
                "movdqa %%xmm6, %%xmm4 \n\t"
                "paddd %%xmm2, %%xmm2 \n\t"
                "paddd %%xmm6, %%xmm6 \n\t"
                "paddd %%xmm0, %%xmm2 \n\t"
                "paddd %%xmm4, %%xmm6 \n\t"
                "paddd %%xmm7, %%xmm2 \n\t"
                "paddd %%xmm7, %%xmm6 \n\t"
                "psrad $3, %%xmm2 \n\t"
                "psrad $3, %%xmm6 \n\t"
                "movdqa (%0), %%xmm0 \n\t"
                "movdqa 16(%0), %%xmm4 \n\t"
                "psubd %%xmm2, %%xmm0 \n\t"
                "psubd %%xmm6, %%xmm4 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm4, 16(%0) \n\t"
            :: "r"(&b[i]), "r"(&ref[i])
            : "memory");
        }
        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
        b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
    }

    { // Lift 1
        DWTELEM * const dst = b+w2;

        i = 0;
        for(; (((long)&dst[i]) & 0xF) && i<w_r; i++){
            dst[i] = dst[i] - (b[i] + b[i + 1]);
        }
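        /* Vectorized W_C lift: dst[i] -= b[i] + b[i+1]; the multiplier is 1
         * and there is no rounding term, so no constant register is needed.
         * The scalar loop above advances i until &dst[i] is 16-byte aligned
         * for movdqa. */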
        for(; i<w_r-7; i+=8){
            asm volatile(
                "movdqu (%1), %%xmm1 \n\t"
                "movdqu 16(%1), %%xmm5 \n\t"
                "movdqu 4(%1), %%xmm2 \n\t"
                "movdqu 20(%1), %%xmm6 \n\t"
                "paddd %%xmm1, %%xmm2 \n\t"
                "paddd %%xmm5, %%xmm6 \n\t"
                "movdqa (%0), %%xmm0 \n\t"
                "movdqa 16(%0), %%xmm4 \n\t"
                "psubd %%xmm2, %%xmm0 \n\t"
                "psubd %%xmm6, %%xmm4 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm4, 16(%0) \n\t"
            :: "r"(&dst[i]), "r"(&b[i])
            : "memory");
        }
        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }

    { // Lift 2
        DWTELEM * const ref = b+w2 - 1;
        DWTELEM b_0 = b[0];

        i = 0;
        asm volatile(
            "pslld $1, %%xmm7 \n\t" /* xmm7 already holds a '4' from 2 lifts ago. */
        ::);
        for(; i<w_l-7; i+=8){
            asm volatile(
                "movdqu (%1), %%xmm1 \n\t"
                "movdqu 16(%1), %%xmm5 \n\t"
                "movdqu 4(%1), %%xmm0 \n\t"
                "movdqu 20(%1), %%xmm4 \n\t"
                "paddd %%xmm1, %%xmm0 \n\t"
                "paddd %%xmm5, %%xmm4 \n\t"
                "movdqa %%xmm7, %%xmm1 \n\t"
                "movdqa %%xmm7, %%xmm5 \n\t"
                "psubd %%xmm0, %%xmm1 \n\t"
                "psubd %%xmm4, %%xmm5 \n\t"
                "movdqa (%0), %%xmm0 \n\t"
                "movdqa 16(%0), %%xmm4 \n\t"
                "pslld $2, %%xmm0 \n\t"
                "pslld $2, %%xmm4 \n\t"
                "psubd %%xmm0, %%xmm1 \n\t"
                "psubd %%xmm4, %%xmm5 \n\t"
                "psrad $4, %%xmm1 \n\t"
                "psrad $4, %%xmm5 \n\t"
                "movdqa (%0), %%xmm0 \n\t"
                "movdqa 16(%0), %%xmm4 \n\t"
                "psubd %%xmm1, %%xmm0 \n\t"
                "psubd %%xmm5, %%xmm4 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm4, 16(%0) \n\t"
            :: "r"(&b[i]), "r"(&ref[i])
            : "memory");
        }
        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
        b[0] = b_0 - (((-2 * ref[1] + W_BO) - 4 * b_0) >> W_BS);
    }

    { // Lift 3
        DWTELEM * const src = b+w2;

        i = 0;
        for(; (((long)&temp[i]) & 0xF) && i<w_r; i++){
            temp[i] = src[i] - ((-W_AM*(b[i] + b[i+1]))>>W_AS);
        }
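        /* Vectorized W_A lift: temp[i] = src[i] - ((-3*(b[i] + b[i+1])) >> 1),
         * written to the aligned temp buffer instead of in place so the
         * interleave step below can read both halves. */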
        for(; i<w_r-7; i+=8){
            asm volatile(
                "movdqu 4(%1), %%xmm2 \n\t"
                "movdqu 20(%1), %%xmm6 \n\t"
                "paddd (%1), %%xmm2 \n\t"
                "paddd 16(%1), %%xmm6 \n\t"
                "movdqa %%xmm2, %%xmm0 \n\t"
                "movdqa %%xmm6, %%xmm4 \n\t"
                "pslld $2, %%xmm2 \n\t"
                "pslld $2, %%xmm6 \n\t"
                "psubd %%xmm2, %%xmm0 \n\t"
                "psubd %%xmm6, %%xmm4 \n\t"
                "psrad $1, %%xmm0 \n\t"
                "psrad $1, %%xmm4 \n\t"
                "movdqu (%0), %%xmm2 \n\t"
                "movdqu 16(%0), %%xmm6 \n\t"
                "psubd %%xmm0, %%xmm2 \n\t"
                "psubd %%xmm4, %%xmm6 \n\t"
                "movdqa %%xmm2, (%2) \n\t"
                "movdqa %%xmm6, 16(%2) \n\t"
            :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i])
            : "memory");
        }
        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO, W_AS);
    }

    {
        snow_interleave_line_header(&i, width, b, temp);

        for (; (i & 0x1E) != 0x1E; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
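        /* Merge the low-pass (b) and high-pass (temp) halves back into b:
         * punpckldq/punpckhdq interleave 32 output coefficients per pass;
         * the scalar loop above peels iterations until i+2 is a multiple
         * of 32. */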
        for (i-=30; i>=0; i-=32){
            asm volatile(
                "movdqa (%1), %%xmm0 \n\t"
                "movdqa 16(%1), %%xmm2 \n\t"
                "movdqa 32(%1), %%xmm4 \n\t"
                "movdqa 48(%1), %%xmm6 \n\t"
                "movdqa (%1), %%xmm1 \n\t"
                "movdqa 16(%1), %%xmm3 \n\t"
                "movdqa 32(%1), %%xmm5 \n\t"
                "movdqa 48(%1), %%xmm7 \n\t"
                "punpckldq (%2), %%xmm0 \n\t"
                "punpckldq 16(%2), %%xmm2 \n\t"
                "punpckldq 32(%2), %%xmm4 \n\t"
                "punpckldq 48(%2), %%xmm6 \n\t"
                "movdqa %%xmm0, (%0) \n\t"
                "movdqa %%xmm2, 32(%0) \n\t"
                "movdqa %%xmm4, 64(%0) \n\t"
                "movdqa %%xmm6, 96(%0) \n\t"
                "punpckhdq (%2), %%xmm1 \n\t"
                "punpckhdq 16(%2), %%xmm3 \n\t"
                "punpckhdq 32(%2), %%xmm5 \n\t"
                "punpckhdq 48(%2), %%xmm7 \n\t"
                "movdqa %%xmm1, 16(%0) \n\t"
                "movdqa %%xmm3, 48(%0) \n\t"
                "movdqa %%xmm5, 80(%0) \n\t"
                "movdqa %%xmm7, 112(%0) \n\t"
            :: "r"(&(b)[i]), "r"(&(b)[i>>1]), "r"(&(temp)[i>>1])
            : "memory");
        }
    }
}
void ff_snow_horizontal_compose97i_mmx(DWTELEM *b, int width){
    const int w2= (width+1)>>1;
    DWTELEM temp[width >> 1];
    const int w_l= (width>>1);
    const int w_r= w2 - 1;
    int i;

    { // Lift 0
        DWTELEM * const ref = b + w2 - 1;

        i = 1;
        b[0] = b[0] - ((W_DM * 2 * ref[1]+W_DO)>>W_DS);
        asm volatile(
            "pcmpeqd %%mm7, %%mm7 \n\t"
            "pslld $31, %%mm7 \n\t"
            "psrld $29, %%mm7 \n\t"
        ::);
        for(; i<w_l-3; i+=4){
            asm volatile(
                "movq (%1), %%mm2 \n\t"
                "movq 8(%1), %%mm6 \n\t"
                "paddd 4(%1), %%mm2 \n\t"
                "paddd 12(%1), %%mm6 \n\t"
                "movq %%mm2, %%mm0 \n\t"
                "movq %%mm6, %%mm4 \n\t"
                "paddd %%mm2, %%mm2 \n\t"
                "paddd %%mm6, %%mm6 \n\t"
                "paddd %%mm0, %%mm2 \n\t"
                "paddd %%mm4, %%mm6 \n\t"
                "paddd %%mm7, %%mm2 \n\t"
                "paddd %%mm7, %%mm6 \n\t"
                "psrad $3, %%mm2 \n\t"
                "psrad $3, %%mm6 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "psubd %%mm2, %%mm0 \n\t"
                "psubd %%mm6, %%mm4 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm4, 8(%0) \n\t"
            :: "r"(&b[i]), "r"(&ref[i])
            : "memory");
        }
        snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS);
    }

    { // Lift 1
        DWTELEM * const dst = b+w2;

        i = 0;
        for(; i<w_r-3; i+=4){
            asm volatile(
                "movq (%1), %%mm2 \n\t"
                "movq 8(%1), %%mm6 \n\t"
                "paddd 4(%1), %%mm2 \n\t"
                "paddd 12(%1), %%mm6 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "psubd %%mm2, %%mm0 \n\t"
                "psubd %%mm6, %%mm4 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm4, 8(%0) \n\t"
            :: "r"(&dst[i]), "r"(&b[i])
            : "memory");
        }
        snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS);
    }

    { // Lift 2
        DWTELEM * const ref = b+w2 - 1;

        i = 1;
        b[0] = b[0] - (((-2 * ref[1] + W_BO) - 4 * b[0]) >> W_BS);
        asm volatile(
            "pslld $1, %%mm7 \n\t" /* mm7 already holds a '4' from 2 lifts ago. */
        ::);
        for(; i<w_l-3; i+=4){
            asm volatile(
                "movq (%1), %%mm0 \n\t"
                "movq 8(%1), %%mm4 \n\t"
                "paddd 4(%1), %%mm0 \n\t"
                "paddd 12(%1), %%mm4 \n\t"
                "movq %%mm7, %%mm1 \n\t"
                "movq %%mm7, %%mm5 \n\t"
                "psubd %%mm0, %%mm1 \n\t"
                "psubd %%mm4, %%mm5 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "pslld $2, %%mm0 \n\t"
                "pslld $2, %%mm4 \n\t"
                "psubd %%mm0, %%mm1 \n\t"
                "psubd %%mm4, %%mm5 \n\t"
                "psrad $4, %%mm1 \n\t"
                "psrad $4, %%mm5 \n\t"
                "movq (%0), %%mm0 \n\t"
                "movq 8(%0), %%mm4 \n\t"
                "psubd %%mm1, %%mm0 \n\t"
                "psubd %%mm5, %%mm4 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm4, 8(%0) \n\t"
            :: "r"(&b[i]), "r"(&ref[i])
            : "memory");
        }
        snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l);
    }

    { // Lift 3
        DWTELEM * const src = b+w2;

        i = 0;
        for(; i<w_r-3; i+=4){
            asm volatile(
                "movq 4(%1), %%mm2 \n\t"
                "movq 12(%1), %%mm6 \n\t"
                "paddd (%1), %%mm2 \n\t"
                "paddd 8(%1), %%mm6 \n\t"
                "movq %%mm2, %%mm0 \n\t"
                "movq %%mm6, %%mm4 \n\t"
                "pslld $2, %%mm2 \n\t"
                "pslld $2, %%mm6 \n\t"
                "psubd %%mm2, %%mm0 \n\t"
                "psubd %%mm6, %%mm4 \n\t"
                "psrad $1, %%mm0 \n\t"
                "psrad $1, %%mm4 \n\t"
                "movq (%0), %%mm2 \n\t"
                "movq 8(%0), %%mm6 \n\t"
                "psubd %%mm0, %%mm2 \n\t"
                "psubd %%mm4, %%mm6 \n\t"
                "movq %%mm2, (%2) \n\t"
                "movq %%mm6, 8(%2) \n\t"
            :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i])
            : "memory");
        }
        snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO, W_AS);
    }

    {
        snow_interleave_line_header(&i, width, b, temp);

        for (; (i & 0xE) != 0xE; i-=2){
            b[i+1] = temp[i>>1];
            b[i] = b[i>>1];
        }
        for (i-=14; i>=0; i-=16){
            asm volatile(
                "movq (%1), %%mm0 \n\t"
                "movq 8(%1), %%mm2 \n\t"
                "movq 16(%1), %%mm4 \n\t"
                "movq 24(%1), %%mm6 \n\t"
                "movq (%1), %%mm1 \n\t"
                "movq 8(%1), %%mm3 \n\t"
                "movq 16(%1), %%mm5 \n\t"
                "movq 24(%1), %%mm7 \n\t"
                "punpckldq (%2), %%mm0 \n\t"
                "punpckldq 8(%2), %%mm2 \n\t"
                "punpckldq 16(%2), %%mm4 \n\t"
                "punpckldq 24(%2), %%mm6 \n\t"
                "movq %%mm0, (%0) \n\t"
                "movq %%mm2, 16(%0) \n\t"
                "movq %%mm4, 32(%0) \n\t"
                "movq %%mm6, 48(%0) \n\t"
                "punpckhdq (%2), %%mm1 \n\t"
                "punpckhdq 8(%2), %%mm3 \n\t"
                "punpckhdq 16(%2), %%mm5 \n\t"
                "punpckhdq 24(%2), %%mm7 \n\t"
                "movq %%mm1, 8(%0) \n\t"
                "movq %%mm3, 24(%0) \n\t"
                "movq %%mm5, 40(%0) \n\t"
                "movq %%mm7, 56(%0) \n\t"
            :: "r"(&b[i]), "r"(&b[i>>1]), "r"(&temp[i>>1])
            : "memory");
        }
    }
}
#define snow_vertical_compose_sse2_load_add(op,r,t0,t1,t2,t3)\
        ""op" (%%"r",%%"REG_d",4), %%"t0" \n\t"\
        ""op" 16(%%"r",%%"REG_d",4), %%"t1" \n\t"\
        ""op" 32(%%"r",%%"REG_d",4), %%"t2" \n\t"\
        ""op" 48(%%"r",%%"REG_d",4), %%"t3" \n\t"

#define snow_vertical_compose_sse2_load(r,t0,t1,t2,t3)\
        snow_vertical_compose_sse2_load_add("movdqa",r,t0,t1,t2,t3)

#define snow_vertical_compose_sse2_add(r,t0,t1,t2,t3)\
        snow_vertical_compose_sse2_load_add("paddd",r,t0,t1,t2,t3)

#define snow_vertical_compose_sse2_sub(s0,s1,s2,s3,t0,t1,t2,t3)\
        "psubd %%"s0", %%"t0" \n\t"\
        "psubd %%"s1", %%"t1" \n\t"\
        "psubd %%"s2", %%"t2" \n\t"\
        "psubd %%"s3", %%"t3" \n\t"

#define snow_vertical_compose_sse2_store(w,s0,s1,s2,s3)\
        "movdqa %%"s0", (%%"w",%%"REG_d",4) \n\t"\
        "movdqa %%"s1", 16(%%"w",%%"REG_d",4) \n\t"\
        "movdqa %%"s2", 32(%%"w",%%"REG_d",4) \n\t"\
        "movdqa %%"s3", 48(%%"w",%%"REG_d",4) \n\t"

#define snow_vertical_compose_sse2_sra(n,t0,t1,t2,t3)\
        "psrad $"n", %%"t0" \n\t"\
        "psrad $"n", %%"t1" \n\t"\
        "psrad $"n", %%"t2" \n\t"\
        "psrad $"n", %%"t3" \n\t"

#define snow_vertical_compose_sse2_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)\
        "paddd %%"s0", %%"t0" \n\t"\
        "paddd %%"s1", %%"t1" \n\t"\
        "paddd %%"s2", %%"t2" \n\t"\
        "paddd %%"s3", %%"t3" \n\t"

#define snow_vertical_compose_sse2_sll(n,t0,t1,t2,t3)\
        "pslld $"n", %%"t0" \n\t"\
        "pslld $"n", %%"t1" \n\t"\
        "pslld $"n", %%"t2" \n\t"\
        "pslld $"n", %%"t3" \n\t"

#define snow_vertical_compose_sse2_move(s0,s1,s2,s3,t0,t1,t2,t3)\
        "movdqa %%"s0", %%"t0" \n\t"\
        "movdqa %%"s1", %%"t1" \n\t"\
        "movdqa %%"s2", %%"t2" \n\t"\
        "movdqa %%"s3", %%"t3" \n\t"

void ff_snow_vertical_compose97i_sse2(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width){
    long i = width;

    while(i & 0xF){
        i--;
        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
    }
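
    /* Main loop: the same four lifts, 16 columns at a time. The asm enters at
     * label 2, so the counter in REG_d is decremented before the body runs;
     * label 1 starts the vectorized lift sequence. */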
    asm volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "mov %6, %%"REG_a" \n\t"
        "mov %4, %%"REG_b" \n\t"

        snow_vertical_compose_sse2_load(REG_b,"xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_move("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_sll("1","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")

        "pcmpeqd %%xmm1, %%xmm1 \n\t"
        "pslld $31, %%xmm1 \n\t"
        "psrld $29, %%xmm1 \n\t"
        "mov %5, %%"REG_a" \n\t"

        snow_vertical_compose_sse2_r2r_add("xmm1","xmm1","xmm1","xmm1","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_sra("3","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_load(REG_a,"xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_sub("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_store(REG_a,"xmm1","xmm3","xmm5","xmm7")
        "mov %3, %%"REG_c" \n\t"
        snow_vertical_compose_sse2_load(REG_b,"xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_add(REG_c,"xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_sub("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_store(REG_b,"xmm0","xmm2","xmm4","xmm6")
        "mov %2, %%"REG_a" \n\t"
        snow_vertical_compose_sse2_load(REG_c,"xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_sll("2","xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")

        "pcmpeqd %%xmm1, %%xmm1 \n\t"
        "pslld $31, %%xmm1 \n\t"
        "psrld $28, %%xmm1 \n\t"
        "mov %1, %%"REG_b" \n\t"

        snow_vertical_compose_sse2_r2r_add("xmm1","xmm1","xmm1","xmm1","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_sra("4","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_add(REG_c,"xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_store(REG_c,"xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_add(REG_b,"xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_move("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7")
        snow_vertical_compose_sse2_sll("1","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_sra("1","xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_add(REG_a,"xmm0","xmm2","xmm4","xmm6")
        snow_vertical_compose_sse2_store(REG_a,"xmm0","xmm2","xmm4","xmm6")

        "2: \n\t"
        "sub $16, %%"REG_d" \n\t"
        "jge 1b \n\t"
        :"+d"(i):
        "m"(b0),"m"(b1),"m"(b2),"m"(b3),"m"(b4),"m"(b5):
        "%"REG_a"","%"REG_b"","%"REG_c"");
}
#define snow_vertical_compose_mmx_load_add(op,r,t0,t1,t2,t3)\
        ""op" (%%"r",%%"REG_d",4), %%"t0" \n\t"\
        ""op" 8(%%"r",%%"REG_d",4), %%"t1" \n\t"\
        ""op" 16(%%"r",%%"REG_d",4), %%"t2" \n\t"\
        ""op" 24(%%"r",%%"REG_d",4), %%"t3" \n\t"

#define snow_vertical_compose_mmx_load(r,t0,t1,t2,t3)\
        snow_vertical_compose_mmx_load_add("movq",r,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_add(r,t0,t1,t2,t3)\
        snow_vertical_compose_mmx_load_add("paddd",r,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_sub(s0,s1,s2,s3,t0,t1,t2,t3)\
        snow_vertical_compose_sse2_sub(s0,s1,s2,s3,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_store(w,s0,s1,s2,s3)\
        "movq %%"s0", (%%"w",%%"REG_d",4) \n\t"\
        "movq %%"s1", 8(%%"w",%%"REG_d",4) \n\t"\
        "movq %%"s2", 16(%%"w",%%"REG_d",4) \n\t"\
        "movq %%"s3", 24(%%"w",%%"REG_d",4) \n\t"

#define snow_vertical_compose_mmx_sra(n,t0,t1,t2,t3)\
        snow_vertical_compose_sse2_sra(n,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)\
        snow_vertical_compose_sse2_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_sll(n,t0,t1,t2,t3)\
        snow_vertical_compose_sse2_sll(n,t0,t1,t2,t3)

#define snow_vertical_compose_mmx_move(s0,s1,s2,s3,t0,t1,t2,t3)\
        "movq %%"s0", %%"t0" \n\t"\
        "movq %%"s1", %%"t1" \n\t"\
        "movq %%"s2", %%"t2" \n\t"\
        "movq %%"s3", %%"t3" \n\t"

void ff_snow_vertical_compose97i_mmx(DWTELEM *b0, DWTELEM *b1, DWTELEM *b2, DWTELEM *b3, DWTELEM *b4, DWTELEM *b5, int width){
    long i = width;

    while(i & 0x7){
        i--;
        b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS;
        b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS;
        b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS;
        b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS;
    }
    asm volatile(
        "jmp 2f \n\t"
        "1: \n\t"
        "mov %6, %%"REG_a" \n\t"
        "mov %4, %%"REG_b" \n\t"

        snow_vertical_compose_mmx_load(REG_b,"mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add(REG_a,"mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_move("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_sll("1","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")

        "pcmpeqd %%mm1, %%mm1 \n\t"
        "pslld $31, %%mm1 \n\t"
        "psrld $29, %%mm1 \n\t"
        "mov %5, %%"REG_a" \n\t"

        snow_vertical_compose_mmx_r2r_add("mm1","mm1","mm1","mm1","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_sra("3","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_load(REG_a,"mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_sub("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_store(REG_a,"mm1","mm3","mm5","mm7")
        "mov %3, %%"REG_c" \n\t"
        snow_vertical_compose_mmx_load(REG_b,"mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add(REG_c,"mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_sub("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_store(REG_b,"mm0","mm2","mm4","mm6")
        "mov %2, %%"REG_a" \n\t"
        snow_vertical_compose_mmx_load(REG_c,"mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_add(REG_a,"mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_sll("2","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")

        "pcmpeqd %%mm1, %%mm1 \n\t"
        "pslld $31, %%mm1 \n\t"
        "psrld $28, %%mm1 \n\t"
        "mov %1, %%"REG_b" \n\t"

        snow_vertical_compose_mmx_r2r_add("mm1","mm1","mm1","mm1","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_sra("4","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add(REG_c,"mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_store(REG_c,"mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add(REG_b,"mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_move("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7")
        snow_vertical_compose_mmx_sll("1","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_sra("1","mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_add(REG_a,"mm0","mm2","mm4","mm6")
        snow_vertical_compose_mmx_store(REG_a,"mm0","mm2","mm4","mm6")

        "2: \n\t"
        "sub $8, %%"REG_d" \n\t"
        "jge 1b \n\t"
        :"+d"(i):
        "m"(b0),"m"(b1),"m"(b2),"m"(b3),"m"(b4),"m"(b5):
        "%"REG_a"","%"REG_b"","%"REG_c"");
}

#define snow_inner_add_yblock_sse2_header \
    DWTELEM * * dst_array = sb->line + src_y;\
    asm volatile(\
             "mov %6, %%"REG_c" \n\t"\
             "mov %5, %%"REG_b" \n\t"\
             "mov %3, %%"REG_S" \n\t"\
             "pxor %%xmm7, %%xmm7 \n\t" /* 0 */\
             "pcmpeqd %%xmm3, %%xmm3 \n\t"\
             "pslld $31, %%xmm3 \n\t"\
             "psrld $24, %%xmm3 \n\t" /* FRAC_BITS >> 1 */\
             "1: \n\t"\
             "mov %1, %%"REG_D" \n\t"\
             "mov (%%"REG_D"), %%"REG_D" \n\t"\
             "add %2, %%"REG_D" \n\t"

#define snow_inner_add_yblock_sse2_start_8(out_reg1, out_reg2, ptr_offset, s_offset)\
             "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
             "movq (%%"REG_d"), %%"out_reg1" \n\t"\
             "movq (%%"REG_d", %%"REG_c"), %%"out_reg2" \n\t"\
             "punpcklbw %%xmm7, %%"out_reg1" \n\t"\
             "punpcklbw %%xmm7, %%"out_reg2" \n\t"\
             "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\
             "movq "s_offset"+16(%%"REG_S"), %%xmm4 \n\t"\
             "punpcklbw %%xmm7, %%xmm0 \n\t"\
             "punpcklbw %%xmm7, %%xmm4 \n\t"\
             "pmullw %%xmm0, %%"out_reg1" \n\t"\
             "pmullw %%xmm4, %%"out_reg2" \n\t"

#define snow_inner_add_yblock_sse2_start_16(out_reg1, out_reg2, ptr_offset, s_offset)\
             "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
             "movq (%%"REG_d"), %%"out_reg1" \n\t"\
             "movq 8(%%"REG_d"), %%"out_reg2" \n\t"\
             "punpcklbw %%xmm7, %%"out_reg1" \n\t"\
             "punpcklbw %%xmm7, %%"out_reg2" \n\t"\
             "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\
             "movq "s_offset"+8(%%"REG_S"), %%xmm4 \n\t"\
             "punpcklbw %%xmm7, %%xmm0 \n\t"\
             "punpcklbw %%xmm7, %%xmm4 \n\t"\
             "pmullw %%xmm0, %%"out_reg1" \n\t"\
             "pmullw %%xmm4, %%"out_reg2" \n\t"

#define snow_inner_add_yblock_sse2_accum_8(ptr_offset, s_offset) \
             snow_inner_add_yblock_sse2_start_8("xmm2", "xmm6", ptr_offset, s_offset)\
             "paddusw %%xmm2, %%xmm1 \n\t"\
             "paddusw %%xmm6, %%xmm5 \n\t"

#define snow_inner_add_yblock_sse2_accum_16(ptr_offset, s_offset) \
             snow_inner_add_yblock_sse2_start_16("xmm2", "xmm6", ptr_offset, s_offset)\
             "paddusw %%xmm2, %%xmm1 \n\t"\
             "paddusw %%xmm6, %%xmm5 \n\t"

#define snow_inner_add_yblock_sse2_end_common1\
             "add $32, %%"REG_S" \n\t"\
             "add %%"REG_c", %0 \n\t"\
             "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\
             "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\
             "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\
             "add %%"REG_c", (%%"REG_a") \n\t"

#define snow_inner_add_yblock_sse2_end_common2\
             "jnz 1b \n\t"\
             :"+m"(dst8),"+m"(dst_array)\
             :\
             "rm"((long)(src_x<<2)),"m"(obmc),"a"(block),"m"((long)b_h),"m"((long)src_stride):\
             "%"REG_b"","%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d"");

#define snow_inner_add_yblock_sse2_end_8\
             "sal $1, %%"REG_c" \n\t"\
             "add $"PTR_SIZE"*2, %1 \n\t"\
             snow_inner_add_yblock_sse2_end_common1\
             "sar $1, %%"REG_c" \n\t"\
             "sub $2, %%"REG_b" \n\t"\
             snow_inner_add_yblock_sse2_end_common2
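
/* end_8 advances two lines per pass: REG_c (src_stride) is doubled around
 * end_common1 with sal/sar so the block pointers skip the row already
 * written, dst_array in %1 moves ahead two entries, and b_h in REG_b is
 * decremented by 2 instead of 1. */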

#define snow_inner_add_yblock_sse2_end_16\
             "add $"PTR_SIZE"*1, %1 \n\t"\
             snow_inner_add_yblock_sse2_end_common1\
             "dec %%"REG_b" \n\t"\
             snow_inner_add_yblock_sse2_end_common2

static void inner_add_yblock_bw_8_obmc_16_bh_even_sse2(uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h,
                      int src_x, int src_y, long src_stride, slice_buffer * sb, int add, uint8_t * dst8){
snow_inner_add_yblock_sse2_header
snow_inner_add_yblock_sse2_start_8("xmm1", "xmm5", "3", "0")
snow_inner_add_yblock_sse2_accum_8("2", "8")
snow_inner_add_yblock_sse2_accum_8("1", "128")
snow_inner_add_yblock_sse2_accum_8("0", "136")

             "mov %0, %%"REG_d" \n\t"
             "movdqa (%%"REG_D"), %%xmm0 \n\t"
             "movdqa %%xmm1, %%xmm2 \n\t"

             "punpckhwd %%xmm7, %%xmm1 \n\t"
             "punpcklwd %%xmm7, %%xmm2 \n\t"
             "paddd %%xmm2, %%xmm0 \n\t"
             "movdqa 16(%%"REG_D"), %%xmm2 \n\t"
             "paddd %%xmm1, %%xmm2 \n\t"
             "paddd %%xmm3, %%xmm0 \n\t"
             "paddd %%xmm3, %%xmm2 \n\t"

             "mov %1, %%"REG_D" \n\t"
             "mov "PTR_SIZE"(%%"REG_D"), %%"REG_D";\n\t"
             "add %2, %%"REG_D" \n\t"

             "movdqa (%%"REG_D"), %%xmm4 \n\t"
             "movdqa %%xmm5, %%xmm6 \n\t"
             "punpckhwd %%xmm7, %%xmm5 \n\t"
             "punpcklwd %%xmm7, %%xmm6 \n\t"
             "paddd %%xmm6, %%xmm4 \n\t"
             "movdqa 16(%%"REG_D"), %%xmm6 \n\t"
             "paddd %%xmm5, %%xmm6 \n\t"
             "paddd %%xmm3, %%xmm4 \n\t"
             "paddd %%xmm3, %%xmm6 \n\t"

             "psrad $8, %%xmm0 \n\t" /* FRAC_BITS. */
             "psrad $8, %%xmm2 \n\t" /* FRAC_BITS. */
             "packssdw %%xmm2, %%xmm0 \n\t"
             "packuswb %%xmm7, %%xmm0 \n\t"
             "movq %%xmm0, (%%"REG_d") \n\t"

             "psrad $8, %%xmm4 \n\t" /* FRAC_BITS. */
             "psrad $8, %%xmm6 \n\t" /* FRAC_BITS. */
             "packssdw %%xmm6, %%xmm4 \n\t"
             "packuswb %%xmm7, %%xmm4 \n\t"
             "movq %%xmm4, (%%"REG_d",%%"REG_c");\n\t"
snow_inner_add_yblock_sse2_end_8
}

static void inner_add_yblock_bw_16_obmc_32_sse2(uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h,
                      int src_x, int src_y, long src_stride, slice_buffer * sb, int add, uint8_t * dst8){
snow_inner_add_yblock_sse2_header
snow_inner_add_yblock_sse2_start_16("xmm1", "xmm5", "3", "0")
snow_inner_add_yblock_sse2_accum_16("2", "16")
snow_inner_add_yblock_sse2_accum_16("1", "512")
snow_inner_add_yblock_sse2_accum_16("0", "528")

             "mov %0, %%"REG_d" \n\t"
             "movdqa %%xmm1, %%xmm0 \n\t"
             "movdqa %%xmm5, %%xmm4 \n\t"
             "punpcklwd %%xmm7, %%xmm0 \n\t"
             "paddd (%%"REG_D"), %%xmm0 \n\t"
             "punpckhwd %%xmm7, %%xmm1 \n\t"
             "paddd 16(%%"REG_D"), %%xmm1 \n\t"
             "punpcklwd %%xmm7, %%xmm4 \n\t"
             "paddd 32(%%"REG_D"), %%xmm4 \n\t"
             "punpckhwd %%xmm7, %%xmm5 \n\t"
             "paddd 48(%%"REG_D"), %%xmm5 \n\t"
             "paddd %%xmm3, %%xmm0 \n\t"
             "paddd %%xmm3, %%xmm1 \n\t"
             "paddd %%xmm3, %%xmm4 \n\t"
             "paddd %%xmm3, %%xmm5 \n\t"
             "psrad $8, %%xmm0 \n\t" /* FRAC_BITS. */
             "psrad $8, %%xmm1 \n\t" /* FRAC_BITS. */
             "psrad $8, %%xmm4 \n\t" /* FRAC_BITS. */
             "psrad $8, %%xmm5 \n\t" /* FRAC_BITS. */

             "packssdw %%xmm1, %%xmm0 \n\t"
             "packssdw %%xmm5, %%xmm4 \n\t"
             "packuswb %%xmm4, %%xmm0 \n\t"

             "movdqu %%xmm0, (%%"REG_d") \n\t"

snow_inner_add_yblock_sse2_end_16
}
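
/* The MMX fallbacks below follow the same structure with 64-bit mm
 * registers; the 16-pixel-wide version therefore handles each row as two
 * 8-pixel halves. */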

#define snow_inner_add_yblock_mmx_header \
    DWTELEM * * dst_array = sb->line + src_y;\
    asm volatile(\
             "mov %6, %%"REG_c" \n\t"\
             "mov %5, %%"REG_b" \n\t"\
             "mov %3, %%"REG_S" \n\t"\
             "pxor %%mm7, %%mm7 \n\t" /* 0 */\
             "pcmpeqd %%mm3, %%mm3 \n\t"\
             "pslld $31, %%mm3 \n\t"\
             "psrld $24, %%mm3 \n\t" /* FRAC_BITS >> 1 */\
             "1: \n\t"\
             "mov %1, %%"REG_D" \n\t"\
             "mov (%%"REG_D"), %%"REG_D" \n\t"\
             "add %2, %%"REG_D" \n\t"

#define snow_inner_add_yblock_mmx_start(out_reg1, out_reg2, ptr_offset, s_offset, d_offset)\
             "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\
             "movd "d_offset"(%%"REG_d"), %%"out_reg1" \n\t"\
             "movd "d_offset"+4(%%"REG_d"), %%"out_reg2" \n\t"\
             "punpcklbw %%mm7, %%"out_reg1" \n\t"\
             "punpcklbw %%mm7, %%"out_reg2" \n\t"\
             "movd "s_offset"(%%"REG_S"), %%mm0 \n\t"\
             "movd "s_offset"+4(%%"REG_S"), %%mm4 \n\t"\
             "punpcklbw %%mm7, %%mm0 \n\t"\
             "punpcklbw %%mm7, %%mm4 \n\t"\
             "pmullw %%mm0, %%"out_reg1" \n\t"\
             "pmullw %%mm4, %%"out_reg2" \n\t"

#define snow_inner_add_yblock_mmx_accum(ptr_offset, s_offset, d_offset) \
             snow_inner_add_yblock_mmx_start("mm2", "mm6", ptr_offset, s_offset, d_offset)\
             "paddusw %%mm2, %%mm1 \n\t"\
             "paddusw %%mm6, %%mm5 \n\t"

#define snow_inner_add_yblock_mmx_mix(read_offset, write_offset)\
             "mov %0, %%"REG_d" \n\t"\
             "movq %%mm1, %%mm0 \n\t"\
             "movq %%mm5, %%mm4 \n\t"\
             "punpcklwd %%mm7, %%mm0 \n\t"\
             "paddd "read_offset"(%%"REG_D"), %%mm0 \n\t"\
             "punpckhwd %%mm7, %%mm1 \n\t"\
             "paddd "read_offset"+8(%%"REG_D"), %%mm1 \n\t"\
             "punpcklwd %%mm7, %%mm4 \n\t"\
             "paddd "read_offset"+16(%%"REG_D"), %%mm4 \n\t"\
             "punpckhwd %%mm7, %%mm5 \n\t"\
             "paddd "read_offset"+24(%%"REG_D"), %%mm5 \n\t"\
             "paddd %%mm3, %%mm0 \n\t"\
             "paddd %%mm3, %%mm1 \n\t"\
             "paddd %%mm3, %%mm4 \n\t"\
             "paddd %%mm3, %%mm5 \n\t"\
             "psrad $8, %%mm0 \n\t"\
             "psrad $8, %%mm1 \n\t"\
             "psrad $8, %%mm4 \n\t"\
             "psrad $8, %%mm5 \n\t"\
             "packssdw %%mm1, %%mm0 \n\t"\
             "packssdw %%mm5, %%mm4 \n\t"\
             "packuswb %%mm4, %%mm0 \n\t"\
             "movq %%mm0, "write_offset"(%%"REG_d") \n\t"

#define snow_inner_add_yblock_mmx_end(s_step)\
             "add $"s_step", %%"REG_S" \n\t"\
             "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\
             "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\
             "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\
             "add %%"REG_c", (%%"REG_a") \n\t"\
             "add $"PTR_SIZE"*1, %1 \n\t"\
             "add %%"REG_c", %0 \n\t"\
             "dec %%"REG_b" \n\t"\
             "jnz 1b \n\t"\
             :"+m"(dst8),"+m"(dst_array)\
             :\
             "rm"((long)(src_x<<2)),"m"(obmc),"a"(block),"m"((long)b_h),"m"((long)src_stride):\
             "%"REG_b"","%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d"");

static void inner_add_yblock_bw_8_obmc_16_mmx(uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h,
                      int src_x, int src_y, long src_stride, slice_buffer * sb, int add, uint8_t * dst8){
snow_inner_add_yblock_mmx_header
snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0")
snow_inner_add_yblock_mmx_accum("2", "8", "0")
snow_inner_add_yblock_mmx_accum("1", "128", "0")
snow_inner_add_yblock_mmx_accum("0", "136", "0")
snow_inner_add_yblock_mmx_mix("0", "0")
snow_inner_add_yblock_mmx_end("16")
}

static void inner_add_yblock_bw_16_obmc_32_mmx(uint8_t *obmc, const long obmc_stride, uint8_t * * block, int b_w, long b_h,
                      int src_x, int src_y, long src_stride, slice_buffer * sb, int add, uint8_t * dst8){
snow_inner_add_yblock_mmx_header
snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0")
snow_inner_add_yblock_mmx_accum("2", "16", "0")
snow_inner_add_yblock_mmx_accum("1", "512", "0")
snow_inner_add_yblock_mmx_accum("0", "528", "0")
snow_inner_add_yblock_mmx_mix("0", "0")

snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "8", "8")
snow_inner_add_yblock_mmx_accum("2", "24", "8")
snow_inner_add_yblock_mmx_accum("1", "520", "8")
snow_inner_add_yblock_mmx_accum("0", "536", "8")
snow_inner_add_yblock_mmx_mix("32", "8")
snow_inner_add_yblock_mmx_end("32")
}
void ff_snow_inner_add_yblock_sse2(uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                      int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    if (b_w == 16)
        inner_add_yblock_bw_16_obmc_32_sse2(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    else if (b_w == 8 && obmc_stride == 16) {
        if (!(b_h & 1))
            inner_add_yblock_bw_8_obmc_16_bh_even_sse2(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
        else
            inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    } else
        ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
}

void ff_snow_inner_add_yblock_mmx(uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h,
                      int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){
    if (b_w == 16)
        inner_add_yblock_bw_16_obmc_32_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    else if (b_w == 8 && obmc_stride == 16)
        inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
    else
        ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8);
}