/*
 * AltiVec-enhanced yuv2yuvX
 *
 * Copyright (C) 2004 Romain Dolbeau <romain@dolbeau.org>
 * based on the equivalent C code in swscale.c
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libswscale/swscale.h"
#include "libswscale/swscale_internal.h"
#include "libavutil/cpu.h"
#include "yuv2rgb_altivec.h"

#define vzero vec_splat_s32(0)

static inline void
altivec_packIntArrayToCharArray(int *val, uint8_t *dest, int dstW)
{
    register int i;
    vector unsigned int altivec_vectorShiftInt19 =
        vec_add(vec_splat_u32(10), vec_splat_u32(9));
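    /* The vec_add() above assembles the shift count 19 from two immediates
     * because vec_splat_u32() only accepts a 5-bit signed literal (-16..15). */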
    if ((unsigned int)dest % 16) {
        /* badly aligned store, we force store alignment */
        /* and will handle load misalignment on val w/ vec_perm */
        vector unsigned char perm1;
        vector signed int v1;
        for (i = 0; (i < dstW) &&
             (((unsigned int)dest + i) % 16); i++) {
            int t = val[i] >> 19;
            dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
        }
        perm1 = vec_lvsl(i << 2, val);
        v1 = vec_ld(i << 2, val);
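        /* Classic AltiVec realignment: vec_lvsl() computes the permute mask
         * for val's current misalignment, and each vec_perm(vN, vN+1, perm1)
         * in the loop below stitches two aligned loads into the unaligned
         * quadword we actually want. */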
        for (; i < (dstW - 15); i += 16) {
            int offset = i << 2;
            vector signed int v2 = vec_ld(offset + 16, val);
            vector signed int v3 = vec_ld(offset + 32, val);
            vector signed int v4 = vec_ld(offset + 48, val);
            vector signed int v5 = vec_ld(offset + 64, val);
            vector signed int v12 = vec_perm(v1, v2, perm1);
            vector signed int v23 = vec_perm(v2, v3, perm1);
            vector signed int v34 = vec_perm(v3, v4, perm1);
            vector signed int v45 = vec_perm(v4, v5, perm1);

            vector signed int vA = vec_sra(v12, altivec_vectorShiftInt19);
            vector signed int vB = vec_sra(v23, altivec_vectorShiftInt19);
            vector signed int vC = vec_sra(v34, altivec_vectorShiftInt19);
            vector signed int vD = vec_sra(v45, altivec_vectorShiftInt19);
            vector unsigned short vs1 = vec_packsu(vA, vB);
            vector unsigned short vs2 = vec_packsu(vC, vD);
            vector unsigned char vf = vec_packsu(vs1, vs2);

            vec_st(vf, i, dest);
            v1 = v5; // reuse the last aligned load in the next iteration
        }
    } else { // dest is properly aligned, great
        for (i = 0; i < (dstW - 15); i += 16) {
            int offset = i << 2;
            vector signed int v1 = vec_ld(offset, val);
            vector signed int v2 = vec_ld(offset + 16, val);
            vector signed int v3 = vec_ld(offset + 32, val);
            vector signed int v4 = vec_ld(offset + 48, val);
            vector signed int v5 = vec_sra(v1, altivec_vectorShiftInt19);
            vector signed int v6 = vec_sra(v2, altivec_vectorShiftInt19);
            vector signed int v7 = vec_sra(v3, altivec_vectorShiftInt19);
            vector signed int v8 = vec_sra(v4, altivec_vectorShiftInt19);

            vector unsigned short vs1 = vec_packsu(v5, v6);
            vector unsigned short vs2 = vec_packsu(v7, v8);
            vector unsigned char vf = vec_packsu(vs1, vs2);

            vec_st(vf, i, dest);
        }
    }
    for (; i < dstW; i++) {
        int t = val[i] >> 19;
        dest[i] = (t < 0) ? 0 : ((t > 255) ? 255 : t);
    }
}
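
/* The two paths above exist because vec_st() always stores to a 16-byte
 * boundary (the low address bits are ignored): the scalar head loop consumes
 * pixels until dest is aligned, and vec_perm() then absorbs whatever
 * misalignment remains on val. */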

static void
yuv2yuvX_altivec_real(SwsContext *c,
                      const int16_t *lumFilter, const int16_t **lumSrc,
                      int lumFilterSize, const int16_t *chrFilter,
                      const int16_t **chrUSrc, const int16_t **chrVSrc,
                      int chrFilterSize, const int16_t **alpSrc,
                      uint8_t *dest, uint8_t *uDest,
                      uint8_t *vDest, uint8_t *aDest,
                      int dstW, int chrDstW)
{
    const vector signed int vini = { (1 << 18), (1 << 18), (1 << 18), (1 << 18) };
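    /* The accumulators carry 19 fractional bits; seeding them with 1 << 18
     * (i.e. 0.5) makes the final >> 19 in altivec_packIntArrayToCharArray()
     * round to nearest instead of truncating. */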
    register int i, j;
    {
        DECLARE_ALIGNED(16, int, val)[dstW];

        for (i = 0; i < (dstW - 7); i += 4) {
            vec_st(vini, i << 2, val);
        }
        for (; i < dstW; i++) {
            val[i] = (1 << 18);
        }

        for (j = 0; j < lumFilterSize; j++) {
            vector signed short l1, vLumFilter = vec_ld(j << 1, lumFilter);
            vector unsigned char perm, perm0 = vec_lvsl(j << 1, lumFilter);
            vLumFilter = vec_perm(vLumFilter, vLumFilter, perm0);
            vLumFilter = vec_splat(vLumFilter, 0); // lumFilter[j] is loaded 8 times in vLumFilter

            perm = vec_lvsl(0, lumSrc[j]);
            l1 = vec_ld(0, lumSrc[j]);

            for (i = 0; i < (dstW - 7); i += 8) {
                int offset = i << 2;
                vector signed short l2 = vec_ld((i << 1) + 16, lumSrc[j]);

                vector signed int v1 = vec_ld(offset, val);
                vector signed int v2 = vec_ld(offset + 16, val);

                vector signed short ls = vec_perm(l1, l2, perm); // lumSrc[j][i] ... lumSrc[j][i+7]

                vector signed int i1 = vec_mule(vLumFilter, ls);
                vector signed int i2 = vec_mulo(vLumFilter, ls);

                vector signed int vf1 = vec_mergeh(i1, i2);
                vector signed int vf2 = vec_mergel(i1, i2); // lumSrc[j][i] * lumFilter[j] ... lumSrc[j][i+7] * lumFilter[j]
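                /* vec_mule()/vec_mulo() multiply the even/odd 16-bit lanes
                 * into 32-bit products, so vec_mergeh()/vec_mergel() are
                 * needed to interleave the results back into source order. */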

                vector signed int vo1 = vec_add(v1, vf1);
                vector signed int vo2 = vec_add(v2, vf2);

                vec_st(vo1, offset, val);
                vec_st(vo2, offset + 16, val);

                l1 = l2; // carry the last load over to the next iteration
            }
            for (; i < dstW; i++) {
                val[i] += lumSrc[j][i] * lumFilter[j];
            }
        }
        altivec_packIntArrayToCharArray(val, dest, dstW);
    }
    if (uDest != 0) {
        DECLARE_ALIGNED(16, int, u)[chrDstW];
        DECLARE_ALIGNED(16, int, v)[chrDstW];

        for (i = 0; i < (chrDstW - 7); i += 4) {
            vec_st(vini, i << 2, u);
            vec_st(vini, i << 2, v);
        }
        for (; i < chrDstW; i++) {
            u[i] = (1 << 18);
            v[i] = (1 << 18);
        }

        for (j = 0; j < chrFilterSize; j++) {
            vector signed short l1, l1_V, vChrFilter = vec_ld(j << 1, chrFilter);
            vector unsigned char perm, perm0 = vec_lvsl(j << 1, chrFilter);
            vChrFilter = vec_perm(vChrFilter, vChrFilter, perm0);
            vChrFilter = vec_splat(vChrFilter, 0); // chrFilter[j] is loaded 8 times in vChrFilter

            perm = vec_lvsl(0, chrUSrc[j]);
            l1 = vec_ld(0, chrUSrc[j]);
            l1_V = vec_ld(0, chrVSrc[j]);

            for (i = 0; i < (chrDstW - 7); i += 8) {
                int offset = i << 2;
                vector signed short l2 = vec_ld((i << 1) + 16, chrUSrc[j]);
                vector signed short l2_V = vec_ld((i << 1) + 16, chrVSrc[j]);

                vector signed int v1 = vec_ld(offset, u);
                vector signed int v2 = vec_ld(offset + 16, u);
                vector signed int v1_V = vec_ld(offset, v);
                vector signed int v2_V = vec_ld(offset + 16, v);

                vector signed short ls = vec_perm(l1, l2, perm); // chrUSrc[j][i] ... chrUSrc[j][i+7]
                vector signed short ls_V = vec_perm(l1_V, l2_V, perm); // chrVSrc[j][i] ... chrVSrc[j][i+7]

                vector signed int i1 = vec_mule(vChrFilter, ls);
                vector signed int i2 = vec_mulo(vChrFilter, ls);
                vector signed int i1_V = vec_mule(vChrFilter, ls_V);
                vector signed int i2_V = vec_mulo(vChrFilter, ls_V);

                vector signed int vf1 = vec_mergeh(i1, i2);
                vector signed int vf2 = vec_mergel(i1, i2); // chrUSrc[j][i] * chrFilter[j] ... chrUSrc[j][i+7] * chrFilter[j]
                vector signed int vf1_V = vec_mergeh(i1_V, i2_V);
                vector signed int vf2_V = vec_mergel(i1_V, i2_V); // chrVSrc[j][i] * chrFilter[j] ... chrVSrc[j][i+7] * chrFilter[j]

                vector signed int vo1 = vec_add(v1, vf1);
                vector signed int vo2 = vec_add(v2, vf2);
                vector signed int vo1_V = vec_add(v1_V, vf1_V);
                vector signed int vo2_V = vec_add(v2_V, vf2_V);

                vec_st(vo1, offset, u);
                vec_st(vo2, offset + 16, u);
                vec_st(vo1_V, offset, v);
                vec_st(vo2_V, offset + 16, v);

                l1 = l2;
                l1_V = l2_V;
            }
            for (; i < chrDstW; i++) {
                u[i] += chrUSrc[j][i] * chrFilter[j];
                v[i] += chrVSrc[j][i] * chrFilter[j];
            }
        }
        altivec_packIntArrayToCharArray(u, uDest, chrDstW);
        altivec_packIntArrayToCharArray(v, vDest, chrDstW);
    }
}
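
/* Horizontal scaler: for each output pixel i this computes
 *
 *     dst[i] = MIN((sum over j of src[filterPos[i] + j] * filter[i * filterSize + j]) >> 7,
 *                  (1 << 15) - 1)
 *
 * i.e. 8-bit input convolved with 16-bit coefficients, clipped to the 15-bit
 * intermediate range consumed by the vertical pass. The switch below provides
 * a vector path per common filterSize; sizes not divisible by 4 take the
 * scalar fallback. */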
static void hScale_altivec_real(int16_t *dst, int dstW,
                                const uint8_t *src, int srcW,
                                int xInc, const int16_t *filter,
                                const int16_t *filterPos, int filterSize)
{
    register int i;
    DECLARE_ALIGNED(16, int, tempo)[4];

    if (filterSize % 4) {
        for (i = 0; i < dstW; i++) {
            register int j;
            register int srcPos = filterPos[i];
            register int val = 0;
            for (j = 0; j < filterSize; j++) {
                val += ((int)src[srcPos + j]) * filter[filterSize * i + j];
            }
            dst[i] = FFMIN(val >> 7, (1 << 15) - 1);
        }
    } else
        switch (filterSize) {
        case 4:
        {
            for (i = 0; i < dstW; i++) {
                register int srcPos = filterPos[i];

                vector unsigned char src_v0 = vec_ld(srcPos, src);
                vector unsigned char src_v1, src_vF;
                vector signed short src_v, filter_v;
                vector signed int val_vEven, val_s;
                if ((((int)src + srcPos) % 16) > 12) {
                    src_v1 = vec_ld(srcPos + 16, src);
                }
                src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));

                src_v = // vec_unpackh sign-extends...
                    (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
                // now put our elements in the even slots
                src_v = vec_mergeh(src_v, (vector signed short)vzero);

                filter_v = vec_ld(i << 3, filter);
                // The 3 above is 2 (filterSize == 4) + 1 (sizeof(short) == 2).

                // The neat trick: We only care for half the elements,
                // high or low depending on (i << 3) % 16 (it's 0 or 8 here),
                // and we're going to use vec_mule, so we choose
                // carefully how to "unpack" the elements into the even slots.
                if ((i << 3) % 16)
                    filter_v = vec_mergel(filter_v, (vector signed short)vzero);
                else
                    filter_v = vec_mergeh(filter_v, (vector signed short)vzero);
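                /* Worked example: with filterSize == 4, two output pixels
                 * share one 16-byte quadword of coefficients, so (i << 3) % 16
                 * alternates between 0 and 8: even i takes the high half
                 * (mergeh), odd i the low half (mergel). */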

                val_vEven = vec_mule(src_v, filter_v);
                val_s = vec_sums(val_vEven, vzero);
                vec_st(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
        }
        break;

        case 8:
        {
            for (i = 0; i < dstW; i++) {
                register int srcPos = filterPos[i];

                vector unsigned char src_v0 = vec_ld(srcPos, src);
                vector unsigned char src_v1, src_vF;
                vector signed short src_v, filter_v;
                vector signed int val_v, val_s;
                if ((((int)src + srcPos) % 16) > 8) {
                    src_v1 = vec_ld(srcPos + 16, src);
                }
                src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));

                src_v = // vec_unpackh sign-extends...
                    (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
                filter_v = vec_ld(i << 4, filter);
                // the 4 above is 3 (filterSize == 8) + 1 (sizeof(short) == 2)
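
                /* vec_msums() multiplies all eight 16-bit pairs and adds
                 * adjacent product pairs (saturating) into four 32-bit
                 * partial sums, so one instruction covers all eight taps;
                 * vec_sums() then folds those into a single total. */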
                val_v = vec_msums(src_v, filter_v, (vector signed int)vzero);
                val_s = vec_sums(val_v, vzero);
                vec_st(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
        }
        break;

        case 16:
        {
            for (i = 0; i < dstW; i++) {
                register int srcPos = filterPos[i];

                vector unsigned char src_v0 = vec_ld(srcPos, src);
                vector unsigned char src_v1 = vec_ld(srcPos + 16, src);
                vector unsigned char src_vF = vec_perm(src_v0, src_v1, vec_lvsl(srcPos, src));

                vector signed short src_vA = // vec_unpackh sign-extends...
                    (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
                vector signed short src_vB = // vec_unpackh sign-extends...
                    (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));

                vector signed short filter_v0 = vec_ld(i << 5, filter);
                vector signed short filter_v1 = vec_ld((i << 5) + 16, filter);
                // the 5 above are 4 (filterSize == 16) + 1 (sizeof(short) == 2)

                vector signed int val_acc = vec_msums(src_vA, filter_v0, (vector signed int)vzero);
                vector signed int val_v = vec_msums(src_vB, filter_v1, val_acc);

                vector signed int val_s = vec_sums(val_v, vzero);

                vec_st(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
        }
        break;

        default:
        {
            for (i = 0; i < dstW; i++) {
                register int j;
                register int srcPos = filterPos[i];

                vector signed int val_s, val_v = (vector signed int)vzero;
                vector signed short filter_v0R = vec_ld(i * 2 * filterSize, filter);
                vector unsigned char permF = vec_lvsl((i * 2 * filterSize), filter);

                vector unsigned char src_v0 = vec_ld(srcPos, src);
                vector unsigned char permS = vec_lvsl(srcPos, src);

                for (j = 0; j < filterSize - 15; j += 16) {
                    vector unsigned char src_v1 = vec_ld(srcPos + j + 16, src);
                    vector unsigned char src_vF = vec_perm(src_v0, src_v1, permS);

                    vector signed short src_vA = // vec_unpackh sign-extends...
                        (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
                    vector signed short src_vB = // vec_unpackh sign-extends...
                        (vector signed short)(vec_mergel((vector unsigned char)vzero, src_vF));

                    vector signed short filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
                    vector signed short filter_v2R = vec_ld((i * 2 * filterSize) + (j * 2) + 32, filter);
                    vector signed short filter_v0 = vec_perm(filter_v0R, filter_v1R, permF);
                    vector signed short filter_v1 = vec_perm(filter_v1R, filter_v2R, permF);

                    vector signed int val_acc = vec_msums(src_vA, filter_v0, val_v);
                    val_v = vec_msums(src_vB, filter_v1, val_acc);

                    filter_v0R = filter_v2R;
                    src_v0 = src_v1;
                }
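
                /* The loop above recycles its trailing loads: src_v1 and
                 * filter_v2R are carried into the next iteration as
                 * src_v0/filter_v0R, so nothing is loaded twice. Up to 8
                 * leftover taps are handled below. */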

                if (j < filterSize - 7) {
                    // loading src_v0 is useless, it's already done above
                    //vector unsigned char src_v0 = vec_ld(srcPos + j, src);
                    vector unsigned char src_v1, src_vF;
                    vector signed short src_v, filter_v1R, filter_v;
                    if ((((int)src + srcPos) % 16) > 8) {
                        src_v1 = vec_ld(srcPos + j + 16, src);
                    }
                    src_vF = vec_perm(src_v0, src_v1, permS);

                    src_v = // vec_unpackh sign-extends...
                        (vector signed short)(vec_mergeh((vector unsigned char)vzero, src_vF));
                    // loading filter_v0R is useless, it's already done above
                    //vector signed short filter_v0R = vec_ld((i * 2 * filterSize) + j, filter);
                    filter_v1R = vec_ld((i * 2 * filterSize) + (j * 2) + 16, filter);
                    filter_v = vec_perm(filter_v0R, filter_v1R, permF);

                    val_v = vec_msums(src_v, filter_v, val_v);
                }

                val_s = vec_sums(val_v, vzero);

                vec_st(val_s, 0, tempo);
                dst[i] = FFMIN(tempo[3] >> 7, (1 << 15) - 1);
            }
        }
        break;
        }
}

void ff_sws_init_swScale_altivec(SwsContext *c)
{
    enum PixelFormat dstFormat = c->dstFormat;

    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
        return;

    c->hScale = hScale_altivec_real;
    if (!is16BPS(dstFormat) && !is9_OR_10BPS(dstFormat)) {
        c->yuv2yuvX = yuv2yuvX_altivec_real;
    }

    /* The following list of supported dstFormat values should
     * match what's found in the body of ff_yuv2packedX_altivec() */
    if (!(c->flags & (SWS_BITEXACT | SWS_FULL_CHR_H_INT)) && !c->alpPixBuf &&
        (c->dstFormat == PIX_FMT_ABGR  || c->dstFormat == PIX_FMT_BGRA  ||
         c->dstFormat == PIX_FMT_BGR24 || c->dstFormat == PIX_FMT_RGB24 ||
         c->dstFormat == PIX_FMT_RGBA  || c->dstFormat == PIX_FMT_ARGB)) {
        c->yuv2packedX = ff_yuv2packedX_altivec;
    }
}