-//FIXME remove the usage of scratch buffers.
-static void yuv2yuvX_altivec_real(SwsContext *c,
-                                  const int16_t *lumFilter, const int16_t **lumSrc,
-                                  int lumFilterSize, const int16_t *chrFilter,
-                                  const int16_t **chrUSrc, const int16_t **chrVSrc,
-                                  int chrFilterSize, const int16_t **alpSrc,
-                                  uint8_t *dest, uint8_t *uDest,
-                                  uint8_t *vDest, uint8_t *aDest,
-                                  int dstW, int chrDstW)
+static void yuv2planeX_altivec(const int16_t *filter, int filterSize,
+                               const int16_t **src, uint8_t *dest, int dstW,
+                               const uint8_t *dither, int offset)
 {
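Note on the interface change: the old entry point scaled luma, both chroma planes, and alpha in one call, which is why it needed five filter/source parameters and four destinations. The new yuv2planeX signature scales a single plane per call; the caller supplies that plane's filter and source slice plus an 8-byte dither table and a phase offset. A minimal caller sketch, assuming a flat dither table and the V-plane offset of 3 used by the generic swscale path (both assumptions, not part of this hunk):

    /* Hypothetical caller: one yuv2planeX call per plane. */
    static const uint8_t dither8[8] = { 0 };  /* flat dither, for illustration */

    c->yuv2planeX(lumFilter, lumFilterSize, lumSrc,  dest,  dstW,    dither8, 0);
    c->yuv2planeX(chrFilter, chrFilterSize, chrUSrc, uDest, chrDstW, dither8, 0);
    c->yuv2planeX(chrFilter, chrFilterSize, chrVSrc, vDest, chrDstW, dither8, 3);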
-    const vector signed int vini = { (1 << 18), (1 << 18), (1 << 18), (1 << 18) };
     register int i, j;

     DECLARE_ALIGNED(16, int, val)[dstW];

-    for (i = 0; i < (dstW - 7); i += 4) {
-        vec_st(vini, i << 2, val);
-    }
-    for (; i < dstW; i++) {
-        val[i] = (1 << 18);
-    }
+    for (i = 0; i < dstW; i++)
+        val[i] = dither[(i + offset) & 7] << 12;

-    for (j = 0; j < lumFilterSize; j++) {
-        vector signed short l1, vLumFilter = vec_ld(j << 1, lumFilter);
-        vector unsigned char perm, perm0 = vec_lvsl(j << 1, lumFilter);
+    for (j = 0; j < filterSize; j++) {
+        vector signed short l1, vLumFilter = vec_ld(j << 1, filter);
+        vector unsigned char perm, perm0 = vec_lvsl(j << 1, filter);
         vLumFilter = vec_perm(vLumFilter, vLumFilter, perm0);
         vLumFilter = vec_splat(vLumFilter, 0); // lumFilter[j] is loaded 8 times in vLumFilter

-        perm = vec_lvsl(0, lumSrc[j]);
-        l1   = vec_ld(0, lumSrc[j]);
+        perm = vec_lvsl(0, src[j]);
+        l1   = vec_ld(0, src[j]);

         for (i = 0; i < (dstW - 7); i += 8) {
             int offset = i << 2;
-            vector signed short l2 = vec_ld((i << 1) + 16, lumSrc[j]);
+            vector signed short l2 = vec_ld((i << 1) + 16, src[j]);

             vector signed int v1 = vec_ld(offset, val);
             vector signed int v2 = vec_ld(offset + 16, val);
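The perm/l1/l2 dance above is the classic VMX unaligned-load idiom: vec_ld can only load from a 16-byte-aligned address, so two aligned loads bracketing the target span are stitched together with a vec_lvsl-derived permute mask. A self-contained sketch of the idiom (the helper name is mine, not from the patch):

    #include <altivec.h>

    /* Load 8 int16_t from a possibly unaligned address. vec_ld rounds the
     * effective address down to 16 bytes; vec_lvsl builds the shuffle mask
     * selecting the 16 bytes that actually start at p. */
    static inline vector signed short load8_s16_unaligned(const short *p)
    {
        vector unsigned char perm = vec_lvsl(0, p);
        vector signed short  hi   = vec_ld(0, p);   /* block containing p */
        vector signed short  lo   = vec_ld(15, p);  /* next block, if p is unaligned */
        return vec_perm(hi, lo, perm);
    }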
[...]
         for ( ; i < dstW; i++) {
-            val[i] += lumSrc[j][i] * lumFilter[j];
+            val[i] += src[j][i] * filter[j];
         }
     }
     altivec_packIntArrayToCharArray(val, dest, dstW);
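For reference, the routine computes the same thing as the generic C yuv2planeX: a dither-biased fixed-point multiply-accumulate, downshifted and clipped to 8 bits on output. A scalar model, assuming the 19-bit downshift and saturation of the C reference path are what altivec_packIntArrayToCharArray implements (av_clip_uint8 is from libavutil/common.h):

    static void yuv2planeX_model(const int16_t *filter, int filterSize,
                                 const int16_t **src, uint8_t *dest, int dstW,
                                 const uint8_t *dither, int offset)
    {
        int i, j;
        for (i = 0; i < dstW; i++) {
            int val = dither[(i + offset) & 7] << 12;  /* dither doubles as rounding bias */
            for (j = 0; j < filterSize; j++)
                val += src[j][i] * filter[j];          /* 16x16 -> 32-bit MAC */
            dest[i] = av_clip_uint8(val >> 19);        /* pack with saturation */
        }
    }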
[...]
-    DECLARE_ALIGNED(16, int, u)[chrDstW];
-    DECLARE_ALIGNED(16, int, v)[chrDstW];
-
-    for (i = 0; i < (chrDstW - 7); i += 4) {
-        vec_st(vini, i << 2, u);
-        vec_st(vini, i << 2, v);
-    }
-    for (; i < chrDstW; i++) {
-        u[i] = (1 << 18);
-        v[i] = (1 << 18);
-    }
-
-    for (j = 0; j < chrFilterSize; j++) {
-        vector signed short l1, l1_V, vChrFilter = vec_ld(j << 1, chrFilter);
-        vector unsigned char perm, perm0 = vec_lvsl(j << 1, chrFilter);
-        vChrFilter = vec_perm(vChrFilter, vChrFilter, perm0);
-        vChrFilter = vec_splat(vChrFilter, 0); // chrFilter[j] is loaded 8 times in vChrFilter
-
-        perm = vec_lvsl(0, chrUSrc[j]);
-        l1   = vec_ld(0, chrUSrc[j]);
-        l1_V = vec_ld(0, chrVSrc[j]);
-
-        for (i = 0; i < (chrDstW - 7); i += 8) {
-            int offset = i << 2;
-            vector signed short l2   = vec_ld((i << 1) + 16, chrUSrc[j]);
-            vector signed short l2_V = vec_ld((i << 1) + 16, chrVSrc[j]);
-
-            vector signed int v1   = vec_ld(offset, u);
-            vector signed int v2   = vec_ld(offset + 16, u);
-            vector signed int v1_V = vec_ld(offset, v);
-            vector signed int v2_V = vec_ld(offset + 16, v);
-
-            vector signed short ls   = vec_perm(l1, l2, perm);     // chrUSrc[j][i] ... chrUSrc[j][i+7]
-            vector signed short ls_V = vec_perm(l1_V, l2_V, perm); // chrVSrc[j][i] ... chrVSrc[j][i+7]
-
-            vector signed int i1   = vec_mule(vChrFilter, ls);
-            vector signed int i2   = vec_mulo(vChrFilter, ls);
-            vector signed int i1_V = vec_mule(vChrFilter, ls_V);
-            vector signed int i2_V = vec_mulo(vChrFilter, ls_V);
-
-            vector signed int vf1   = vec_mergeh(i1, i2);
-            vector signed int vf2   = vec_mergel(i1, i2);     // chrUSrc[j][i] * chrFilter[j] ... chrUSrc[j][i+7] * chrFilter[j]
-            vector signed int vf1_V = vec_mergeh(i1_V, i2_V);
-            vector signed int vf2_V = vec_mergel(i1_V, i2_V); // chrVSrc[j][i] * chrFilter[j] ... chrVSrc[j][i+7] * chrFilter[j]
-
-            vector signed int vo1   = vec_add(v1, vf1);
-            vector signed int vo2   = vec_add(v2, vf2);
-            vector signed int vo1_V = vec_add(v1_V, vf1_V);
-            vector signed int vo2_V = vec_add(v2_V, vf2_V);
-
-            vec_st(vo1, offset, u);
-            vec_st(vo2, offset + 16, u);
-            vec_st(vo1_V, offset, v);
-            vec_st(vo2_V, offset + 16, v);
-        }
[...]
-        for ( ; i < chrDstW; i++) {
-            u[i] += chrUSrc[j][i] * chrFilter[j];
-            v[i] += chrVSrc[j][i] * chrFilter[j];
-        }
-    }
-    altivec_packIntArrayToCharArray(u, uDest, chrDstW);
-    altivec_packIntArrayToCharArray(v, vDest, chrDstW);
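The removed chroma loop (like the elided middle of the luma loop) relies on the standard VMX widening multiply: vec_mule and vec_mulo return the even- and odd-indexed 32-bit products of two short vectors, and vec_mergeh/vec_mergel interleave them back into element order. Isolated, the pattern looks like this (the helper name is mine):

    #include <altivec.h>

    /* Multiply two vectors of 8 int16_t into 8 int32_t products, in order. */
    static inline void mul_s16_to_s32(vector signed short a, vector signed short b,
                                      vector signed int *lo4, vector signed int *hi4)
    {
        vector signed int even = vec_mule(a, b);  /* products of elements 0,2,4,6 */
        vector signed int odd  = vec_mulo(a, b);  /* products of elements 1,3,5,7 */
        *lo4 = vec_mergeh(even, odd);             /* products 0..3 in element order */
        *hi4 = vec_mergel(even, odd);             /* products 4..7 in element order */
    }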
[...]
-static void hScale_altivec_real(int16_t *dst, int dstW,
-                                const uint8_t *src, int srcW,
-                                int xInc, const int16_t *filter,
+static void hScale_altivec_real(SwsContext *c, int16_t *dst, int dstW,
+                                const uint8_t *src, const int16_t *filter,
                                 const int16_t *filterPos, int filterSize)
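The hScale change is purely a signature update for the new scaler API: SwsContext is now passed in, and the unused srcW/xInc parameters are dropped; the body still convolves filterSize taps per output pixel. As a reference for what the AltiVec version implements, a scalar model along the lines of the generic C path (the >>7 downshift and 15-bit clip match the 8-bit-to-15-bit C scaler; treat them as an assumption here, FFMIN is from libavutil/common.h):

    static void hScale_model(SwsContext *c, int16_t *dst, int dstW,
                             const uint8_t *src, const int16_t *filter,
                             const int16_t *filterPos, int filterSize)
    {
        int i;
        for (i = 0; i < dstW; i++) {
            int j, srcPos = filterPos[i], val = 0;
            for (j = 0; j < filterSize; j++)
                val += src[srcPos + j] * filter[filterSize * i + j];
            dst[i] = FFMIN(val >> 7, (1 << 15) - 1);  /* clip to 15-bit intermediate */
        }
    }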
[...]
     if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
         return;

-    c->hScale = hScale_altivec_real;
-    if (!is16BPS(dstFormat) && !is9_OR_10BPS(dstFormat)) {
-        c->yuv2yuvX = yuv2yuvX_altivec_real;
+    if (c->srcBpc == 8 && c->dstBpc <= 10) {
+        c->hyScale = c->hcScale = hScale_altivec_real;
+    }
+    if (!is16BPS(dstFormat) && !is9_OR_10BPS(dstFormat) &&
+        dstFormat != PIX_FMT_NV12 && dstFormat != PIX_FMT_NV21 &&
[...]
+        c->yuv2planeX = yuv2planeX_altivec;
     }

     /* The following list of supported dstFormat values should
      * match what's found in the body of ff_yuv2packedX_altivec() */
-    if (!(c->flags & (SWS_BITEXACT | SWS_FULL_CHR_H_INT)) && !c->alpPixBuf &&
-        (c->dstFormat == PIX_FMT_ABGR  || c->dstFormat == PIX_FMT_BGRA  ||
-         c->dstFormat == PIX_FMT_BGR24 || c->dstFormat == PIX_FMT_RGB24 ||
-         c->dstFormat == PIX_FMT_RGBA  || c->dstFormat == PIX_FMT_ARGB)) {
-        c->yuv2packedX = ff_yuv2packedX_altivec;
+    if (!(c->flags & (SWS_BITEXACT | SWS_FULL_CHR_H_INT)) && !c->alpPixBuf) {
+        switch (c->dstFormat) {
+        case PIX_FMT_ABGR:  c->yuv2packedX = ff_yuv2abgr_X_altivec;  break;
+        case PIX_FMT_BGRA:  c->yuv2packedX = ff_yuv2bgra_X_altivec;  break;
+        case PIX_FMT_ARGB:  c->yuv2packedX = ff_yuv2argb_X_altivec;  break;
+        case PIX_FMT_RGBA:  c->yuv2packedX = ff_yuv2rgba_X_altivec;  break;
+        case PIX_FMT_BGR24: c->yuv2packedX = ff_yuv2bgr24_X_altivec; break;
+        case PIX_FMT_RGB24: c->yuv2packedX = ff_yuv2rgb24_X_altivec; break;
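Rather than one ff_yuv2packedX_altivec that re-inspects c->dstFormat on every output row, the new init resolves the packed-RGB writer per format once. The per-format symbols are typically thin wrappers stamped out from the old generic function; a hedged sketch of that pattern (the macro and exact wrapper bodies are assumptions, not shown in this hunk):

    #define YUV2PACKEDX_WRAPPER(suffix, pixfmt)                               \
    void ff_yuv2 ## suffix ## _X_altivec(SwsContext *c,                       \
            const int16_t *lumFilter, const int16_t **lumSrc,                 \
            int lumFilterSize, const int16_t *chrFilter,                      \
            const int16_t **chrUSrc, const int16_t **chrVSrc,                 \
            int chrFilterSize, const int16_t **alpSrc,                        \
            uint8_t *dest, int dstW, int dstY)                                \
    {                                                                         \
        ff_yuv2packedX_altivec(c, lumFilter, lumSrc, lumFilterSize,           \
                               chrFilter, chrUSrc, chrVSrc, chrFilterSize,    \
                               alpSrc, dest, dstW, dstY, pixfmt);             \
    }

    YUV2PACKEDX_WRAPPER(abgr,  PIX_FMT_ABGR)
    YUV2PACKEDX_WRAPPER(rgb24, PIX_FMT_RGB24)
    /* ... one wrapper per supported format ... */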