/*
 * Copyright (c) 2007 Luca Barbato <lu_zero@gentoo.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
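
/**
 * @file
 * miscellaneous integer operations (AltiVec)
 */
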
#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif

#include "libavcodec/dsputil.h"

#include "dsputil_altivec.h"
#include "types_altivec.h"
static int ssd_int8_vs_int16_altivec(const int8_t *pix1, const int16_t *pix2,
                                     int size)
{
    int i, size16;
    vector signed char vpix1;
    vector signed short vpix2, vdiff, vpix1l, vpix1h;
    union {
        vector signed int vscore;
        int32_t score[4];
    } u;

    u.vscore = vec_splat_s32(0);

//XXX lazy way, fix it later
#define vec_unaligned_load(b) \
    vec_perm(vec_ld(0, b), vec_ld(15, b), vec_lvsl(0, b))
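
/* vec_ld() can only fetch from 16-byte-aligned addresses, so the macro above
 * assembles an unaligned vector from the two aligned vectors that straddle b:
 * vec_lvsl(0, b) returns the permute mask that lets vec_perm() shift the
 * concatenation of vec_ld(0, b) and vec_ld(15, b) so that the 16 bytes
 * starting at b end up in a single register. */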
    size16 = size >> 4;
    while (size16) {
        // score += (pix1[i]-pix2[i])*(pix1[i]-pix2[i]);
        // load pix1 and the first batch of pix2
        vpix1 = vec_unaligned_load(pix1);
        vpix2 = vec_unaligned_load(pix2);
        pix2 += 8;
        // unpack the int8 pixels into two int16 halves
        vpix1h = vec_unpackh(vpix1);
        vdiff  = vec_sub(vpix1h, vpix2);
        vpix1l = vec_unpackl(vpix1);
        // load another batch from pix2
        vpix2 = vec_unaligned_load(pix2);
        // accumulate the squared differences into four int32 partial sums
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        vdiff  = vec_sub(vpix1l, vpix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        pix1 += 16;
        pix2 += 8;
        size16--;
    }
    // fold the four partial sums into element 3 of the vector
    u.vscore = vec_sums(u.vscore, vec_splat_s32(0));

    size %= 16;
    for (i = 0; i < size; i++) {
        u.score[3] += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
    }
    return u.score[3];
}
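
/* AltiVec version of the scalarproduct_int16() DSP primitive: accumulates
 * v1[i] * v2[i] with the partial sums shifted right by `shift`. Eight
 * elements are handled per iteration, so `order` is assumed to be a multiple
 * of 8; v2 is loaded with vec_ld() and therefore assumed to be 16-byte
 * aligned, while v1 is realigned with vec_lvsl()/vec_perm(). */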
static int32_t scalarproduct_int16_altivec(int16_t *v1, int16_t *v2, int order, const int shift)
{
    int i;
    LOAD_ZERO;
    register vec_s16 vec1, *pv;
    register vec_s32 res = vec_splat_s32(0), t;
    register vec_u32 shifts;
    int32_t ires;

    shifts = zero_u32v;
    if (shift & 0x10) shifts = vec_add(shifts, vec_sl(vec_splat_u32(0x08), vec_splat_u32(0x1)));
    if (shift & 0x08) shifts = vec_add(shifts, vec_splat_u32(0x08));
    if (shift & 0x04) shifts = vec_add(shifts, vec_splat_u32(0x04));
    if (shift & 0x02) shifts = vec_add(shifts, vec_splat_u32(0x02));
    if (shift & 0x01) shifts = vec_add(shifts, vec_splat_u32(0x01));
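
    /* vec_splat_u32() only takes a 5-bit immediate, so a shift count of 16 or
     * more cannot be splatted directly; the chain above assembles the count
     * bit by bit, e.g. shift = 18 becomes (8 << 1) + 2 = 18 in every lane. */
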
    for (i = 0; i < order; i += 8) {
        pv   = (vec_s16 *)v1;
        vec1 = vec_perm(pv[0], pv[1], vec_lvsl(0, v1));
        t    = vec_msum(vec1, vec_ld(0, v2), zero_s32v);
        t    = vec_sr(t, shifts);
        res  = vec_sums(t, res);
        v1  += 8;
        v2  += 8;
    }
    res = vec_splat(res, 3);
    vec_ste(res, 0, &ires);

    return ires;
}
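
/* AltiVec version of scalarproduct_and_madd_int16(): returns the scalar
 * product of v1 and v2 while updating v1[i] += v3[i] * mul in the same pass.
 * Sixteen elements are consumed per iteration, so `order` is assumed to be a
 * multiple of 16; v1 is accessed with aligned vector loads/stores, and v2 and
 * v3 are realigned with a single permute vector derived from v2, i.e. they
 * are assumed to share the same misalignment. */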
static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul)
{
    LOAD_ZERO;
    vec_s16 *pv1 = (vec_s16 *)v1;
    vec_s16 *pv2 = (vec_s16 *)v2;
    vec_s16 *pv3 = (vec_s16 *)v3;
    register vec_s16 muls = {mul, mul, mul, mul, mul, mul, mul, mul};
    register vec_s16 t0, t1, i0, i1;
    register vec_s16 i2 = pv2[0], i3 = pv3[0];
    register vec_s32 res = zero_s32v;
    register vec_u8 align = vec_lvsl(0, v2);
    int32_t ires;

    order >>= 4;
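
    /* Each pass of the loop below consumes two 8-element vectors from every
     * array; the trailing aligned vector of v2 and v3 (i2, i3) is carried
     * over to the next pass so that each realignment needs only one fresh
     * aligned load. */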
    do {
        t0 = vec_perm(i2, pv2[1], align);
        i2 = pv2[2];
        t1 = vec_perm(pv2[1], i2, align);
        i0 = pv1[0];
        i1 = pv1[1];
        res = vec_msum(t0, i0, res);
        res = vec_msum(t1, i1, res);
        t0 = vec_perm(i3, pv3[1], align);
        i3 = pv3[2];
        t1 = vec_perm(pv3[1], i3, align);
        pv1[0] = vec_mladd(t0, muls, i0);
        pv1[1] = vec_mladd(t1, muls, i1);
        pv1 += 2;
        pv2 += 2;
        pv3 += 2;
    } while (--order);
    res = vec_splat(vec_sums(res, zero_s32v), 3);
    vec_ste(res, 0, &ires);
    return ires;
}
void int_init_altivec(DSPContext *c, AVCodecContext *avctx)
{
    c->ssd_int8_vs_int16            = ssd_int8_vs_int16_altivec;
    c->scalarproduct_int16          = scalarproduct_int16_altivec;
    c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_altivec;
}
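
/* As with the other *_init_altivec() functions, this only overrides the C
 * function pointers in the DSPContext; callers are expected to have checked
 * for AltiVec support before invoking it. */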