author    Mans Rullgard <mans@mansr.com>    2012-04-27 02:46:14 +0100
committer Mans Rullgard <mans@mansr.com>    2012-05-01 00:21:30 +0100
commit    4c387c7070e24fa1d1c6397492fef9e46f9bf7aa (patch)
tree      dc06c0e7f772dd5c66da105db71520a46a40f9b0 /libavcodec/ppc/int_altivec.c
parent    c75eca9d37d692dcbd2c80af0f37e33b10ac333b (diff)
ppc: dsputil: do unaligned block accesses correctly
To load unaligned vector data in the usual way, explicit vec_ld() should be used rather than dereferencing a pointer to a vector type. When the VSX extension is enabled, gcc may compile vector pointer dereferences using the VSX lxvw4x instruction instead of the lvx instruction typically used with Altivec/VMX. As the behaviour of these instructions with unaligned addresses differs, it is important that only lvx is used here.

Signed-off-by: Mans Rullgard <mans@mansr.com>
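For context, the idiom the patch switches to is the standard Altivec unaligned-load sequence: vec_ld() compiles to lvx, which ignores the low four address bits, so two aligned loads straddling the data are merged with vec_perm() using the shuffle mask that vec_lvsl() derives from the address. A minimal standalone sketch of that sequence (the helper name is illustrative, not part of this patch):

#include <stdint.h>
#include <altivec.h>

/* Load eight int16 values from a possibly unaligned address using only
 * lvx-class loads. vec_ld() rounds the address down to a 16-byte
 * boundary, so the two loads fetch the aligned quadwords covering the
 * first and the last byte of the data; vec_perm() then shifts the
 * wanted 16 bytes into place. The second load uses offset 15 so that
 * no byte past the data is touched when the pointer is already
 * 16-byte aligned. */
static vector signed short load_unaligned_s16(const int16_t *p)
{
    vector signed short  msq  = vec_ld(0, p);   /* quadword holding p[0]      */
    vector signed short  lsq  = vec_ld(15, p);  /* quadword holding byte 15   */
    vector unsigned char perm = vec_lvsl(0, p); /* permute mask from low bits */
    return vec_perm(msq, lsq, perm);
}

The loop in the diff below amortizes the same trick across consecutive blocks: the vec_ld(32, ...) result of one iteration is carried over as the leading quadword of the next, so each aligned 16-byte block of v2 and v3 is loaded only once.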
Diffstat (limited to 'libavcodec/ppc/int_altivec.c')
-rw-r--r--  libavcodec/ppc/int_altivec.c  24
1 file changed, 12 insertions, 12 deletions
diff --git a/libavcodec/ppc/int_altivec.c b/libavcodec/ppc/int_altivec.c
index f81b478449..b94b636dec 100644
--- a/libavcodec/ppc/int_altivec.c
+++ b/libavcodec/ppc/int_altivec.c
@@ -114,31 +114,31 @@ static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1, const int16_t *
{
LOAD_ZERO;
vec_s16 *pv1 = (vec_s16*)v1;
- vec_s16 *pv2 = (vec_s16*)v2;
- vec_s16 *pv3 = (vec_s16*)v3;
register vec_s16 muls = {mul,mul,mul,mul,mul,mul,mul,mul};
- register vec_s16 t0, t1, i0, i1;
- register vec_s16 i2 = pv2[0], i3 = pv3[0];
+ register vec_s16 t0, t1, i0, i1, i4;
+ register vec_s16 i2 = vec_ld(0, v2), i3 = vec_ld(0, v3);
register vec_s32 res = zero_s32v;
register vec_u8 align = vec_lvsl(0, v2);
int32_t ires;
order >>= 4;
do {
- t0 = vec_perm(i2, pv2[1], align);
- i2 = pv2[2];
- t1 = vec_perm(pv2[1], i2, align);
+ i1 = vec_ld(16, v2);
+ t0 = vec_perm(i2, i1, align);
+ i2 = vec_ld(32, v2);
+ t1 = vec_perm(i1, i2, align);
i0 = pv1[0];
i1 = pv1[1];
res = vec_msum(t0, i0, res);
res = vec_msum(t1, i1, res);
- t0 = vec_perm(i3, pv3[1], align);
- i3 = pv3[2];
- t1 = vec_perm(pv3[1], i3, align);
+ i4 = vec_ld(16, v3);
+ t0 = vec_perm(i3, i4, align);
+ i3 = vec_ld(32, v3);
+ t1 = vec_perm(i4, i3, align);
pv1[0] = vec_mladd(t0, muls, i0);
pv1[1] = vec_mladd(t1, muls, i1);
pv1 += 2;
- pv2 += 2;
- pv3 += 2;
+ v2 += 16;
+ v3 += 16;
} while(--order);
res = vec_splat(vec_sums(res, zero_s32v), 3);
vec_ste(res, 0, &ires);