Diffstat (limited to 'libavcodec/ppc')
-rw-r--r--  libavcodec/ppc/Makefile                        |   10
-rw-r--r--  libavcodec/ppc/asm.S                           |   16
-rw-r--r--  libavcodec/ppc/audiodsp.c                      |   14
-rw-r--r--  libavcodec/ppc/blockdsp.c                      |    8
-rw-r--r--  libavcodec/ppc/fdct.h                          |    8
-rw-r--r--  libavcodec/ppc/fdctdsp.c                       |   57
-rw-r--r--  libavcodec/ppc/fft_altivec.S                   |   32
-rw-r--r--  libavcodec/ppc/fft_init.c                      |  146
-rw-r--r--  libavcodec/ppc/fft_vsx.c                       |  227
-rw-r--r--  libavcodec/ppc/fft_vsx.h                       |  830
-rw-r--r--  libavcodec/ppc/fmtconvert_altivec.c            |   16
-rw-r--r--  libavcodec/ppc/h264chroma_init.c               |   16
-rw-r--r--  libavcodec/ppc/h264chroma_template.c           |  231
-rw-r--r--  libavcodec/ppc/h264dsp.c                       |  131
-rw-r--r--  libavcodec/ppc/h264qpel.c                      |  125
-rw-r--r--  libavcodec/ppc/h264qpel_template.c             |  383
-rw-r--r--  libavcodec/ppc/hpeldsp_altivec.c               |  237
-rw-r--r--  libavcodec/ppc/hpeldsp_altivec.h               |    8
-rw-r--r--  libavcodec/ppc/idctdsp.c                       |   57
-rw-r--r--  libavcodec/ppc/lossless_audiodsp_altivec.c (renamed from libavcodec/ppc/apedsp_altivec.c)    |   51
-rw-r--r--  libavcodec/ppc/lossless_videodsp_altivec.c (renamed from libavcodec/ppc/huffyuvdsp_altivec.c) |   22
-rw-r--r--  libavcodec/ppc/mathops.h                       |    8
-rw-r--r--  libavcodec/ppc/mdct_init.c                     |  154
-rw-r--r--  libavcodec/ppc/me_cmp.c                        |  194
-rw-r--r--  libavcodec/ppc/mpegaudiodsp_altivec.c          |   16
-rw-r--r--  libavcodec/ppc/mpegvideo_altivec.c             |   19
-rw-r--r--  libavcodec/ppc/mpegvideodsp.c                  |   20
-rw-r--r--  libavcodec/ppc/mpegvideoencdsp.c               |   76
-rw-r--r--  libavcodec/ppc/pixblockdsp.c                   |  128
-rw-r--r--  libavcodec/ppc/svq1enc_altivec.c               |   16
-rw-r--r--  libavcodec/ppc/vc1dsp_altivec.c                |   29
-rw-r--r--  libavcodec/ppc/videodsp_ppc.c                  |    8
-rw-r--r--  libavcodec/ppc/vorbisdsp_altivec.c             |   16
-rw-r--r--  libavcodec/ppc/vp3dsp_altivec.c                |   34
-rw-r--r--  libavcodec/ppc/vp8dsp_altivec.c                |  103
35 files changed, 2287 insertions, 1159 deletions
diff --git a/libavcodec/ppc/Makefile b/libavcodec/ppc/Makefile
index 9234e77287..ae774a07f2 100644
--- a/libavcodec/ppc/Makefile
+++ b/libavcodec/ppc/Makefile
@@ -2,16 +2,16 @@
OBJS-$(CONFIG_AUDIODSP) += ppc/audiodsp.o
OBJS-$(CONFIG_BLOCKDSP) += ppc/blockdsp.o
OBJS-$(CONFIG_FFT) += ppc/fft_init.o \
- ppc/fft_altivec.o
+ ppc/fft_altivec.o \
+ ppc/fft_vsx.o
OBJS-$(CONFIG_FDCTDSP) += ppc/fdctdsp.o
OBJS-$(CONFIG_FMTCONVERT) += ppc/fmtconvert_altivec.o
OBJS-$(CONFIG_H264CHROMA) += ppc/h264chroma_init.o
-OBJS-$(CONFIG_H264DSP) += ppc/h264dsp.o
+OBJS-$(CONFIG_H264DSP) += ppc/h264dsp.o ppc/hpeldsp_altivec.o
OBJS-$(CONFIG_H264QPEL) += ppc/h264qpel.o
OBJS-$(CONFIG_HPELDSP) += ppc/hpeldsp_altivec.o
-OBJS-$(CONFIG_HUFFYUVDSP) += ppc/huffyuvdsp_altivec.o
OBJS-$(CONFIG_IDCTDSP) += ppc/idctdsp.o
-OBJS-$(CONFIG_MDCT) += ppc/mdct_init.o
+OBJS-$(CONFIG_LLVIDDSP) += ppc/lossless_videodsp_altivec.o
OBJS-$(CONFIG_ME_CMP) += ppc/me_cmp.o
OBJS-$(CONFIG_MPEGAUDIODSP) += ppc/mpegaudiodsp_altivec.o
OBJS-$(CONFIG_MPEGVIDEO) += ppc/mpegvideo_altivec.o \
@@ -24,7 +24,7 @@ OBJS-$(CONFIG_VP3DSP) += ppc/vp3dsp_altivec.o
OBJS-$(CONFIG_VP8DSP) += ppc/vp8dsp_altivec.o
# decoders/encoders
-OBJS-$(CONFIG_APE_DECODER) += ppc/apedsp_altivec.o
+OBJS-$(CONFIG_LLAUDDSP) += ppc/lossless_audiodsp_altivec.o
OBJS-$(CONFIG_SVQ1_ENCODER) += ppc/svq1enc_altivec.o
OBJS-$(CONFIG_VORBIS_DECODER) += ppc/vorbisdsp_altivec.o
OBJS-$(CONFIG_VP7_DECODER) += ppc/vp8dsp_altivec.o
diff --git a/libavcodec/ppc/asm.S b/libavcodec/ppc/asm.S
index 141dee9b78..6222b8be06 100644
--- a/libavcodec/ppc/asm.S
+++ b/libavcodec/ppc/asm.S
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2009 Loren Merritt
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -24,6 +24,12 @@
#define JOIN(a, b) GLUE(a, b)
#define X(s) JOIN(EXTERN_ASM, s)
+#if __APPLE__
+#define R(n) r ## n
+#else
+#define R(n) n
+#endif
+
#if ARCH_PPC64
#define PTR .quad
@@ -53,7 +59,7 @@ L(\name):
.endm
.macro movrel rd, sym, gp
- ld \rd, \sym@got(r2)
+ ld \rd, \sym@got(R(2))
.endm
.macro get_got rd
diff --git a/libavcodec/ppc/audiodsp.c b/libavcodec/ppc/audiodsp.c
index 52a1e75cc1..4ee3da42d2 100644
--- a/libavcodec/ppc/audiodsp.c
+++ b/libavcodec/ppc/audiodsp.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Luca Barbato <lu_zero@gentoo.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -35,7 +35,7 @@
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/audiodsp.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
static int32_t scalarproduct_int16_altivec(const int16_t *v1, const int16_t *v2,
int order)
@@ -59,7 +59,7 @@ static int32_t scalarproduct_int16_altivec(const int16_t *v1, const int16_t *v2,
return ires;
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
#if HAVE_VSX
@@ -88,7 +88,7 @@ static int32_t scalarproduct_int16_vsx(const int16_t *v1, const int16_t *v2, int
av_cold void ff_audiodsp_init_ppc(AudioDSPContext *c)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
diff --git a/libavcodec/ppc/blockdsp.c b/libavcodec/ppc/blockdsp.c
index d2c1d0e766..45c492ab3b 100644
--- a/libavcodec/ppc/blockdsp.c
+++ b/libavcodec/ppc/blockdsp.c
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/fdct.h b/libavcodec/ppc/fdct.h
index 74710354ef..437f815258 100644
--- a/libavcodec/ppc/fdct.h
+++ b/libavcodec/ppc/fdct.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/fdctdsp.c b/libavcodec/ppc/fdctdsp.c
index cadca80469..6659046f98 100644
--- a/libavcodec/ppc/fdctdsp.c
+++ b/libavcodec/ppc/fdctdsp.c
@@ -1,20 +1,20 @@
/*
* Copyright (C) 2003 James Klicman <james@klicman.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -29,7 +29,7 @@
#include "libavcodec/fdctdsp.h"
#include "fdct.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
#define vs16(v) ((vector signed short) (v))
#define vs32(v) ((vector signed int) (v))
@@ -37,29 +37,28 @@
#define vu16(v) ((vector unsigned short) (v))
#define vu32(v) ((vector unsigned int) (v))
-#define C1 0.98078525066375732421875000 /* cos(1 * PI / 16) */
-#define C2 0.92387950420379638671875000 /* cos(2 * PI / 16) */
-#define C3 0.83146959543228149414062500 /* cos(3 * PI / 16) */
-#define C4 0.70710676908493041992187500 /* cos(4 * PI / 16) */
-#define C5 0.55557024478912353515625000 /* cos(5 * PI / 16) */
-#define C6 0.38268342614173889160156250 /* cos(6 * PI / 16) */
-#define C7 0.19509032368659973144531250 /* cos(7 * PI / 16) */
-#define SQRT_2 1.41421353816986083984375000 /* sqrt(2) */
+#define C1 0.98078528040323044912618224 /* cos(1 * PI / 16) */
+#define C2 0.92387953251128675612818319 /* cos(2 * PI / 16) */
+#define C3 0.83146961230254523707878838 /* cos(3 * PI / 16) */
+#define C4 0.70710678118654752440084436 /* cos(4 * PI / 16) */
+#define C5 0.55557023301960222474283081 /* cos(5 * PI / 16) */
+#define C6 0.38268343236508977172845998 /* cos(6 * PI / 16) */
+#define C7 0.19509032201612826784828487 /* cos(7 * PI / 16) */
#define W0 -(2 * C2)
#define W1 (2 * C6)
-#define W2 (SQRT_2 * C6)
-#define W3 (SQRT_2 * C3)
-#define W4 (SQRT_2 * (-C1 + C3 + C5 - C7))
-#define W5 (SQRT_2 * (C1 + C3 - C5 + C7))
-#define W6 (SQRT_2 * (C1 + C3 + C5 - C7))
-#define W7 (SQRT_2 * (C1 + C3 - C5 - C7))
-#define W8 (SQRT_2 * (C7 - C3))
-#define W9 (SQRT_2 * (-C1 - C3))
-#define WA (SQRT_2 * (-C3 - C5))
-#define WB (SQRT_2 * (C5 - C3))
-
-static vector float fdctconsts[3] = {
+#define W2 (M_SQRT2 * C6)
+#define W3 (M_SQRT2 * C3)
+#define W4 (M_SQRT2 * (-C1 + C3 + C5 - C7))
+#define W5 (M_SQRT2 * (C1 + C3 - C5 + C7))
+#define W6 (M_SQRT2 * (C1 + C3 + C5 - C7))
+#define W7 (M_SQRT2 * (C1 + C3 - C5 - C7))
+#define W8 (M_SQRT2 * (C7 - C3))
+#define W9 (M_SQRT2 * (-C1 - C3))
+#define WA (M_SQRT2 * (-C3 - C5))
+#define WB (M_SQRT2 * (C5 - C3))
+
+static const vector float fdctconsts[3] = {
{ W0, W1, W2, W3 },
{ W4, W5, W6, W7 },
{ W8, W9, WA, WB }
@@ -196,7 +195,7 @@ static vector float fdctconsts[3] = {
void ff_fdct_altivec(int16_t *block)
{
vector signed short *bp;
- vector float *cp = fdctconsts;
+ const vector float *cp = fdctconsts;
vector float b00, b10, b20, b30, b40, b50, b60, b70;
vector float b01, b11, b21, b31, b41, b51, b61, b71;
vector float mzero, cnst, cnsts0, cnsts1, cnsts2;
@@ -460,12 +459,12 @@ void ff_fdct_altivec(int16_t *block)
/* }}} */
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
av_cold void ff_fdctdsp_init_ppc(FDCTDSPContext *c, AVCodecContext *avctx,
unsigned high_bit_depth)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
@@ -475,5 +474,5 @@ av_cold void ff_fdctdsp_init_ppc(FDCTDSPContext *c, AVCodecContext *avctx,
c->fdct = ff_fdct_altivec;
}
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
diff --git a/libavcodec/ppc/fft_altivec.S b/libavcodec/ppc/fft_altivec.S
index cb7c8716cd..8cd68d6a90 100644
--- a/libavcodec/ppc/fft_altivec.S
+++ b/libavcodec/ppc/fft_altivec.S
@@ -5,20 +5,20 @@
* This algorithm (though not any of the implementation details) is
* based on libdjbfft by D. J. Bernstein.
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -354,14 +354,18 @@ fft_data:
.macro fft_calc interleave
extfunc ff_fft_calc\interleave\()_altivec
mflr r0
- stp r0, 2*PS(r1)
- stpu r1, -(160+16*PS)(r1)
+ stp r0, 2*PS(R(1))
+ stpu r1, -(160+16*PS)(R(1))
get_got r11
addi r6, r1, 16*PS
stvm r6, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
mfvrsave r0
- stw r0, 15*PS(r1)
+ stw r0, 15*PS(R(1))
+#if __APPLE__
li r6, 0xfffffffc
+#else
+ li r6, -4
+#endif
mtvrsave r6
movrel r6, fft_data, r11
@@ -372,7 +376,7 @@ extfunc ff_fft_calc\interleave\()_altivec
movrel r12, X(ff_cos_tabs), r11
movrel r6, fft_dispatch_tab\interleave\()_altivec, r11
- lwz r3, 0(r3)
+ lwz r3, 0(R(3))
subi r3, r3, 2
slwi r3, r3, 2+ARCH_PPC64
lpx r3, r3, r6
@@ -382,10 +386,10 @@ extfunc ff_fft_calc\interleave\()_altivec
addi r6, r1, 16*PS
lvm r6, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29
- lwz r6, 15*PS(r1)
+ lwz r6, 15*PS(R(1))
mtvrsave r6
- lp r1, 0(r1)
- lp r0, 2*PS(r1)
+ lp r1, 0(R(1))
+ lp r0, 2*PS(R(1))
mtlr r0
blr
.endm
@@ -393,15 +397,15 @@ extfunc ff_fft_calc\interleave\()_altivec
.macro DECL_FFT suffix, bits, n, n2, n4
fft\n\suffix\()_altivec:
mflr r0
- stp r0,PS*(\bits-3)(r1)
+ stp r0,PS*(\bits-3)(R(1))
bl fft\n2\()_altivec
addi2 r3,\n*4
bl fft\n4\()_altivec
addi2 r3,\n*2
bl fft\n4\()_altivec
addi2 r3,\n*-6
- lp r0,PS*(\bits-3)(r1)
- lp r4,\bits*PS(r12)
+ lp r0,PS*(\bits-3)(R(1))
+ lp r4,\bits*PS(R(12))
mtlr r0
li r5,\n/16
b fft_pass\suffix\()_altivec
diff --git a/libavcodec/ppc/fft_init.c b/libavcodec/ppc/fft_init.c
index 56eafb91be..cbeaf98952 100644
--- a/libavcodec/ppc/fft_init.c
+++ b/libavcodec/ppc/fft_init.c
@@ -1,36 +1,168 @@
/*
- * This file is part of Libav.
+ * FFT/IFFT transforms
+ * AltiVec-enabled
+ * Copyright (c) 2009 Loren Merritt
*
- * Libav is free software; you can redistribute it and/or
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
-
#include "libavutil/cpu.h"
#include "libavutil/ppc/cpu.h"
-
+#include "libavutil/ppc/types_altivec.h"
+#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/fft.h"
+/**
+ * Do a complex FFT with the parameters defined in ff_fft_init().
+ * The input data must be permuted before with s->revtab table.
+ * No 1.0 / sqrt(n) normalization is done.
+ * AltiVec-enabled:
+ * This code assumes that the 'z' pointer is 16 bytes-aligned.
+ * It also assumes all FFTComplex are 8 bytes-aligned pairs of floats.
+ */
+
+#if HAVE_VSX
+#include "fft_vsx.h"
+#else
+void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z);
void ff_fft_calc_interleave_altivec(FFTContext *s, FFTComplex *z);
+#endif
+
+#if HAVE_GNU_AS && HAVE_ALTIVEC
+static void imdct_half_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
+{
+ int j, k;
+ int n = 1 << s->mdct_bits;
+ int n4 = n >> 2;
+ int n8 = n >> 3;
+ int n32 = n >> 5;
+ const uint16_t *revtabj = s->revtab;
+ const uint16_t *revtabk = s->revtab+n4;
+ const vec_f *tcos = (const vec_f*)(s->tcos+n8);
+ const vec_f *tsin = (const vec_f*)(s->tsin+n8);
+ const vec_f *pin = (const vec_f*)(input+n4);
+ vec_f *pout = (vec_f*)(output+n4);
+
+ /* pre rotation */
+ k = n32-1;
+ do {
+ vec_f cos,sin,cos0,sin0,cos1,sin1,re,im,r0,i0,r1,i1,a,b,c,d;
+#define CMULA(p,o0,o1,o2,o3)\
+ a = pin[ k*2+p]; /* { z[k].re, z[k].im, z[k+1].re, z[k+1].im } */\
+ b = pin[-k*2-p-1]; /* { z[-k-2].re, z[-k-2].im, z[-k-1].re, z[-k-1].im } */\
+ re = vec_perm(a, b, vcprm(0,2,s0,s2)); /* { z[k].re, z[k+1].re, z[-k-2].re, z[-k-1].re } */\
+ im = vec_perm(a, b, vcprm(s3,s1,3,1)); /* { z[-k-1].im, z[-k-2].im, z[k+1].im, z[k].im } */\
+ cos = vec_perm(cos0, cos1, vcprm(o0,o1,s##o2,s##o3)); /* { cos[k], cos[k+1], cos[-k-2], cos[-k-1] } */\
+ sin = vec_perm(sin0, sin1, vcprm(o0,o1,s##o2,s##o3));\
+ r##p = im*cos - re*sin;\
+ i##p = re*cos + im*sin;
+#define STORE2(v,dst)\
+ j = dst;\
+ vec_ste(v, 0, output+j*2);\
+ vec_ste(v, 4, output+j*2);
+#define STORE8(p)\
+ a = vec_perm(r##p, i##p, vcprm(0,s0,0,s0));\
+ b = vec_perm(r##p, i##p, vcprm(1,s1,1,s1));\
+ c = vec_perm(r##p, i##p, vcprm(2,s2,2,s2));\
+ d = vec_perm(r##p, i##p, vcprm(3,s3,3,s3));\
+ STORE2(a, revtabk[ p*2-4]);\
+ STORE2(b, revtabk[ p*2-3]);\
+ STORE2(c, revtabj[-p*2+2]);\
+ STORE2(d, revtabj[-p*2+3]);
+
+ cos0 = tcos[k];
+ sin0 = tsin[k];
+ cos1 = tcos[-k-1];
+ sin1 = tsin[-k-1];
+ CMULA(0, 0,1,2,3);
+ CMULA(1, 2,3,0,1);
+ STORE8(0);
+ STORE8(1);
+ revtabj += 4;
+ revtabk -= 4;
+ k--;
+ } while(k >= 0);
+
+#if HAVE_VSX
+ ff_fft_calc_vsx(s, (FFTComplex*)output);
+#else
+ ff_fft_calc_altivec(s, (FFTComplex*)output);
+#endif
+
+ /* post rotation + reordering */
+ j = -n32;
+ k = n32-1;
+ do {
+ vec_f cos,sin,re,im,a,b,c,d;
+#define CMULB(d0,d1,o)\
+ re = pout[o*2];\
+ im = pout[o*2+1];\
+ cos = tcos[o];\
+ sin = tsin[o];\
+ d0 = im*sin - re*cos;\
+ d1 = re*sin + im*cos;
+
+ CMULB(a,b,j);
+ CMULB(c,d,k);
+ pout[2*j] = vec_perm(a, d, vcprm(0,s3,1,s2));
+ pout[2*j+1] = vec_perm(a, d, vcprm(2,s1,3,s0));
+ pout[2*k] = vec_perm(c, b, vcprm(0,s3,1,s2));
+ pout[2*k+1] = vec_perm(c, b, vcprm(2,s1,3,s0));
+ j++;
+ k--;
+ } while(k >= 0);
+}
+
+static void imdct_calc_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
+{
+ int k;
+ int n = 1 << s->mdct_bits;
+ int n4 = n >> 2;
+ int n16 = n >> 4;
+ vec_u32 sign = {1U<<31,1U<<31,1U<<31,1U<<31};
+ vec_u32 *p0 = (vec_u32*)(output+n4);
+ vec_u32 *p1 = (vec_u32*)(output+n4*3);
+
+ imdct_half_altivec(s, output + n4, input);
+
+ for (k = 0; k < n16; k++) {
+ vec_u32 a = p0[k] ^ sign;
+ vec_u32 b = p1[-k-1];
+ p0[-k-1] = vec_perm(a, a, vcprm(3,2,1,0));
+ p1[k] = vec_perm(b, b, vcprm(3,2,1,0));
+ }
+}
+#endif /* HAVE_GNU_AS && HAVE_ALTIVEC && HAVE_BIGENDIAN */
av_cold void ff_fft_init_ppc(FFTContext *s)
{
-#if HAVE_GNU_AS && HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_GNU_AS && HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
+#if HAVE_VSX
+ s->fft_calc = ff_fft_calc_interleave_vsx;
+#else
s->fft_calc = ff_fft_calc_interleave_altivec;
+#endif
+ if (s->mdct_bits >= 5) {
+ s->imdct_calc = imdct_calc_altivec;
+ s->imdct_half = imdct_half_altivec;
+ }
#endif /* HAVE_GNU_AS && HAVE_ALTIVEC && HAVE_BIGENDIAN */
}
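
[Note, not part of the patch] The doc comment added in fft_init.c above states the contract of the fft_calc pointer that ff_fft_init_ppc() fills in: the input must already be permuted with s->revtab and no 1/sqrt(n) normalization is applied. A minimal caller sketch, assuming the internal FFTContext API from libavcodec/fft.h (ff_fft_init(), the fft_permute/fft_calc function pointers, ff_fft_end()); run_fft_256() and its buffer handling are illustrative only:

    #include "libavcodec/fft.h"

    /* Hypothetical caller of the AltiVec/VSX fft_calc selected above.
     * buf must hold 256 FFTComplex values and be 16-byte aligned. */
    static int run_fft_256(FFTComplex *buf)
    {
        FFTContext ctx;

        if (ff_fft_init(&ctx, 8, 0) < 0)   /* 2^8 = 256-point forward FFT */
            return -1;
        ctx.fft_permute(&ctx, buf);        /* reorder input via ctx.revtab */
        ctx.fft_calc(&ctx, buf);           /* dispatches to the VSX or AltiVec path */
        ff_fft_end(&ctx);
        return 0;
    }
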
diff --git a/libavcodec/ppc/fft_vsx.c b/libavcodec/ppc/fft_vsx.c
new file mode 100644
index 0000000000..e92975f74e
--- /dev/null
+++ b/libavcodec/ppc/fft_vsx.c
@@ -0,0 +1,227 @@
+/*
+ * FFT transform, optimized with VSX built-in functions
+ * Copyright (c) 2014 Rong Yan
+ *
+ * This algorithm (though not any of the implementation details) is
+ * based on libdjbfft by D. J. Bernstein.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#include "config.h"
+#include "libavutil/cpu.h"
+#include "libavutil/ppc/types_altivec.h"
+#include "libavutil/ppc/util_altivec.h"
+#include "libavcodec/fft.h"
+#include "libavcodec/fft-internal.h"
+#include "fft_vsx.h"
+
+#if HAVE_VSX
+
+static void fft32_vsx_interleave(FFTComplex *z)
+{
+ fft16_vsx_interleave(z);
+ fft8_vsx_interleave(z+16);
+ fft8_vsx_interleave(z+24);
+ pass_vsx_interleave(z,ff_cos_32,4);
+}
+
+static void fft64_vsx_interleave(FFTComplex *z)
+{
+ fft32_vsx_interleave(z);
+ fft16_vsx_interleave(z+32);
+ fft16_vsx_interleave(z+48);
+ pass_vsx_interleave(z,ff_cos_64, 8);
+}
+static void fft128_vsx_interleave(FFTComplex *z)
+{
+ fft64_vsx_interleave(z);
+ fft32_vsx_interleave(z+64);
+ fft32_vsx_interleave(z+96);
+ pass_vsx_interleave(z,ff_cos_128,16);
+}
+static void fft256_vsx_interleave(FFTComplex *z)
+{
+ fft128_vsx_interleave(z);
+ fft64_vsx_interleave(z+128);
+ fft64_vsx_interleave(z+192);
+ pass_vsx_interleave(z,ff_cos_256,32);
+}
+static void fft512_vsx_interleave(FFTComplex *z)
+{
+ fft256_vsx_interleave(z);
+ fft128_vsx_interleave(z+256);
+ fft128_vsx_interleave(z+384);
+ pass_vsx_interleave(z,ff_cos_512,64);
+}
+static void fft1024_vsx_interleave(FFTComplex *z)
+{
+ fft512_vsx_interleave(z);
+ fft256_vsx_interleave(z+512);
+ fft256_vsx_interleave(z+768);
+ pass_vsx_interleave(z,ff_cos_1024,128);
+
+}
+static void fft2048_vsx_interleave(FFTComplex *z)
+{
+ fft1024_vsx_interleave(z);
+ fft512_vsx_interleave(z+1024);
+ fft512_vsx_interleave(z+1536);
+ pass_vsx_interleave(z,ff_cos_2048,256);
+}
+static void fft4096_vsx_interleave(FFTComplex *z)
+{
+ fft2048_vsx_interleave(z);
+ fft1024_vsx_interleave(z+2048);
+ fft1024_vsx_interleave(z+3072);
+ pass_vsx_interleave(z,ff_cos_4096, 512);
+}
+static void fft8192_vsx_interleave(FFTComplex *z)
+{
+ fft4096_vsx_interleave(z);
+ fft2048_vsx_interleave(z+4096);
+ fft2048_vsx_interleave(z+6144);
+ pass_vsx_interleave(z,ff_cos_8192,1024);
+}
+static void fft16384_vsx_interleave(FFTComplex *z)
+{
+ fft8192_vsx_interleave(z);
+ fft4096_vsx_interleave(z+8192);
+ fft4096_vsx_interleave(z+12288);
+ pass_vsx_interleave(z,ff_cos_16384,2048);
+}
+static void fft32768_vsx_interleave(FFTComplex *z)
+{
+ fft16384_vsx_interleave(z);
+ fft8192_vsx_interleave(z+16384);
+ fft8192_vsx_interleave(z+24576);
+ pass_vsx_interleave(z,ff_cos_32768,4096);
+}
+static void fft65536_vsx_interleave(FFTComplex *z)
+{
+ fft32768_vsx_interleave(z);
+ fft16384_vsx_interleave(z+32768);
+ fft16384_vsx_interleave(z+49152);
+ pass_vsx_interleave(z,ff_cos_65536,8192);
+}
+
+static void fft32_vsx(FFTComplex *z)
+{
+ fft16_vsx(z);
+ fft8_vsx(z+16);
+ fft8_vsx(z+24);
+ pass_vsx(z,ff_cos_32,4);
+}
+
+static void fft64_vsx(FFTComplex *z)
+{
+ fft32_vsx(z);
+ fft16_vsx(z+32);
+ fft16_vsx(z+48);
+ pass_vsx(z,ff_cos_64, 8);
+}
+static void fft128_vsx(FFTComplex *z)
+{
+ fft64_vsx(z);
+ fft32_vsx(z+64);
+ fft32_vsx(z+96);
+ pass_vsx(z,ff_cos_128,16);
+}
+static void fft256_vsx(FFTComplex *z)
+{
+ fft128_vsx(z);
+ fft64_vsx(z+128);
+ fft64_vsx(z+192);
+ pass_vsx(z,ff_cos_256,32);
+}
+static void fft512_vsx(FFTComplex *z)
+{
+ fft256_vsx(z);
+ fft128_vsx(z+256);
+ fft128_vsx(z+384);
+ pass_vsx(z,ff_cos_512,64);
+}
+static void fft1024_vsx(FFTComplex *z)
+{
+ fft512_vsx(z);
+ fft256_vsx(z+512);
+ fft256_vsx(z+768);
+ pass_vsx(z,ff_cos_1024,128);
+
+}
+static void fft2048_vsx(FFTComplex *z)
+{
+ fft1024_vsx(z);
+ fft512_vsx(z+1024);
+ fft512_vsx(z+1536);
+ pass_vsx(z,ff_cos_2048,256);
+}
+static void fft4096_vsx(FFTComplex *z)
+{
+ fft2048_vsx(z);
+ fft1024_vsx(z+2048);
+ fft1024_vsx(z+3072);
+ pass_vsx(z,ff_cos_4096, 512);
+}
+static void fft8192_vsx(FFTComplex *z)
+{
+ fft4096_vsx(z);
+ fft2048_vsx(z+4096);
+ fft2048_vsx(z+6144);
+ pass_vsx(z,ff_cos_8192,1024);
+}
+static void fft16384_vsx(FFTComplex *z)
+{
+ fft8192_vsx(z);
+ fft4096_vsx(z+8192);
+ fft4096_vsx(z+12288);
+ pass_vsx(z,ff_cos_16384,2048);
+}
+static void fft32768_vsx(FFTComplex *z)
+{
+ fft16384_vsx(z);
+ fft8192_vsx(z+16384);
+ fft8192_vsx(z+24576);
+ pass_vsx(z,ff_cos_32768,4096);
+}
+static void fft65536_vsx(FFTComplex *z)
+{
+ fft32768_vsx(z);
+ fft16384_vsx(z+32768);
+ fft16384_vsx(z+49152);
+ pass_vsx(z,ff_cos_65536,8192);
+}
+
+static void (* const fft_dispatch_vsx[])(FFTComplex*) = {
+ fft4_vsx, fft8_vsx, fft16_vsx, fft32_vsx, fft64_vsx, fft128_vsx, fft256_vsx, fft512_vsx, fft1024_vsx,
+ fft2048_vsx, fft4096_vsx, fft8192_vsx, fft16384_vsx, fft32768_vsx, fft65536_vsx,
+};
+static void (* const fft_dispatch_vsx_interleave[])(FFTComplex*) = {
+ fft4_vsx_interleave, fft8_vsx_interleave, fft16_vsx_interleave, fft32_vsx_interleave, fft64_vsx_interleave,
+ fft128_vsx_interleave, fft256_vsx_interleave, fft512_vsx_interleave, fft1024_vsx_interleave,
+ fft2048_vsx_interleave, fft4096_vsx_interleave, fft8192_vsx_interleave, fft16384_vsx_interleave, fft32768_vsx_interleave, fft65536_vsx_interleave,
+};
+void ff_fft_calc_interleave_vsx(FFTContext *s, FFTComplex *z)
+{
+ fft_dispatch_vsx_interleave[s->nbits-2](z);
+}
+void ff_fft_calc_vsx(FFTContext *s, FFTComplex *z)
+{
+ fft_dispatch_vsx[s->nbits-2](z);
+}
+#endif /* HAVE_VSX */
diff --git a/libavcodec/ppc/fft_vsx.h b/libavcodec/ppc/fft_vsx.h
new file mode 100644
index 0000000000..a85475d160
--- /dev/null
+++ b/libavcodec/ppc/fft_vsx.h
@@ -0,0 +1,830 @@
+#ifndef AVCODEC_PPC_FFT_VSX_H
+#define AVCODEC_PPC_FFT_VSX_H
+/*
+ * FFT transform, optimized with VSX built-in functions
+ * Copyright (c) 2014 Rong Yan Copyright (c) 2009 Loren Merritt
+ *
+ * This algorithm (though not any of the implementation details) is
+ * based on libdjbfft by D. J. Bernstein, and fft_altivec_s.S.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#include "config.h"
+#include "libavutil/cpu.h"
+#include "libavutil/ppc/types_altivec.h"
+#include "libavutil/ppc/util_altivec.h"
+#include "libavcodec/fft.h"
+#include "libavcodec/fft-internal.h"
+
+#if HAVE_VSX
+
+void ff_fft_calc_interleave_vsx(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_vsx(FFTContext *s, FFTComplex *z);
+
+
+#define byte_2complex (2*sizeof(FFTComplex))
+#define byte_4complex (4*sizeof(FFTComplex))
+#define byte_6complex (6*sizeof(FFTComplex))
+#define byte_8complex (8*sizeof(FFTComplex))
+#define byte_10complex (10*sizeof(FFTComplex))
+#define byte_12complex (12*sizeof(FFTComplex))
+#define byte_14complex (14*sizeof(FFTComplex))
+
+inline static void pass_vsx_interleave(FFTComplex *z, const FFTSample *wre, unsigned int n)
+{
+ int o1 = n<<1;
+ int o2 = n<<2;
+ int o3 = o1+o2;
+ int i1, i2, i3;
+ FFTSample* out = (FFTSample*)z;
+ const FFTSample *wim = wre+o1;
+ vec_f vz0, vzo1, vzo2, vzo3;
+ vec_f x0, x1, x2, x3;
+ vec_f x4, x5, x6, x7;
+ vec_f x8, x9, x10, x11;
+ vec_f x12, x13, x14, x15;
+ vec_f x16, x17, x18, x19;
+ vec_f x20, x21, x22, x23;
+ vec_f vz0plus1, vzo1plus1, vzo2plus1, vzo3plus1;
+ vec_f y0, y1, y2, y3;
+ vec_f y4, y5, y8, y9;
+ vec_f y10, y13, y14, y15;
+ vec_f y16, y17, y18, y19;
+ vec_f y20, y21, y22, y23;
+ vec_f wr1, wi1, wr0, wi0;
+ vec_f wr2, wi2, wr3, wi3;
+ vec_f xmulwi0, xmulwi1, ymulwi2, ymulwi3;
+
+ n = n-2;
+ i1 = o1*sizeof(FFTComplex);
+ i2 = o2*sizeof(FFTComplex);
+ i3 = o3*sizeof(FFTComplex);
+ vzo2 = vec_ld(i2, &(out[0])); // zo2.r zo2.i z(o2+1).r z(o2+1).i
+ vzo2plus1 = vec_ld(i2+16, &(out[0]));
+ vzo3 = vec_ld(i3, &(out[0])); // zo3.r zo3.i z(o3+1).r z(o3+1).i
+ vzo3plus1 = vec_ld(i3+16, &(out[0]));
+ vz0 = vec_ld(0, &(out[0])); // z0.r z0.i z1.r z1.i
+ vz0plus1 = vec_ld(16, &(out[0]));
+ vzo1 = vec_ld(i1, &(out[0])); // zo1.r zo1.i z(o1+1).r z(o1+1).i
+ vzo1plus1 = vec_ld(i1+16, &(out[0]));
+
+ x0 = vec_add(vzo2, vzo3);
+ x1 = vec_sub(vzo2, vzo3);
+ y0 = vec_add(vzo2plus1, vzo3plus1);
+ y1 = vec_sub(vzo2plus1, vzo3plus1);
+
+ wr1 = vec_splats(wre[1]);
+ wi1 = vec_splats(wim[-1]);
+ wi2 = vec_splats(wim[-2]);
+ wi3 = vec_splats(wim[-3]);
+ wr2 = vec_splats(wre[2]);
+ wr3 = vec_splats(wre[3]);
+
+ x2 = vec_perm(x0, x1, vcprm(2,s2,3,s3));
+ x3 = vec_perm(x0, x1, vcprm(s3,3,s2,2));
+
+ y4 = vec_perm(y0, y1, vcprm(s1,1,s0,0));
+ y5 = vec_perm(y0, y1, vcprm(s3,3,s2,2));
+ y2 = vec_perm(y0, y1, vcprm(0,s0,1,s1));
+ y3 = vec_perm(y0, y1, vcprm(2,s2,3,s3));
+
+ ymulwi2 = vec_mul(y4, wi2);
+ ymulwi3 = vec_mul(y5, wi3);
+ x4 = vec_mul(x2, wr1);
+ x5 = vec_mul(x3, wi1);
+ y8 = vec_madd(y2, wr2, ymulwi2);
+ y9 = vec_msub(y2, wr2, ymulwi2);
+ x6 = vec_add(x4, x5);
+ x7 = vec_sub(x4, x5);
+ y13 = vec_madd(y3, wr3, ymulwi3);
+ y14 = vec_msub(y3, wr3, ymulwi3);
+
+ x8 = vec_perm(x6, x7, vcprm(0,1,s2,s3));
+ y10 = vec_perm(y8, y9, vcprm(0,1,s2,s3));
+ y15 = vec_perm(y13, y14, vcprm(0,1,s2,s3));
+
+ x9 = vec_perm(x0, x8, vcprm(0,1,s0,s2));
+ x10 = vec_perm(x1, x8, vcprm(1,0,s3,s1));
+
+ y16 = vec_perm(y10, y15, vcprm(0,2,s0,s2));
+ y17 = vec_perm(y10, y15, vcprm(3,1,s3,s1));
+
+ x11 = vec_add(vz0, x9);
+ x12 = vec_sub(vz0, x9);
+ x13 = vec_add(vzo1, x10);
+ x14 = vec_sub(vzo1, x10);
+
+ y18 = vec_add(vz0plus1, y16);
+ y19 = vec_sub(vz0plus1, y16);
+ y20 = vec_add(vzo1plus1, y17);
+ y21 = vec_sub(vzo1plus1, y17);
+
+ x15 = vec_perm(x13, x14, vcprm(0,s1,2,s3));
+ x16 = vec_perm(x13, x14, vcprm(s0,1,s2,3));
+ y22 = vec_perm(y20, y21, vcprm(0,s1,2,s3));
+ y23 = vec_perm(y20, y21, vcprm(s0,1,s2,3));
+
+
+ vec_st(x11, 0, &(out[0]));
+ vec_st(y18, 16, &(out[0]));
+ vec_st(x15, i1, &(out[0]));
+ vec_st(y22, i1+16, &(out[0]));
+ vec_st(x12, i2, &(out[0]));
+ vec_st(y19, i2+16, &(out[0]));
+ vec_st(x16, i3, &(out[0]));
+ vec_st(y23, i3+16, &(out[0]));
+
+ do {
+ out += 8;
+ wre += 4;
+ wim -= 4;
+ wr0 = vec_splats(wre[0]);
+ wr1 = vec_splats(wre[1]);
+ wi0 = vec_splats(wim[0]);
+ wi1 = vec_splats(wim[-1]);
+
+ wr2 = vec_splats(wre[2]);
+ wr3 = vec_splats(wre[3]);
+ wi2 = vec_splats(wim[-2]);
+ wi3 = vec_splats(wim[-3]);
+
+ vzo2 = vec_ld(i2, &(out[0])); // zo2.r zo2.i z(o2+1).r z(o2+1).i
+ vzo2plus1 = vec_ld(i2+16, &(out[0]));
+ vzo3 = vec_ld(i3, &(out[0])); // zo3.r zo3.i z(o3+1).r z(o3+1).i
+ vzo3plus1 = vec_ld(i3+16, &(out[0]));
+ vz0 = vec_ld(0, &(out[0])); // z0.r z0.i z1.r z1.i
+ vz0plus1 = vec_ld(16, &(out[0]));
+ vzo1 = vec_ld(i1, &(out[0])); // zo1.r zo1.i z(o1+1).r z(o1+1).i
+ vzo1plus1 = vec_ld(i1+16, &(out[0]));
+
+ x0 = vec_add(vzo2, vzo3);
+ x1 = vec_sub(vzo2, vzo3);
+
+ y0 = vec_add(vzo2plus1, vzo3plus1);
+ y1 = vec_sub(vzo2plus1, vzo3plus1);
+
+ x4 = vec_perm(x0, x1, vcprm(s1,1,s0,0));
+ x5 = vec_perm(x0, x1, vcprm(s3,3,s2,2));
+ x2 = vec_perm(x0, x1, vcprm(0,s0,1,s1));
+ x3 = vec_perm(x0, x1, vcprm(2,s2,3,s3));
+
+ y2 = vec_perm(y0, y1, vcprm(0,s0,1,s1));
+ y3 = vec_perm(y0, y1, vcprm(2,s2,3,s3));
+ xmulwi0 = vec_mul(x4, wi0);
+ xmulwi1 = vec_mul(x5, wi1);
+
+ y4 = vec_perm(y0, y1, vcprm(s1,1,s0,0));
+ y5 = vec_perm(y0, y1, vcprm(s3,3,s2,2));
+
+ x8 = vec_madd(x2, wr0, xmulwi0);
+ x9 = vec_msub(x2, wr0, xmulwi0);
+ ymulwi2 = vec_mul(y4, wi2);
+ ymulwi3 = vec_mul(y5, wi3);
+
+ x13 = vec_madd(x3, wr1, xmulwi1);
+ x14 = vec_msub(x3, wr1, xmulwi1);
+
+ y8 = vec_madd(y2, wr2, ymulwi2);
+ y9 = vec_msub(y2, wr2, ymulwi2);
+ y13 = vec_madd(y3, wr3, ymulwi3);
+ y14 = vec_msub(y3, wr3, ymulwi3);
+
+ x10 = vec_perm(x8, x9, vcprm(0,1,s2,s3));
+ x15 = vec_perm(x13, x14, vcprm(0,1,s2,s3));
+
+ y10 = vec_perm(y8, y9, vcprm(0,1,s2,s3));
+ y15 = vec_perm(y13, y14, vcprm(0,1,s2,s3));
+
+ x16 = vec_perm(x10, x15, vcprm(0,2,s0,s2));
+ x17 = vec_perm(x10, x15, vcprm(3,1,s3,s1));
+
+ y16 = vec_perm(y10, y15, vcprm(0,2,s0,s2));
+ y17 = vec_perm(y10, y15, vcprm(3,1,s3,s1));
+
+ x18 = vec_add(vz0, x16);
+ x19 = vec_sub(vz0, x16);
+ x20 = vec_add(vzo1, x17);
+ x21 = vec_sub(vzo1, x17);
+
+ y18 = vec_add(vz0plus1, y16);
+ y19 = vec_sub(vz0plus1, y16);
+ y20 = vec_add(vzo1plus1, y17);
+ y21 = vec_sub(vzo1plus1, y17);
+
+ x22 = vec_perm(x20, x21, vcprm(0,s1,2,s3));
+ x23 = vec_perm(x20, x21, vcprm(s0,1,s2,3));
+
+ y22 = vec_perm(y20, y21, vcprm(0,s1,2,s3));
+ y23 = vec_perm(y20, y21, vcprm(s0,1,s2,3));
+
+ vec_st(x18, 0, &(out[0]));
+ vec_st(y18, 16, &(out[0]));
+ vec_st(x22, i1, &(out[0]));
+ vec_st(y22, i1+16, &(out[0]));
+ vec_st(x19, i2, &(out[0]));
+ vec_st(y19, i2+16, &(out[0]));
+ vec_st(x23, i3, &(out[0]));
+ vec_st(y23, i3+16, &(out[0]));
+ } while (n-=2);
+}
+
+inline static void fft2_vsx_interleave(FFTComplex *z)
+{
+ FFTSample r1, i1;
+
+ r1 = z[0].re - z[1].re;
+ z[0].re += z[1].re;
+ z[1].re = r1;
+
+ i1 = z[0].im - z[1].im;
+ z[0].im += z[1].im;
+ z[1].im = i1;
+ }
+
+inline static void fft4_vsx_interleave(FFTComplex *z)
+{
+ vec_f a, b, c, d;
+ float* out= (float*)z;
+ a = vec_ld(0, &(out[0]));
+ b = vec_ld(byte_2complex, &(out[0]));
+
+ c = vec_perm(a, b, vcprm(0,1,s2,s1));
+ d = vec_perm(a, b, vcprm(2,3,s0,s3));
+ a = vec_add(c, d);
+ b = vec_sub(c, d);
+
+ c = vec_perm(a, b, vcprm(0,1,s0,s1));
+ d = vec_perm(a, b, vcprm(2,3,s3,s2));
+
+ a = vec_add(c, d);
+ b = vec_sub(c, d);
+ vec_st(a, 0, &(out[0]));
+ vec_st(b, byte_2complex, &(out[0]));
+}
+
+inline static void fft8_vsx_interleave(FFTComplex *z)
+{
+ vec_f vz0, vz1, vz2, vz3;
+ vec_f x0, x1, x2, x3;
+ vec_f x4, x5, x6, x7;
+ vec_f x8, x9, x10, x11;
+ vec_f x12, x13, x14, x15;
+ vec_f x16, x17, x18, x19;
+ vec_f x20, x21, x22, x23;
+ vec_f x24, x25, x26, x27;
+ vec_f x28, x29, x30, x31;
+ vec_f x32, x33, x34;
+
+ float* out= (float*)z;
+ vec_f vc1 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
+
+ vz0 = vec_ld(0, &(out[0]));
+ vz1 = vec_ld(byte_2complex, &(out[0]));
+ vz2 = vec_ld(byte_4complex, &(out[0]));
+ vz3 = vec_ld(byte_6complex, &(out[0]));
+
+ x0 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+ x1 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+ x2 = vec_perm(vz2, vz3, vcprm(2,1,s0,s1));
+ x3 = vec_perm(vz2, vz3, vcprm(0,3,s2,s3));
+
+ x4 = vec_add(x0, x1);
+ x5 = vec_sub(x0, x1);
+ x6 = vec_add(x2, x3);
+ x7 = vec_sub(x2, x3);
+
+ x8 = vec_perm(x4, x5, vcprm(0,1,s0,s1));
+ x9 = vec_perm(x4, x5, vcprm(2,3,s3,s2));
+ x10 = vec_perm(x6, x7, vcprm(2,1,s2,s1));
+ x11 = vec_perm(x6, x7, vcprm(0,3,s0,s3));
+
+ x12 = vec_add(x8, x9);
+ x13 = vec_sub(x8, x9);
+ x14 = vec_add(x10, x11);
+ x15 = vec_sub(x10, x11);
+ x16 = vec_perm(x12, x13, vcprm(0,s0,1,s1));
+ x17 = vec_perm(x14, x15, vcprm(0,s0,1,s1));
+ x18 = vec_perm(x16, x17, vcprm(s0,s3,s2,s1));
+ x19 = vec_add(x16, x18); // z0.r z2.r z0.i z2.i
+ x20 = vec_sub(x16, x18); // z4.r z6.r z4.i z6.i
+
+ x21 = vec_perm(x12, x13, vcprm(2,s2,3,s3));
+ x22 = vec_perm(x14, x15, vcprm(2,3,s2,s3));
+ x23 = vec_perm(x14, x15, vcprm(3,2,s3,s2));
+ x24 = vec_add(x22, x23);
+ x25 = vec_sub(x22, x23);
+ x26 = vec_mul( vec_perm(x24, x25, vcprm(2,s2,0,s0)), vc1);
+
+ x27 = vec_add(x21, x26); // z1.r z7.r z1.i z3.i
+ x28 = vec_sub(x21, x26); //z5.r z3.r z5.i z7.i
+
+ x29 = vec_perm(x19, x27, vcprm(0,2,s0,s2)); // z0.r z0.i z1.r z1.i
+ x30 = vec_perm(x19, x27, vcprm(1,3,s1,s3)); // z2.r z2.i z7.r z3.i
+ x31 = vec_perm(x20, x28, vcprm(0,2,s0,s2)); // z4.r z4.i z5.r z5.i
+ x32 = vec_perm(x20, x28, vcprm(1,3,s1,s3)); // z6.r z6.i z3.r z7.i
+ x33 = vec_perm(x30, x32, vcprm(0,1,s2,3)); // z2.r z2.i z3.r z3.i
+ x34 = vec_perm(x30, x32, vcprm(s0,s1,2,s3)); // z6.r z6.i z7.r z7.i
+
+ vec_st(x29, 0, &(out[0]));
+ vec_st(x33, byte_2complex, &(out[0]));
+ vec_st(x31, byte_4complex, &(out[0]));
+ vec_st(x34, byte_6complex, &(out[0]));
+}
+
+inline static void fft16_vsx_interleave(FFTComplex *z)
+{
+ float* out= (float*)z;
+ vec_f vc0 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
+ vec_f vc1 = {ff_cos_16[1], ff_cos_16[1], ff_cos_16[1], ff_cos_16[1]};
+ vec_f vc2 = {ff_cos_16[3], ff_cos_16[3], ff_cos_16[3], ff_cos_16[3]};
+ vec_f vz0, vz1, vz2, vz3;
+ vec_f vz4, vz5, vz6, vz7;
+ vec_f x0, x1, x2, x3;
+ vec_f x4, x5, x6, x7;
+ vec_f x8, x9, x10, x11;
+ vec_f x12, x13, x14, x15;
+ vec_f x16, x17, x18, x19;
+ vec_f x20, x21, x22, x23;
+ vec_f x24, x25, x26, x27;
+ vec_f x28, x29, x30, x31;
+ vec_f x32, x33, x34, x35;
+ vec_f x36, x37, x38, x39;
+ vec_f x40, x41, x42, x43;
+ vec_f x44, x45, x46, x47;
+ vec_f x48, x49, x50, x51;
+ vec_f x52, x53, x54, x55;
+ vec_f x56, x57, x58, x59;
+ vec_f x60, x61, x62, x63;
+ vec_f x64, x65, x66, x67;
+ vec_f x68, x69, x70, x71;
+ vec_f x72, x73, x74, x75;
+ vec_f x76, x77, x78, x79;
+ vec_f x80, x81, x82, x83;
+ vec_f x84, x85, x86;
+
+ vz0 = vec_ld(0, &(out[0]));
+ vz1 = vec_ld(byte_2complex, &(out[0]));
+ vz2 = vec_ld(byte_4complex, &(out[0]));
+ vz3 = vec_ld(byte_6complex, &(out[0]));
+ vz4 = vec_ld(byte_8complex, &(out[0]));
+ vz5 = vec_ld(byte_10complex, &(out[0]));
+ vz6 = vec_ld(byte_12complex, &(out[0]));
+ vz7 = vec_ld(byte_14complex, &(out[0]));
+
+ x0 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+ x1 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+ x2 = vec_perm(vz2, vz3, vcprm(0,1,s0,s1));
+ x3 = vec_perm(vz2, vz3, vcprm(2,3,s2,s3));
+
+ x4 = vec_perm(vz4, vz5, vcprm(0,1,s2,s1));
+ x5 = vec_perm(vz4, vz5, vcprm(2,3,s0,s3));
+ x6 = vec_perm(vz6, vz7, vcprm(0,1,s2,s1));
+ x7 = vec_perm(vz6, vz7, vcprm(2,3,s0,s3));
+
+ x8 = vec_add(x0, x1);
+ x9 = vec_sub(x0, x1);
+ x10 = vec_add(x2, x3);
+ x11 = vec_sub(x2, x3);
+
+ x12 = vec_add(x4, x5);
+ x13 = vec_sub(x4, x5);
+ x14 = vec_add(x6, x7);
+ x15 = vec_sub(x6, x7);
+
+ x16 = vec_perm(x8, x9, vcprm(0,1,s0,s1));
+ x17 = vec_perm(x8, x9, vcprm(2,3,s3,s2));
+ x18 = vec_perm(x10, x11, vcprm(2,1,s1,s2));
+ x19 = vec_perm(x10, x11, vcprm(0,3,s0,s3));
+ x20 = vec_perm(x12, x14, vcprm(0,1,s0, s1));
+ x21 = vec_perm(x12, x14, vcprm(2,3,s2,s3));
+ x22 = vec_perm(x13, x15, vcprm(0,1,s0,s1));
+ x23 = vec_perm(x13, x15, vcprm(3,2,s3,s2));
+
+ x24 = vec_add(x16, x17);
+ x25 = vec_sub(x16, x17);
+ x26 = vec_add(x18, x19);
+ x27 = vec_sub(x18, x19);
+ x28 = vec_add(x20, x21);
+ x29 = vec_sub(x20, x21);
+ x30 = vec_add(x22, x23);
+ x31 = vec_sub(x22, x23);
+
+ x32 = vec_add(x24, x26);
+ x33 = vec_sub(x24, x26);
+ x34 = vec_perm(x32, x33, vcprm(0,1,s0,s1));
+
+ x35 = vec_perm(x28, x29, vcprm(2,1,s1,s2));
+ x36 = vec_perm(x28, x29, vcprm(0,3,s0,s3));
+ x37 = vec_add(x35, x36);
+ x38 = vec_sub(x35, x36);
+ x39 = vec_perm(x37, x38, vcprm(0,1,s1,s0));
+
+ x40 = vec_perm(x27, x38, vcprm(3,2,s2,s3));
+ x41 = vec_perm(x26, x37, vcprm(2,3,s3,s2));
+ x42 = vec_add(x40, x41);
+ x43 = vec_sub(x40, x41);
+ x44 = vec_mul(x42, vc0);
+ x45 = vec_mul(x43, vc0);
+
+ x46 = vec_add(x34, x39); // z0.r z0.i z4.r z4.i
+ x47 = vec_sub(x34, x39); // z8.r z8.i z12.r z12.i
+
+ x48 = vec_perm(x30, x31, vcprm(2,1,s1,s2));
+ x49 = vec_perm(x30, x31, vcprm(0,3,s3,s0));
+ x50 = vec_add(x48, x49);
+ x51 = vec_sub(x48, x49);
+ x52 = vec_mul(x50, vc1);
+ x53 = vec_mul(x50, vc2);
+ x54 = vec_mul(x51, vc1);
+ x55 = vec_mul(x51, vc2);
+
+ x56 = vec_perm(x24, x25, vcprm(2,3,s2,s3));
+ x57 = vec_perm(x44, x45, vcprm(0,1,s1,s0));
+ x58 = vec_add(x56, x57);
+ x59 = vec_sub(x56, x57);
+
+ x60 = vec_perm(x54, x55, vcprm(1,0,3,2));
+ x61 = vec_perm(x54, x55, vcprm(s1,s0,s3,s2));
+ x62 = vec_add(x52, x61);
+ x63 = vec_sub(x52, x61);
+ x64 = vec_add(x60, x53);
+ x65 = vec_sub(x60, x53);
+ x66 = vec_perm(x62, x64, vcprm(0,1,s3,s2));
+ x67 = vec_perm(x63, x65, vcprm(s0,s1,3,2));
+
+ x68 = vec_add(x58, x66); // z1.r z1.i z3.r z3.i
+ x69 = vec_sub(x58, x66); // z9.r z9.i z11.r z11.i
+ x70 = vec_add(x59, x67); // z5.r z5.i z15.r z15.i
+ x71 = vec_sub(x59, x67); // z13.r z13.i z7.r z7.i
+
+ x72 = vec_perm(x25, x27, vcprm(s1,s0,s2,s3));
+ x73 = vec_add(x25, x72);
+ x74 = vec_sub(x25, x72);
+ x75 = vec_perm(x73, x74, vcprm(0,1,s0,s1));
+ x76 = vec_perm(x44, x45, vcprm(3,2,s2,s3));
+ x77 = vec_add(x75, x76); // z2.r z2.i z6.r z6.i
+ x78 = vec_sub(x75, x76); // z10.r z10.i z14.r z14.i
+
+ x79 = vec_perm(x46, x68, vcprm(0,1,s0,s1)); // z0.r z0.i z1.r z1.i
+ x80 = vec_perm(x77, x68, vcprm(0,1,s2,s3)); // z2.r z2.i z3.r z3.i
+ x81 = vec_perm(x46, x70, vcprm(2,3,s0,s1)); // z4.r z4.i z5.r z5.i
+ x82 = vec_perm(x71, x77, vcprm(s2,s3,2,3)); // z6.r z6.i z7.r z7.i
+ vec_st(x79, 0, &(out[0]));
+ vec_st(x80, byte_2complex, &(out[0]));
+ vec_st(x81, byte_4complex, &(out[0]));
+ vec_st(x82, byte_6complex, &(out[0]));
+ x83 = vec_perm(x47, x69, vcprm(0,1,s0,s1)); // z8.r z8.i z9.r z9.i
+ x84 = vec_perm(x78, x69, vcprm(0,1,s2,s3)); // z10.r z10.i z11.r z11.i
+ x85 = vec_perm(x47, x71, vcprm(2,3,s0,s1)); // z12.r z12.i z13.r z13.i
+ x86 = vec_perm(x70, x78, vcprm(s2,s3,2,3)); // z14.r z14.i z15.r z15.i
+ vec_st(x83, byte_8complex, &(out[0]));
+ vec_st(x84, byte_10complex, &(out[0]));
+ vec_st(x85, byte_12complex, &(out[0]));
+ vec_st(x86, byte_14complex, &(out[0]));
+}
+
+inline static void fft4_vsx(FFTComplex *z)
+{
+ vec_f a, b, c, d;
+ float* out= (float*)z;
+ a = vec_ld(0, &(out[0]));
+ b = vec_ld(byte_2complex, &(out[0]));
+
+ c = vec_perm(a, b, vcprm(0,1,s2,s1));
+ d = vec_perm(a, b, vcprm(2,3,s0,s3));
+ a = vec_add(c, d);
+ b = vec_sub(c, d);
+
+ c = vec_perm(a,b, vcprm(0,s0,1,s1));
+ d = vec_perm(a, b, vcprm(2,s3,3,s2));
+
+ a = vec_add(c, d);
+ b = vec_sub(c, d);
+
+ c = vec_perm(a, b, vcprm(0,1,s0,s1));
+ d = vec_perm(a, b, vcprm(2,3,s2,s3));
+
+ vec_st(c, 0, &(out[0]));
+ vec_st(d, byte_2complex, &(out[0]));
+ return;
+}
+
+inline static void fft8_vsx(FFTComplex *z)
+{
+ vec_f vz0, vz1, vz2, vz3;
+ vec_f vz4, vz5, vz6, vz7, vz8;
+
+ float* out= (float*)z;
+ vec_f vc0 = {0.0, 0.0, 0.0, 0.0};
+ vec_f vc1 = {-sqrthalf, sqrthalf, sqrthalf, -sqrthalf};
+ vec_f vc2 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
+
+ vz0 = vec_ld(0, &(out[0]));
+ vz1 = vec_ld(byte_2complex, &(out[0]));
+ vz2 = vec_ld(byte_4complex, &(out[0]));
+ vz3 = vec_ld(byte_6complex, &(out[0]));
+
+ vz6 = vec_perm(vz2, vz3, vcprm(0,s0,1,s1));
+ vz7 = vec_perm(vz2, vz3, vcprm(2,s2,3,s3));
+ vz4 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+
+ vz2 = vec_add(vz6, vz7);
+ vz3 = vec_sub(vz6, vz7);
+ vz8 = vec_perm(vz3, vz3, vcprm(2,3,0,1));
+
+ vz0 = vec_add(vz4, vz5);
+ vz1 = vec_sub(vz4, vz5);
+
+ vz3 = vec_madd(vz3, vc1, vc0);
+ vz3 = vec_madd(vz8, vc2, vz3);
+
+ vz4 = vec_perm(vz0, vz1, vcprm(0,s0,1,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,s3,3,s2));
+ vz6 = vec_perm(vz2, vz3, vcprm(1,2,s3,s0));
+ vz7 = vec_perm(vz2, vz3, vcprm(0,3,s2,s1));
+
+ vz0 = vec_add(vz4, vz5);
+ vz1 = vec_sub(vz4, vz5);
+ vz2 = vec_add(vz6, vz7);
+ vz3 = vec_sub(vz6, vz7);
+
+ vz4 = vec_perm(vz0, vz1, vcprm(0,1,s0,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,3,s2,s3));
+ vz6 = vec_perm(vz2, vz3, vcprm(0,2,s1,s3));
+ vz7 = vec_perm(vz2, vz3, vcprm(1,3,s0,s2));
+
+
+ vz2 = vec_sub(vz4, vz6);
+ vz3 = vec_sub(vz5, vz7);
+
+ vz0 = vec_add(vz4, vz6);
+ vz1 = vec_add(vz5, vz7);
+
+ vec_st(vz0, 0, &(out[0]));
+ vec_st(vz1, byte_2complex, &(out[0]));
+ vec_st(vz2, byte_4complex, &(out[0]));
+ vec_st(vz3, byte_6complex, &(out[0]));
+ return;
+}
+
+inline static void fft16_vsx(FFTComplex *z)
+{
+ float* out= (float*)z;
+ vec_f vc0 = {0.0, 0.0, 0.0, 0.0};
+ vec_f vc1 = {-sqrthalf, sqrthalf, sqrthalf, -sqrthalf};
+ vec_f vc2 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
+ vec_f vc3 = {1.0, 0.92387953, sqrthalf, 0.38268343};
+ vec_f vc4 = {0.0, 0.38268343, sqrthalf, 0.92387953};
+ vec_f vc5 = {-0.0, -0.38268343, -sqrthalf, -0.92387953};
+
+ vec_f vz0, vz1, vz2, vz3;
+ vec_f vz4, vz5, vz6, vz7;
+ vec_f vz8, vz9, vz10, vz11;
+ vec_f vz12, vz13;
+
+ vz0 = vec_ld(byte_8complex, &(out[0]));
+ vz1 = vec_ld(byte_10complex, &(out[0]));
+ vz2 = vec_ld(byte_12complex, &(out[0]));
+ vz3 = vec_ld(byte_14complex, &(out[0]));
+
+ vz4 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+ vz6 = vec_perm(vz2, vz3, vcprm(0,1,s2,s1));
+ vz7 = vec_perm(vz2, vz3, vcprm(2,3,s0,s3));
+
+ vz0 = vec_add(vz4, vz5);
+ vz1= vec_sub(vz4, vz5);
+ vz2 = vec_add(vz6, vz7);
+ vz3 = vec_sub(vz6, vz7);
+
+ vz4 = vec_perm(vz0, vz1, vcprm(0,s0,1,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,s3,3,s2));
+ vz6 = vec_perm(vz2, vz3, vcprm(0,s0,1,s1));
+ vz7 = vec_perm(vz2, vz3, vcprm(2,s3,3,s2));
+
+ vz0 = vec_add(vz4, vz5);
+ vz1 = vec_sub(vz4, vz5);
+ vz2 = vec_add(vz6, vz7);
+ vz3 = vec_sub(vz6, vz7);
+
+ vz4 = vec_perm(vz0, vz1, vcprm(0,1,s0,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,3,s2,s3));
+
+ vz6 = vec_perm(vz2, vz3, vcprm(0,1,s0,s1));
+ vz7 = vec_perm(vz2, vz3, vcprm(2,3,s2,s3));
+
+ vz0 = vec_ld(0, &(out[0]));
+ vz1 = vec_ld(byte_2complex, &(out[0]));
+ vz2 = vec_ld(byte_4complex, &(out[0]));
+ vz3 = vec_ld(byte_6complex, &(out[0]));
+ vz10 = vec_perm(vz2, vz3, vcprm(0,s0,1,s1));
+ vz11 = vec_perm(vz2, vz3, vcprm(2,s2,3,s3));
+ vz8 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+ vz9 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+
+ vz2 = vec_add(vz10, vz11);
+ vz3 = vec_sub(vz10, vz11);
+ vz12 = vec_perm(vz3, vz3, vcprm(2,3,0,1));
+ vz0 = vec_add(vz8, vz9);
+ vz1 = vec_sub(vz8, vz9);
+
+ vz3 = vec_madd(vz3, vc1, vc0);
+ vz3 = vec_madd(vz12, vc2, vz3);
+ vz8 = vec_perm(vz0, vz1, vcprm(0,s0,1,s1));
+ vz9 = vec_perm(vz0, vz1, vcprm(2,s3,3,s2));
+ vz10 = vec_perm(vz2, vz3, vcprm(1,2,s3,s0));
+ vz11 = vec_perm(vz2, vz3, vcprm(0,3,s2,s1));
+
+ vz0 = vec_add(vz8, vz9);
+ vz1 = vec_sub(vz8, vz9);
+ vz2 = vec_add(vz10, vz11);
+ vz3 = vec_sub(vz10, vz11);
+
+ vz8 = vec_perm(vz0, vz1, vcprm(0,1,s0,s1));
+ vz9 = vec_perm(vz0, vz1, vcprm(2,3,s2,s3));
+ vz10 = vec_perm(vz2, vz3, vcprm(0,2,s1,s3));
+ vz11 = vec_perm(vz2, vz3, vcprm(1,3,s0,s2));
+
+ vz2 = vec_sub(vz8, vz10);
+ vz3 = vec_sub(vz9, vz11);
+ vz0 = vec_add(vz8, vz10);
+ vz1 = vec_add(vz9, vz11);
+
+ vz8 = vec_madd(vz4, vc3, vc0);
+ vz9 = vec_madd(vz5, vc3, vc0);
+ vz10 = vec_madd(vz6, vc3, vc0);
+ vz11 = vec_madd(vz7, vc3, vc0);
+
+ vz8 = vec_madd(vz5, vc4, vz8);
+ vz9 = vec_madd(vz4, vc5, vz9);
+ vz10 = vec_madd(vz7, vc5, vz10);
+ vz11 = vec_madd(vz6, vc4, vz11);
+
+ vz12 = vec_sub(vz10, vz8);
+ vz10 = vec_add(vz10, vz8);
+
+ vz13 = vec_sub(vz9, vz11);
+ vz11 = vec_add(vz9, vz11);
+
+ vz4 = vec_sub(vz0, vz10);
+ vz0 = vec_add(vz0, vz10);
+
+ vz7= vec_sub(vz3, vz12);
+ vz3= vec_add(vz3, vz12);
+
+ vz5 = vec_sub(vz1, vz11);
+ vz1 = vec_add(vz1, vz11);
+
+ vz6 = vec_sub(vz2, vz13);
+ vz2 = vec_add(vz2, vz13);
+
+ vec_st(vz0, 0, &(out[0]));
+ vec_st(vz1, byte_2complex, &(out[0]));
+ vec_st(vz2, byte_4complex, &(out[0]));
+ vec_st(vz3, byte_6complex, &(out[0]));
+ vec_st(vz4, byte_8complex, &(out[0]));
+ vec_st(vz5, byte_10complex, &(out[0]));
+ vec_st(vz6, byte_12complex, &(out[0]));
+ vec_st(vz7, byte_14complex, &(out[0]));
+ return;
+
+}
+inline static void pass_vsx(FFTComplex * z, const FFTSample * wre, unsigned int n)
+{
+ int o1 = n<<1;
+ int o2 = n<<2;
+ int o3 = o1+o2;
+ int i1, i2, i3;
+ FFTSample* out = (FFTSample*)z;
+ const FFTSample *wim = wre+o1;
+ vec_f v0, v1, v2, v3;
+ vec_f v4, v5, v6, v7;
+ vec_f v8, v9, v10, v11;
+ vec_f v12, v13;
+
+ n = n-2;
+ i1 = o1*sizeof(FFTComplex);
+ i2 = o2*sizeof(FFTComplex);
+ i3 = o3*sizeof(FFTComplex);
+
+ v8 = vec_ld(0, &(wre[0]));
+ v10 = vec_ld(0, &(wim[0]));
+ v9 = vec_ld(0, &(wim[-4]));
+ v9 = vec_perm(v9, v10, vcprm(s0,3,2,1));
+
+ v4 = vec_ld(i2, &(out[0]));
+ v5 = vec_ld(i2+16, &(out[0]));
+ v6 = vec_ld(i3, &(out[0]));
+ v7 = vec_ld(i3+16, &(out[0]));
+ v10 = vec_mul(v4, v8); // r2*wre
+ v11 = vec_mul(v5, v8); // i2*wre
+ v12 = vec_mul(v6, v8); // r3*wre
+ v13 = vec_mul(v7, v8); // i3*wre
+
+ v0 = vec_ld(0, &(out[0])); // r0
+ v3 = vec_ld(i1+16, &(out[0])); // i1
+ v10 = vec_madd(v5, v9, v10); // r2*wim
+ v11 = vec_nmsub(v4, v9, v11); // i2*wim
+ v12 = vec_nmsub(v7, v9, v12); // r3*wim
+ v13 = vec_madd(v6, v9, v13); // i3*wim
+
+ v1 = vec_ld(16, &(out[0])); // i0
+ v2 = vec_ld(i1, &(out[0])); // r1
+ v8 = vec_sub(v12, v10);
+ v12 = vec_add(v12, v10);
+ v9 = vec_sub(v11, v13);
+ v13 = vec_add(v11, v13);
+ v4 = vec_sub(v0, v12);
+ v0 = vec_add(v0, v12);
+ v7 = vec_sub(v3, v8);
+ v3 = vec_add(v3, v8);
+
+ vec_st(v0, 0, &(out[0])); // r0
+ vec_st(v3, i1+16, &(out[0])); // i1
+ vec_st(v4, i2, &(out[0])); // r2
+ vec_st(v7, i3+16, &(out[0]));// i3
+
+ v5 = vec_sub(v1, v13);
+ v1 = vec_add(v1, v13);
+ v6 = vec_sub(v2, v9);
+ v2 = vec_add(v2, v9);
+
+ vec_st(v1, 16, &(out[0])); // i0
+ vec_st(v2, i1, &(out[0])); // r1
+ vec_st(v5, i2+16, &(out[0])); // i2
+ vec_st(v6, i3, &(out[0])); // r3
+
+ do {
+ out += 8;
+ wre += 4;
+ wim -= 4;
+
+ v8 = vec_ld(0, &(wre[0]));
+ v10 = vec_ld(0, &(wim[0]));
+ v9 = vec_ld(0, &(wim[-4]));
+ v9 = vec_perm(v9, v10, vcprm(s0,3,2,1));
+
+ v4 = vec_ld(i2, &(out[0])); // r2
+ v5 = vec_ld(i2+16, &(out[0])); // i2
+ v6 = vec_ld(i3, &(out[0])); // r3
+ v7 = vec_ld(i3+16, &(out[0]));// i3
+ v10 = vec_mul(v4, v8); // r2*wre
+ v11 = vec_mul(v5, v8); // i2*wre
+ v12 = vec_mul(v6, v8); // r3*wre
+ v13 = vec_mul(v7, v8); // i3*wre
+
+ v0 = vec_ld(0, &(out[0])); // r0
+ v3 = vec_ld(i1+16, &(out[0])); // i1
+ v10 = vec_madd(v5, v9, v10); // r2*wim
+ v11 = vec_nmsub(v4, v9, v11); // i2*wim
+ v12 = vec_nmsub(v7, v9, v12); // r3*wim
+ v13 = vec_madd(v6, v9, v13); // i3*wim
+
+ v1 = vec_ld(16, &(out[0])); // i0
+ v2 = vec_ld(i1, &(out[0])); // r1
+ v8 = vec_sub(v12, v10);
+ v12 = vec_add(v12, v10);
+ v9 = vec_sub(v11, v13);
+ v13 = vec_add(v11, v13);
+ v4 = vec_sub(v0, v12);
+ v0 = vec_add(v0, v12);
+ v7 = vec_sub(v3, v8);
+ v3 = vec_add(v3, v8);
+
+ vec_st(v0, 0, &(out[0])); // r0
+ vec_st(v3, i1+16, &(out[0])); // i1
+ vec_st(v4, i2, &(out[0])); // r2
+ vec_st(v7, i3+16, &(out[0])); // i3
+
+ v5 = vec_sub(v1, v13);
+ v1 = vec_add(v1, v13);
+ v6 = vec_sub(v2, v9);
+ v2 = vec_add(v2, v9);
+
+ vec_st(v1, 16, &(out[0])); // i0
+ vec_st(v2, i1, &(out[0])); // r1
+ vec_st(v5, i2+16, &(out[0])); // i2
+ vec_st(v6, i3, &(out[0])); // r3
+ } while (n-=2);
+}
+
+#endif
+
+#endif /* AVCODEC_PPC_FFT_VSX_H */
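
[Note, not part of the patch] The r2*wre / i2*wim comments inside pass_vsx() above describe one radix-4 butterfly applied to z[0], z[o1], z[o2], z[o3]. A scalar sketch of that per-element arithmetic, reconstructed from the vector operations and their comments (hypothetical reference only; w_re/w_im stand for the twiddle values loaded into v8/v9):

    #include "libavcodec/fft.h"   /* FFTComplex, FFTSample */

    /* One butterfly of the pass, written out in scalar form. */
    static void pass_butterfly_ref(FFTComplex *z, FFTSample w_re, FFTSample w_im,
                                   int o1, int o2, int o3)
    {
        FFTSample t1 = z[o2].re * w_re + z[o2].im * w_im;  /* r2*wre + i2*wim */
        FFTSample t2 = z[o2].im * w_re - z[o2].re * w_im;  /* i2*wre - r2*wim */
        FFTSample t3 = z[o3].re * w_re - z[o3].im * w_im;  /* r3*wre - i3*wim */
        FFTSample t4 = z[o3].im * w_re + z[o3].re * w_im;  /* i3*wre + r3*wim */
        FFTSample s1 = t3 - t1, s2 = t3 + t1;              /* v8, updated v12 */
        FFTSample s3 = t2 - t4, s4 = t2 + t4;              /* v9, updated v13 */

        z[o2].re = z[0].re  - s2;  z[0].re  += s2;
        z[o3].im = z[o1].im - s1;  z[o1].im += s1;
        z[o2].im = z[0].im  - s4;  z[0].im  += s4;
        z[o3].re = z[o1].re - s3;  z[o1].re += s3;
    }

This is the same dependency chain the vector versions carry in v8..v13, just without the four-lanes-per-vector packing of pass_vsx()/pass_vsx_interleave() or the reversed traversal of the wim table.
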
diff --git a/libavcodec/ppc/fmtconvert_altivec.c b/libavcodec/ppc/fmtconvert_altivec.c
index 153f44ac9c..7323eff0bd 100644
--- a/libavcodec/ppc/fmtconvert_altivec.c
+++ b/libavcodec/ppc/fmtconvert_altivec.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -26,7 +26,7 @@
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/fmtconvert.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
static void int32_to_float_fmul_scalar_altivec(float *dst, const int32_t *src,
float mul, int len)
@@ -52,15 +52,15 @@ static void int32_to_float_fmul_scalar_altivec(float *dst, const int32_t *src,
}
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
av_cold void ff_fmt_convert_init_ppc(FmtConvertContext *c,
AVCodecContext *avctx)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_altivec;
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
diff --git a/libavcodec/ppc/h264chroma_init.c b/libavcodec/ppc/h264chroma_init.c
index 4a24b7f82f..876efeca09 100644
--- a/libavcodec/ppc/h264chroma_init.c
+++ b/libavcodec/ppc/h264chroma_init.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,7 +27,7 @@
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/h264chroma.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)
@@ -46,11 +46,11 @@
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
av_cold void ff_h264chroma_init_ppc(H264ChromaContext *c, int bit_depth)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
const int high_bit_depth = bit_depth > 8;
if (!PPC_ALTIVEC(av_get_cpu_flags()))
@@ -60,5 +60,5 @@ av_cold void ff_h264chroma_init_ppc(H264ChromaContext *c, int bit_depth)
c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
diff --git a/libavcodec/ppc/h264chroma_template.c b/libavcodec/ppc/h264chroma_template.c
index 293fef5c90..cb1e0957e1 100644
--- a/libavcodec/ppc/h264chroma_template.c
+++ b/libavcodec/ppc/h264chroma_template.c
@@ -1,30 +1,32 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/mem.h"
+#include "libavutil/ppc/types_altivec.h"
+#include "libavutil/ppc/util_altivec.h"
/* this code assumes that stride % 16 == 0 */
#define CHROMA_MC8_ALTIVEC_CORE(BIAS1, BIAS2) \
- vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc2uc);\
- vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc3uc);\
+ vsrc2ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc2uc);\
+ vsrc3ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc3uc);\
\
psum = vec_mladd(vA, vsrc0ssH, BIAS1);\
psum = vec_mladd(vB, vsrc1ssH, psum);\
@@ -49,8 +51,8 @@
#define CHROMA_MC8_ALTIVEC_CORE_SIMPLE \
\
- vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);\
- vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);\
+ vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc0uc);\
+ vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc1uc);\
\
psum = vec_mladd(vA, vsrc0ssH, v32ss);\
psum = vec_mladd(vE, vsrc1ssH, psum);\
@@ -70,6 +72,43 @@
#define noop(a) a
#define add28(a) vec_add(v28ss, a)
+#if HAVE_BIGENDIAN
+#define GET_VSRC1(vs0, off, b, perm0, s){ \
+ vec_u8 vsrcCuc, vsrcDuc; \
+ vsrcCuc = vec_ld(off, s); \
+ if (loadSecond){ \
+ vsrcDuc = vec_ld(off + b, s); \
+ } else \
+ vsrcDuc = vsrcCuc; \
+ \
+ vs0 = vec_perm(vsrcCuc, vsrcDuc, perm0); \
+}
+#define GET_VSRC(vs0, vs1, off, b, perm0, perm1, s){ \
+ vec_u8 vsrcCuc, vsrcDuc; \
+ vsrcCuc = vec_ld(off, s); \
+ if (loadSecond){ \
+ vsrcDuc = vec_ld(off + b, s); \
+ } else \
+ vsrcDuc = vsrcCuc; \
+ \
+ vs0 = vec_perm(vsrcCuc, vsrcDuc, perm0); \
+ if (reallyBadAlign){ \
+ vs1 = vsrcDuc; \
+ } else \
+ vs1 = vec_perm(vsrcCuc, vsrcDuc, perm1); \
+ }
+
+#else
+
+#define GET_VSRC1(vs0, off, b, perm0, s){ \
+ vs0 = vec_vsx_ld(off, s); \
+ }
+#define GET_VSRC(vs0, vs1, off, b, perm0, perm1, s){ \
+ vs0 = vec_vsx_ld(off, s); \
+ vs1 = vec_vsx_ld(off + 1, s); \
+ }
+#endif /* HAVE_BIGENDIAN */
+
#ifdef PREFIX_h264_chroma_mc8_altivec
static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
int stride, int h, int x, int y) {
@@ -80,23 +119,27 @@ static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
(( x) * ( y))};
register int i;
vec_u8 fperm;
- const vec_s32 vABCD = vec_ld(0, ABCD);
- const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
- const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
- const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
- const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
LOAD_ZERO;
+ const vec_s32 vABCD = vec_ld(0, ABCD);
+ const vec_s16 vA = VEC_SPLAT16(vABCD, 1);
+ const vec_s16 vB = VEC_SPLAT16(vABCD, 3);
+ const vec_s16 vC = VEC_SPLAT16(vABCD, 5);
+ const vec_s16 vD = VEC_SPLAT16(vABCD, 7);
const vec_s16 v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
const vec_u16 v6us = vec_splat_u16(6);
- register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
- register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
- vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
+ vec_u8 vsrcperm0, vsrcperm1;
vec_u8 vsrc0uc, vsrc1uc;
vec_s16 vsrc0ssH, vsrc1ssH;
- vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
+ vec_u8 vsrc2uc, vsrc3uc;
vec_s16 vsrc2ssH, vsrc3ssH, psum;
vec_u8 vdst, ppsum, vfdst, fsum;
+#if HAVE_BIGENDIAN
+ register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
+ register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
+ vsrcperm0 = vec_lvsl(0, src);
+ vsrcperm1 = vec_lvsl(1, src);
+#endif
if (((unsigned long)dst) % 16 == 0) {
fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
@@ -110,89 +153,28 @@ static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
0x1C, 0x1D, 0x1E, 0x1F};
}
- vsrcAuc = vec_ld(0, src);
-
- if (loadSecond)
- vsrcBuc = vec_ld(16, src);
- vsrcperm0 = vec_lvsl(0, src);
- vsrcperm1 = vec_lvsl(1, src);
-
- vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
- if (reallyBadAlign)
- vsrc1uc = vsrcBuc;
- else
- vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
+ GET_VSRC(vsrc0uc, vsrc1uc, 0, 16, vsrcperm0, vsrcperm1, src);
- vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);
- vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);
+ vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc0uc);
+ vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc1uc);
if (ABCD[3]) {
- if (!loadSecond) {// -> !reallyBadAlign
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(stride + 0, src);
- vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
- vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
-
- CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
- }
- } else {
- vec_u8 vsrcDuc;
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(stride + 0, src);
- vsrcDuc = vec_ld(stride + 16, src);
- vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
- if (reallyBadAlign)
- vsrc3uc = vsrcDuc;
- else
- vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
-
- CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
- }
+ for (i = 0 ; i < h ; i++) {
+ GET_VSRC(vsrc2uc, vsrc3uc, stride, 16, vsrcperm0, vsrcperm1, src);
+ CHROMA_MC8_ALTIVEC_CORE(v32ss, noop);
}
} else {
const vec_s16 vE = vec_add(vB, vC);
if (ABCD[2]) { // x == 0 B == 0
- if (!loadSecond) {// -> !reallyBadAlign
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(stride + 0, src);
- vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
- CHROMA_MC8_ALTIVEC_CORE_SIMPLE
-
- vsrc0uc = vsrc1uc;
- }
- } else {
- vec_u8 vsrcDuc;
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(stride + 0, src);
- vsrcDuc = vec_ld(stride + 15, src);
- vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
- CHROMA_MC8_ALTIVEC_CORE_SIMPLE
-
- vsrc0uc = vsrc1uc;
- }
+ for (i = 0 ; i < h ; i++) {
+ GET_VSRC1(vsrc1uc, stride, 15, vsrcperm0, src);
+ CHROMA_MC8_ALTIVEC_CORE_SIMPLE;
+ vsrc0uc = vsrc1uc;
}
} else { // y == 0 C == 0
- if (!loadSecond) {// -> !reallyBadAlign
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(0, src);
- vsrc0uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
- vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
-
- CHROMA_MC8_ALTIVEC_CORE_SIMPLE
- }
- } else {
- vec_u8 vsrcDuc;
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(0, src);
- vsrcDuc = vec_ld(15, src);
- vsrc0uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
- if (reallyBadAlign)
- vsrc1uc = vsrcDuc;
- else
- vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
-
- CHROMA_MC8_ALTIVEC_CORE_SIMPLE
- }
+ for (i = 0 ; i < h ; i++) {
+ GET_VSRC(vsrc0uc, vsrc1uc, 0, 15, vsrcperm0, vsrcperm1, src);
+ CHROMA_MC8_ALTIVEC_CORE_SIMPLE;
}
}
}
@@ -209,23 +191,27 @@ static void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, i
(( x) * ( y))};
register int i;
vec_u8 fperm;
- const vec_s32 vABCD = vec_ld(0, ABCD);
- const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
- const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
- const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
- const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
LOAD_ZERO;
+ const vec_s32 vABCD = vec_ld(0, ABCD);
+ const vec_s16 vA = VEC_SPLAT16(vABCD, 1);
+ const vec_s16 vB = VEC_SPLAT16(vABCD, 3);
+ const vec_s16 vC = VEC_SPLAT16(vABCD, 5);
+ const vec_s16 vD = VEC_SPLAT16(vABCD, 7);
const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
const vec_u16 v6us = vec_splat_u16(6);
- register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
- register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
- vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
+ vec_u8 vsrcperm0, vsrcperm1;
vec_u8 vsrc0uc, vsrc1uc;
vec_s16 vsrc0ssH, vsrc1ssH;
- vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
+ vec_u8 vsrc2uc, vsrc3uc;
vec_s16 vsrc2ssH, vsrc3ssH, psum;
vec_u8 vdst, ppsum, vfdst, fsum;
+#if HAVE_BIGENDIAN
+ register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
+ register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
+ vsrcperm0 = vec_lvsl(0, src);
+ vsrcperm1 = vec_lvsl(1, src);
+#endif
if (((unsigned long)dst) % 16 == 0) {
fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
@@ -239,47 +225,14 @@ static void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, i
0x1C, 0x1D, 0x1E, 0x1F};
}
- vsrcAuc = vec_ld(0, src);
-
- if (loadSecond)
- vsrcBuc = vec_ld(16, src);
- vsrcperm0 = vec_lvsl(0, src);
- vsrcperm1 = vec_lvsl(1, src);
-
- vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
- if (reallyBadAlign)
- vsrc1uc = vsrcBuc;
- else
- vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
-
- vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc0uc);
- vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc1uc);
-
- if (!loadSecond) {// -> !reallyBadAlign
- for (i = 0 ; i < h ; i++) {
-
-
- vsrcCuc = vec_ld(stride + 0, src);
+ GET_VSRC(vsrc0uc, vsrc1uc, 0, 16, vsrcperm0, vsrcperm1, src);
- vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
- vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
+ vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v, (vec_u8)vsrc0uc);
+ vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v, (vec_u8)vsrc1uc);
- CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
- }
- } else {
- vec_u8 vsrcDuc;
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(stride + 0, src);
- vsrcDuc = vec_ld(stride + 16, src);
-
- vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
- if (reallyBadAlign)
- vsrc3uc = vsrcDuc;
- else
- vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
-
- CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
- }
+ for (i = 0 ; i < h ; i++) {
+ GET_VSRC(vsrc2uc, vsrc3uc, stride, 16, vsrcperm0, vsrcperm1, src);
+ CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28);
}
}
#endif
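
The GET_VSRC/GET_VSRC1 macros introduced above hide the endian split: the big-endian path emulates unaligned loads with vec_ld plus a vec_lvsl permute, while the little-endian VSX path loads unaligned data directly with vec_vsx_ld. The kernel itself is the usual H.264 bilinear chroma interpolation; a scalar sketch of the 8-wide case, using the same ABCD weights and (+32)>>6 rounding, is shown below (the _ref name is illustrative, not part of the patch):

    #include <stdint.h>

    /* Scalar sketch of what CHROMA_MC8_ALTIVEC_CORE computes per row. */
    void chroma_mc8_ref(uint8_t *dst, const uint8_t *src,
                        int stride, int h, int x, int y)
    {
        const int A = (8 - x) * (8 - y);
        const int B = (    x) * (8 - y);
        const int C = (8 - x) * (    y);
        const int D = (    x) * (    y);
        int i, j;

        for (i = 0; i < h; i++) {
            for (j = 0; j < 8; j++)
                dst[j] = (A * src[j]          + B * src[j + 1] +
                          C * src[j + stride] + D * src[j + stride + 1] +
                          32) >> 6;
            dst += stride;
            src += stride;
        }
    }
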
diff --git a/libavcodec/ppc/h264dsp.c b/libavcodec/ppc/h264dsp.c
index a1fd5253b1..22a8d4117b 100644
--- a/libavcodec/ppc/h264dsp.c
+++ b/libavcodec/ppc/h264dsp.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -34,7 +34,7 @@
#include "libavcodec/h264dec.h"
#include "libavcodec/h264dsp.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
/****************************************************************************
* IDCT transform:
@@ -68,10 +68,17 @@
b2 = vec_mergeh( a1, a3 ); \
b3 = vec_mergel( a1, a3 )
+#if HAVE_BIGENDIAN
+#define vdst_load(d) \
+ vdst_orig = vec_ld(0, dst); \
+ vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask);
+#else
+#define vdst_load(d) vdst = vec_vsx_ld(0, dst)
+#endif
+
#define VEC_LOAD_U8_ADD_S16_STORE_U8(va) \
- vdst_orig = vec_ld(0, dst); \
- vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask); \
- vdst_ss = (vec_s16) vec_mergeh(zero_u8v, vdst); \
+ vdst_load(); \
+ vdst_ss = (vec_s16) VEC_MERGEH(zero_u8v, vdst); \
va = vec_add(va, vdst_ss); \
va_u8 = vec_packsu(va, zero_s16v); \
va_u32 = vec_splat((vec_u32)va_u8, 0); \
@@ -171,26 +178,43 @@ static void h264_idct_add_altivec(uint8_t *dst, int16_t *block, int stride)
d7 = vec_sub(b0v, b7v); \
}
+#if HAVE_BIGENDIAN
+#define GET_2PERM(ldv, stv, d) \
+ ldv = vec_lvsl(0, d); \
+ stv = vec_lvsr(8, d);
+#define dstv_load(d) \
+ vec_u8 hv = vec_ld( 0, d ); \
+ vec_u8 lv = vec_ld( 7, d); \
+ vec_u8 dstv = vec_perm( hv, lv, (vec_u8)perm_ldv );
+#define dest_unligned_store(d) \
+ vec_u8 edgehv; \
+ vec_u8 bodyv = vec_perm( idstsum8, idstsum8, perm_stv ); \
+ vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv ); \
+ lv = vec_sel( lv, bodyv, edgelv ); \
+ vec_st( lv, 7, d ); \
+ hv = vec_ld( 0, d ); \
+ edgehv = vec_perm( zero_u8v, sel, perm_stv ); \
+ hv = vec_sel( hv, bodyv, edgehv ); \
+ vec_st( hv, 0, d );
+#else
+
+#define GET_2PERM(ldv, stv, d) {}
+#define dstv_load(d) vec_u8 dstv = vec_vsx_ld(0, d)
+#define dest_unligned_store(d)\
+ vec_u8 dst8 = vec_perm((vec_u8)idstsum8, dstv, vcprm(2,3,s2,s3));\
+ vec_vsx_st(dst8, 0, d)
+#endif /* HAVE_BIGENDIAN */
+
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
/* unaligned load */ \
- vec_u8 hv = vec_ld( 0, dest ); \
- vec_u8 lv = vec_ld( 7, dest ); \
- vec_u8 dstv = vec_perm( hv, lv, (vec_u8)perm_ldv ); \
+ dstv_load(dest); \
vec_s16 idct_sh6 = vec_sra(idctv, sixv); \
- vec_u16 dst16 = (vec_u16)vec_mergeh(zero_u8v, dstv); \
+ vec_u16 dst16 = (vec_u16)VEC_MERGEH(zero_u8v, dstv); \
vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16); \
vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum); \
- vec_u8 edgehv; \
/* unaligned store */ \
- vec_u8 bodyv = vec_perm( idstsum8, idstsum8, perm_stv );\
- vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv ); \
- lv = vec_sel( lv, bodyv, edgelv ); \
- vec_st( lv, 7, dest ); \
- hv = vec_ld( 0, dest ); \
- edgehv = vec_perm( zero_u8v, sel, perm_stv ); \
- hv = vec_sel( hv, bodyv, edgehv ); \
- vec_st( hv, 0, dest ); \
- }
+ dest_unligned_store(dest);\
+}
static void h264_idct8_add_altivec(uint8_t *dst, int16_t *dct, int stride)
{
@@ -198,8 +222,8 @@ static void h264_idct8_add_altivec(uint8_t *dst, int16_t *dct, int stride)
vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;
- vec_u8 perm_ldv = vec_lvsl(0, dst);
- vec_u8 perm_stv = vec_lvsr(8, dst);
+ vec_u8 perm_ldv, perm_stv;
+ GET_2PERM(perm_ldv, perm_stv, dst);
const vec_u16 onev = vec_splat_u16(1);
const vec_u16 twov = vec_splat_u16(2);
@@ -238,32 +262,41 @@ static void h264_idct8_add_altivec(uint8_t *dst, int16_t *dct, int stride)
ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}
+#if HAVE_BIGENDIAN
+#define DST_LD vec_ld
+#else
+#define DST_LD vec_vsx_ld
+#endif
static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, int16_t *block, int stride, int size)
{
vec_s16 dc16;
vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
+ vec_s32 v_dc32;
LOAD_ZERO;
DECLARE_ALIGNED(16, int, dc);
int i;
dc = (block[0] + 32) >> 6;
block[0] = 0;
- dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1);
+ v_dc32 = vec_lde(0, &dc);
+ dc16 = VEC_SPLAT16((vec_s16)v_dc32, 1);
if (size == 4)
- dc16 = vec_sld(dc16, zero_s16v, 8);
+ dc16 = VEC_SLD16(dc16, zero_s16v, 8);
dcplus = vec_packsu(dc16, zero_s16v);
dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);
+#if HAVE_BIGENDIAN
aligner = vec_lvsr(0, dst);
dcplus = vec_perm(dcplus, dcplus, aligner);
dcminus = vec_perm(dcminus, dcminus, aligner);
+#endif
for (i = 0; i < size; i += 4) {
- v0 = vec_ld(0, dst+0*stride);
- v1 = vec_ld(0, dst+1*stride);
- v2 = vec_ld(0, dst+2*stride);
- v3 = vec_ld(0, dst+3*stride);
+ v0 = DST_LD(0, dst+0*stride);
+ v1 = DST_LD(0, dst+1*stride);
+ v2 = DST_LD(0, dst+2*stride);
+ v3 = DST_LD(0, dst+3*stride);
v0 = vec_adds(v0, dcplus);
v1 = vec_adds(v1, dcplus);
@@ -275,10 +308,10 @@ static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, int16_t *bl
v2 = vec_subs(v2, dcminus);
v3 = vec_subs(v3, dcminus);
- vec_st(v0, 0, dst+0*stride);
- vec_st(v1, 0, dst+1*stride);
- vec_st(v2, 0, dst+2*stride);
- vec_st(v3, 0, dst+3*stride);
+ VEC_ST(v0, 0, dst+0*stride);
+ VEC_ST(v1, 0, dst+1*stride);
+ VEC_ST(v2, 0, dst+2*stride);
+ VEC_ST(v3, 0, dst+3*stride);
dst += 4*stride;
}
@@ -497,7 +530,7 @@ static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
register vec_u8 average = vec_avg(p0, q0);
register vec_u8 temp;
- register vec_u8 uncliped;
+ register vec_u8 unclipped;
register vec_u8 ones;
register vec_u8 max;
register vec_u8 min;
@@ -507,10 +540,10 @@ static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
average = vec_avg(average, p2); /*avg(p2, avg(p0, q0)) */
ones = vec_splat_u8(1);
temp = vec_and(temp, ones); /*(p2^avg(p0, q0)) & 1 */
- uncliped = vec_subs(average, temp); /*(p2+((p0+q0+1)>>1))>>1 */
+ unclipped = vec_subs(average, temp); /*(p2+((p0+q0+1)>>1))>>1 */
max = vec_adds(p1, tc0);
min = vec_subs(p1, tc0);
- newp1 = vec_max(min, uncliped);
+ newp1 = vec_max(min, unclipped);
newp1 = vec_min(max, newp1);
return newp1;
}
@@ -639,6 +672,9 @@ void weight_h264_W_altivec(uint8_t *block, int stride, int height,
temp[2] = offset;
vtemp = (vec_s16)vec_ld(0, temp);
+#if !HAVE_BIGENDIAN
+        vtemp = (vec_s16)vec_perm(vtemp, vtemp, vcswapi2s(0,1,2,3));
+#endif
vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
vweight = vec_splat(vtemp, 3);
voffset = vec_splat(vtemp, 5);
@@ -647,8 +683,8 @@ void weight_h264_W_altivec(uint8_t *block, int stride, int height,
for (y = 0; y < height; y++) {
vblock = vec_ld(0, block);
- v0 = (vec_s16)vec_mergeh(zero_u8v, vblock);
- v1 = (vec_s16)vec_mergel(zero_u8v, vblock);
+ v0 = (vec_s16)VEC_MERGEH(zero_u8v, vblock);
+ v1 = (vec_s16)VEC_MERGEL(zero_u8v, vblock);
if (w == 16 || aligned) {
v0 = vec_mladd(v0, vweight, zero_s16v);
@@ -685,6 +721,9 @@ void biweight_h264_W_altivec(uint8_t *dst, uint8_t *src, int stride, int height,
temp[3] = offset;
vtemp = (vec_s16)vec_ld(0, temp);
+#if !HAVE_BIGENDIAN
+        vtemp = (vec_s16)vec_perm(vtemp, vtemp, vcswapi2s(0,1,2,3));
+#endif
vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
vweights = vec_splat(vtemp, 3);
vweightd = vec_splat(vtemp, 5);
@@ -696,10 +735,10 @@ void biweight_h264_W_altivec(uint8_t *dst, uint8_t *src, int stride, int height,
vdst = vec_ld(0, dst);
vsrc = vec_ld(0, src);
- v0 = (vec_s16)vec_mergeh(zero_u8v, vdst);
- v1 = (vec_s16)vec_mergel(zero_u8v, vdst);
- v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc);
- v3 = (vec_s16)vec_mergel(zero_u8v, vsrc);
+ v0 = (vec_s16)VEC_MERGEH(zero_u8v, vdst);
+ v1 = (vec_s16)VEC_MERGEL(zero_u8v, vdst);
+ v2 = (vec_s16)VEC_MERGEH(zero_u8v, vsrc);
+ v3 = (vec_s16)VEC_MERGEL(zero_u8v, vsrc);
if (w == 8) {
if (src_aligned)
@@ -746,12 +785,12 @@ static void biweight_h264_pixels ## W ## _altivec(uint8_t *dst, uint8_t *src, in
H264_WEIGHT(16)
H264_WEIGHT( 8)
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
av_cold void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth,
const int chroma_format_idc)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
@@ -773,5 +812,5 @@ av_cold void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth,
c->biweight_h264_pixels_tab[0] = biweight_h264_pixels16_altivec;
c->biweight_h264_pixels_tab[1] = biweight_h264_pixels8_altivec;
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
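
The DC-only path touched above adds a single reconstructed DC value to a 4x4 or 8x8 block of pixels with clipping; the little-endian changes only swap the load/store primitives (DST_LD/VEC_ST) and drop the lvsr alignment permute. A scalar sketch of the operation, with an illustrative _ref name and a hypothetical clip_u8 helper, assuming it mirrors h264_idct_dc_add_internal():

    #include <stdint.h>

    static uint8_t clip_u8(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : v;
    }

    /* Scalar sketch of the DC-only IDCT add (size is 4 or 8). */
    void h264_idct_dc_add_ref(uint8_t *dst, int16_t *block,
                              int stride, int size)
    {
        int i, j;
        int dc = (block[0] + 32) >> 6;

        block[0] = 0;
        for (i = 0; i < size; i++) {
            for (j = 0; j < size; j++)
                dst[j] = clip_u8(dst[j] + dc);
            dst += stride;
        }
    }
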
diff --git a/libavcodec/ppc/h264qpel.c b/libavcodec/ppc/h264qpel.c
index aff20c15a2..575f504d32 100644
--- a/libavcodec/ppc/h264qpel.c
+++ b/libavcodec/ppc/h264qpel.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -28,7 +28,7 @@
#include "libavcodec/h264qpel.h"
#include "hpeldsp_altivec.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)
@@ -191,86 +191,79 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, cons
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
+#if HAVE_BIGENDIAN
+#define put_unligned_store(s, dest) { \
+ tmp1 = vec_ld(0, dest); \
+ mask = vec_lvsl(0, dest); \
+ tmp2 = vec_ld(15, dest); \
+ edges = vec_perm(tmp2, tmp1, mask); \
+ align = vec_lvsr(0, dest); \
+ tmp2 = vec_perm(s, edges, align); \
+ tmp1 = vec_perm(edges, s, align); \
+ vec_st(tmp2, 15, dest); \
+ vec_st(tmp1, 0 , dest); \
+ }
+#else
+#define put_unligned_store(s, dest) vec_vsx_st(s, 0, dest);
+#endif /* HAVE_BIGENDIAN */
+
static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
const uint8_t * src2, int dst_stride,
int src_stride1, int h)
{
int i;
- vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;
-
+ vec_u8 a, b, d, mask_;
+#if HAVE_BIGENDIAN
+ vec_u8 tmp1, tmp2, mask, edges, align;
mask_ = vec_lvsl(0, src2);
+#endif
for (i = 0; i < h; i++) {
-
- tmp1 = vec_ld(i * src_stride1, src1);
- mask = vec_lvsl(i * src_stride1, src1);
- tmp2 = vec_ld(i * src_stride1 + 15, src1);
-
- a = vec_perm(tmp1, tmp2, mask);
-
- tmp1 = vec_ld(i * 16, src2);
- tmp2 = vec_ld(i * 16 + 15, src2);
-
- b = vec_perm(tmp1, tmp2, mask_);
-
- tmp1 = vec_ld(0, dst);
- mask = vec_lvsl(0, dst);
- tmp2 = vec_ld(15, dst);
-
+ a = unaligned_load(i * src_stride1, src1);
+ b = load_with_perm_vec(i * 16, src2, mask_);
d = vec_avg(a, b);
-
- edges = vec_perm(tmp2, tmp1, mask);
-
- align = vec_lvsr(0, dst);
-
- tmp2 = vec_perm(d, edges, align);
- tmp1 = vec_perm(edges, d, align);
-
- vec_st(tmp2, 15, dst);
- vec_st(tmp1, 0 , dst);
-
+ put_unligned_store(d, dst);
dst += dst_stride;
}
}
+#if HAVE_BIGENDIAN
+#define avg_unligned_store(s, dest){ \
+ tmp1 = vec_ld(0, dest); \
+ mask = vec_lvsl(0, dest); \
+ tmp2 = vec_ld(15, dest); \
+ a = vec_avg(vec_perm(tmp1, tmp2, mask), s); \
+ edges = vec_perm(tmp2, tmp1, mask); \
+ align = vec_lvsr(0, dest); \
+ tmp2 = vec_perm(a, edges, align); \
+ tmp1 = vec_perm(edges, a, align); \
+ vec_st(tmp2, 15, dest); \
+ vec_st(tmp1, 0 , dest); \
+ }
+#else
+#define avg_unligned_store(s, dest){ \
+    a = vec_avg(vec_vsx_ld(0, dest), s);   \
+    vec_vsx_st(a, 0, dest);                \
+ }
+#endif /* HAVE_BIGENDIAN */
+
static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
const uint8_t * src2, int dst_stride,
int src_stride1, int h)
{
int i;
- vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;
+ vec_u8 a, b, d, mask_;
+#if HAVE_BIGENDIAN
+ vec_u8 tmp1, tmp2, mask, edges, align;
mask_ = vec_lvsl(0, src2);
+#endif
for (i = 0; i < h; i++) {
-
- tmp1 = vec_ld(i * src_stride1, src1);
- mask = vec_lvsl(i * src_stride1, src1);
- tmp2 = vec_ld(i * src_stride1 + 15, src1);
-
- a = vec_perm(tmp1, tmp2, mask);
-
- tmp1 = vec_ld(i * 16, src2);
- tmp2 = vec_ld(i * 16 + 15, src2);
-
- b = vec_perm(tmp1, tmp2, mask_);
-
- tmp1 = vec_ld(0, dst);
- mask = vec_lvsl(0, dst);
- tmp2 = vec_ld(15, dst);
-
- d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));
-
- edges = vec_perm(tmp2, tmp1, mask);
-
- align = vec_lvsr(0, dst);
-
- tmp2 = vec_perm(d, edges, align);
- tmp1 = vec_perm(edges, d, align);
-
- vec_st(tmp2, 15, dst);
- vec_st(tmp1, 0 , dst);
-
+ a = unaligned_load(i * src_stride1, src1);
+ b = load_with_perm_vec(i * 16, src2, mask_);
+ d = vec_avg(a, b);
+ avg_unligned_store(d, dst);
dst += dst_stride;
}
}
@@ -282,11 +275,11 @@ static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
av_cold void ff_h264qpel_init_ppc(H264QpelContext *c, int bit_depth)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
const int high_bit_depth = bit_depth > 8;
if (!PPC_ALTIVEC(av_get_cpu_flags()))
@@ -315,5 +308,5 @@ av_cold void ff_h264qpel_init_ppc(H264QpelContext *c, int bit_depth)
dspfunc(avg_h264_qpel, 0, 16);
#undef dspfunc
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
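
put_pixels16_l2_altivec() and avg_pixels16_l2_altivec() combine two sources with vec_avg (a rounded byte average); the VSX branches of the new put_unligned_store/avg_unligned_store macros simply replace the lvsl/lvsr permute dance with direct unaligned vec_vsx_ld/vec_vsx_st accesses. A scalar sketch of the put_ variant, with illustrative _ref names:

    #include <stdint.h>

    static uint8_t rnd_avg(int a, int b)
    {
        return (a + b + 1) >> 1;   /* same rounding as vec_avg() */
    }

    /* Scalar sketch: one rounded average of src1 (stride src_stride1)
     * and src2 (packed rows of 16 bytes) per output row. The avg_
     * variant additionally averages the result with the bytes
     * already present in dst. */
    void put_pixels16_l2_ref(uint8_t *dst, const uint8_t *src1,
                             const uint8_t *src2, int dst_stride,
                             int src_stride1, int h)
    {
        int i, j;
        for (i = 0; i < h; i++) {
            for (j = 0; j < 16; j++)
                dst[j] = rnd_avg(src1[i * src_stride1 + j],
                                 src2[i * 16 + j]);
            dst += dst_stride;
        }
    }
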
diff --git a/libavcodec/ppc/h264qpel_template.c b/libavcodec/ppc/h264qpel_template.c
index 6de063a719..2f25e74840 100644
--- a/libavcodec/ppc/h264qpel_template.c
+++ b/libavcodec/ppc/h264qpel_template.c
@@ -1,30 +1,104 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include "libavutil/mem.h"
+#include "config.h"
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif
-#ifdef DEBUG
-#define ASSERT_ALIGNED(ptr) assert(((unsigned long)ptr&0x0000000F));
+#include "libavutil/avassert.h"
+#include "libavutil/mem.h"
+#include "libavutil/ppc/types_altivec.h"
+#include "libavutil/ppc/util_altivec.h"
+
+#define ASSERT_ALIGNED(ptr) av_assert2(!((uintptr_t)ptr&0x0000000F));
+
+#if HAVE_BIGENDIAN
+#define load_alignment(s, ali, pm2, pm1, pp0, pp1, pp2, pp3){\
+ vec_u8 srcR1 = vec_ld(-2, s);\
+ vec_u8 srcR2 = vec_ld(14, s);\
+ switch (ali) {\
+ default: {\
+ srcM2 = vec_perm(srcR1, srcR2, pm2);\
+ srcM1 = vec_perm(srcR1, srcR2, pm1);\
+ srcP0 = vec_perm(srcR1, srcR2, pp0);\
+ srcP1 = vec_perm(srcR1, srcR2, pp1);\
+ srcP2 = vec_perm(srcR1, srcR2, pp2);\
+ srcP3 = vec_perm(srcR1, srcR2, pp3);\
+ } break;\
+ case 11: {\
+ srcM2 = vec_perm(srcR1, srcR2, pm2);\
+ srcM1 = vec_perm(srcR1, srcR2, pm1);\
+ srcP0 = vec_perm(srcR1, srcR2, pp0);\
+ srcP1 = vec_perm(srcR1, srcR2, pp1);\
+ srcP2 = vec_perm(srcR1, srcR2, pp2);\
+ srcP3 = srcR2;\
+ } break;\
+ case 12: {\
+ vec_u8 srcR3 = vec_ld(30, s);\
+ srcM2 = vec_perm(srcR1, srcR2, pm2);\
+ srcM1 = vec_perm(srcR1, srcR2, pm1);\
+ srcP0 = vec_perm(srcR1, srcR2, pp0);\
+ srcP1 = vec_perm(srcR1, srcR2, pp1);\
+ srcP2 = srcR2;\
+ srcP3 = vec_perm(srcR2, srcR3, pp3);\
+ } break;\
+ case 13: {\
+ vec_u8 srcR3 = vec_ld(30, s);\
+ srcM2 = vec_perm(srcR1, srcR2, pm2);\
+ srcM1 = vec_perm(srcR1, srcR2, pm1);\
+ srcP0 = vec_perm(srcR1, srcR2, pp0);\
+ srcP1 = srcR2;\
+ srcP2 = vec_perm(srcR2, srcR3, pp2);\
+ srcP3 = vec_perm(srcR2, srcR3, pp3);\
+ } break;\
+ case 14: {\
+ vec_u8 srcR3 = vec_ld(30, s);\
+ srcM2 = vec_perm(srcR1, srcR2, pm2);\
+ srcM1 = vec_perm(srcR1, srcR2, pm1);\
+ srcP0 = srcR2;\
+ srcP1 = vec_perm(srcR2, srcR3, pp1);\
+ srcP2 = vec_perm(srcR2, srcR3, pp2);\
+ srcP3 = vec_perm(srcR2, srcR3, pp3);\
+ } break;\
+ case 15: {\
+ vec_u8 srcR3 = vec_ld(30, s);\
+ srcM2 = vec_perm(srcR1, srcR2, pm2);\
+ srcM1 = srcR2;\
+ srcP0 = vec_perm(srcR2, srcR3, pp0);\
+ srcP1 = vec_perm(srcR2, srcR3, pp1);\
+ srcP2 = vec_perm(srcR2, srcR3, pp2);\
+ srcP3 = vec_perm(srcR2, srcR3, pp3);\
+ } break;\
+ }\
+ }
#else
-#define ASSERT_ALIGNED(ptr) ;
-#endif
+#define load_alignment(s, ali, pm2, pm1, pp0, pp1, pp2, pp3){\
+ srcM2 = vec_vsx_ld(-2, s);\
+ srcM1 = vec_vsx_ld(-1, s);\
+ srcP0 = vec_vsx_ld(0, s);\
+ srcP1 = vec_vsx_ld(1, s);\
+ srcP2 = vec_vsx_ld(2, s);\
+ srcP3 = vec_vsx_ld(3, s);\
+ }
+#endif /* HAVE_BIGENDIAN */
/* this code assumes stride % 16 == 0 */
#ifdef PREFIX_h264_qpel16_h_lowpass_altivec
@@ -35,12 +109,7 @@ static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t *dst,
register int i;
LOAD_ZERO;
- const vec_u8 permM2 = vec_lvsl(-2, src);
- const vec_u8 permM1 = vec_lvsl(-1, src);
- const vec_u8 permP0 = vec_lvsl(+0, src);
- const vec_u8 permP1 = vec_lvsl(+1, src);
- const vec_u8 permP2 = vec_lvsl(+2, src);
- const vec_u8 permP3 = vec_lvsl(+3, src);
+ vec_u8 permM2, permM1, permP0, permP1, permP2, permP3;
const vec_s16 v5ss = vec_splat_s16(5);
const vec_u16 v5us = vec_splat_u16(5);
const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
@@ -59,79 +128,32 @@ static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t *dst,
vec_u8 sum, fsum;
+#if HAVE_BIGENDIAN
+ permM2 = vec_lvsl(-2, src);
+ permM1 = vec_lvsl(-1, src);
+ permP0 = vec_lvsl(+0, src);
+ permP1 = vec_lvsl(+1, src);
+ permP2 = vec_lvsl(+2, src);
+ permP3 = vec_lvsl(+3, src);
+#endif /* HAVE_BIGENDIAN */
+
for (i = 0 ; i < 16 ; i ++) {
- vec_u8 srcR1 = vec_ld(-2, src);
- vec_u8 srcR2 = vec_ld(14, src);
-
- switch (align) {
- default: {
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = vec_perm(srcR1, srcR2, permP2);
- srcP3 = vec_perm(srcR1, srcR2, permP3);
- } break;
- case 11: {
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = vec_perm(srcR1, srcR2, permP2);
- srcP3 = srcR2;
- } break;
- case 12: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = srcR2;
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 13: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = srcR2;
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 14: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = srcR2;
- srcP1 = vec_perm(srcR2, srcR3, permP1);
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 15: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = srcR2;
- srcP0 = vec_perm(srcR2, srcR3, permP0);
- srcP1 = vec_perm(srcR2, srcR3, permP1);
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- }
-
- srcP0A = (vec_s16) vec_mergeh(zero_u8v, srcP0);
- srcP0B = (vec_s16) vec_mergel(zero_u8v, srcP0);
- srcP1A = (vec_s16) vec_mergeh(zero_u8v, srcP1);
- srcP1B = (vec_s16) vec_mergel(zero_u8v, srcP1);
-
- srcP2A = (vec_s16) vec_mergeh(zero_u8v, srcP2);
- srcP2B = (vec_s16) vec_mergel(zero_u8v, srcP2);
- srcP3A = (vec_s16) vec_mergeh(zero_u8v, srcP3);
- srcP3B = (vec_s16) vec_mergel(zero_u8v, srcP3);
-
- srcM1A = (vec_s16) vec_mergeh(zero_u8v, srcM1);
- srcM1B = (vec_s16) vec_mergel(zero_u8v, srcM1);
- srcM2A = (vec_s16) vec_mergeh(zero_u8v, srcM2);
- srcM2B = (vec_s16) vec_mergel(zero_u8v, srcM2);
+ load_alignment(src, align, permM2, permM1, permP0, permP1, permP2, permP3);
+
+ srcP0A = (vec_s16) VEC_MERGEH(zero_u8v, srcP0);
+ srcP0B = (vec_s16) VEC_MERGEL(zero_u8v, srcP0);
+ srcP1A = (vec_s16) VEC_MERGEH(zero_u8v, srcP1);
+ srcP1B = (vec_s16) VEC_MERGEL(zero_u8v, srcP1);
+
+ srcP2A = (vec_s16) VEC_MERGEH(zero_u8v, srcP2);
+ srcP2B = (vec_s16) VEC_MERGEL(zero_u8v, srcP2);
+ srcP3A = (vec_s16) VEC_MERGEH(zero_u8v, srcP3);
+ srcP3B = (vec_s16) VEC_MERGEL(zero_u8v, srcP3);
+
+ srcM1A = (vec_s16) VEC_MERGEH(zero_u8v, srcM1);
+ srcM1B = (vec_s16) VEC_MERGEL(zero_u8v, srcM1);
+ srcM2A = (vec_s16) VEC_MERGEH(zero_u8v, srcM2);
+ srcM2B = (vec_s16) VEC_MERGEL(zero_u8v, srcM2);
sum1A = vec_adds(srcP0A, srcP1A);
sum1B = vec_adds(srcP0B, srcP1B);
@@ -178,7 +200,10 @@ static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t *dst,
register int i;
LOAD_ZERO;
- const vec_u8 perm = vec_lvsl(0, src);
+ vec_u8 perm;
+#if HAVE_BIGENDIAN
+ perm = vec_lvsl(0, src);
+#endif
const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
const vec_u16 v5us = vec_splat_u16(5);
const vec_s16 v5ss = vec_splat_s16(5);
@@ -186,52 +211,41 @@ static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t *dst,
const uint8_t *srcbis = src - (srcStride * 2);
- const vec_u8 srcM2a = vec_ld(0, srcbis);
- const vec_u8 srcM2b = vec_ld(16, srcbis);
- const vec_u8 srcM2 = vec_perm(srcM2a, srcM2b, perm);
- //srcbis += srcStride;
- const vec_u8 srcM1a = vec_ld(0, srcbis += srcStride);
- const vec_u8 srcM1b = vec_ld(16, srcbis);
- const vec_u8 srcM1 = vec_perm(srcM1a, srcM1b, perm);
- //srcbis += srcStride;
- const vec_u8 srcP0a = vec_ld(0, srcbis += srcStride);
- const vec_u8 srcP0b = vec_ld(16, srcbis);
- const vec_u8 srcP0 = vec_perm(srcP0a, srcP0b, perm);
- //srcbis += srcStride;
- const vec_u8 srcP1a = vec_ld(0, srcbis += srcStride);
- const vec_u8 srcP1b = vec_ld(16, srcbis);
- const vec_u8 srcP1 = vec_perm(srcP1a, srcP1b, perm);
- //srcbis += srcStride;
- const vec_u8 srcP2a = vec_ld(0, srcbis += srcStride);
- const vec_u8 srcP2b = vec_ld(16, srcbis);
- const vec_u8 srcP2 = vec_perm(srcP2a, srcP2b, perm);
- //srcbis += srcStride;
-
- vec_s16 srcM2ssA = (vec_s16) vec_mergeh(zero_u8v, srcM2);
- vec_s16 srcM2ssB = (vec_s16) vec_mergel(zero_u8v, srcM2);
- vec_s16 srcM1ssA = (vec_s16) vec_mergeh(zero_u8v, srcM1);
- vec_s16 srcM1ssB = (vec_s16) vec_mergel(zero_u8v, srcM1);
- vec_s16 srcP0ssA = (vec_s16) vec_mergeh(zero_u8v, srcP0);
- vec_s16 srcP0ssB = (vec_s16) vec_mergel(zero_u8v, srcP0);
- vec_s16 srcP1ssA = (vec_s16) vec_mergeh(zero_u8v, srcP1);
- vec_s16 srcP1ssB = (vec_s16) vec_mergel(zero_u8v, srcP1);
- vec_s16 srcP2ssA = (vec_s16) vec_mergeh(zero_u8v, srcP2);
- vec_s16 srcP2ssB = (vec_s16) vec_mergel(zero_u8v, srcP2);
+ const vec_u8 srcM2 = load_with_perm_vec(0, srcbis, perm);
+ srcbis += srcStride;
+ const vec_u8 srcM1 = load_with_perm_vec(0, srcbis, perm);
+ srcbis += srcStride;
+ const vec_u8 srcP0 = load_with_perm_vec(0, srcbis, perm);
+ srcbis += srcStride;
+ const vec_u8 srcP1 = load_with_perm_vec(0, srcbis, perm);
+ srcbis += srcStride;
+ const vec_u8 srcP2 = load_with_perm_vec(0, srcbis, perm);
+ srcbis += srcStride;
+
+ vec_s16 srcM2ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcM2);
+ vec_s16 srcM2ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcM2);
+ vec_s16 srcM1ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcM1);
+ vec_s16 srcM1ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcM1);
+ vec_s16 srcP0ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcP0);
+ vec_s16 srcP0ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcP0);
+ vec_s16 srcP1ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcP1);
+ vec_s16 srcP1ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcP1);
+ vec_s16 srcP2ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcP2);
+ vec_s16 srcP2ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcP2);
vec_s16 pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
psumA, psumB, sumA, sumB,
srcP3ssA, srcP3ssB,
sum1A, sum1B, sum2A, sum2B, sum3A, sum3B;
- vec_u8 sum, fsum, srcP3a, srcP3b, srcP3;
+ vec_u8 sum, fsum, srcP3;
for (i = 0 ; i < 16 ; i++) {
- srcP3a = vec_ld(0, srcbis += srcStride);
- srcP3b = vec_ld(16, srcbis);
- srcP3 = vec_perm(srcP3a, srcP3b, perm);
- srcP3ssA = (vec_s16) vec_mergeh(zero_u8v, srcP3);
- srcP3ssB = (vec_s16) vec_mergel(zero_u8v, srcP3);
- //srcbis += srcStride;
+ srcP3 = load_with_perm_vec(0, srcbis, perm);
+ srcbis += srcStride;
+
+ srcP3ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcP3);
+ srcP3ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcP3);
sum1A = vec_adds(srcP0ssA, srcP1ssA);
sum1B = vec_adds(srcP0ssB, srcP1ssB);
@@ -288,12 +302,7 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t *dst, int16_t *tmp,
{
register int i;
LOAD_ZERO;
- const vec_u8 permM2 = vec_lvsl(-2, src);
- const vec_u8 permM1 = vec_lvsl(-1, src);
- const vec_u8 permP0 = vec_lvsl(+0, src);
- const vec_u8 permP1 = vec_lvsl(+1, src);
- const vec_u8 permP2 = vec_lvsl(+2, src);
- const vec_u8 permP3 = vec_lvsl(+3, src);
+ vec_u8 permM2, permM1, permP0, permP1, permP2, permP3;
const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
const vec_u32 v10ui = vec_splat_u32(10);
const vec_s16 v5ss = vec_splat_s16(5);
@@ -325,81 +334,35 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t *dst, int16_t *tmp,
vec_u8 fsum, sumv, sum;
vec_s16 ssume, ssumo;
+#if HAVE_BIGENDIAN
+ permM2 = vec_lvsl(-2, src);
+ permM1 = vec_lvsl(-1, src);
+ permP0 = vec_lvsl(+0, src);
+ permP1 = vec_lvsl(+1, src);
+ permP2 = vec_lvsl(+2, src);
+ permP3 = vec_lvsl(+3, src);
+#endif /* HAVE_BIGENDIAN */
+
src -= (2 * srcStride);
for (i = 0 ; i < 21 ; i ++) {
vec_u8 srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
- vec_u8 srcR1 = vec_ld(-2, src);
- vec_u8 srcR2 = vec_ld(14, src);
-
- switch (align) {
- default: {
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = vec_perm(srcR1, srcR2, permP2);
- srcP3 = vec_perm(srcR1, srcR2, permP3);
- } break;
- case 11: {
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = vec_perm(srcR1, srcR2, permP2);
- srcP3 = srcR2;
- } break;
- case 12: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = srcR2;
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 13: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = srcR2;
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 14: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = srcR2;
- srcP1 = vec_perm(srcR2, srcR3, permP1);
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 15: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = srcR2;
- srcP0 = vec_perm(srcR2, srcR3, permP0);
- srcP1 = vec_perm(srcR2, srcR3, permP1);
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- }
-
- srcP0A = (vec_s16) vec_mergeh(zero_u8v, srcP0);
- srcP0B = (vec_s16) vec_mergel(zero_u8v, srcP0);
- srcP1A = (vec_s16) vec_mergeh(zero_u8v, srcP1);
- srcP1B = (vec_s16) vec_mergel(zero_u8v, srcP1);
-
- srcP2A = (vec_s16) vec_mergeh(zero_u8v, srcP2);
- srcP2B = (vec_s16) vec_mergel(zero_u8v, srcP2);
- srcP3A = (vec_s16) vec_mergeh(zero_u8v, srcP3);
- srcP3B = (vec_s16) vec_mergel(zero_u8v, srcP3);
-
- srcM1A = (vec_s16) vec_mergeh(zero_u8v, srcM1);
- srcM1B = (vec_s16) vec_mergel(zero_u8v, srcM1);
- srcM2A = (vec_s16) vec_mergeh(zero_u8v, srcM2);
- srcM2B = (vec_s16) vec_mergel(zero_u8v, srcM2);
+
+ load_alignment(src, align, permM2, permM1, permP0, permP1, permP2, permP3);
+
+ srcP0A = (vec_s16) VEC_MERGEH(zero_u8v, srcP0);
+ srcP0B = (vec_s16) VEC_MERGEL(zero_u8v, srcP0);
+ srcP1A = (vec_s16) VEC_MERGEH(zero_u8v, srcP1);
+ srcP1B = (vec_s16) VEC_MERGEL(zero_u8v, srcP1);
+
+ srcP2A = (vec_s16) VEC_MERGEH(zero_u8v, srcP2);
+ srcP2B = (vec_s16) VEC_MERGEL(zero_u8v, srcP2);
+ srcP3A = (vec_s16) VEC_MERGEH(zero_u8v, srcP3);
+ srcP3B = (vec_s16) VEC_MERGEL(zero_u8v, srcP3);
+
+ srcM1A = (vec_s16) VEC_MERGEH(zero_u8v, srcM1);
+ srcM1B = (vec_s16) VEC_MERGEL(zero_u8v, srcM1);
+ srcM2A = (vec_s16) VEC_MERGEH(zero_u8v, srcM2);
+ srcM2B = (vec_s16) VEC_MERGEL(zero_u8v, srcM2);
sum1A = vec_adds(srcP0A, srcP1A);
sum1B = vec_adds(srcP0B, srcP1B);
@@ -448,8 +411,8 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t *dst, int16_t *tmp,
const vec_s16 sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
const vec_s16 sum2A = vec_adds(tmpM1ssA, tmpP2ssA);
const vec_s16 sum2B = vec_adds(tmpM1ssB, tmpP2ssB);
- const vec_s16 sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
- const vec_s16 sum3B = vec_adds(tmpM2ssB, tmpP3ssB);
+ vec_s16 sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
+ vec_s16 sum3B = vec_adds(tmpM2ssB, tmpP3ssB);
tmpbis += tmpStride;
@@ -474,10 +437,14 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t *dst, int16_t *tmp,
pp2Be = vec_mule(sum2B, v5ss);
pp2Bo = vec_mulo(sum2B, v5ss);
- pp3Ae = vec_sra((vec_s32)sum3A, v16ui);
pp3Ao = vec_mulo(sum3A, v1ss);
- pp3Be = vec_sra((vec_s32)sum3B, v16ui);
pp3Bo = vec_mulo(sum3B, v1ss);
+#if !HAVE_BIGENDIAN
+    sum3A = (vec_s16)vec_perm(sum3A, sum3A, vcswapi2s(0,1,2,3));
+    sum3B = (vec_s16)vec_perm(sum3B, sum3B, vcswapi2s(0,1,2,3));
+#endif
+ pp3Ae = vec_sra((vec_s32)sum3A, v16ui);
+ pp3Be = vec_sra((vec_s32)sum3B, v16ui);
pp1cAe = vec_add(pp1Ae, v512si);
pp1cAo = vec_add(pp1Ao, v512si);
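
The load_alignment macro added at the top of this template hides the same endian split for the six staggered loads around each pixel (src-2 through src+3); the filter those loads feed is the standard 6-tap (1,-5,20,20,-5,1) luma lowpass. A scalar sketch of one 16-pixel row of the horizontal pass, with an illustrative _ref name and hypothetical clip_u8 helper:

    #include <stdint.h>

    static uint8_t clip_u8(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : v;
    }

    /* Scalar sketch of one row computed by
     * PREFIX_h264_qpel16_h_lowpass_altivec(). */
    void qpel16_h_lowpass_row_ref(uint8_t *dst, const uint8_t *src)
    {
        int x;
        for (x = 0; x < 16; x++)
            dst[x] = clip_u8((src[x - 2] - 5 * src[x - 1] + 20 * src[x] +
                              20 * src[x + 1] - 5 * src[x + 2] +
                              src[x + 3] + 16) >> 5);
    }
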
diff --git a/libavcodec/ppc/hpeldsp_altivec.c b/libavcodec/ppc/hpeldsp_altivec.c
index 82b5ce7fc7..87a1f05b6a 100644
--- a/libavcodec/ppc/hpeldsp_altivec.c
+++ b/libavcodec/ppc/hpeldsp_altivec.c
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -34,16 +34,15 @@
#include "libavcodec/hpeldsp.h"
#include "hpeldsp_altivec.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
/* next one assumes that ((line_size % 16) == 0) */
void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
- register vector unsigned char pixelsv1, pixelsv2;
- register vector unsigned char pixelsv1B, pixelsv2B;
- register vector unsigned char pixelsv1C, pixelsv2C;
- register vector unsigned char pixelsv1D, pixelsv2D;
+ register vector unsigned char pixelsv1;
+ register vector unsigned char pixelsv1B;
+ register vector unsigned char pixelsv1C;
+ register vector unsigned char pixelsv1D;
- register vector unsigned char perm = vec_lvsl(0, pixels);
int i;
register ptrdiff_t line_size_2 = line_size << 1;
register ptrdiff_t line_size_3 = line_size + line_size_2;
@@ -55,22 +54,14 @@ void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t li
// -funroll-loops w/ this is bad - 74 cycles again.
// all this is on a 7450, tuning for the 7450
for (i = 0; i < h; i += 4) {
- pixelsv1 = vec_ld( 0, pixels);
- pixelsv2 = vec_ld(15, pixels);
- pixelsv1B = vec_ld(line_size, pixels);
- pixelsv2B = vec_ld(15 + line_size, pixels);
- pixelsv1C = vec_ld(line_size_2, pixels);
- pixelsv2C = vec_ld(15 + line_size_2, pixels);
- pixelsv1D = vec_ld(line_size_3, pixels);
- pixelsv2D = vec_ld(15 + line_size_3, pixels);
- vec_st(vec_perm(pixelsv1, pixelsv2, perm),
- 0, (unsigned char*)block);
- vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
- line_size, (unsigned char*)block);
- vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
- line_size_2, (unsigned char*)block);
- vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
- line_size_3, (unsigned char*)block);
+ pixelsv1 = unaligned_load( 0, pixels);
+ pixelsv1B = unaligned_load(line_size, pixels);
+ pixelsv1C = unaligned_load(line_size_2, pixels);
+ pixelsv1D = unaligned_load(line_size_3, pixels);
+ VEC_ST(pixelsv1, 0, (unsigned char*)block);
+ VEC_ST(pixelsv1B, line_size, (unsigned char*)block);
+ VEC_ST(pixelsv1C, line_size_2, (unsigned char*)block);
+ VEC_ST(pixelsv1D, line_size_3, (unsigned char*)block);
pixels+=line_size_4;
block +=line_size_4;
}
@@ -80,15 +71,12 @@ void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t li
#define op_avg(a,b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
void ff_avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
- register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
- register vector unsigned char perm = vec_lvsl(0, pixels);
- int i;
+ register vector unsigned char pixelsv, blockv;
+ int i;
for (i = 0; i < h; i++) {
- pixelsv1 = vec_ld( 0, pixels);
- pixelsv2 = vec_ld(16,pixels);
blockv = vec_ld(0, block);
- pixelsv = vec_perm(pixelsv1, pixelsv2, perm);
+ pixelsv = VEC_LD( 0, pixels);
blockv = vec_avg(blockv,pixelsv);
vec_st(blockv, 0, (unsigned char*)block);
pixels+=line_size;
@@ -108,9 +96,7 @@ static void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, ptrdiff
int rightside = ((unsigned long)block & 0x0000000F);
blockv = vec_ld(0, block);
- pixelsv1 = vec_ld( 0, pixels);
- pixelsv2 = vec_ld(16, pixels);
- pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));
+ pixelsv = VEC_LD( 0, pixels);
if (rightside) {
pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
@@ -132,21 +118,16 @@ static void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdi
{
register int i;
register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
- register vector unsigned char blockv, temp1, temp2;
+ register vector unsigned char blockv;
register vector unsigned short pixelssum1, pixelssum2, temp3;
register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);
- temp1 = vec_ld(0, pixels);
- temp2 = vec_ld(16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
- if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
- }
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = VEC_LD(0, pixels);
+ pixelsv2 = VEC_LD(1, pixels);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
+
pixelssum1 = vec_add((vector unsigned short)pixelsv1,
(vector unsigned short)pixelsv2);
pixelssum1 = vec_add(pixelssum1, vctwo);
@@ -155,17 +136,10 @@ static void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdi
int rightside = ((unsigned long)block & 0x0000000F);
blockv = vec_ld(0, block);
- temp1 = vec_ld(line_size, pixels);
- temp2 = vec_ld(line_size + 16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
- if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
- }
-
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = unaligned_load(line_size, pixels);
+ pixelsv2 = unaligned_load(line_size+1, pixels);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum2 = vec_add((vector unsigned short)pixelsv1,
(vector unsigned short)pixelsv2);
temp3 = vec_add(pixelssum1, pixelssum2);
@@ -191,22 +165,16 @@ static void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels
{
register int i;
register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
- register vector unsigned char blockv, temp1, temp2;
+ register vector unsigned char blockv;
register vector unsigned short pixelssum1, pixelssum2, temp3;
register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);
- temp1 = vec_ld(0, pixels);
- temp2 = vec_ld(16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
- if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
- }
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = VEC_LD(0, pixels);
+ pixelsv2 = VEC_LD(1, pixels);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum1 = vec_add((vector unsigned short)pixelsv1,
(vector unsigned short)pixelsv2);
pixelssum1 = vec_add(pixelssum1, vcone);
@@ -215,17 +183,10 @@ static void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels
int rightside = ((unsigned long)block & 0x0000000F);
blockv = vec_ld(0, block);
- temp1 = vec_ld(line_size, pixels);
- temp2 = vec_ld(line_size + 16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
- if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
- }
-
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = unaligned_load(line_size, pixels);
+ pixelsv2 = unaligned_load(line_size+1, pixels);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum2 = vec_add((vector unsigned short)pixelsv1,
(vector unsigned short)pixelsv2);
temp3 = vec_add(pixelssum1, pixelssum2);
@@ -251,24 +212,18 @@ static void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, pt
{
register int i;
register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
- register vector unsigned char blockv, temp1, temp2;
+ register vector unsigned char blockv;
register vector unsigned short temp3, temp4,
pixelssum1, pixelssum2, pixelssum3, pixelssum4;
register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);
- temp1 = vec_ld(0, pixels);
- temp2 = vec_ld(16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
- if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
- }
- pixelsv3 = vec_mergel(vczero, pixelsv1);
- pixelsv4 = vec_mergel(vczero, pixelsv2);
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = VEC_LD(0, pixels);
+ pixelsv2 = VEC_LD(1, pixels);
+ pixelsv3 = VEC_MERGEL(vczero, pixelsv1);
+ pixelsv4 = VEC_MERGEL(vczero, pixelsv2);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum3 = vec_add((vector unsigned short)pixelsv3,
(vector unsigned short)pixelsv4);
pixelssum3 = vec_add(pixelssum3, vctwo);
@@ -279,20 +234,13 @@ static void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, pt
for (i = 0; i < h ; i++) {
blockv = vec_ld(0, block);
- temp1 = vec_ld(line_size, pixels);
- temp2 = vec_ld(line_size + 16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
- if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
- }
-
- pixelsv3 = vec_mergel(vczero, pixelsv1);
- pixelsv4 = vec_mergel(vczero, pixelsv2);
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = unaligned_load(line_size, pixels);
+ pixelsv2 = unaligned_load(line_size+1, pixels);
+ pixelsv3 = VEC_MERGEL(vczero, pixelsv1);
+ pixelsv4 = VEC_MERGEL(vczero, pixelsv2);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum4 = vec_add((vector unsigned short)pixelsv3,
(vector unsigned short)pixelsv4);
pixelssum2 = vec_add((vector unsigned short)pixelsv1,
@@ -319,25 +267,19 @@ static void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pix
{
register int i;
register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
- register vector unsigned char blockv, temp1, temp2;
+ register vector unsigned char blockv;
register vector unsigned short temp3, temp4,
pixelssum1, pixelssum2, pixelssum3, pixelssum4;
register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);
- temp1 = vec_ld(0, pixels);
- temp2 = vec_ld(16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
- if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
- }
- pixelsv3 = vec_mergel(vczero, pixelsv1);
- pixelsv4 = vec_mergel(vczero, pixelsv2);
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = VEC_LD(0, pixels);
+ pixelsv2 = VEC_LD(1, pixels);
+ pixelsv3 = VEC_MERGEL(vczero, pixelsv1);
+ pixelsv4 = VEC_MERGEL(vczero, pixelsv2);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum3 = vec_add((vector unsigned short)pixelsv3,
(vector unsigned short)pixelsv4);
pixelssum3 = vec_add(pixelssum3, vcone);
@@ -346,22 +288,13 @@ static void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pix
pixelssum1 = vec_add(pixelssum1, vcone);
for (i = 0; i < h ; i++) {
- blockv = vec_ld(0, block);
-
- temp1 = vec_ld(line_size, pixels);
- temp2 = vec_ld(line_size + 16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
- if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
- }
-
- pixelsv3 = vec_mergel(vczero, pixelsv1);
- pixelsv4 = vec_mergel(vczero, pixelsv2);
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = unaligned_load(line_size, pixels);
+ pixelsv2 = unaligned_load(line_size+1, pixels);
+ pixelsv3 = VEC_MERGEL(vczero, pixelsv1);
+ pixelsv4 = VEC_MERGEL(vczero, pixelsv2);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum4 = vec_add((vector unsigned short)pixelsv3,
(vector unsigned short)pixelsv4);
pixelssum2 = vec_add((vector unsigned short)pixelsv1,
@@ -376,7 +309,7 @@ static void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pix
blockv = vec_packsu(temp3, temp4);
- vec_st(blockv, 0, block);
+ VEC_ST(blockv, 0, block);
block += line_size;
pixels += line_size;
@@ -388,7 +321,7 @@ static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdi
{
register int i;
register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
- register vector unsigned char blockv, temp1, temp2, blocktemp;
+ register vector unsigned char blockv, blocktemp;
register vector unsigned short pixelssum1, pixelssum2, temp3;
register const vector unsigned char vczero = (const vector unsigned char)
@@ -396,16 +329,10 @@ static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdi
register const vector unsigned short vctwo = (const vector unsigned short)
vec_splat_u16(2);
- temp1 = vec_ld(0, pixels);
- temp2 = vec_ld(16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
- if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
- }
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = VEC_LD(0, pixels);
+ pixelsv2 = VEC_LD(1, pixels);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum1 = vec_add((vector unsigned short)pixelsv1,
(vector unsigned short)pixelsv2);
pixelssum1 = vec_add(pixelssum1, vctwo);
@@ -414,17 +341,11 @@ static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdi
int rightside = ((unsigned long)block & 0x0000000F);
blockv = vec_ld(0, block);
- temp1 = vec_ld(line_size, pixels);
- temp2 = vec_ld(line_size + 16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
- if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
- }
+ pixelsv1 = unaligned_load(line_size, pixels);
+ pixelsv2 = unaligned_load(line_size+1, pixels);
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum2 = vec_add((vector unsigned short)pixelsv1,
(vector unsigned short)pixelsv2);
temp3 = vec_add(pixelssum1, pixelssum2);
@@ -445,11 +366,11 @@ static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdi
pixels += line_size;
}
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
av_cold void ff_hpeldsp_init_ppc(HpelDSPContext *c, int flags)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
@@ -464,5 +385,5 @@ av_cold void ff_hpeldsp_init_ppc(HpelDSPContext *c, int flags)
c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_altivec;
c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
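
The hunks above are representative of the whole patch: the hand-rolled vec_ld / vec_lvsl / vec_perm sequences and the 0x0F special case are folded into the VEC_LD / VEC_MERGEH / VEC_MERGEL / VEC_ST / unaligned_load wrappers, so the same C source can build both on big-endian AltiVec and on little-endian POWER with VSX. A minimal sketch of the kind of wrapper this relies on (the real definitions live in libavutil/ppc/util_altivec.h and differ in detail; MY_VEC_LD and the bare HAVE_BIGENDIAN test are illustrative stand-ins):

    #include <altivec.h>
    #include <stdint.h>

    /* Sketch only: on big-endian AltiVec an unaligned 16-byte load is built
     * from two aligned loads and a permute; VSX has a real unaligned load. */
    #if HAVE_BIGENDIAN
    #define MY_VEC_LD(off, p) \
        vec_perm(vec_ld(off, p), vec_ld((off) + 15, p), vec_lvsl(off, p))
    #else
    #define MY_VEC_LD(off, p)  vec_vsx_ld(off, p)
    #endif

    static vector unsigned char load16_any(const uint8_t *pixels, int offset)
    {
        return MY_VEC_LD(offset, pixels);  /* valid for any alignment of pixels + offset */
    }

With an abstraction of this shape, the per-call-site branch on ((unsigned long)pixels & 0xF) == 0xF becomes unnecessary, which is most of what the deleted lines were doing.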
diff --git a/libavcodec/ppc/hpeldsp_altivec.h b/libavcodec/ppc/hpeldsp_altivec.h
index 98dd80ea1c..590809f539 100644
--- a/libavcodec/ppc/hpeldsp_altivec.h
+++ b/libavcodec/ppc/hpeldsp_altivec.h
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/idctdsp.c b/libavcodec/ppc/idctdsp.c
index 0aaaac0131..f1b42470fb 100644
--- a/libavcodec/ppc/idctdsp.c
+++ b/libavcodec/ppc/idctdsp.c
@@ -1,28 +1,28 @@
/*
* Copyright (c) 2001 Michel Lespinasse
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* NOTE: This code is based on GPL code from the libmpeg2 project. The
 * author, Michel Lespinasse, has given explicit permission to release
- * under LGPL as part of Libav.
+ * under LGPL as part of FFmpeg.
*
- * Libav integration by Dieter Shirley
+ * FFmpeg integration by Dieter Shirley
*
* This file is a direct copy of the AltiVec IDCT module from the libmpeg2
* project. I've deleted all of the libmpeg2-specific code, renamed the
@@ -43,7 +43,7 @@
#include "libavutil/ppc/types_altivec.h"
#include "libavcodec/idctdsp.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
#define IDCT_HALF \
/* 1st stage */ \
@@ -153,6 +153,22 @@ static const vec_s16 constants[5] = {
{ 19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722 }
};
+static void idct_altivec(int16_t *blk)
+{
+ vec_s16 *block = (vec_s16 *) blk;
+
+ IDCT;
+
+ block[0] = vx0;
+ block[1] = vx1;
+ block[2] = vx2;
+ block[3] = vx3;
+ block[4] = vx4;
+ block[5] = vx5;
+ block[6] = vx6;
+ block[7] = vx7;
+}
+
static void idct_put_altivec(uint8_t *dest, ptrdiff_t stride, int16_t *blk)
{
vec_s16 *block = (vec_s16 *) blk;
@@ -193,16 +209,26 @@ static void idct_add_altivec(uint8_t *dest, ptrdiff_t stride, int16_t *blk)
IDCT;
+#if HAVE_BIGENDIAN
p0 = vec_lvsl(0, dest);
p1 = vec_lvsl(stride, dest);
p = vec_splat_u8(-1);
perm0 = vec_mergeh(p, p0);
perm1 = vec_mergeh(p, p1);
+#endif
-#define ADD(dest, src, perm) \
- /* *(uint64_t *) &tmp = *(uint64_t *) dest; */ \
+#if HAVE_BIGENDIAN
+#define GET_TMP2(dest, prm) \
tmp = vec_ld(0, dest); \
- tmp2 = (vec_s16) vec_perm(tmp, (vec_u8) zero, perm); \
+ tmp2 = (vec_s16) vec_perm(tmp, (vec_u8) zero, prm);
+#else
+#define GET_TMP2(dest, prm) \
+ tmp = vec_vsx_ld(0, dest); \
+ tmp2 = (vec_s16) vec_mergeh(tmp, (vec_u8) zero)
+#endif
+
+#define ADD(dest, src, perm) \
+ GET_TMP2(dest, perm); \
tmp3 = vec_adds(tmp2, src); \
tmp = vec_packsu(tmp3, tmp3); \
vec_ste((vec_u32) tmp, 0, (unsigned int *) dest); \
@@ -225,22 +251,23 @@ static void idct_add_altivec(uint8_t *dest, ptrdiff_t stride, int16_t *blk)
ADD(dest, vx7, perm1);
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
av_cold void ff_idctdsp_init_ppc(IDCTDSPContext *c, AVCodecContext *avctx,
unsigned high_bit_depth)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
- if (!high_bit_depth) {
- if ((avctx->idct_algo == FF_IDCT_AUTO) ||
+ if (!high_bit_depth && avctx->lowres == 0) {
+ if ((avctx->idct_algo == FF_IDCT_AUTO && !(avctx->flags & AV_CODEC_FLAG_BITEXACT)) ||
(avctx->idct_algo == FF_IDCT_ALTIVEC)) {
+ c->idct = idct_altivec;
c->idct_add = idct_add_altivec;
c->idct_put = idct_put_altivec;
c->perm_type = FF_IDCT_PERM_TRANSPOSE;
}
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
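
The interesting part of this file is how little has to be endian-specific: GET_TMP2 hides the only difference, with big-endian widening the eight destination bytes through vec_perm against a zero vector and the little-endian VSX path loading unaligned via vec_vsx_ld and widening with vec_mergeh. The init hunk additionally exposes a plain idct and keeps the AltiVec IDCT away from lowres and bit-exact decoding. In scalar terms the ADD step being abstracted is just "add the IDCT result to a destination row and clamp to 8 bits"; a rough reference, assuming a row-major 8x8 block of already-transformed values (names are illustrative):

    #include <stdint.h>
    #include <stddef.h>

    static uint8_t clip_uint8_ref(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t) v;
    }

    /* Scalar equivalent of applying the ADD macro to all eight rows. */
    static void idct_add_ref(uint8_t *dest, ptrdiff_t stride, const int16_t *block)
    {
        for (int row = 0; row < 8; row++) {
            for (int i = 0; i < 8; i++)
                dest[i] = clip_uint8_ref(dest[i] + block[row * 8 + i]);
            dest += stride;
        }
    }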
diff --git a/libavcodec/ppc/apedsp_altivec.c b/libavcodec/ppc/lossless_audiodsp_altivec.c
index 21a3d10f18..bdec25223d 100644
--- a/libavcodec/ppc/apedsp_altivec.c
+++ b/libavcodec/ppc/lossless_audiodsp_altivec.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Luca Barbato <lu_zero@gentoo.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,9 +27,23 @@
#include "libavutil/cpu.h"
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/types_altivec.h"
-#include "libavcodec/apedsp.h"
+#include "libavcodec/lossless_audiodsp.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_BIGENDIAN
+#define GET_T(tt0,tt1,src,a,b){ \
+ a = vec_ld(16, src); \
+ tt0 = vec_perm(b, a, align); \
+ b = vec_ld(32, src); \
+ tt1 = vec_perm(a, b, align); \
+ }
+#else
+#define GET_T(tt0,tt1,src,a,b){ \
+ tt0 = vec_vsx_ld(0, src); \
+ tt1 = vec_vsx_ld(16, src); \
+ }
+#endif
+
+#if HAVE_ALTIVEC
static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1,
const int16_t *v2,
const int16_t *v3,
@@ -38,26 +52,23 @@ static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1,
LOAD_ZERO;
vec_s16 *pv1 = (vec_s16 *) v1;
register vec_s16 muls = { mul, mul, mul, mul, mul, mul, mul, mul };
- register vec_s16 t0, t1, i0, i1, i4;
- register vec_s16 i2 = vec_ld(0, v2), i3 = vec_ld(0, v3);
+ register vec_s16 t0, t1, i0, i1, i4, i2, i3;
register vec_s32 res = zero_s32v;
+#if HAVE_BIGENDIAN
register vec_u8 align = vec_lvsl(0, v2);
+ i2 = vec_ld(0, v2);
+ i3 = vec_ld(0, v3);
+#endif
int32_t ires;
order >>= 4;
do {
- i1 = vec_ld(16, v2);
- t0 = vec_perm(i2, i1, align);
- i2 = vec_ld(32, v2);
- t1 = vec_perm(i1, i2, align);
+ GET_T(t0,t1,v2,i1,i2);
i0 = pv1[0];
i1 = pv1[1];
res = vec_msum(t0, i0, res);
res = vec_msum(t1, i1, res);
- i4 = vec_ld(16, v3);
- t0 = vec_perm(i3, i4, align);
- i3 = vec_ld(32, v3);
- t1 = vec_perm(i4, i3, align);
+ GET_T(t0,t1,v3,i4,i3);
pv1[0] = vec_mladd(t0, muls, i0);
pv1[1] = vec_mladd(t1, muls, i1);
pv1 += 2;
@@ -69,14 +80,14 @@ static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1,
return ires;
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
-av_cold void ff_apedsp_init_ppc(APEDSPContext *c)
+av_cold void ff_llauddsp_init_ppc(LLAudDSPContext *c)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_altivec;
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
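
Behind the new GET_T macro the kernel still does exactly what its name says: a 16-elements-at-a-time dot product of v1 and v2 (vec_msum) combined with an in-place multiply-accumulate of v3 into v1 (vec_mladd). A scalar reference sketch, assuming order is a multiple of 16 as the order >>= 4 implies:

    #include <stdint.h>

    /* Scalar reference: returns sum(v1[i] * v2[i]) over 'order' elements
     * while rewriting v1[i] += v3[i] * mul (16-bit wraparound, like vec_mladd). */
    static int32_t scalarproduct_and_madd_int16_ref(int16_t *v1, const int16_t *v2,
                                                    const int16_t *v3, int order, int mul)
    {
        int32_t res = 0;
        for (int i = 0; i < order; i++) {
            res   += v1[i] * v2[i];   /* uses the old v1 value */
            v1[i] += v3[i] * mul;
        }
        return res;
    }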
diff --git a/libavcodec/ppc/huffyuvdsp_altivec.c b/libavcodec/ppc/lossless_videodsp_altivec.c
index 337328ae05..c388dc33af 100644
--- a/libavcodec/ppc/huffyuvdsp_altivec.c
+++ b/libavcodec/ppc/lossless_videodsp_altivec.c
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -30,10 +30,10 @@
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
-#include "libavcodec/huffyuvdsp.h"
+#include "libavcodec/lossless_videodsp.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
-static void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w)
+#if HAVE_ALTIVEC
+static void add_bytes_altivec(uint8_t *dst, uint8_t *src, intptr_t w)
{
register int i;
register vector unsigned char vdst, vsrc;
@@ -49,14 +49,14 @@ static void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w)
for (; i < w; i++)
dst[i] = src[i];
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
-av_cold void ff_huffyuvdsp_init_ppc(HuffYUVDSPContext *c)
+av_cold void ff_llviddsp_init_ppc(LLVidDSPContext *c)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
c->add_bytes = add_bytes_altivec;
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
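
Apart from the rename and the relaxed guard, add_bytes_altivec only adapts its prototype to the new LLVidDSP interface (intptr_t width). The operation it vectorizes, sixteen bytes per step with a scalar tail, is simply:

    #include <stdint.h>

    /* Scalar reference for add_bytes(): element-wise byte add with wraparound. */
    static void add_bytes_ref(uint8_t *dst, const uint8_t *src, intptr_t w)
    {
        for (intptr_t i = 0; i < w; i++)
            dst[i] += src[i];
    }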
diff --git a/libavcodec/ppc/mathops.h b/libavcodec/ppc/mathops.h
index 34ddb11800..dbd714fcd4 100644
--- a/libavcodec/ppc/mathops.h
+++ b/libavcodec/ppc/mathops.h
@@ -3,20 +3,20 @@
* Copyright (c) 2001, 2002 Fabrice Bellard
* Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/mdct_init.c b/libavcodec/ppc/mdct_init.c
deleted file mode 100644
index d3582bc00a..0000000000
--- a/libavcodec/ppc/mdct_init.c
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * FFT/IFFT transforms
- * AltiVec-enabled
- * Copyright (c) 2009 Loren Merritt
- *
- * This file is part of Libav.
- *
- * Libav is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * Libav is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "config.h"
-#include "libavutil/cpu.h"
-#include "libavutil/ppc/cpu.h"
-#include "libavutil/ppc/types_altivec.h"
-#include "libavutil/ppc/util_altivec.h"
-#include "libavcodec/fft.h"
-
-/**
- * Do a complex FFT with the parameters defined in ff_fft_init().
- * The input data must be permuted before with s->revtab table.
- * No 1.0 / sqrt(n) normalization is done.
- * AltiVec-enabled:
- * This code assumes that the 'z' pointer is 16 bytes-aligned.
- * It also assumes all FFTComplex are 8 bytes-aligned pairs of floats.
- */
-
-void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z);
-
-#if HAVE_GNU_AS && HAVE_ALTIVEC && HAVE_BIGENDIAN
-static void imdct_half_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
-{
- int j, k;
- int n = 1 << s->mdct_bits;
- int n4 = n >> 2;
- int n8 = n >> 3;
- int n32 = n >> 5;
- const uint16_t *revtabj = s->revtab;
- const uint16_t *revtabk = s->revtab+n4;
- const vec_f *tcos = (const vec_f*)(s->tcos+n8);
- const vec_f *tsin = (const vec_f*)(s->tsin+n8);
- const vec_f *pin = (const vec_f*)(input+n4);
- vec_f *pout = (vec_f*)(output+n4);
-
- /* pre rotation */
- k = n32-1;
- do {
- vec_f cos,sin,cos0,sin0,cos1,sin1,re,im,r0,i0,r1,i1,a,b,c,d;
-#define CMULA(p,o0,o1,o2,o3)\
- a = pin[ k*2+p]; /* { z[k].re, z[k].im, z[k+1].re, z[k+1].im } */\
- b = pin[-k*2-p-1]; /* { z[-k-2].re, z[-k-2].im, z[-k-1].re, z[-k-1].im } */\
- re = vec_perm(a, b, vcprm(0,2,s0,s2)); /* { z[k].re, z[k+1].re, z[-k-2].re, z[-k-1].re } */\
- im = vec_perm(a, b, vcprm(s3,s1,3,1)); /* { z[-k-1].im, z[-k-2].im, z[k+1].im, z[k].im } */\
- cos = vec_perm(cos0, cos1, vcprm(o0,o1,s##o2,s##o3)); /* { cos[k], cos[k+1], cos[-k-2], cos[-k-1] } */\
- sin = vec_perm(sin0, sin1, vcprm(o0,o1,s##o2,s##o3));\
- r##p = im*cos - re*sin;\
- i##p = re*cos + im*sin;
-#define STORE2(v,dst)\
- j = dst;\
- vec_ste(v, 0, output+j*2);\
- vec_ste(v, 4, output+j*2);
-#define STORE8(p)\
- a = vec_perm(r##p, i##p, vcprm(0,s0,0,s0));\
- b = vec_perm(r##p, i##p, vcprm(1,s1,1,s1));\
- c = vec_perm(r##p, i##p, vcprm(2,s2,2,s2));\
- d = vec_perm(r##p, i##p, vcprm(3,s3,3,s3));\
- STORE2(a, revtabk[ p*2-4]);\
- STORE2(b, revtabk[ p*2-3]);\
- STORE2(c, revtabj[-p*2+2]);\
- STORE2(d, revtabj[-p*2+3]);
-
- cos0 = tcos[k];
- sin0 = tsin[k];
- cos1 = tcos[-k-1];
- sin1 = tsin[-k-1];
- CMULA(0, 0,1,2,3);
- CMULA(1, 2,3,0,1);
- STORE8(0);
- STORE8(1);
- revtabj += 4;
- revtabk -= 4;
- k--;
- } while(k >= 0);
-
- ff_fft_calc_altivec(s, (FFTComplex*)output);
-
- /* post rotation + reordering */
- j = -n32;
- k = n32-1;
- do {
- vec_f cos,sin,re,im,a,b,c,d;
-#define CMULB(d0,d1,o)\
- re = pout[o*2];\
- im = pout[o*2+1];\
- cos = tcos[o];\
- sin = tsin[o];\
- d0 = im*sin - re*cos;\
- d1 = re*sin + im*cos;
-
- CMULB(a,b,j);
- CMULB(c,d,k);
- pout[2*j] = vec_perm(a, d, vcprm(0,s3,1,s2));
- pout[2*j+1] = vec_perm(a, d, vcprm(2,s1,3,s0));
- pout[2*k] = vec_perm(c, b, vcprm(0,s3,1,s2));
- pout[2*k+1] = vec_perm(c, b, vcprm(2,s1,3,s0));
- j++;
- k--;
- } while(k >= 0);
-}
-
-static void imdct_calc_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
-{
- int k;
- int n = 1 << s->mdct_bits;
- int n4 = n >> 2;
- int n16 = n >> 4;
- vec_u32 sign = {1U<<31,1U<<31,1U<<31,1U<<31};
- vec_u32 *p0 = (vec_u32*)(output+n4);
- vec_u32 *p1 = (vec_u32*)(output+n4*3);
-
- imdct_half_altivec(s, output + n4, input);
-
- for (k = 0; k < n16; k++) {
- vec_u32 a = p0[k] ^ sign;
- vec_u32 b = p1[-k-1];
- p0[-k-1] = vec_perm(a, a, vcprm(3,2,1,0));
- p1[k] = vec_perm(b, b, vcprm(3,2,1,0));
- }
-}
-#endif /* HAVE_GNU_AS && HAVE_ALTIVEC && HAVE_BIGENDIAN */
-
-av_cold void ff_mdct_init_ppc(FFTContext *s)
-{
-#if HAVE_GNU_AS && HAVE_ALTIVEC && HAVE_BIGENDIAN
- if (!PPC_ALTIVEC(av_get_cpu_flags()))
- return;
-
- if (s->mdct_bits >= 5) {
- s->imdct_calc = imdct_calc_altivec;
- s->imdct_half = imdct_half_altivec;
- }
-#endif /* HAVE_GNU_AS && HAVE_ALTIVEC && HAVE_BIGENDIAN */
-}
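
With this file removed (it was big-endian-only and gated on HAVE_GNU_AS), the PPC build simply falls back to the generic C IMDCT. Everything imdct_calc_altivec added on top of imdct_half_altivec was the standard mirror-and-negate post-pass, done four floats at a time with an XOR against the sign bit and an element-reversing vcprm(3,2,1,0) permute. A scalar sketch of that post-pass (n2 = n/2, n4 = n/4, with imdct_half having already written its n2 outputs starting at output + n4):

    /* Scalar sketch: expand an imdct_half() result into the full inverse MDCT. */
    static void imdct_full_from_half(float *output, int n)
    {
        int n2 = n >> 1, n4 = n >> 2;
        for (int k = 0; k < n4; k++) {
            output[k]         = -output[n2 - 1 - k];
            output[n - 1 - k] =  output[n2 + k];
        }
    }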
diff --git a/libavcodec/ppc/me_cmp.c b/libavcodec/ppc/me_cmp.c
index 6c70f9f0fa..9f75ed256a 100644
--- a/libavcodec/ppc/me_cmp.c
+++ b/libavcodec/ppc/me_cmp.c
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -34,27 +34,44 @@
#include "libavcodec/mpegvideo.h"
#include "libavcodec/me_cmp.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
+
+#if HAVE_BIGENDIAN
+#define GET_PERM(per1, per2, pix) {\
+ per1 = vec_lvsl(0, pix);\
+ per2 = vec_add(per1, vec_splat_u8(1));\
+}
+#define LOAD_PIX(v, iv, pix, per1, per2) {\
+ vector unsigned char pix2l = vec_ld(0, pix);\
+ vector unsigned char pix2r = vec_ld(16, pix);\
+ v = vec_perm(pix2l, pix2r, per1);\
+ iv = vec_perm(pix2l, pix2r, per2);\
+}
+#else
+#define GET_PERM(per1, per2, pix) {}
+#define LOAD_PIX(v, iv, pix, per1, per2) {\
+ v = vec_vsx_ld(0, pix);\
+ iv = vec_vsx_ld(1, pix);\
+}
+#endif
static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h)
{
- int i, s = 0;
+ int i;
+ int __attribute__((aligned(16))) s = 0;
const vector unsigned char zero =
(const vector unsigned char) vec_splat_u8(0);
- vector unsigned char perm1 = vec_lvsl(0, pix2);
- vector unsigned char perm2 = vec_add(perm1, vec_splat_u8(1));
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
+ vector unsigned char perm1, perm2, pix2v, pix2iv;
+ GET_PERM(perm1, perm2, pix2);
for (i = 0; i < h; i++) {
/* Read unaligned pixels into our vectors. The vectors are as follows:
* pix1v: pix1[0] - pix1[15]
* pix2v: pix2[0] - pix2[15] pix2iv: pix2[1] - pix2[16] */
vector unsigned char pix1v = vec_ld(0, pix1);
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(16, pix2);
- vector unsigned char pix2v = vec_perm(pix2l, pix2r, perm1);
- vector unsigned char pix2iv = vec_perm(pix2l, pix2r, perm2);
+ LOAD_PIX(pix2v, pix2iv, pix2, perm1, perm2);
/* Calculate the average vector. */
vector unsigned char avgv = vec_avg(pix2v, pix2iv);
@@ -80,13 +97,14 @@ static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h)
{
- int i, s = 0;
+ int i;
+ int __attribute__((aligned(16))) s = 0;
const vector unsigned char zero =
(const vector unsigned char) vec_splat_u8(0);
- vector unsigned char perm = vec_lvsl(0, pix2);
vector unsigned char pix1v, pix3v, avgv, t5;
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
+
uint8_t *pix3 = pix2 + stride;
/* Due to the fact that pix3 = pix2 + stride, the pix3 of one
@@ -96,19 +114,14 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
* Read unaligned pixels into our vectors. The vectors are as follows:
* pix2v: pix2[0] - pix2[15]
* Split the pixel vectors into shorts. */
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(15, pix2);
- vector unsigned char pix2v = vec_perm(pix2l, pix2r, perm);
+ vector unsigned char pix2v = VEC_LD(0, pix2);
for (i = 0; i < h; i++) {
/* Read unaligned pixels into our vectors. The vectors are as follows:
* pix1v: pix1[0] - pix1[15]
* pix3v: pix3[0] - pix3[15] */
pix1v = vec_ld(0, pix1);
-
- pix2l = vec_ld(0, pix3);
- pix2r = vec_ld(15, pix3);
- pix3v = vec_perm(pix2l, pix2r, perm);
+ pix3v = VEC_LD(0, pix3);
/* Calculate the average vector. */
avgv = vec_avg(pix2v, pix3v);
@@ -134,20 +147,21 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h)
{
- int i, s = 0;
+ int i;
+ int __attribute__((aligned(16))) s = 0;
uint8_t *pix3 = pix2 + stride;
const vector unsigned char zero =
(const vector unsigned char) vec_splat_u8(0);
const vector unsigned short two =
(const vector unsigned short) vec_splat_u16(2);
vector unsigned char avgv, t5;
- vector unsigned char perm1 = vec_lvsl(0, pix2);
- vector unsigned char perm2 = vec_add(perm1, vec_splat_u8(1));
vector unsigned char pix1v, pix3v, pix3iv;
vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
vector unsigned short avghv, avglv;
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
+ vector unsigned char perm1, perm2, pix2v, pix2iv;
+ GET_PERM(perm1, perm2, pix2);
/* Due to the fact that pix3 = pix2 + stride, the pix3 of one
* iteration becomes pix2 in the next iteration. We can use this
@@ -156,19 +170,16 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
* Read unaligned pixels into our vectors. The vectors are as follows:
* pix2v: pix2[0] - pix2[15] pix2iv: pix2[1] - pix2[16]
* Split the pixel vectors into shorts. */
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(16, pix2);
- vector unsigned char pix2v = vec_perm(pix2l, pix2r, perm1);
- vector unsigned char pix2iv = vec_perm(pix2l, pix2r, perm2);
-
+ LOAD_PIX(pix2v, pix2iv, pix2, perm1, perm2);
vector unsigned short pix2hv =
- (vector unsigned short) vec_mergeh(zero, pix2v);
+ (vector unsigned short) VEC_MERGEH(zero, pix2v);
vector unsigned short pix2lv =
- (vector unsigned short) vec_mergel(zero, pix2v);
+ (vector unsigned short) VEC_MERGEL(zero, pix2v);
vector unsigned short pix2ihv =
- (vector unsigned short) vec_mergeh(zero, pix2iv);
+ (vector unsigned short) VEC_MERGEH(zero, pix2iv);
vector unsigned short pix2ilv =
- (vector unsigned short) vec_mergel(zero, pix2iv);
+ (vector unsigned short) VEC_MERGEL(zero, pix2iv);
+
vector unsigned short t1 = vec_add(pix2hv, pix2ihv);
vector unsigned short t2 = vec_add(pix2lv, pix2ilv);
vector unsigned short t3, t4;
@@ -178,11 +189,7 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
* pix1v: pix1[0] - pix1[15]
* pix3v: pix3[0] - pix3[15] pix3iv: pix3[1] - pix3[16] */
pix1v = vec_ld(0, pix1);
-
- pix2l = vec_ld(0, pix3);
- pix2r = vec_ld(16, pix3);
- pix3v = vec_perm(pix2l, pix2r, perm1);
- pix3iv = vec_perm(pix2l, pix2r, perm2);
+ LOAD_PIX(pix3v, pix3iv, pix3, perm1, perm2);
/* Note that AltiVec does have vec_avg, but this works on vector pairs
* and rounds up. We could do avg(avg(a, b), avg(c, d)), but the
@@ -191,10 +198,10 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
* vectors of shorts and do the averaging by hand. */
/* Split the pixel vectors into shorts. */
- pix3hv = (vector unsigned short) vec_mergeh(zero, pix3v);
- pix3lv = (vector unsigned short) vec_mergel(zero, pix3v);
- pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
- pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);
+ pix3hv = (vector unsigned short) VEC_MERGEH(zero, pix3v);
+ pix3lv = (vector unsigned short) VEC_MERGEL(zero, pix3v);
+ pix3ihv = (vector unsigned short) VEC_MERGEH(zero, pix3iv);
+ pix3ilv = (vector unsigned short) VEC_MERGEL(zero, pix3iv);
/* Do the averaging on them. */
t3 = vec_add(pix3hv, pix3ihv);
@@ -229,19 +236,17 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h)
{
- int i, s;
+ int i;
+ int __attribute__((aligned(16))) s;
const vector unsigned int zero =
(const vector unsigned int) vec_splat_u32(0);
- vector unsigned char perm = vec_lvsl(0, pix2);
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
for (i = 0; i < h; i++) {
/* Read potentially unaligned pixels into t1 and t2. */
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(15, pix2);
- vector unsigned char t1 = vec_ld(0, pix1);
- vector unsigned char t2 = vec_perm(pix2l, pix2r, perm);
+        vector unsigned char t1 = vec_ld(0, pix1);
+ vector unsigned char t2 = VEC_LD(0, pix2);
/* Calculate a sum of abs differences vector. */
vector unsigned char t3 = vec_max(t1, t2);
@@ -266,14 +271,13 @@ static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h)
{
- int i, s;
+ int i;
+ int __attribute__((aligned(16))) s;
const vector unsigned int zero =
(const vector unsigned int) vec_splat_u32(0);
const vector unsigned char permclear =
(vector unsigned char)
{ 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0 };
- vector unsigned char perm1 = vec_lvsl(0, pix1);
- vector unsigned char perm2 = vec_lvsl(0, pix2);
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
@@ -281,14 +285,10 @@ static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
/* Read potentially unaligned pixels into t1 and t2.
* Since we're reading 16 pixels, and actually only want 8,
* mask out the last 8 pixels. The 0s don't change the sum. */
- vector unsigned char pix1l = vec_ld(0, pix1);
- vector unsigned char pix1r = vec_ld(7, pix1);
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(7, pix2);
- vector unsigned char t1 = vec_and(vec_perm(pix1l, pix1r, perm1),
- permclear);
- vector unsigned char t2 = vec_and(vec_perm(pix2l, pix2r, perm2),
- permclear);
+ vector unsigned char pix1l = VEC_LD(0, pix1);
+ vector unsigned char pix2l = VEC_LD(0, pix2);
+ vector unsigned char t1 = vec_and(pix1l, permclear);
+ vector unsigned char t2 = vec_and(pix2l, permclear);
/* Calculate a sum of abs differences vector. */
vector unsigned char t3 = vec_max(t1, t2);
@@ -315,14 +315,13 @@ static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h)
{
- int i, s;
+ int i;
+ int __attribute__((aligned(16))) s;
const vector unsigned int zero =
(const vector unsigned int) vec_splat_u32(0);
const vector unsigned char permclear =
(vector unsigned char)
{ 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0 };
- vector unsigned char perm1 = vec_lvsl(0, pix1);
- vector unsigned char perm2 = vec_lvsl(0, pix2);
vector unsigned int sum = (vector unsigned int) vec_splat_u32(0);
vector signed int sumsqr;
@@ -330,14 +329,8 @@ static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
/* Read potentially unaligned pixels into t1 and t2.
* Since we're reading 16 pixels, and actually only want 8,
* mask out the last 8 pixels. The 0s don't change the sum. */
- vector unsigned char pix1l = vec_ld(0, pix1);
- vector unsigned char pix1r = vec_ld(7, pix1);
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(7, pix2);
- vector unsigned char t1 = vec_and(vec_perm(pix1l, pix1r, perm1),
- permclear);
- vector unsigned char t2 = vec_and(vec_perm(pix2l, pix2r, perm2),
- permclear);
+ vector unsigned char t1 = vec_and(VEC_LD(0, pix1), permclear);
+ vector unsigned char t2 = vec_and(VEC_LD(0, pix2), permclear);
/* Since we want to use unsigned chars, we can take advantage
* of the fact that abs(a - b) ^ 2 = (a - b) ^ 2. */
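
The comment above is the key to both SSE kernels: with unsigned bytes the difference can be formed in unsigned arithmetic as max(a, b) - min(a, b), and because only the square is accumulated the lost sign is irrelevant, so the kernels can square and sum directly in unsigned vectors. Per pixel, the scalar equivalent is:

    #include <stdint.h>

    /* One squared-error term: |a - b| computed in unsigned arithmetic, then squared. */
    static unsigned squared_error(uint8_t a, uint8_t b)
    {
        uint8_t d = (a > b ? a : b) - (a < b ? a : b);   /* |a - b| */
        return (unsigned) d * d;                         /* abs(a-b)^2 == (a-b)^2 */
    }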
@@ -367,19 +360,17 @@ static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h)
{
- int i, s;
+ int i;
+ int __attribute__((aligned(16))) s;
const vector unsigned int zero =
(const vector unsigned int) vec_splat_u32(0);
- vector unsigned char perm = vec_lvsl(0, pix2);
vector unsigned int sum = (vector unsigned int) vec_splat_u32(0);
vector signed int sumsqr;
for (i = 0; i < h; i++) {
/* Read potentially unaligned pixels into t1 and t2. */
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(15, pix2);
vector unsigned char t1 = vec_ld(0, pix1);
- vector unsigned char t2 = vec_perm(pix2l, pix2r, perm);
+ vector unsigned char t2 = VEC_LD(0, pix2);
/* Since we want to use unsigned chars, we can take advantage
* of the fact that abs(a - b) ^ 2 = (a - b) ^ 2. */
@@ -399,15 +390,15 @@ static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
/* Sum up the four partial sums, and put the result into s. */
sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
sumsqr = vec_splat(sumsqr, 3);
- vec_ste(sumsqr, 0, &s);
+ vec_ste(sumsqr, 0, &s);
return s;
}
static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
uint8_t *src, ptrdiff_t stride, int h)
{
- int sum;
+ int __attribute__((aligned(16))) sum;
register const vector unsigned char vzero =
(const vector unsigned char) vec_splat_u8(0);
register vector signed short temp0, temp1, temp2, temp3, temp4,
@@ -432,24 +423,19 @@ static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
{ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
+
#define ONEITERBUTTERFLY(i, res) \
{ \
- register vector unsigned char src1 = vec_ld(stride * i, src); \
- register vector unsigned char src2 = vec_ld(stride * i + 15, src); \
- register vector unsigned char srcO = \
- vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
- register vector unsigned char dst1 = vec_ld(stride * i, dst); \
- register vector unsigned char dst2 = vec_ld(stride * i + 15, dst); \
- register vector unsigned char dstO = \
- vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
+ register vector unsigned char srcO = unaligned_load(stride * i, src); \
+ register vector unsigned char dstO = unaligned_load(stride * i, dst);\
\
/* Promote the unsigned chars to signed shorts. */ \
/* We're in the 8x8 function, we only care for the first 8. */ \
register vector signed short srcV = \
- (vector signed short) vec_mergeh((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEH((vector signed char) vzero, \
(vector signed char) srcO); \
register vector signed short dstV = \
- (vector signed short) vec_mergeh((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEH((vector signed char) vzero, \
(vector signed char) dstO); \
\
/* subtractions inside the first butterfly */ \
@@ -461,6 +447,7 @@ static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
register vector signed short op3 = vec_perm(but2, but2, perm3); \
res = vec_mladd(but2, vprod3, op3); \
}
+
ONEITERBUTTERFLY(0, temp0);
ONEITERBUTTERFLY(1, temp1);
ONEITERBUTTERFLY(2, temp2);
@@ -510,13 +497,14 @@ static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
vsum = vec_sum4s(vec_abs(line7C), vsum);
vsum = vec_sums(vsum, (vector signed int) vzero);
vsum = vec_splat(vsum, 3);
+
vec_ste(vsum, 0, &sum);
}
return sum;
}
/*
- * 16x8 works with 16 elements; it allows to avoid replicating loads, and
+ * 16x8 works with 16 elements; it can avoid replicating loads, and
* gives the compiler more room for scheduling. It's only used from
* inside hadamard8_diff16_altivec.
*
@@ -536,7 +524,7 @@ static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
uint8_t *src, ptrdiff_t stride, int h)
{
- int sum;
+ int __attribute__((aligned(16))) sum;
register vector signed short
temp0 __asm__ ("v0"),
temp1 __asm__ ("v1"),
@@ -584,31 +572,23 @@ static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
#define ONEITERBUTTERFLY(i, res1, res2) \
{ \
- register vector unsigned char src1 __asm__ ("v22") = \
- vec_ld(stride * i, src); \
- register vector unsigned char src2 __asm__ ("v23") = \
- vec_ld(stride * i + 16, src); \
register vector unsigned char srcO __asm__ ("v22") = \
- vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
- register vector unsigned char dst1 __asm__ ("v24") = \
- vec_ld(stride * i, dst); \
- register vector unsigned char dst2 __asm__ ("v25") = \
- vec_ld(stride * i + 16, dst); \
+ unaligned_load(stride * i, src); \
register vector unsigned char dstO __asm__ ("v23") = \
- vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
+ unaligned_load(stride * i, dst);\
\
/* Promote the unsigned chars to signed shorts. */ \
register vector signed short srcV __asm__ ("v24") = \
- (vector signed short) vec_mergeh((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEH((vector signed char) vzero, \
(vector signed char) srcO); \
register vector signed short dstV __asm__ ("v25") = \
- (vector signed short) vec_mergeh((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEH((vector signed char) vzero, \
(vector signed char) dstO); \
register vector signed short srcW __asm__ ("v26") = \
- (vector signed short) vec_mergel((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEL((vector signed char) vzero, \
(vector signed char) srcO); \
register vector signed short dstW __asm__ ("v27") = \
- (vector signed short) vec_mergel((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEL((vector signed char) vzero, \
(vector signed char) dstO); \
\
/* subtractions inside the first butterfly */ \
@@ -639,6 +619,7 @@ static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
res1 = vec_mladd(but2, vprod3, op3); \
res2 = vec_mladd(but2S, vprod3, op3S); \
}
+
ONEITERBUTTERFLY(0, temp0, temp0S);
ONEITERBUTTERFLY(1, temp1, temp1S);
ONEITERBUTTERFLY(2, temp2, temp2S);
@@ -725,6 +706,7 @@ static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
vsum = vec_sum4s(vec_abs(line7CS), vsum);
vsum = vec_sums(vsum, (vector signed int) vzero);
vsum = vec_splat(vsum, 3);
+
vec_ste(vsum, 0, &sum);
}
return sum;
@@ -742,11 +724,11 @@ static int hadamard8_diff16_altivec(MpegEncContext *s, uint8_t *dst,
}
return score;
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
av_cold void ff_me_cmp_init_ppc(MECmpContext *c, AVCodecContext *avctx)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
@@ -763,5 +745,5 @@ av_cold void ff_me_cmp_init_ppc(MECmpContext *c, AVCodecContext *avctx)
c->hadamard8_diff[0] = hadamard8_diff16_altivec;
c->hadamard8_diff[1] = hadamard8_diff8x8_altivec;
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
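
Two changes run through the whole file. Unaligned pix2/pix3 reads now go through GET_PERM/LOAD_PIX or VEC_LD/unaligned_load, which collapse to vec_vsx_ld on little-endian VSX, and every kernel still ends with the same horizontal reduction: four 32-bit partial sums folded by vec_sums, splatted, and written out through vec_ste into a scalar that the patch now declares with __attribute__((aligned(16))). A sketch of that reduction tail, with names of my own choosing:

    #include <altivec.h>

    /* Fold four 32-bit partial sums into one scalar, as the SAD/SSE kernels do. */
    static int reduce_sum(vector unsigned int partial)
    {
        int __attribute__((aligned(16))) s;
        const vector signed int zero = vec_splat_s32(0);
        vector signed int v = vec_sums((vector signed int) partial, zero);
        v = vec_splat(v, 3);   /* the total lands in element 3; broadcast it */
        vec_ste(v, 0, &s);     /* store a single element into the scalar */
        return s;
    }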
diff --git a/libavcodec/ppc/mpegaudiodsp_altivec.c b/libavcodec/ppc/mpegaudiodsp_altivec.c
index b3d3ed4a3e..ddfe5dcbb7 100644
--- a/libavcodec/ppc/mpegaudiodsp_altivec.c
+++ b/libavcodec/ppc/mpegaudiodsp_altivec.c
@@ -2,20 +2,20 @@
* Altivec optimized MP3 decoding functions
* Copyright (c) 2010 Vitor Sessak
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,7 +27,7 @@
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/mpegaudiodsp.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
#define MACS(rt, ra, rb) rt+=(ra)*(rb)
#define MLSS(rt, ra, rb) rt-=(ra)*(rb)
@@ -128,14 +128,14 @@ static void apply_window_mp3(float *in, float *win, int *unused, float *out,
*out = sum;
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
av_cold void ff_mpadsp_init_ppc(MPADSPContext *s)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
s->apply_window_float = apply_window_mp3;
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
diff --git a/libavcodec/ppc/mpegvideo_altivec.c b/libavcodec/ppc/mpegvideo_altivec.c
index a09932bdf8..1b6bda6c36 100644
--- a/libavcodec/ppc/mpegvideo_altivec.c
+++ b/libavcodec/ppc/mpegvideo_altivec.c
@@ -4,20 +4,20 @@
* dct_unquantize_h263_altivec:
* Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -32,7 +32,7 @@
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/mpegvideo.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
/* AltiVec version of dct_unquantize_h263
this code assumes `block' is 16 bytes-aligned */
@@ -42,8 +42,6 @@ static void dct_unquantize_h263_altivec(MpegEncContext *s,
int i, level, qmul, qadd;
int nCoeffs;
- assert(s->block_last_index[n]>=0);
-
qadd = (qscale - 1) | 1;
qmul = qscale << 1;
@@ -59,6 +57,7 @@ static void dct_unquantize_h263_altivec(MpegEncContext *s,
nCoeffs= 63; //does not always use zigzag table
} else {
i = 0;
+ av_assert2(s->block_last_index[n]>=0);
nCoeffs= s->intra_scantable.raster_end[ s->block_last_index[n] ];
}
@@ -113,11 +112,11 @@ static void dct_unquantize_h263_altivec(MpegEncContext *s,
}
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
av_cold void ff_mpv_common_init_ppc(MpegEncContext *s)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
@@ -126,5 +125,5 @@ av_cold void ff_mpv_common_init_ppc(MpegEncContext *s)
s->dct_unquantize_h263_intra = dct_unquantize_h263_altivec;
s->dct_unquantize_h263_inter = dct_unquantize_h263_altivec;
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
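
Functionally this file changes very little: the assert on block_last_index becomes av_assert2 and only guards the branch that actually indexes the scan table, and the guards drop the big-endian requirement. The constants set up at the top are the usual H.263 inverse-quantisation pair, so the per-coefficient operation the AltiVec loop implements is roughly:

    #include <stdint.h>

    /* Scalar sketch of H.263 dequantisation of one coefficient. */
    static int16_t dequant_h263(int16_t level, int qscale)
    {
        int qmul = qscale << 1;        /* 2 * qscale */
        int qadd = (qscale - 1) | 1;   /* forced odd */

        if (level == 0)
            return 0;
        return level > 0 ? level * qmul + qadd
                         : level * qmul - qadd;
    }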
diff --git a/libavcodec/ppc/mpegvideodsp.c b/libavcodec/ppc/mpegvideodsp.c
index eef3e1dcbe..021933255b 100644
--- a/libavcodec/ppc/mpegvideodsp.c
+++ b/libavcodec/ppc/mpegvideodsp.c
@@ -3,20 +3,20 @@
*
* Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,7 +27,7 @@
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/mpegvideodsp.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
/* AltiVec-enhanced gmc1. ATM this code assumes stride is a multiple of 8
* to preserve proper dst alignment. */
static void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */,
@@ -68,7 +68,7 @@ static void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */,
vec_lvsl(0, src));
if (src_really_odd != 0x0000000F)
- /* If src & 0xF == 0xF, then (src + 1) is properly aligned
+ /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
* on the second vector. */
srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
else
@@ -90,7 +90,7 @@ static void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */,
srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));
if (src_really_odd != 0x0000000F)
- /* If src & 0xF == 0xF, then (src + 1) is properly aligned
+ /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
* on the second vector. */
srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
else
@@ -125,14 +125,14 @@ static void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */,
src += stride;
}
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
av_cold void ff_mpegvideodsp_init_ppc(MpegVideoDSPContext *c)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
c->gmc1 = gmc1_altivec;
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
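
The only substantive change here is the clarified comment, but it is worth spelling out why the special case exists at all: when (src & 0xF) == 0xF, src + 1 is already 16-byte aligned, so vec_lvsl(1, src) yields the identity permute and vec_perm(src_0, src_1, ...) would hand back the first (wrong) line; the code therefore takes the second aligned load directly. A sketch of that corner case under big-endian lvsl semantics, given the two aligned lines src_0 = vec_ld(0, src) and src_1 = vec_ld(16, src):

    #include <altivec.h>
    #include <stdint.h>

    /* Load the 16 bytes starting at src + 1, handling the aligned corner case. */
    static vector unsigned char load_at_plus_one(const uint8_t *src,
                                                 vector unsigned char src_0,
                                                 vector unsigned char src_1)
    {
        if (((uintptr_t) src & 0xF) != 0xF)
            return vec_perm(src_0, src_1, vec_lvsl(1, src));
        /* src + 1 is aligned: the identity permute would select only src_0. */
        return src_1;
    }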
diff --git a/libavcodec/ppc/mpegvideoencdsp.c b/libavcodec/ppc/mpegvideoencdsp.c
index fa71bac0ba..3e6765ce15 100644
--- a/libavcodec/ppc/mpegvideoencdsp.c
+++ b/libavcodec/ppc/mpegvideoencdsp.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -29,8 +29,36 @@
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/mpegvideoencdsp.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
+#if HAVE_VSX
+static int pix_norm1_altivec(uint8_t *pix, int line_size)
+{
+ int i, s = 0;
+ const vector unsigned int zero =
+ (const vector unsigned int) vec_splat_u32(0);
+ vector unsigned int sv = (vector unsigned int) vec_splat_u32(0);
+ vector signed int sum;
+
+ for (i = 0; i < 16; i++) {
+ /* Read the potentially unaligned pixels. */
+ //vector unsigned char pixl = vec_ld(0, pix);
+ //vector unsigned char pixr = vec_ld(15, pix);
+ //vector unsigned char pixv = vec_perm(pixl, pixr, perm);
+ vector unsigned char pixv = vec_vsx_ld(0, pix);
+
+ /* Square the values, and add them to our sum. */
+ sv = vec_msum(pixv, pixv, sv);
+
+ pix += line_size;
+ }
+ /* Sum up the four partial sums, and put the result into s. */
+ sum = vec_sums((vector signed int) sv, (vector signed int) zero);
+ sum = vec_splat(sum, 3);
+ vec_ste(sum, 0, &s);
+ return s;
+}
+#else
static int pix_norm1_altivec(uint8_t *pix, int line_size)
{
int i, s = 0;
@@ -58,7 +86,37 @@ static int pix_norm1_altivec(uint8_t *pix, int line_size)
return s;
}
+#endif /* HAVE_VSX */
+
+#if HAVE_VSX
+static int pix_sum_altivec(uint8_t *pix, int line_size)
+{
+ int i, s;
+ const vector unsigned int zero =
+ (const vector unsigned int) vec_splat_u32(0);
+ vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
+ vector signed int sumdiffs;
+
+ for (i = 0; i < 16; i++) {
+ /* Read the potentially unaligned 16 pixels into t1. */
+ //vector unsigned char pixl = vec_ld(0, pix);
+ //vector unsigned char pixr = vec_ld(15, pix);
+ //vector unsigned char t1 = vec_perm(pixl, pixr, perm);
+ vector unsigned char t1 = vec_vsx_ld(0, pix);
+ /* Add each 4 pixel group together and put 4 results into sad. */
+ sad = vec_sum4s(t1, sad);
+
+ pix += line_size;
+ }
+
+ /* Sum up the four partial sums, and put the result into s. */
+ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
+ sumdiffs = vec_splat(sumdiffs, 3);
+ vec_ste(sumdiffs, 0, &s);
+ return s;
+}
+#else
static int pix_sum_altivec(uint8_t *pix, int line_size)
{
int i, s;
@@ -88,16 +146,18 @@ static int pix_sum_altivec(uint8_t *pix, int line_size)
return s;
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_VSX */
+
+#endif /* HAVE_ALTIVEC */
av_cold void ff_mpegvideoencdsp_init_ppc(MpegvideoEncDSPContext *c,
AVCodecContext *avctx)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
c->pix_norm1 = pix_norm1_altivec;
c->pix_sum = pix_sum_altivec;
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
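
The VSX variants added here differ from the AltiVec ones only in the load (vec_vsx_ld instead of the lvsl/perm pair, with the old loads kept as commented-out reference); the vec_msum / vec_sum4s accumulation and the vec_sums reduction are unchanged. For orientation, what the two kernels compute over a 16x16 block is simply:

    #include <stdint.h>

    /* Scalar references for the two 16x16 helpers. */
    static int pix_norm1_ref(const uint8_t *pix, int line_size)
    {
        int s = 0;
        for (int i = 0; i < 16; i++) {
            for (int j = 0; j < 16; j++)
                s += pix[j] * pix[j];   /* sum of squared pixel values */
            pix += line_size;
        }
        return s;
    }

    static int pix_sum_ref(const uint8_t *pix, int line_size)
    {
        int s = 0;
        for (int i = 0; i < 16; i++) {
            for (int j = 0; j < 16; j++)
                s += pix[j];            /* plain pixel sum */
            pix += line_size;
        }
        return s;
    }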
diff --git a/libavcodec/ppc/pixblockdsp.c b/libavcodec/ppc/pixblockdsp.c
index 96e702452f..f5ac8509f0 100644
--- a/libavcodec/ppc/pixblockdsp.c
+++ b/libavcodec/ppc/pixblockdsp.c
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -33,16 +33,44 @@
#include "libavcodec/avcodec.h"
#include "libavcodec/pixblockdsp.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
+#if HAVE_VSX
+static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels,
+ ptrdiff_t stride)
+{
+ int i;
+ vector unsigned char perm =
+ (vector unsigned char) {0x00,0x10, 0x01,0x11,0x02,0x12,0x03,0x13,\
+ 0x04,0x14,0x05,0x15,0x06,0x16,0x07,0x17};
+ const vector unsigned char zero =
+ (const vector unsigned char) vec_splat_u8(0);
+
+ for (i = 0; i < 8; i++) {
+ /* Read potentially unaligned pixels.
+ * We're reading 16 pixels, and actually only want 8,
+ * but we simply ignore the extras. */
+ vector unsigned char bytes = vec_vsx_ld(0, pixels);
+
+ // Convert the bytes into shorts.
+ //vector signed short shorts = (vector signed short) vec_perm(zero, bytes, perm);
+ vector signed short shorts = (vector signed short) vec_perm(bytes, zero, perm);
+
+ // Save the data to the block, we assume the block is 16-byte aligned.
+ vec_vsx_st(shorts, i * 16, (vector signed short *) block);
+
+ pixels += stride;
+ }
+}
+#else
static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels,
ptrdiff_t stride)
{
int i;
- vec_u8 perm = vec_lvsl(0, pixels);
const vec_u8 zero = (const vec_u8)vec_splat_u8(0);
for (i = 0; i < 8; i++) {
+ vec_u8 perm = vec_lvsl(0, pixels);
/* Read potentially unaligned pixels.
* We're reading 16 pixels, and actually only want 8,
* but we simply ignore the extras. */
@@ -60,12 +88,76 @@ static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels,
}
}
+#endif /* HAVE_VSX */
+
+#if HAVE_VSX
+static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
+ const uint8_t *s2, ptrdiff_t stride)
+{
+ int i;
+ const vector unsigned char zero =
+ (const vector unsigned char) vec_splat_u8(0);
+ vector signed short shorts1, shorts2;
+
+ for (i = 0; i < 4; i++) {
+ /* Read potentially unaligned pixels.
+ * We're reading 16 pixels, and actually only want 8,
+ * but we simply ignore the extras. */
+ vector unsigned char bytes = vec_vsx_ld(0, s1);
+
+ // Convert the bytes into shorts.
+ shorts1 = (vector signed short) vec_mergeh(bytes, zero);
+
+ // Do the same for the second block of pixels.
+        bytes = vec_vsx_ld(0, s2);
+
+ // Convert the bytes into shorts.
+ shorts2 = (vector signed short) vec_mergeh(bytes, zero);
+
+ // Do the subtraction.
+ shorts1 = vec_sub(shorts1, shorts2);
+
+ // Save the data to the block, we assume the block is 16-byte aligned.
+ vec_vsx_st(shorts1, 0, (vector signed short *) block);
+
+ s1 += stride;
+ s2 += stride;
+ block += 8;
+
+ /* The code below is a copy of the code above...
+ * This is a manual unroll. */
+
+ /* Read potentially unaligned pixels.
+ * We're reading 16 pixels, and actually only want 8,
+ * but we simply ignore the extras. */
+ bytes = vec_vsx_ld(0, s1);
+
+ // Convert the bytes into shorts.
+ shorts1 = (vector signed short) vec_mergeh(bytes, zero);
+
+ // Do the same for the second block of pixels.
+ bytes = vec_vsx_ld(0, s2);
+
+ // Convert the bytes into shorts.
+ shorts2 = (vector signed short) vec_mergeh(bytes, zero);
+
+ // Do the subtraction.
+ shorts1 = vec_sub(shorts1, shorts2);
+
+ // Save the data to the block, we assume the block is 16-byte aligned.
+ vec_vsx_st(shorts1, 0, (vector signed short *) block);
+
+ s1 += stride;
+ s2 += stride;
+ block += 8;
+ }
+}
+#else
static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
const uint8_t *s2, ptrdiff_t stride)
{
int i;
- vec_u8 perm1 = vec_lvsl(0, s1);
- vec_u8 perm2 = vec_lvsl(0, s2);
+ vec_u8 perm;
const vec_u8 zero = (const vec_u8)vec_splat_u8(0);
vec_s16 shorts1, shorts2;
@@ -73,17 +165,19 @@ static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
/* Read potentially unaligned pixels.
* We're reading 16 pixels, and actually only want 8,
* but we simply ignore the extras. */
+ perm = vec_lvsl(0, s1);
vec_u8 pixl = vec_ld(0, s1);
vec_u8 pixr = vec_ld(15, s1);
- vec_u8 bytes = vec_perm(pixl, pixr, perm1);
+ vec_u8 bytes = vec_perm(pixl, pixr, perm);
// Convert the bytes into shorts.
shorts1 = (vec_s16)vec_mergeh(zero, bytes);
// Do the same for the second block of pixels.
+ perm = vec_lvsl(0, s2);
pixl = vec_ld(0, s2);
pixr = vec_ld(15, s2);
- bytes = vec_perm(pixl, pixr, perm2);
+ bytes = vec_perm(pixl, pixr, perm);
// Convert the bytes into shorts.
shorts2 = (vec_s16)vec_mergeh(zero, bytes);
@@ -104,17 +198,19 @@ static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
/* Read potentially unaligned pixels.
* We're reading 16 pixels, and actually only want 8,
* but we simply ignore the extras. */
+ perm = vec_lvsl(0, s1);
pixl = vec_ld(0, s1);
pixr = vec_ld(15, s1);
- bytes = vec_perm(pixl, pixr, perm1);
+ bytes = vec_perm(pixl, pixr, perm);
// Convert the bytes into shorts.
shorts1 = (vec_s16)vec_mergeh(zero, bytes);
// Do the same for the second block of pixels.
+ perm = vec_lvsl(0, s2);
pixl = vec_ld(0, s2);
pixr = vec_ld(15, s2);
- bytes = vec_perm(pixl, pixr, perm2);
+ bytes = vec_perm(pixl, pixr, perm);
// Convert the bytes into shorts.
shorts2 = (vec_s16)vec_mergeh(zero, bytes);
@@ -131,7 +227,9 @@ static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
}
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_VSX */
+
+#endif /* HAVE_ALTIVEC */
#if HAVE_VSX
static void get_pixels_vsx(int16_t *restrict block, const uint8_t *pixels,
@@ -171,7 +269,7 @@ av_cold void ff_pixblockdsp_init_ppc(PixblockDSPContext *c,
AVCodecContext *avctx,
unsigned high_bit_depth)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
@@ -180,7 +278,7 @@ av_cold void ff_pixblockdsp_init_ppc(PixblockDSPContext *c,
if (!high_bit_depth) {
c->get_pixels = get_pixels_altivec;
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
#if HAVE_VSX
if (!PPC_VSX(av_get_cpu_flags()))
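
On the VSX side, get_pixels widens bytes to shorts with an explicit permute pattern (0x00,0x10, 0x01,0x11, ...) that interleaves each pixel byte with a zero byte, which on little-endian gives the same zero-extension that vec_mergeh(zero, bytes) gives on big-endian; the big-endian diff_pixels meanwhile re-derives its lvsl permute per pointer inside the loop instead of hoisting perm1/perm2. In scalar terms both kernels are trivial:

    #include <stdint.h>
    #include <stddef.h>

    /* Scalar references for get_pixels() / diff_pixels() on an 8x8 block. */
    static void get_pixels_ref(int16_t *block, const uint8_t *pixels, ptrdiff_t stride)
    {
        for (int i = 0; i < 8; i++) {
            for (int j = 0; j < 8; j++)
                block[i * 8 + j] = pixels[j];       /* zero-extend to 16 bits */
            pixels += stride;
        }
    }

    static void diff_pixels_ref(int16_t *block, const uint8_t *s1,
                                const uint8_t *s2, ptrdiff_t stride)
    {
        for (int i = 0; i < 8; i++) {
            for (int j = 0; j < 8; j++)
                block[i * 8 + j] = s1[j] - s2[j];   /* signed 16-bit difference */
            s1 += stride;
            s2 += stride;
        }
    }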
diff --git a/libavcodec/ppc/svq1enc_altivec.c b/libavcodec/ppc/svq1enc_altivec.c
index 77d771eba6..4e25e253f6 100644
--- a/libavcodec/ppc/svq1enc_altivec.c
+++ b/libavcodec/ppc/svq1enc_altivec.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Luca Barbato <lu_zero@gentoo.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -32,7 +32,7 @@
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/svq1enc.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
static int ssd_int8_vs_int16_altivec(const int8_t *pix1, const int16_t *pix2,
int size)
{
@@ -72,14 +72,14 @@ static int ssd_int8_vs_int16_altivec(const int8_t *pix1, const int16_t *pix2,
return u.score[3];
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
av_cold void ff_svq1enc_init_ppc(SVQ1EncContext *c)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
c->ssd_int8_vs_int16 = ssd_int8_vs_int16_altivec;
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
diff --git a/libavcodec/ppc/vc1dsp_altivec.c b/libavcodec/ppc/vc1dsp_altivec.c
index d252421560..83d537f0c1 100644
--- a/libavcodec/ppc/vc1dsp_altivec.c
+++ b/libavcodec/ppc/vc1dsp_altivec.c
@@ -2,20 +2,20 @@
* VC-1 and WMV3 decoder - DSP functions AltiVec-optimized
* Copyright (c) 2006 Konstantin Shishkov
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,7 +27,7 @@
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/vc1dsp.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
// main steps of 8x8 transform
#define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd) \
@@ -305,16 +305,23 @@ static void vc1_inv_trans_8x4_altivec(uint8_t *dest, ptrdiff_t stride,
src2 = vec_pack(s2, sA);
src3 = vec_pack(s3, sB);
+#if HAVE_BIGENDIAN
p0 = vec_lvsl (0, dest);
p1 = vec_lvsl (stride, dest);
p = vec_splat_u8 (-1);
perm0 = vec_mergeh (p, p0);
perm1 = vec_mergeh (p, p1);
+#define GET_TMP2(dst, p) \
+ tmp = vec_ld (0, dst); \
+ tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), p);
+#else
+#define GET_TMP2(dst, p) \
+ tmp = vec_vsx_ld (0, dst); \
+ tmp2 = (vector signed short)vec_mergeh (tmp, vec_splat_u8(0));
+#endif
#define ADD(dest,src,perm) \
- /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \
- tmp = vec_ld (0, dest); \
- tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), perm); \
+ GET_TMP2(dest, perm); \
tmp3 = vec_adds (tmp2, src); \
tmp = vec_packsu (tmp3, tmp3); \
vec_ste ((vector unsigned int)tmp, 0, (unsigned int *)dest); \
@@ -341,11 +348,11 @@ static void vc1_inv_trans_8x4_altivec(uint8_t *dest, ptrdiff_t stride,
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
av_cold void ff_vc1dsp_init_ppc(VC1DSPContext *dsp)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
@@ -353,5 +360,5 @@ av_cold void ff_vc1dsp_init_ppc(VC1DSPContext *dsp)
dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec;
dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_altivec;
dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_altivec;
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
diff --git a/libavcodec/ppc/videodsp_ppc.c b/libavcodec/ppc/videodsp_ppc.c
index b9e003b487..915702252e 100644
--- a/libavcodec/ppc/videodsp_ppc.c
+++ b/libavcodec/ppc/videodsp_ppc.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2003-2004 Romain Dolbeau
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/vorbisdsp_altivec.c b/libavcodec/ppc/vorbisdsp_altivec.c
index 509d48ae0c..d7557c815b 100644
--- a/libavcodec/ppc/vorbisdsp_altivec.c
+++ b/libavcodec/ppc/vorbisdsp_altivec.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,7 +27,7 @@
#include "libavutil/ppc/cpu.h"
#include "libavcodec/vorbisdsp.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
static void vorbis_inverse_coupling_altivec(float *mag, float *ang,
intptr_t blocksize)
{
@@ -50,14 +50,14 @@ static void vorbis_inverse_coupling_altivec(float *mag, float *ang,
vec_stl(m, 0, mag+i);
}
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
av_cold void ff_vorbisdsp_init_ppc(VorbisDSPContext *c)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
c->vorbis_inverse_coupling = vorbis_inverse_coupling_altivec;
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
diff --git a/libavcodec/ppc/vp3dsp_altivec.c b/libavcodec/ppc/vp3dsp_altivec.c
index 4dff4f1b96..d2231d090a 100644
--- a/libavcodec/ppc/vp3dsp_altivec.c
+++ b/libavcodec/ppc/vp3dsp_altivec.c
@@ -1,20 +1,20 @@
/*
* Copyright (C) 2009 David Conrad
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -28,12 +28,17 @@
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/vp3dsp.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
static const vec_s16 constants =
{0, 64277, 60547, 54491, 46341, 36410, 25080, 12785};
+#if HAVE_BIGENDIAN
static const vec_u8 interleave_high =
{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29};
+#else
+static const vec_u8 interleave_high =
+ {2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31};
+#endif
#define IDCT_START \
vec_s16 A, B, C, D, Ad, Bd, Cd, Dd, E, F, G, H;\
@@ -156,9 +161,18 @@ static void vp3_idct_add_altivec(uint8_t *dst, ptrdiff_t stride, int16_t block[6
TRANSPOSE8(b0, b1, b2, b3, b4, b5, b6, b7);
IDCT_1D(ADD8, SHIFT4)
-#define ADD(a)\
+#if HAVE_BIGENDIAN
+#define GET_VDST16\
vdst = vec_ld(0, dst);\
- vdst_16 = (vec_s16)vec_perm(vdst, zero_u8v, vdst_mask);\
+ vdst_16 = (vec_s16)vec_perm(vdst, zero_u8v, vdst_mask);
+#else
+#define GET_VDST16\
+ vdst = vec_vsx_ld(0, dst);\
+ vdst_16 = (vec_s16)vec_mergeh(vdst, zero_u8v);
+#endif
+
+#define ADD(a)\
+ GET_VDST16;\
vdst_16 = vec_adds(a, vdst_16);\
t = vec_packsu(vdst_16, vdst_16);\
vec_ste((vec_u32)t, 0, (unsigned int *)dst);\
@@ -175,15 +189,15 @@ static void vp3_idct_add_altivec(uint8_t *dst, ptrdiff_t stride, int16_t block[6
memset(block, 0, sizeof(*block) * 64);
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
av_cold void ff_vp3dsp_init_ppc(VP3DSPContext *c, int flags)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
c->idct_put = vp3_idct_put_altivec;
c->idct_add = vp3_idct_add_altivec;
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}
diff --git a/libavcodec/ppc/vp8dsp_altivec.c b/libavcodec/ppc/vp8dsp_altivec.c
index 53fe44ab48..23e4ace7da 100644
--- a/libavcodec/ppc/vp8dsp_altivec.c
+++ b/libavcodec/ppc/vp8dsp_altivec.c
@@ -3,20 +3,20 @@
*
* Copyright (C) 2010 David Conrad
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -29,7 +29,7 @@
#include "libavcodec/vp8dsp.h"
#include "hpeldsp_altivec.h"
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
#define REPT4(...) { __VA_ARGS__, __VA_ARGS__, __VA_ARGS__, __VA_ARGS__ }
// h subpel filter uses msum to multiply+add 4 pixel taps at once
@@ -59,17 +59,30 @@ static const vec_s8 h_subpel_filters_outer[3] =
vec_s8 filter_outerh = h_subpel_filters_outer[(i)>>1]; \
vec_s8 filter_outerl = vec_sld(filter_outerh, filter_outerh, 2)
+#if HAVE_BIGENDIAN
+#define GET_PIXHL(offset) \
+ a = vec_ld((offset)-is6tap-1, src); \
+ b = vec_ld((offset)-is6tap-1+15, src); \
+ pixh = vec_perm(a, b, permh##offset); \
+ pixl = vec_perm(a, b, perml##offset)
+
+#define GET_OUTER(offset) outer = vec_perm(a, b, perm_6tap##offset)
+#else
+#define GET_PIXHL(offset) \
+ a = vec_vsx_ld((offset)-is6tap-1, src); \
+ pixh = vec_perm(a, a, perm_inner); \
+ pixl = vec_perm(a, a, vec_add(perm_inner, vec_splat_u8(4)))
+
+#define GET_OUTER(offset) outer = vec_perm(a, a, perm_outer)
+#endif
+
#define FILTER_H(dstv, off) \
- a = vec_ld((off)-is6tap-1, src); \
- b = vec_ld((off)-is6tap-1+15, src); \
-\
- pixh = vec_perm(a, b, permh##off); \
- pixl = vec_perm(a, b, perml##off); \
+ GET_PIXHL(off); \
filth = vec_msum(filter_inner, pixh, c64); \
filtl = vec_msum(filter_inner, pixl, c64); \
\
if (is6tap) { \
- outer = vec_perm(a, b, perm_6tap##off); \
+ GET_OUTER(off); \
filth = vec_msum(filter_outerh, outer, filth); \
filtl = vec_msum(filter_outerl, outer, filtl); \
} \
@@ -84,9 +97,12 @@ void put_vp8_epel_h_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
int h, int mx, int w, int is6tap)
{
LOAD_H_SUBPEL_FILTER(mx-1);
- vec_u8 align_vec0, align_vec8, permh0, permh8, filt;
+#if HAVE_BIGENDIAN
+ vec_u8 align_vec0, align_vec8, permh0, permh8;
vec_u8 perm_6tap0, perm_6tap8, perml0, perml8;
- vec_u8 a, b, pixh, pixl, outer;
+ vec_u8 b;
+#endif
+ vec_u8 filt, a, pixh, pixl, outer;
vec_s16 f16h, f16l;
vec_s32 filth, filtl;
@@ -97,6 +113,7 @@ void put_vp8_epel_h_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
vec_s32 c64 = vec_sl(vec_splat_s32(1), vec_splat_u32(6));
vec_u16 c7 = vec_splat_u16(7);
+#if HAVE_BIGENDIAN
align_vec0 = vec_lvsl( -is6tap-1, src);
align_vec8 = vec_lvsl(8-is6tap-1, src);
@@ -107,6 +124,7 @@ void put_vp8_epel_h_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
perml8 = vec_perm(align_vec8, align_vec8, perm_inner);
perm_6tap0 = vec_perm(align_vec0, align_vec0, perm_outer);
perm_6tap8 = vec_perm(align_vec8, align_vec8, perm_outer);
+#endif
while (h --> 0) {
FILTER_H(f16h, 0);
@@ -164,6 +182,12 @@ static const vec_u8 v_subpel_filters[7] =
dstv = vec_adds(dstv, c64); \
dstv = vec_sra(dstv, c7)
+#if HAVE_BIGENDIAN
+#define LOAD_HL(off, s, perm) load_with_perm_vec(off, s, perm)
+#else
+#define LOAD_HL(off, s, perm) vec_mergeh(vec_vsx_ld(off, s), vec_vsx_ld(off+8, s))
+#endif
+
static av_always_inline
void put_vp8_epel_v_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
uint8_t *src, ptrdiff_t src_stride,
@@ -175,6 +199,7 @@ void put_vp8_epel_v_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
vec_s16 c64 = vec_sl(vec_splat_s16(1), vec_splat_u16(6));
vec_u16 c7 = vec_splat_u16(7);
+#if HAVE_BIGENDIAN
// we want pixels 0-7 to be in the even positions and 8-15 in the odd,
// so combine this permute with the alignment permute vector
align_vech = vec_lvsl(0, src);
@@ -183,22 +208,23 @@ void put_vp8_epel_v_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
perm_vec = vec_mergeh(align_vech, align_vecl);
else
perm_vec = vec_mergeh(align_vech, align_vech);
+#endif
if (is6tap)
- s0 = load_with_perm_vec(-2*src_stride, src, perm_vec);
- s1 = load_with_perm_vec(-1*src_stride, src, perm_vec);
- s2 = load_with_perm_vec( 0*src_stride, src, perm_vec);
- s3 = load_with_perm_vec( 1*src_stride, src, perm_vec);
+ s0 = LOAD_HL(-2*src_stride, src, perm_vec);
+ s1 = LOAD_HL(-1*src_stride, src, perm_vec);
+ s2 = LOAD_HL( 0*src_stride, src, perm_vec);
+ s3 = LOAD_HL( 1*src_stride, src, perm_vec);
if (is6tap)
- s4 = load_with_perm_vec( 2*src_stride, src, perm_vec);
+ s4 = LOAD_HL( 2*src_stride, src, perm_vec);
src += (2+is6tap)*src_stride;
while (h --> 0) {
if (is6tap)
- s5 = load_with_perm_vec(0, src, perm_vec);
+ s5 = LOAD_HL(0, src, perm_vec);
else
- s4 = load_with_perm_vec(0, src, perm_vec);
+ s4 = LOAD_HL(0, src, perm_vec);
FILTER_V(f16h, vec_mule);
@@ -272,49 +298,36 @@ EPEL_HV(4, 4,4)
static void put_vp8_pixels16_altivec(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my)
{
- register vector unsigned char pixelsv1, pixelsv2;
- register vector unsigned char pixelsv1B, pixelsv2B;
- register vector unsigned char pixelsv1C, pixelsv2C;
- register vector unsigned char pixelsv1D, pixelsv2D;
-
- register vector unsigned char perm = vec_lvsl(0, src);
+ register vector unsigned char perm;
int i;
register ptrdiff_t dstride2 = dstride << 1, sstride2 = sstride << 1;
register ptrdiff_t dstride3 = dstride2 + dstride, sstride3 = sstride + sstride2;
register ptrdiff_t dstride4 = dstride << 2, sstride4 = sstride << 2;
+#if HAVE_BIGENDIAN
+ perm = vec_lvsl(0, src);
+#endif
// hand-unrolling the loop by 4 gains about 15%
// minimum execution time goes from 74 to 60 cycles
// it's faster than -funroll-loops, but using
// -funroll-loops w/ this is bad - 74 cycles again.
// all this is on a 7450, tuning for the 7450
for (i = 0; i < h; i += 4) {
- pixelsv1 = vec_ld( 0, src);
- pixelsv2 = vec_ld(15, src);
- pixelsv1B = vec_ld(sstride, src);
- pixelsv2B = vec_ld(15 + sstride, src);
- pixelsv1C = vec_ld(sstride2, src);
- pixelsv2C = vec_ld(15 + sstride2, src);
- pixelsv1D = vec_ld(sstride3, src);
- pixelsv2D = vec_ld(15 + sstride3, src);
- vec_st(vec_perm(pixelsv1, pixelsv2, perm),
- 0, (unsigned char*)dst);
- vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
- dstride, (unsigned char*)dst);
- vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
- dstride2, (unsigned char*)dst);
- vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
- dstride3, (unsigned char*)dst);
+ vec_st(load_with_perm_vec(0, src, perm), 0, dst);
+ vec_st(load_with_perm_vec(sstride, src, perm), dstride, dst);
+ vec_st(load_with_perm_vec(sstride2, src, perm), dstride2, dst);
+ vec_st(load_with_perm_vec(sstride3, src, perm), dstride3, dst);
src += sstride4;
dst += dstride4;
}
}
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
+
av_cold void ff_vp78dsp_init_ppc(VP8DSPContext *c)
{
-#if HAVE_ALTIVEC && HAVE_BIGENDIAN
+#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
@@ -342,5 +355,5 @@ av_cold void ff_vp78dsp_init_ppc(VP8DSPContext *c)
c->put_vp8_epel_pixels_tab[2][1][1] = put_vp8_epel4_h4v4_altivec;
c->put_vp8_epel_pixels_tab[2][1][2] = put_vp8_epel4_h6v4_altivec;
c->put_vp8_epel_pixels_tab[2][2][1] = put_vp8_epel4_h4v6_altivec;
-#endif /* HAVE_ALTIVEC && HAVE_BIGENDIAN */
+#endif /* HAVE_ALTIVEC */
}