Diffstat (limited to 'libavcodec/ppc')
-rw-r--r--  libavcodec/ppc/Makefile                        7
-rw-r--r--  libavcodec/ppc/asm.S                           8
-rw-r--r--  libavcodec/ppc/audiodsp.c                      8
-rw-r--r--  libavcodec/ppc/blockdsp.c                      8
-rw-r--r--  libavcodec/ppc/dct-test.c                      8
-rw-r--r--  libavcodec/ppc/fdct.h                          8
-rw-r--r--  libavcodec/ppc/fdctdsp.c                      10
-rw-r--r--  libavcodec/ppc/fft_altivec.S                   8
-rw-r--r--  libavcodec/ppc/fft_init.c                     20
-rw-r--r--  libavcodec/ppc/fft_vsx.c                     227
-rw-r--r--  libavcodec/ppc/fft_vsx.h                     830
-rw-r--r--  libavcodec/ppc/fmtconvert_altivec.c            8
-rw-r--r--  libavcodec/ppc/h264chroma_init.c               8
-rw-r--r--  libavcodec/ppc/h264chroma_template.c         231
-rw-r--r--  libavcodec/ppc/h264dsp.c                      97
-rw-r--r--  libavcodec/ppc/h264qpel.c                    117
-rw-r--r--  libavcodec/ppc/h264qpel_template.c           376
-rw-r--r--  libavcodec/ppc/hpeldsp_altivec.c             229
-rw-r--r--  libavcodec/ppc/hpeldsp_altivec.h               8
-rw-r--r--  libavcodec/ppc/huffyuvdsp_altivec.c           10
-rw-r--r--  libavcodec/ppc/idctdsp.c                      33
-rw-r--r--  libavcodec/ppc/lossless_audiodsp_altivec.c (renamed from libavcodec/ppc/apedsp_altivec.c)  43
-rw-r--r--  libavcodec/ppc/mathops.h                       8
-rw-r--r--  libavcodec/ppc/me_cmp.c                      184
-rw-r--r--  libavcodec/ppc/mpegaudiodsp_altivec.c          8
-rw-r--r--  libavcodec/ppc/mpegvideo_altivec.c             8
-rw-r--r--  libavcodec/ppc/mpegvideodsp.c                 12
-rw-r--r--  libavcodec/ppc/mpegvideoencdsp.c              68
-rw-r--r--  libavcodec/ppc/pixblockdsp.c                 105
-rw-r--r--  libavcodec/ppc/svq1enc_altivec.c               8
-rw-r--r--  libavcodec/ppc/vc1dsp_altivec.c               21
-rw-r--r--  libavcodec/ppc/videodsp_ppc.c                  8
-rw-r--r--  libavcodec/ppc/vorbisdsp_altivec.c             8
-rw-r--r--  libavcodec/ppc/vp3dsp_altivec.c               26
-rw-r--r--  libavcodec/ppc/vp8dsp_altivec.c               94
35 files changed, 2000 insertions, 860 deletions
diff --git a/libavcodec/ppc/Makefile b/libavcodec/ppc/Makefile
index c6ff0f1132..6f7eeb7286 100644
--- a/libavcodec/ppc/Makefile
+++ b/libavcodec/ppc/Makefile
@@ -4,9 +4,10 @@ OBJS += ppc/fmtconvert_altivec.o \
OBJS-$(CONFIG_AUDIODSP) += ppc/audiodsp.o
OBJS-$(CONFIG_BLOCKDSP) += ppc/blockdsp.o
OBJS-$(CONFIG_FFT) += ppc/fft_init.o \
- ppc/fft_altivec.o
+ ppc/fft_altivec.o \
+ ppc/fft_vsx.o
OBJS-$(CONFIG_H264CHROMA) += ppc/h264chroma_init.o
-OBJS-$(CONFIG_H264DSP) += ppc/h264dsp.o
+OBJS-$(CONFIG_H264DSP) += ppc/h264dsp.o ppc/hpeldsp_altivec.o
OBJS-$(CONFIG_H264QPEL) += ppc/h264qpel.o
OBJS-$(CONFIG_HPELDSP) += ppc/hpeldsp_altivec.o
OBJS-$(CONFIG_HUFFYUVDSP) += ppc/huffyuvdsp_altivec.o
@@ -22,7 +23,7 @@ OBJS-$(CONFIG_VIDEODSP) += ppc/videodsp_ppc.o
OBJS-$(CONFIG_VP3DSP) += ppc/vp3dsp_altivec.o
# decoders/encoders
-OBJS-$(CONFIG_APE_DECODER) += ppc/apedsp_altivec.o
+OBJS-$(CONFIG_LLAUDDSP) += ppc/lossless_audiodsp_altivec.o
OBJS-$(CONFIG_SVQ1_ENCODER) += ppc/svq1enc_altivec.o
OBJS-$(CONFIG_VC1_DECODER) += ppc/vc1dsp_altivec.o
OBJS-$(CONFIG_VORBIS_DECODER) += ppc/vorbisdsp_altivec.o
diff --git a/libavcodec/ppc/asm.S b/libavcodec/ppc/asm.S
index 141dee9b78..a3edeed202 100644
--- a/libavcodec/ppc/asm.S
+++ b/libavcodec/ppc/asm.S
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2009 Loren Merritt
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/audiodsp.c b/libavcodec/ppc/audiodsp.c
index 36506ce902..c88c3d9167 100644
--- a/libavcodec/ppc/audiodsp.c
+++ b/libavcodec/ppc/audiodsp.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Luca Barbato <lu_zero@gentoo.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/blockdsp.c b/libavcodec/ppc/blockdsp.c
index 679bc0454f..0059b3b448 100644
--- a/libavcodec/ppc/blockdsp.c
+++ b/libavcodec/ppc/blockdsp.c
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/dct-test.c b/libavcodec/ppc/dct-test.c
index 37fd8bbd23..2328516ca4 100644
--- a/libavcodec/ppc/dct-test.c
+++ b/libavcodec/ppc/dct-test.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/fdct.h b/libavcodec/ppc/fdct.h
index 74710354ef..437f815258 100644
--- a/libavcodec/ppc/fdct.h
+++ b/libavcodec/ppc/fdct.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/fdctdsp.c b/libavcodec/ppc/fdctdsp.c
index 51417a5828..f2efc5ddd0 100644
--- a/libavcodec/ppc/fdctdsp.c
+++ b/libavcodec/ppc/fdctdsp.c
@@ -1,20 +1,20 @@
/*
* Copyright (C) 2003 James Klicman <james@klicman.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -59,7 +59,7 @@
#define WA (SQRT_2 * (-C3 - C5))
#define WB (SQRT_2 * (C5 - C3))
-static vector float fdctconsts[3] = {
+static const vector float fdctconsts[3] = {
{ W0, W1, W2, W3 },
{ W4, W5, W6, W7 },
{ W8, W9, WA, WB }
diff --git a/libavcodec/ppc/fft_altivec.S b/libavcodec/ppc/fft_altivec.S
index c92b30b897..aab669ea45 100644
--- a/libavcodec/ppc/fft_altivec.S
+++ b/libavcodec/ppc/fft_altivec.S
@@ -5,20 +5,20 @@
* This algorithm (though not any of the implementation details) is
* based on libdjbfft by D. J. Bernstein.
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/fft_init.c b/libavcodec/ppc/fft_init.c
index 8fcc033b53..675fa33a95 100644
--- a/libavcodec/ppc/fft_init.c
+++ b/libavcodec/ppc/fft_init.c
@@ -3,20 +3,20 @@
* AltiVec-enabled
* Copyright (c) 2009 Loren Merritt
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -36,8 +36,12 @@
* It also assumes all FFTComplex are 8 bytes-aligned pairs of floats.
*/
+#if HAVE_VSX
+#include "fft_vsx.h"
+#else
void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z);
void ff_fft_calc_interleave_altivec(FFTContext *s, FFTComplex *z);
+#endif
#if HAVE_GNU_AS && HAVE_ALTIVEC
static void imdct_half_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
@@ -94,7 +98,11 @@ static void imdct_half_altivec(FFTContext *s, FFTSample *output, const FFTSample
k--;
} while(k >= 0);
+#if HAVE_VSX
+ ff_fft_calc_vsx(s, (FFTComplex*)output);
+#else
ff_fft_calc_altivec(s, (FFTComplex*)output);
+#endif
/* post rotation + reordering */
j = -n32;
@@ -147,7 +155,11 @@ av_cold void ff_fft_init_ppc(FFTContext *s)
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
+#if HAVE_VSX
+ s->fft_calc = ff_fft_calc_interleave_vsx;
+#else
s->fft_calc = ff_fft_calc_interleave_altivec;
+#endif
if (s->mdct_bits >= 5) {
s->imdct_calc = imdct_calc_altivec;
s->imdct_half = imdct_half_altivec;
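
The hunks above choose between the VSX and AltiVec FFT back ends at build time, not at run time: when configure defines HAVE_VSX, the AltiVec entry points are never referenced, and only the AltiVec capability bit is still tested during init. One consequence visible in the diff is that a binary built with VSX enabled takes the VSX path on any AltiVec-capable CPU. A minimal sketch of the two-level pattern (run-time CPU check, compile-time back-end selection); the function name is illustrative, not from the patch:

    #include "config.h"
    #include "libavutil/attributes.h"
    #include "libavutil/cpu.h"
    #include "libavutil/ppc/cpu.h"
    #include "libavcodec/fft.h"

    av_cold void fft_backend_init_sketch(FFTContext *s)
    {
        if (!PPC_ALTIVEC(av_get_cpu_flags()))
            return;                    /* leave the scalar C implementation in place */
    #if HAVE_VSX
        s->fft_calc = ff_fft_calc_interleave_vsx;      /* from fft_vsx.c, added below */
    #else
        s->fft_calc = ff_fft_calc_interleave_altivec;  /* asm in fft_altivec.S */
    #endif
    }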
diff --git a/libavcodec/ppc/fft_vsx.c b/libavcodec/ppc/fft_vsx.c
new file mode 100644
index 0000000000..e92975f74e
--- /dev/null
+++ b/libavcodec/ppc/fft_vsx.c
@@ -0,0 +1,227 @@
+/*
+ * FFT transform, optimized with VSX built-in functions
+ * Copyright (c) 2014 Rong Yan
+ *
+ * This algorithm (though not any of the implementation details) is
+ * based on libdjbfft by D. J. Bernstein.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#include "config.h"
+#include "libavutil/cpu.h"
+#include "libavutil/ppc/types_altivec.h"
+#include "libavutil/ppc/util_altivec.h"
+#include "libavcodec/fft.h"
+#include "libavcodec/fft-internal.h"
+#include "fft_vsx.h"
+
+#if HAVE_VSX
+
+static void fft32_vsx_interleave(FFTComplex *z)
+{
+ fft16_vsx_interleave(z);
+ fft8_vsx_interleave(z+16);
+ fft8_vsx_interleave(z+24);
+ pass_vsx_interleave(z,ff_cos_32,4);
+}
+
+static void fft64_vsx_interleave(FFTComplex *z)
+{
+ fft32_vsx_interleave(z);
+ fft16_vsx_interleave(z+32);
+ fft16_vsx_interleave(z+48);
+ pass_vsx_interleave(z,ff_cos_64, 8);
+}
+static void fft128_vsx_interleave(FFTComplex *z)
+{
+ fft64_vsx_interleave(z);
+ fft32_vsx_interleave(z+64);
+ fft32_vsx_interleave(z+96);
+ pass_vsx_interleave(z,ff_cos_128,16);
+}
+static void fft256_vsx_interleave(FFTComplex *z)
+{
+ fft128_vsx_interleave(z);
+ fft64_vsx_interleave(z+128);
+ fft64_vsx_interleave(z+192);
+ pass_vsx_interleave(z,ff_cos_256,32);
+}
+static void fft512_vsx_interleave(FFTComplex *z)
+{
+ fft256_vsx_interleave(z);
+ fft128_vsx_interleave(z+256);
+ fft128_vsx_interleave(z+384);
+ pass_vsx_interleave(z,ff_cos_512,64);
+}
+static void fft1024_vsx_interleave(FFTComplex *z)
+{
+ fft512_vsx_interleave(z);
+ fft256_vsx_interleave(z+512);
+ fft256_vsx_interleave(z+768);
+ pass_vsx_interleave(z,ff_cos_1024,128);
+
+}
+static void fft2048_vsx_interleave(FFTComplex *z)
+{
+ fft1024_vsx_interleave(z);
+ fft512_vsx_interleave(z+1024);
+ fft512_vsx_interleave(z+1536);
+ pass_vsx_interleave(z,ff_cos_2048,256);
+}
+static void fft4096_vsx_interleave(FFTComplex *z)
+{
+ fft2048_vsx_interleave(z);
+ fft1024_vsx_interleave(z+2048);
+ fft1024_vsx_interleave(z+3072);
+ pass_vsx_interleave(z,ff_cos_4096, 512);
+}
+static void fft8192_vsx_interleave(FFTComplex *z)
+{
+ fft4096_vsx_interleave(z);
+ fft2048_vsx_interleave(z+4096);
+ fft2048_vsx_interleave(z+6144);
+ pass_vsx_interleave(z,ff_cos_8192,1024);
+}
+static void fft16384_vsx_interleave(FFTComplex *z)
+{
+ fft8192_vsx_interleave(z);
+ fft4096_vsx_interleave(z+8192);
+ fft4096_vsx_interleave(z+12288);
+ pass_vsx_interleave(z,ff_cos_16384,2048);
+}
+static void fft32768_vsx_interleave(FFTComplex *z)
+{
+ fft16384_vsx_interleave(z);
+ fft8192_vsx_interleave(z+16384);
+ fft8192_vsx_interleave(z+24576);
+ pass_vsx_interleave(z,ff_cos_32768,4096);
+}
+static void fft65536_vsx_interleave(FFTComplex *z)
+{
+ fft32768_vsx_interleave(z);
+ fft16384_vsx_interleave(z+32768);
+ fft16384_vsx_interleave(z+49152);
+ pass_vsx_interleave(z,ff_cos_65536,8192);
+}
+
+static void fft32_vsx(FFTComplex *z)
+{
+ fft16_vsx(z);
+ fft8_vsx(z+16);
+ fft8_vsx(z+24);
+ pass_vsx(z,ff_cos_32,4);
+}
+
+static void fft64_vsx(FFTComplex *z)
+{
+ fft32_vsx(z);
+ fft16_vsx(z+32);
+ fft16_vsx(z+48);
+ pass_vsx(z,ff_cos_64, 8);
+}
+static void fft128_vsx(FFTComplex *z)
+{
+ fft64_vsx(z);
+ fft32_vsx(z+64);
+ fft32_vsx(z+96);
+ pass_vsx(z,ff_cos_128,16);
+}
+static void fft256_vsx(FFTComplex *z)
+{
+ fft128_vsx(z);
+ fft64_vsx(z+128);
+ fft64_vsx(z+192);
+ pass_vsx(z,ff_cos_256,32);
+}
+static void fft512_vsx(FFTComplex *z)
+{
+ fft256_vsx(z);
+ fft128_vsx(z+256);
+ fft128_vsx(z+384);
+ pass_vsx(z,ff_cos_512,64);
+}
+static void fft1024_vsx(FFTComplex *z)
+{
+ fft512_vsx(z);
+ fft256_vsx(z+512);
+ fft256_vsx(z+768);
+ pass_vsx(z,ff_cos_1024,128);
+
+}
+static void fft2048_vsx(FFTComplex *z)
+{
+ fft1024_vsx(z);
+ fft512_vsx(z+1024);
+ fft512_vsx(z+1536);
+ pass_vsx(z,ff_cos_2048,256);
+}
+static void fft4096_vsx(FFTComplex *z)
+{
+ fft2048_vsx(z);
+ fft1024_vsx(z+2048);
+ fft1024_vsx(z+3072);
+ pass_vsx(z,ff_cos_4096, 512);
+}
+static void fft8192_vsx(FFTComplex *z)
+{
+ fft4096_vsx(z);
+ fft2048_vsx(z+4096);
+ fft2048_vsx(z+6144);
+ pass_vsx(z,ff_cos_8192,1024);
+}
+static void fft16384_vsx(FFTComplex *z)
+{
+ fft8192_vsx(z);
+ fft4096_vsx(z+8192);
+ fft4096_vsx(z+12288);
+ pass_vsx(z,ff_cos_16384,2048);
+}
+static void fft32768_vsx(FFTComplex *z)
+{
+ fft16384_vsx(z);
+ fft8192_vsx(z+16384);
+ fft8192_vsx(z+24576);
+ pass_vsx(z,ff_cos_32768,4096);
+}
+static void fft65536_vsx(FFTComplex *z)
+{
+ fft32768_vsx(z);
+ fft16384_vsx(z+32768);
+ fft16384_vsx(z+49152);
+ pass_vsx(z,ff_cos_65536,8192);
+}
+
+static void (* const fft_dispatch_vsx[])(FFTComplex*) = {
+ fft4_vsx, fft8_vsx, fft16_vsx, fft32_vsx, fft64_vsx, fft128_vsx, fft256_vsx, fft512_vsx, fft1024_vsx,
+ fft2048_vsx, fft4096_vsx, fft8192_vsx, fft16384_vsx, fft32768_vsx, fft65536_vsx,
+};
+static void (* const fft_dispatch_vsx_interleave[])(FFTComplex*) = {
+ fft4_vsx_interleave, fft8_vsx_interleave, fft16_vsx_interleave, fft32_vsx_interleave, fft64_vsx_interleave,
+ fft128_vsx_interleave, fft256_vsx_interleave, fft512_vsx_interleave, fft1024_vsx_interleave,
+ fft2048_vsx_interleave, fft4096_vsx_interleave, fft8192_vsx_interleave, fft16384_vsx_interleave, fft32768_vsx_interleave, fft65536_vsx_interleave,
+};
+void ff_fft_calc_interleave_vsx(FFTContext *s, FFTComplex *z)
+{
+ fft_dispatch_vsx_interleave[s->nbits-2](z);
+}
+void ff_fft_calc_vsx(FFTContext *s, FFTComplex *z)
+{
+ fft_dispatch_vsx[s->nbits-2](z);
+}
+#endif /* HAVE_VSX */
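
The two dispatch tables above hold one hand-written transform per power-of-two size, starting at the 4-point FFT, so a 2^nbits-point transform sits in slot nbits-2: fft4_vsx at slot 0 (nbits == 2) up to fft65536_vsx at slot 14 (nbits == 16). A self-contained demonstration of the same indexing convention; every name here is illustrative, only the nbits-2 arithmetic comes from the patch:

    #include <stdio.h>

    typedef struct { float re, im; } Cpx;

    static void fft4_demo(Cpx *z)  { (void)z; printf("4-point\n");  }
    static void fft8_demo(Cpx *z)  { (void)z; printf("8-point\n");  }
    static void fft16_demo(Cpx *z) { (void)z; printf("16-point\n"); }

    /* smallest supported transform is 2^2 points, hence the -2 bias */
    static void (* const dispatch_demo[])(Cpx *) = {
        fft4_demo, fft8_demo, fft16_demo,
    };

    static void calc_demo(int nbits, Cpx *z)
    {
        dispatch_demo[nbits - 2](z);   /* as in ff_fft_calc_vsx() */
    }

    int main(void)
    {
        Cpx buf[16] = { { 0, 0 } };
        calc_demo(4, buf);             /* prints "16-point" */
        return 0;
    }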
diff --git a/libavcodec/ppc/fft_vsx.h b/libavcodec/ppc/fft_vsx.h
new file mode 100644
index 0000000000..a85475d160
--- /dev/null
+++ b/libavcodec/ppc/fft_vsx.h
@@ -0,0 +1,830 @@
+#ifndef AVCODEC_PPC_FFT_VSX_H
+#define AVCODEC_PPC_FFT_VSX_H
+/*
+ * FFT transform, optimized with VSX built-in functions
+ * Copyright (c) 2014 Rong Yan Copyright (c) 2009 Loren Merritt
+ *
+ * This algorithm (though not any of the implementation details) is
+ * based on libdjbfft by D. J. Bernstein, and fft_altivec_s.S.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#include "config.h"
+#include "libavutil/cpu.h"
+#include "libavutil/ppc/types_altivec.h"
+#include "libavutil/ppc/util_altivec.h"
+#include "libavcodec/fft.h"
+#include "libavcodec/fft-internal.h"
+
+#if HAVE_VSX
+
+void ff_fft_calc_interleave_vsx(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_vsx(FFTContext *s, FFTComplex *z);
+
+
+#define byte_2complex (2*sizeof(FFTComplex))
+#define byte_4complex (4*sizeof(FFTComplex))
+#define byte_6complex (6*sizeof(FFTComplex))
+#define byte_8complex (8*sizeof(FFTComplex))
+#define byte_10complex (10*sizeof(FFTComplex))
+#define byte_12complex (12*sizeof(FFTComplex))
+#define byte_14complex (14*sizeof(FFTComplex))
+
+inline static void pass_vsx_interleave(FFTComplex *z, const FFTSample *wre, unsigned int n)
+{
+ int o1 = n<<1;
+ int o2 = n<<2;
+ int o3 = o1+o2;
+ int i1, i2, i3;
+ FFTSample* out = (FFTSample*)z;
+ const FFTSample *wim = wre+o1;
+ vec_f vz0, vzo1, vzo2, vzo3;
+ vec_f x0, x1, x2, x3;
+ vec_f x4, x5, x6, x7;
+ vec_f x8, x9, x10, x11;
+ vec_f x12, x13, x14, x15;
+ vec_f x16, x17, x18, x19;
+ vec_f x20, x21, x22, x23;
+ vec_f vz0plus1, vzo1plus1, vzo2plus1, vzo3plus1;
+ vec_f y0, y1, y2, y3;
+ vec_f y4, y5, y8, y9;
+ vec_f y10, y13, y14, y15;
+ vec_f y16, y17, y18, y19;
+ vec_f y20, y21, y22, y23;
+ vec_f wr1, wi1, wr0, wi0;
+ vec_f wr2, wi2, wr3, wi3;
+ vec_f xmulwi0, xmulwi1, ymulwi2, ymulwi3;
+
+ n = n-2;
+ i1 = o1*sizeof(FFTComplex);
+ i2 = o2*sizeof(FFTComplex);
+ i3 = o3*sizeof(FFTComplex);
+ vzo2 = vec_ld(i2, &(out[0])); // zo2.r zo2.i z(o2+1).r z(o2+1).i
+ vzo2plus1 = vec_ld(i2+16, &(out[0]));
+ vzo3 = vec_ld(i3, &(out[0])); // zo3.r zo3.i z(o3+1).r z(o3+1).i
+ vzo3plus1 = vec_ld(i3+16, &(out[0]));
+ vz0 = vec_ld(0, &(out[0])); // z0.r z0.i z1.r z1.i
+ vz0plus1 = vec_ld(16, &(out[0]));
+ vzo1 = vec_ld(i1, &(out[0])); // zo1.r zo1.i z(o1+1).r z(o1+1).i
+ vzo1plus1 = vec_ld(i1+16, &(out[0]));
+
+ x0 = vec_add(vzo2, vzo3);
+ x1 = vec_sub(vzo2, vzo3);
+ y0 = vec_add(vzo2plus1, vzo3plus1);
+ y1 = vec_sub(vzo2plus1, vzo3plus1);
+
+ wr1 = vec_splats(wre[1]);
+ wi1 = vec_splats(wim[-1]);
+ wi2 = vec_splats(wim[-2]);
+ wi3 = vec_splats(wim[-3]);
+ wr2 = vec_splats(wre[2]);
+ wr3 = vec_splats(wre[3]);
+
+ x2 = vec_perm(x0, x1, vcprm(2,s2,3,s3));
+ x3 = vec_perm(x0, x1, vcprm(s3,3,s2,2));
+
+ y4 = vec_perm(y0, y1, vcprm(s1,1,s0,0));
+ y5 = vec_perm(y0, y1, vcprm(s3,3,s2,2));
+ y2 = vec_perm(y0, y1, vcprm(0,s0,1,s1));
+ y3 = vec_perm(y0, y1, vcprm(2,s2,3,s3));
+
+ ymulwi2 = vec_mul(y4, wi2);
+ ymulwi3 = vec_mul(y5, wi3);
+ x4 = vec_mul(x2, wr1);
+ x5 = vec_mul(x3, wi1);
+ y8 = vec_madd(y2, wr2, ymulwi2);
+ y9 = vec_msub(y2, wr2, ymulwi2);
+ x6 = vec_add(x4, x5);
+ x7 = vec_sub(x4, x5);
+ y13 = vec_madd(y3, wr3, ymulwi3);
+ y14 = vec_msub(y3, wr3, ymulwi3);
+
+ x8 = vec_perm(x6, x7, vcprm(0,1,s2,s3));
+ y10 = vec_perm(y8, y9, vcprm(0,1,s2,s3));
+ y15 = vec_perm(y13, y14, vcprm(0,1,s2,s3));
+
+ x9 = vec_perm(x0, x8, vcprm(0,1,s0,s2));
+ x10 = vec_perm(x1, x8, vcprm(1,0,s3,s1));
+
+ y16 = vec_perm(y10, y15, vcprm(0,2,s0,s2));
+ y17 = vec_perm(y10, y15, vcprm(3,1,s3,s1));
+
+ x11 = vec_add(vz0, x9);
+ x12 = vec_sub(vz0, x9);
+ x13 = vec_add(vzo1, x10);
+ x14 = vec_sub(vzo1, x10);
+
+ y18 = vec_add(vz0plus1, y16);
+ y19 = vec_sub(vz0plus1, y16);
+ y20 = vec_add(vzo1plus1, y17);
+ y21 = vec_sub(vzo1plus1, y17);
+
+ x15 = vec_perm(x13, x14, vcprm(0,s1,2,s3));
+ x16 = vec_perm(x13, x14, vcprm(s0,1,s2,3));
+ y22 = vec_perm(y20, y21, vcprm(0,s1,2,s3));
+ y23 = vec_perm(y20, y21, vcprm(s0,1,s2,3));
+
+
+ vec_st(x11, 0, &(out[0]));
+ vec_st(y18, 16, &(out[0]));
+ vec_st(x15, i1, &(out[0]));
+ vec_st(y22, i1+16, &(out[0]));
+ vec_st(x12, i2, &(out[0]));
+ vec_st(y19, i2+16, &(out[0]));
+ vec_st(x16, i3, &(out[0]));
+ vec_st(y23, i3+16, &(out[0]));
+
+ do {
+ out += 8;
+ wre += 4;
+ wim -= 4;
+ wr0 = vec_splats(wre[0]);
+ wr1 = vec_splats(wre[1]);
+ wi0 = vec_splats(wim[0]);
+ wi1 = vec_splats(wim[-1]);
+
+ wr2 = vec_splats(wre[2]);
+ wr3 = vec_splats(wre[3]);
+ wi2 = vec_splats(wim[-2]);
+ wi3 = vec_splats(wim[-3]);
+
+ vzo2 = vec_ld(i2, &(out[0])); // zo2.r zo2.i z(o2+1).r z(o2+1).i
+ vzo2plus1 = vec_ld(i2+16, &(out[0]));
+ vzo3 = vec_ld(i3, &(out[0])); // zo3.r zo3.i z(o3+1).r z(o3+1).i
+ vzo3plus1 = vec_ld(i3+16, &(out[0]));
+ vz0 = vec_ld(0, &(out[0])); // z0.r z0.i z1.r z1.i
+ vz0plus1 = vec_ld(16, &(out[0]));
+ vzo1 = vec_ld(i1, &(out[0])); // zo1.r zo1.i z(o1+1).r z(o1+1).i
+ vzo1plus1 = vec_ld(i1+16, &(out[0]));
+
+ x0 = vec_add(vzo2, vzo3);
+ x1 = vec_sub(vzo2, vzo3);
+
+ y0 = vec_add(vzo2plus1, vzo3plus1);
+ y1 = vec_sub(vzo2plus1, vzo3plus1);
+
+ x4 = vec_perm(x0, x1, vcprm(s1,1,s0,0));
+ x5 = vec_perm(x0, x1, vcprm(s3,3,s2,2));
+ x2 = vec_perm(x0, x1, vcprm(0,s0,1,s1));
+ x3 = vec_perm(x0, x1, vcprm(2,s2,3,s3));
+
+ y2 = vec_perm(y0, y1, vcprm(0,s0,1,s1));
+ y3 = vec_perm(y0, y1, vcprm(2,s2,3,s3));
+ xmulwi0 = vec_mul(x4, wi0);
+ xmulwi1 = vec_mul(x5, wi1);
+
+ y4 = vec_perm(y0, y1, vcprm(s1,1,s0,0));
+ y5 = vec_perm(y0, y1, vcprm(s3,3,s2,2));
+
+ x8 = vec_madd(x2, wr0, xmulwi0);
+ x9 = vec_msub(x2, wr0, xmulwi0);
+ ymulwi2 = vec_mul(y4, wi2);
+ ymulwi3 = vec_mul(y5, wi3);
+
+ x13 = vec_madd(x3, wr1, xmulwi1);
+ x14 = vec_msub(x3, wr1, xmulwi1);
+
+ y8 = vec_madd(y2, wr2, ymulwi2);
+ y9 = vec_msub(y2, wr2, ymulwi2);
+ y13 = vec_madd(y3, wr3, ymulwi3);
+ y14 = vec_msub(y3, wr3, ymulwi3);
+
+ x10 = vec_perm(x8, x9, vcprm(0,1,s2,s3));
+ x15 = vec_perm(x13, x14, vcprm(0,1,s2,s3));
+
+ y10 = vec_perm(y8, y9, vcprm(0,1,s2,s3));
+ y15 = vec_perm(y13, y14, vcprm(0,1,s2,s3));
+
+ x16 = vec_perm(x10, x15, vcprm(0,2,s0,s2));
+ x17 = vec_perm(x10, x15, vcprm(3,1,s3,s1));
+
+ y16 = vec_perm(y10, y15, vcprm(0,2,s0,s2));
+ y17 = vec_perm(y10, y15, vcprm(3,1,s3,s1));
+
+ x18 = vec_add(vz0, x16);
+ x19 = vec_sub(vz0, x16);
+ x20 = vec_add(vzo1, x17);
+ x21 = vec_sub(vzo1, x17);
+
+ y18 = vec_add(vz0plus1, y16);
+ y19 = vec_sub(vz0plus1, y16);
+ y20 = vec_add(vzo1plus1, y17);
+ y21 = vec_sub(vzo1plus1, y17);
+
+ x22 = vec_perm(x20, x21, vcprm(0,s1,2,s3));
+ x23 = vec_perm(x20, x21, vcprm(s0,1,s2,3));
+
+ y22 = vec_perm(y20, y21, vcprm(0,s1,2,s3));
+ y23 = vec_perm(y20, y21, vcprm(s0,1,s2,3));
+
+ vec_st(x18, 0, &(out[0]));
+ vec_st(y18, 16, &(out[0]));
+ vec_st(x22, i1, &(out[0]));
+ vec_st(y22, i1+16, &(out[0]));
+ vec_st(x19, i2, &(out[0]));
+ vec_st(y19, i2+16, &(out[0]));
+ vec_st(x23, i3, &(out[0]));
+ vec_st(y23, i3+16, &(out[0]));
+ } while (n-=2);
+}
+
+inline static void fft2_vsx_interleave(FFTComplex *z)
+{
+ FFTSample r1, i1;
+
+ r1 = z[0].re - z[1].re;
+ z[0].re += z[1].re;
+ z[1].re = r1;
+
+ i1 = z[0].im - z[1].im;
+ z[0].im += z[1].im;
+ z[1].im = i1;
+ }
+
+inline static void fft4_vsx_interleave(FFTComplex *z)
+{
+ vec_f a, b, c, d;
+ float* out= (float*)z;
+ a = vec_ld(0, &(out[0]));
+ b = vec_ld(byte_2complex, &(out[0]));
+
+ c = vec_perm(a, b, vcprm(0,1,s2,s1));
+ d = vec_perm(a, b, vcprm(2,3,s0,s3));
+ a = vec_add(c, d);
+ b = vec_sub(c, d);
+
+ c = vec_perm(a, b, vcprm(0,1,s0,s1));
+ d = vec_perm(a, b, vcprm(2,3,s3,s2));
+
+ a = vec_add(c, d);
+ b = vec_sub(c, d);
+ vec_st(a, 0, &(out[0]));
+ vec_st(b, byte_2complex, &(out[0]));
+}
+
+inline static void fft8_vsx_interleave(FFTComplex *z)
+{
+ vec_f vz0, vz1, vz2, vz3;
+ vec_f x0, x1, x2, x3;
+ vec_f x4, x5, x6, x7;
+ vec_f x8, x9, x10, x11;
+ vec_f x12, x13, x14, x15;
+ vec_f x16, x17, x18, x19;
+ vec_f x20, x21, x22, x23;
+ vec_f x24, x25, x26, x27;
+ vec_f x28, x29, x30, x31;
+ vec_f x32, x33, x34;
+
+ float* out= (float*)z;
+ vec_f vc1 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
+
+ vz0 = vec_ld(0, &(out[0]));
+ vz1 = vec_ld(byte_2complex, &(out[0]));
+ vz2 = vec_ld(byte_4complex, &(out[0]));
+ vz3 = vec_ld(byte_6complex, &(out[0]));
+
+ x0 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+ x1 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+ x2 = vec_perm(vz2, vz3, vcprm(2,1,s0,s1));
+ x3 = vec_perm(vz2, vz3, vcprm(0,3,s2,s3));
+
+ x4 = vec_add(x0, x1);
+ x5 = vec_sub(x0, x1);
+ x6 = vec_add(x2, x3);
+ x7 = vec_sub(x2, x3);
+
+ x8 = vec_perm(x4, x5, vcprm(0,1,s0,s1));
+ x9 = vec_perm(x4, x5, vcprm(2,3,s3,s2));
+ x10 = vec_perm(x6, x7, vcprm(2,1,s2,s1));
+ x11 = vec_perm(x6, x7, vcprm(0,3,s0,s3));
+
+ x12 = vec_add(x8, x9);
+ x13 = vec_sub(x8, x9);
+ x14 = vec_add(x10, x11);
+ x15 = vec_sub(x10, x11);
+ x16 = vec_perm(x12, x13, vcprm(0,s0,1,s1));
+ x17 = vec_perm(x14, x15, vcprm(0,s0,1,s1));
+ x18 = vec_perm(x16, x17, vcprm(s0,s3,s2,s1));
+ x19 = vec_add(x16, x18); // z0.r z2.r z0.i z2.i
+ x20 = vec_sub(x16, x18); // z4.r z6.r z4.i z6.i
+
+ x21 = vec_perm(x12, x13, vcprm(2,s2,3,s3));
+ x22 = vec_perm(x14, x15, vcprm(2,3,s2,s3));
+ x23 = vec_perm(x14, x15, vcprm(3,2,s3,s2));
+ x24 = vec_add(x22, x23);
+ x25 = vec_sub(x22, x23);
+ x26 = vec_mul( vec_perm(x24, x25, vcprm(2,s2,0,s0)), vc1);
+
+ x27 = vec_add(x21, x26); // z1.r z7.r z1.i z3.i
+ x28 = vec_sub(x21, x26); //z5.r z3.r z5.i z7.i
+
+ x29 = vec_perm(x19, x27, vcprm(0,2,s0,s2)); // z0.r z0.i z1.r z1.i
+ x30 = vec_perm(x19, x27, vcprm(1,3,s1,s3)); // z2.r z2.i z7.r z3.i
+ x31 = vec_perm(x20, x28, vcprm(0,2,s0,s2)); // z4.r z4.i z5.r z5.i
+ x32 = vec_perm(x20, x28, vcprm(1,3,s1,s3)); // z6.r z6.i z3.r z7.i
+ x33 = vec_perm(x30, x32, vcprm(0,1,s2,3)); // z2.r z2.i z3.r z3.i
+ x34 = vec_perm(x30, x32, vcprm(s0,s1,2,s3)); // z6.r z6.i z7.r z7.i
+
+ vec_st(x29, 0, &(out[0]));
+ vec_st(x33, byte_2complex, &(out[0]));
+ vec_st(x31, byte_4complex, &(out[0]));
+ vec_st(x34, byte_6complex, &(out[0]));
+}
+
+inline static void fft16_vsx_interleave(FFTComplex *z)
+{
+ float* out= (float*)z;
+ vec_f vc0 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
+ vec_f vc1 = {ff_cos_16[1], ff_cos_16[1], ff_cos_16[1], ff_cos_16[1]};
+ vec_f vc2 = {ff_cos_16[3], ff_cos_16[3], ff_cos_16[3], ff_cos_16[3]};
+ vec_f vz0, vz1, vz2, vz3;
+ vec_f vz4, vz5, vz6, vz7;
+ vec_f x0, x1, x2, x3;
+ vec_f x4, x5, x6, x7;
+ vec_f x8, x9, x10, x11;
+ vec_f x12, x13, x14, x15;
+ vec_f x16, x17, x18, x19;
+ vec_f x20, x21, x22, x23;
+ vec_f x24, x25, x26, x27;
+ vec_f x28, x29, x30, x31;
+ vec_f x32, x33, x34, x35;
+ vec_f x36, x37, x38, x39;
+ vec_f x40, x41, x42, x43;
+ vec_f x44, x45, x46, x47;
+ vec_f x48, x49, x50, x51;
+ vec_f x52, x53, x54, x55;
+ vec_f x56, x57, x58, x59;
+ vec_f x60, x61, x62, x63;
+ vec_f x64, x65, x66, x67;
+ vec_f x68, x69, x70, x71;
+ vec_f x72, x73, x74, x75;
+ vec_f x76, x77, x78, x79;
+ vec_f x80, x81, x82, x83;
+ vec_f x84, x85, x86;
+
+ vz0 = vec_ld(0, &(out[0]));
+ vz1 = vec_ld(byte_2complex, &(out[0]));
+ vz2 = vec_ld(byte_4complex, &(out[0]));
+ vz3 = vec_ld(byte_6complex, &(out[0]));
+ vz4 = vec_ld(byte_8complex, &(out[0]));
+ vz5 = vec_ld(byte_10complex, &(out[0]));
+ vz6 = vec_ld(byte_12complex, &(out[0]));
+ vz7 = vec_ld(byte_14complex, &(out[0]));
+
+ x0 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+ x1 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+ x2 = vec_perm(vz2, vz3, vcprm(0,1,s0,s1));
+ x3 = vec_perm(vz2, vz3, vcprm(2,3,s2,s3));
+
+ x4 = vec_perm(vz4, vz5, vcprm(0,1,s2,s1));
+ x5 = vec_perm(vz4, vz5, vcprm(2,3,s0,s3));
+ x6 = vec_perm(vz6, vz7, vcprm(0,1,s2,s1));
+ x7 = vec_perm(vz6, vz7, vcprm(2,3,s0,s3));
+
+ x8 = vec_add(x0, x1);
+ x9 = vec_sub(x0, x1);
+ x10 = vec_add(x2, x3);
+ x11 = vec_sub(x2, x3);
+
+ x12 = vec_add(x4, x5);
+ x13 = vec_sub(x4, x5);
+ x14 = vec_add(x6, x7);
+ x15 = vec_sub(x6, x7);
+
+ x16 = vec_perm(x8, x9, vcprm(0,1,s0,s1));
+ x17 = vec_perm(x8, x9, vcprm(2,3,s3,s2));
+ x18 = vec_perm(x10, x11, vcprm(2,1,s1,s2));
+ x19 = vec_perm(x10, x11, vcprm(0,3,s0,s3));
+ x20 = vec_perm(x12, x14, vcprm(0,1,s0, s1));
+ x21 = vec_perm(x12, x14, vcprm(2,3,s2,s3));
+ x22 = vec_perm(x13, x15, vcprm(0,1,s0,s1));
+ x23 = vec_perm(x13, x15, vcprm(3,2,s3,s2));
+
+ x24 = vec_add(x16, x17);
+ x25 = vec_sub(x16, x17);
+ x26 = vec_add(x18, x19);
+ x27 = vec_sub(x18, x19);
+ x28 = vec_add(x20, x21);
+ x29 = vec_sub(x20, x21);
+ x30 = vec_add(x22, x23);
+ x31 = vec_sub(x22, x23);
+
+ x32 = vec_add(x24, x26);
+ x33 = vec_sub(x24, x26);
+ x34 = vec_perm(x32, x33, vcprm(0,1,s0,s1));
+
+ x35 = vec_perm(x28, x29, vcprm(2,1,s1,s2));
+ x36 = vec_perm(x28, x29, vcprm(0,3,s0,s3));
+ x37 = vec_add(x35, x36);
+ x38 = vec_sub(x35, x36);
+ x39 = vec_perm(x37, x38, vcprm(0,1,s1,s0));
+
+ x40 = vec_perm(x27, x38, vcprm(3,2,s2,s3));
+ x41 = vec_perm(x26, x37, vcprm(2,3,s3,s2));
+ x42 = vec_add(x40, x41);
+ x43 = vec_sub(x40, x41);
+ x44 = vec_mul(x42, vc0);
+ x45 = vec_mul(x43, vc0);
+
+ x46 = vec_add(x34, x39); // z0.r z0.i z4.r z4.i
+ x47 = vec_sub(x34, x39); // z8.r z8.i z12.r z12.i
+
+ x48 = vec_perm(x30, x31, vcprm(2,1,s1,s2));
+ x49 = vec_perm(x30, x31, vcprm(0,3,s3,s0));
+ x50 = vec_add(x48, x49);
+ x51 = vec_sub(x48, x49);
+ x52 = vec_mul(x50, vc1);
+ x53 = vec_mul(x50, vc2);
+ x54 = vec_mul(x51, vc1);
+ x55 = vec_mul(x51, vc2);
+
+ x56 = vec_perm(x24, x25, vcprm(2,3,s2,s3));
+ x57 = vec_perm(x44, x45, vcprm(0,1,s1,s0));
+ x58 = vec_add(x56, x57);
+ x59 = vec_sub(x56, x57);
+
+ x60 = vec_perm(x54, x55, vcprm(1,0,3,2));
+ x61 = vec_perm(x54, x55, vcprm(s1,s0,s3,s2));
+ x62 = vec_add(x52, x61);
+ x63 = vec_sub(x52, x61);
+ x64 = vec_add(x60, x53);
+ x65 = vec_sub(x60, x53);
+ x66 = vec_perm(x62, x64, vcprm(0,1,s3,s2));
+ x67 = vec_perm(x63, x65, vcprm(s0,s1,3,2));
+
+ x68 = vec_add(x58, x66); // z1.r z1.i z3.r z3.i
+ x69 = vec_sub(x58, x66); // z9.r z9.i z11.r z11.i
+ x70 = vec_add(x59, x67); // z5.r z5.i z15.r z15.i
+ x71 = vec_sub(x59, x67); // z13.r z13.i z7.r z7.i
+
+ x72 = vec_perm(x25, x27, vcprm(s1,s0,s2,s3));
+ x73 = vec_add(x25, x72);
+ x74 = vec_sub(x25, x72);
+ x75 = vec_perm(x73, x74, vcprm(0,1,s0,s1));
+ x76 = vec_perm(x44, x45, vcprm(3,2,s2,s3));
+ x77 = vec_add(x75, x76); // z2.r z2.i z6.r z6.i
+ x78 = vec_sub(x75, x76); // z10.r z10.i z14.r z14.i
+
+ x79 = vec_perm(x46, x68, vcprm(0,1,s0,s1)); // z0.r z0.i z1.r z1.i
+ x80 = vec_perm(x77, x68, vcprm(0,1,s2,s3)); // z2.r z2.i z3.r z3.i
+ x81 = vec_perm(x46, x70, vcprm(2,3,s0,s1)); // z4.r z4.i z5.r z5.i
+ x82 = vec_perm(x71, x77, vcprm(s2,s3,2,3)); // z6.r z6.i z7.r z7.i
+ vec_st(x79, 0, &(out[0]));
+ vec_st(x80, byte_2complex, &(out[0]));
+ vec_st(x81, byte_4complex, &(out[0]));
+ vec_st(x82, byte_6complex, &(out[0]));
+ x83 = vec_perm(x47, x69, vcprm(0,1,s0,s1)); // z8.r z8.i z9.r z9.i
+ x84 = vec_perm(x78, x69, vcprm(0,1,s2,s3)); // z10.r z10.i z11.r z11.i
+ x85 = vec_perm(x47, x71, vcprm(2,3,s0,s1)); // z12.r z12.i z13.r z13.i
+ x86 = vec_perm(x70, x78, vcprm(s2,s3,2,3)); // z14.r z14.i z15.r z15.i
+ vec_st(x83, byte_8complex, &(out[0]));
+ vec_st(x84, byte_10complex, &(out[0]));
+ vec_st(x85, byte_12complex, &(out[0]));
+ vec_st(x86, byte_14complex, &(out[0]));
+}
+
+inline static void fft4_vsx(FFTComplex *z)
+{
+ vec_f a, b, c, d;
+ float* out= (float*)z;
+ a = vec_ld(0, &(out[0]));
+ b = vec_ld(byte_2complex, &(out[0]));
+
+ c = vec_perm(a, b, vcprm(0,1,s2,s1));
+ d = vec_perm(a, b, vcprm(2,3,s0,s3));
+ a = vec_add(c, d);
+ b = vec_sub(c, d);
+
+ c = vec_perm(a,b, vcprm(0,s0,1,s1));
+ d = vec_perm(a, b, vcprm(2,s3,3,s2));
+
+ a = vec_add(c, d);
+ b = vec_sub(c, d);
+
+ c = vec_perm(a, b, vcprm(0,1,s0,s1));
+ d = vec_perm(a, b, vcprm(2,3,s2,s3));
+
+ vec_st(c, 0, &(out[0]));
+ vec_st(d, byte_2complex, &(out[0]));
+ return;
+}
+
+inline static void fft8_vsx(FFTComplex *z)
+{
+ vec_f vz0, vz1, vz2, vz3;
+ vec_f vz4, vz5, vz6, vz7, vz8;
+
+ float* out= (float*)z;
+ vec_f vc0 = {0.0, 0.0, 0.0, 0.0};
+ vec_f vc1 = {-sqrthalf, sqrthalf, sqrthalf, -sqrthalf};
+ vec_f vc2 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
+
+ vz0 = vec_ld(0, &(out[0]));
+ vz1 = vec_ld(byte_2complex, &(out[0]));
+ vz2 = vec_ld(byte_4complex, &(out[0]));
+ vz3 = vec_ld(byte_6complex, &(out[0]));
+
+ vz6 = vec_perm(vz2, vz3, vcprm(0,s0,1,s1));
+ vz7 = vec_perm(vz2, vz3, vcprm(2,s2,3,s3));
+ vz4 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+
+ vz2 = vec_add(vz6, vz7);
+ vz3 = vec_sub(vz6, vz7);
+ vz8 = vec_perm(vz3, vz3, vcprm(2,3,0,1));
+
+ vz0 = vec_add(vz4, vz5);
+ vz1 = vec_sub(vz4, vz5);
+
+ vz3 = vec_madd(vz3, vc1, vc0);
+ vz3 = vec_madd(vz8, vc2, vz3);
+
+ vz4 = vec_perm(vz0, vz1, vcprm(0,s0,1,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,s3,3,s2));
+ vz6 = vec_perm(vz2, vz3, vcprm(1,2,s3,s0));
+ vz7 = vec_perm(vz2, vz3, vcprm(0,3,s2,s1));
+
+ vz0 = vec_add(vz4, vz5);
+ vz1 = vec_sub(vz4, vz5);
+ vz2 = vec_add(vz6, vz7);
+ vz3 = vec_sub(vz6, vz7);
+
+ vz4 = vec_perm(vz0, vz1, vcprm(0,1,s0,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,3,s2,s3));
+ vz6 = vec_perm(vz2, vz3, vcprm(0,2,s1,s3));
+ vz7 = vec_perm(vz2, vz3, vcprm(1,3,s0,s2));
+
+
+ vz2 = vec_sub(vz4, vz6);
+ vz3 = vec_sub(vz5, vz7);
+
+ vz0 = vec_add(vz4, vz6);
+ vz1 = vec_add(vz5, vz7);
+
+ vec_st(vz0, 0, &(out[0]));
+ vec_st(vz1, byte_2complex, &(out[0]));
+ vec_st(vz2, byte_4complex, &(out[0]));
+ vec_st(vz3, byte_6complex, &(out[0]));
+ return;
+}
+
+inline static void fft16_vsx(FFTComplex *z)
+{
+ float* out= (float*)z;
+ vec_f vc0 = {0.0, 0.0, 0.0, 0.0};
+ vec_f vc1 = {-sqrthalf, sqrthalf, sqrthalf, -sqrthalf};
+ vec_f vc2 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
+ vec_f vc3 = {1.0, 0.92387953, sqrthalf, 0.38268343};
+ vec_f vc4 = {0.0, 0.38268343, sqrthalf, 0.92387953};
+ vec_f vc5 = {-0.0, -0.38268343, -sqrthalf, -0.92387953};
+
+ vec_f vz0, vz1, vz2, vz3;
+ vec_f vz4, vz5, vz6, vz7;
+ vec_f vz8, vz9, vz10, vz11;
+ vec_f vz12, vz13;
+
+ vz0 = vec_ld(byte_8complex, &(out[0]));
+ vz1 = vec_ld(byte_10complex, &(out[0]));
+ vz2 = vec_ld(byte_12complex, &(out[0]));
+ vz3 = vec_ld(byte_14complex, &(out[0]));
+
+ vz4 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+ vz6 = vec_perm(vz2, vz3, vcprm(0,1,s2,s1));
+ vz7 = vec_perm(vz2, vz3, vcprm(2,3,s0,s3));
+
+ vz0 = vec_add(vz4, vz5);
+ vz1= vec_sub(vz4, vz5);
+ vz2 = vec_add(vz6, vz7);
+ vz3 = vec_sub(vz6, vz7);
+
+ vz4 = vec_perm(vz0, vz1, vcprm(0,s0,1,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,s3,3,s2));
+ vz6 = vec_perm(vz2, vz3, vcprm(0,s0,1,s1));
+ vz7 = vec_perm(vz2, vz3, vcprm(2,s3,3,s2));
+
+ vz0 = vec_add(vz4, vz5);
+ vz1 = vec_sub(vz4, vz5);
+ vz2 = vec_add(vz6, vz7);
+ vz3 = vec_sub(vz6, vz7);
+
+ vz4 = vec_perm(vz0, vz1, vcprm(0,1,s0,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,3,s2,s3));
+
+ vz6 = vec_perm(vz2, vz3, vcprm(0,1,s0,s1));
+ vz7 = vec_perm(vz2, vz3, vcprm(2,3,s2,s3));
+
+ vz0 = vec_ld(0, &(out[0]));
+ vz1 = vec_ld(byte_2complex, &(out[0]));
+ vz2 = vec_ld(byte_4complex, &(out[0]));
+ vz3 = vec_ld(byte_6complex, &(out[0]));
+ vz10 = vec_perm(vz2, vz3, vcprm(0,s0,1,s1));
+ vz11 = vec_perm(vz2, vz3, vcprm(2,s2,3,s3));
+ vz8 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+ vz9 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+
+ vz2 = vec_add(vz10, vz11);
+ vz3 = vec_sub(vz10, vz11);
+ vz12 = vec_perm(vz3, vz3, vcprm(2,3,0,1));
+ vz0 = vec_add(vz8, vz9);
+ vz1 = vec_sub(vz8, vz9);
+
+ vz3 = vec_madd(vz3, vc1, vc0);
+ vz3 = vec_madd(vz12, vc2, vz3);
+ vz8 = vec_perm(vz0, vz1, vcprm(0,s0,1,s1));
+ vz9 = vec_perm(vz0, vz1, vcprm(2,s3,3,s2));
+ vz10 = vec_perm(vz2, vz3, vcprm(1,2,s3,s0));
+ vz11 = vec_perm(vz2, vz3, vcprm(0,3,s2,s1));
+
+ vz0 = vec_add(vz8, vz9);
+ vz1 = vec_sub(vz8, vz9);
+ vz2 = vec_add(vz10, vz11);
+ vz3 = vec_sub(vz10, vz11);
+
+ vz8 = vec_perm(vz0, vz1, vcprm(0,1,s0,s1));
+ vz9 = vec_perm(vz0, vz1, vcprm(2,3,s2,s3));
+ vz10 = vec_perm(vz2, vz3, vcprm(0,2,s1,s3));
+ vz11 = vec_perm(vz2, vz3, vcprm(1,3,s0,s2));
+
+ vz2 = vec_sub(vz8, vz10);
+ vz3 = vec_sub(vz9, vz11);
+ vz0 = vec_add(vz8, vz10);
+ vz1 = vec_add(vz9, vz11);
+
+ vz8 = vec_madd(vz4, vc3, vc0);
+ vz9 = vec_madd(vz5, vc3, vc0);
+ vz10 = vec_madd(vz6, vc3, vc0);
+ vz11 = vec_madd(vz7, vc3, vc0);
+
+ vz8 = vec_madd(vz5, vc4, vz8);
+ vz9 = vec_madd(vz4, vc5, vz9);
+ vz10 = vec_madd(vz7, vc5, vz10);
+ vz11 = vec_madd(vz6, vc4, vz11);
+
+ vz12 = vec_sub(vz10, vz8);
+ vz10 = vec_add(vz10, vz8);
+
+ vz13 = vec_sub(vz9, vz11);
+ vz11 = vec_add(vz9, vz11);
+
+ vz4 = vec_sub(vz0, vz10);
+ vz0 = vec_add(vz0, vz10);
+
+ vz7= vec_sub(vz3, vz12);
+ vz3= vec_add(vz3, vz12);
+
+ vz5 = vec_sub(vz1, vz11);
+ vz1 = vec_add(vz1, vz11);
+
+ vz6 = vec_sub(vz2, vz13);
+ vz2 = vec_add(vz2, vz13);
+
+ vec_st(vz0, 0, &(out[0]));
+ vec_st(vz1, byte_2complex, &(out[0]));
+ vec_st(vz2, byte_4complex, &(out[0]));
+ vec_st(vz3, byte_6complex, &(out[0]));
+ vec_st(vz4, byte_8complex, &(out[0]));
+ vec_st(vz5, byte_10complex, &(out[0]));
+ vec_st(vz6, byte_12complex, &(out[0]));
+ vec_st(vz7, byte_14complex, &(out[0]));
+ return;
+
+}
+inline static void pass_vsx(FFTComplex * z, const FFTSample * wre, unsigned int n)
+{
+ int o1 = n<<1;
+ int o2 = n<<2;
+ int o3 = o1+o2;
+ int i1, i2, i3;
+ FFTSample* out = (FFTSample*)z;
+ const FFTSample *wim = wre+o1;
+ vec_f v0, v1, v2, v3;
+ vec_f v4, v5, v6, v7;
+ vec_f v8, v9, v10, v11;
+ vec_f v12, v13;
+
+ n = n-2;
+ i1 = o1*sizeof(FFTComplex);
+ i2 = o2*sizeof(FFTComplex);
+ i3 = o3*sizeof(FFTComplex);
+
+ v8 = vec_ld(0, &(wre[0]));
+ v10 = vec_ld(0, &(wim[0]));
+ v9 = vec_ld(0, &(wim[-4]));
+ v9 = vec_perm(v9, v10, vcprm(s0,3,2,1));
+
+ v4 = vec_ld(i2, &(out[0]));
+ v5 = vec_ld(i2+16, &(out[0]));
+ v6 = vec_ld(i3, &(out[0]));
+ v7 = vec_ld(i3+16, &(out[0]));
+ v10 = vec_mul(v4, v8); // r2*wre
+ v11 = vec_mul(v5, v8); // i2*wre
+ v12 = vec_mul(v6, v8); // r3*wre
+ v13 = vec_mul(v7, v8); // i3*wre
+
+ v0 = vec_ld(0, &(out[0])); // r0
+ v3 = vec_ld(i1+16, &(out[0])); // i1
+ v10 = vec_madd(v5, v9, v10); // r2*wim
+ v11 = vec_nmsub(v4, v9, v11); // i2*wim
+ v12 = vec_nmsub(v7, v9, v12); // r3*wim
+ v13 = vec_madd(v6, v9, v13); // i3*wim
+
+ v1 = vec_ld(16, &(out[0])); // i0
+ v2 = vec_ld(i1, &(out[0])); // r1
+ v8 = vec_sub(v12, v10);
+ v12 = vec_add(v12, v10);
+ v9 = vec_sub(v11, v13);
+ v13 = vec_add(v11, v13);
+ v4 = vec_sub(v0, v12);
+ v0 = vec_add(v0, v12);
+ v7 = vec_sub(v3, v8);
+ v3 = vec_add(v3, v8);
+
+ vec_st(v0, 0, &(out[0])); // r0
+ vec_st(v3, i1+16, &(out[0])); // i1
+ vec_st(v4, i2, &(out[0])); // r2
+ vec_st(v7, i3+16, &(out[0]));// i3
+
+ v5 = vec_sub(v1, v13);
+ v1 = vec_add(v1, v13);
+ v6 = vec_sub(v2, v9);
+ v2 = vec_add(v2, v9);
+
+ vec_st(v1, 16, &(out[0])); // i0
+ vec_st(v2, i1, &(out[0])); // r1
+ vec_st(v5, i2+16, &(out[0])); // i2
+ vec_st(v6, i3, &(out[0])); // r3
+
+ do {
+ out += 8;
+ wre += 4;
+ wim -= 4;
+
+ v8 = vec_ld(0, &(wre[0]));
+ v10 = vec_ld(0, &(wim[0]));
+ v9 = vec_ld(0, &(wim[-4]));
+ v9 = vec_perm(v9, v10, vcprm(s0,3,2,1));
+
+ v4 = vec_ld(i2, &(out[0])); // r2
+ v5 = vec_ld(i2+16, &(out[0])); // i2
+ v6 = vec_ld(i3, &(out[0])); // r3
+ v7 = vec_ld(i3+16, &(out[0]));// i3
+ v10 = vec_mul(v4, v8); // r2*wre
+ v11 = vec_mul(v5, v8); // i2*wre
+ v12 = vec_mul(v6, v8); // r3*wre
+ v13 = vec_mul(v7, v8); // i3*wre
+
+ v0 = vec_ld(0, &(out[0])); // r0
+ v3 = vec_ld(i1+16, &(out[0])); // i1
+ v10 = vec_madd(v5, v9, v10); // r2*wim
+ v11 = vec_nmsub(v4, v9, v11); // i2*wim
+ v12 = vec_nmsub(v7, v9, v12); // r3*wim
+ v13 = vec_madd(v6, v9, v13); // i3*wim
+
+ v1 = vec_ld(16, &(out[0])); // i0
+ v2 = vec_ld(i1, &(out[0])); // r1
+ v8 = vec_sub(v12, v10);
+ v12 = vec_add(v12, v10);
+ v9 = vec_sub(v11, v13);
+ v13 = vec_add(v11, v13);
+ v4 = vec_sub(v0, v12);
+ v0 = vec_add(v0, v12);
+ v7 = vec_sub(v3, v8);
+ v3 = vec_add(v3, v8);
+
+ vec_st(v0, 0, &(out[0])); // r0
+ vec_st(v3, i1+16, &(out[0])); // i1
+ vec_st(v4, i2, &(out[0])); // r2
+ vec_st(v7, i3+16, &(out[0])); // i3
+
+ v5 = vec_sub(v1, v13);
+ v1 = vec_add(v1, v13);
+ v6 = vec_sub(v2, v9);
+ v2 = vec_add(v2, v9);
+
+ vec_st(v1, 16, &(out[0])); // i0
+ vec_st(v2, i1, &(out[0])); // r1
+ vec_st(v5, i2+16, &(out[0])); // i2
+ vec_st(v6, i3, &(out[0])); // r3
+ } while (n-=2);
+}
+
+#endif
+
+#endif /* AVCODEC_PPC_FFT_VSX_H */
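
Each fftN_vsx() above follows the split-radix recursion (one half-size plus two quarter-size transforms), and pass_vsx()/pass_vsx_interleave() perform the combining butterflies four floats per vector operation. The per-butterfly arithmetic is the standard combine step: z[o2] is rotated by the conjugate of the twiddle factor w = wre + i*wim, z[o3] by w itself, and the results are added into and subtracted from z[0] and z[o1]. A scalar rendering of one butterfly for orientation, written to mirror the TRANSFORM step of libavcodec/fft_template.c as best understood here (a sketch, not code from the patch):

    /* assumes FFTComplex/FFTSample from libavcodec/fft.h */
    static void transform_scalar(FFTComplex *z0, FFTComplex *z1,
                                 FFTComplex *z2, FFTComplex *z3,
                                 FFTSample wre, FFTSample wim)
    {
        FFTSample t1 = z2->re * wre + z2->im * wim;   /* z2 * conj(w) */
        FFTSample t2 = z2->im * wre - z2->re * wim;
        FFTSample t5 = z3->re * wre - z3->im * wim;   /* z3 * w */
        FFTSample t6 = z3->re * wim + z3->im * wre;

        FFTSample t3 = t5 - t1, t4 = t2 - t6;         /* difference terms */
        t5 += t1;                                     /* sum terms */
        t6 += t2;

        z2->re = z0->re - t5;  z0->re += t5;
        z3->im = z1->im - t3;  z1->im += t3;
        z3->re = z1->re - t4;  z1->re += t4;
        z2->im = z0->im - t6;  z0->im += t6;
    }

The vcprm() permutes in the vector version serve mainly to gather real and imaginary lanes into position so this multiply-add sequence can run on four butterflies at once.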
diff --git a/libavcodec/ppc/fmtconvert_altivec.c b/libavcodec/ppc/fmtconvert_altivec.c
index 14aaf6bc9a..10a7169d1b 100644
--- a/libavcodec/ppc/fmtconvert_altivec.c
+++ b/libavcodec/ppc/fmtconvert_altivec.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/h264chroma_init.c b/libavcodec/ppc/h264chroma_init.c
index 6d656d46ba..876efeca09 100644
--- a/libavcodec/ppc/h264chroma_init.c
+++ b/libavcodec/ppc/h264chroma_init.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/h264chroma_template.c b/libavcodec/ppc/h264chroma_template.c
index 293fef5c90..cb1e0957e1 100644
--- a/libavcodec/ppc/h264chroma_template.c
+++ b/libavcodec/ppc/h264chroma_template.c
@@ -1,30 +1,32 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/mem.h"
+#include "libavutil/ppc/types_altivec.h"
+#include "libavutil/ppc/util_altivec.h"
/* this code assume that stride % 16 == 0 */
#define CHROMA_MC8_ALTIVEC_CORE(BIAS1, BIAS2) \
- vsrc2ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc2uc);\
- vsrc3ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc3uc);\
+ vsrc2ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc2uc);\
+ vsrc3ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc3uc);\
\
psum = vec_mladd(vA, vsrc0ssH, BIAS1);\
psum = vec_mladd(vB, vsrc1ssH, psum);\
@@ -49,8 +51,8 @@
#define CHROMA_MC8_ALTIVEC_CORE_SIMPLE \
\
- vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);\
- vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);\
+ vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc0uc);\
+ vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc1uc);\
\
psum = vec_mladd(vA, vsrc0ssH, v32ss);\
psum = vec_mladd(vE, vsrc1ssH, psum);\
@@ -70,6 +72,43 @@
#define noop(a) a
#define add28(a) vec_add(v28ss, a)
+#if HAVE_BIGENDIAN
+#define GET_VSRC1(vs0, off, b, perm0, s){ \
+ vec_u8 vsrcCuc, vsrcDuc; \
+ vsrcCuc = vec_ld(off, s); \
+ if (loadSecond){ \
+ vsrcDuc = vec_ld(off + b, s); \
+ } else \
+ vsrcDuc = vsrcCuc; \
+ \
+ vs0 = vec_perm(vsrcCuc, vsrcDuc, perm0); \
+}
+#define GET_VSRC(vs0, vs1, off, b, perm0, perm1, s){ \
+ vec_u8 vsrcCuc, vsrcDuc; \
+ vsrcCuc = vec_ld(off, s); \
+ if (loadSecond){ \
+ vsrcDuc = vec_ld(off + b, s); \
+ } else \
+ vsrcDuc = vsrcCuc; \
+ \
+ vs0 = vec_perm(vsrcCuc, vsrcDuc, perm0); \
+ if (reallyBadAlign){ \
+ vs1 = vsrcDuc; \
+ } else \
+ vs1 = vec_perm(vsrcCuc, vsrcDuc, perm1); \
+ }
+
+#else
+
+#define GET_VSRC1(vs0, off, b, perm0, s){ \
+ vs0 = vec_vsx_ld(off, s); \
+ }
+#define GET_VSRC(vs0, vs1, off, b, perm0, perm1, s){ \
+ vs0 = vec_vsx_ld(off, s); \
+ vs1 = vec_vsx_ld(off + 1, s); \
+ }
+#endif /* HAVE_BIGENDIAN */
+
#ifdef PREFIX_h264_chroma_mc8_altivec
static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
int stride, int h, int x, int y) {
@@ -80,23 +119,27 @@ static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
(( x) * ( y))};
register int i;
vec_u8 fperm;
- const vec_s32 vABCD = vec_ld(0, ABCD);
- const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
- const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
- const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
- const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
LOAD_ZERO;
+ const vec_s32 vABCD = vec_ld(0, ABCD);
+ const vec_s16 vA = VEC_SPLAT16(vABCD, 1);
+ const vec_s16 vB = VEC_SPLAT16(vABCD, 3);
+ const vec_s16 vC = VEC_SPLAT16(vABCD, 5);
+ const vec_s16 vD = VEC_SPLAT16(vABCD, 7);
const vec_s16 v32ss = vec_sl(vec_splat_s16(1),vec_splat_u16(5));
const vec_u16 v6us = vec_splat_u16(6);
- register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
- register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
- vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
+ vec_u8 vsrcperm0, vsrcperm1;
vec_u8 vsrc0uc, vsrc1uc;
vec_s16 vsrc0ssH, vsrc1ssH;
- vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
+ vec_u8 vsrc2uc, vsrc3uc;
vec_s16 vsrc2ssH, vsrc3ssH, psum;
vec_u8 vdst, ppsum, vfdst, fsum;
+#if HAVE_BIGENDIAN
+ register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
+ register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
+ vsrcperm0 = vec_lvsl(0, src);
+ vsrcperm1 = vec_lvsl(1, src);
+#endif
if (((unsigned long)dst) % 16 == 0) {
fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
@@ -110,89 +153,28 @@ static void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
0x1C, 0x1D, 0x1E, 0x1F};
}
- vsrcAuc = vec_ld(0, src);
-
- if (loadSecond)
- vsrcBuc = vec_ld(16, src);
- vsrcperm0 = vec_lvsl(0, src);
- vsrcperm1 = vec_lvsl(1, src);
-
- vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
- if (reallyBadAlign)
- vsrc1uc = vsrcBuc;
- else
- vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
+ GET_VSRC(vsrc0uc, vsrc1uc, 0, 16, vsrcperm0, vsrcperm1, src);
- vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc0uc);
- vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v,(vec_u8)vsrc1uc);
+ vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc0uc);
+ vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v,(vec_u8)vsrc1uc);
if (ABCD[3]) {
- if (!loadSecond) {// -> !reallyBadAlign
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(stride + 0, src);
- vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
- vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
-
- CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
- }
- } else {
- vec_u8 vsrcDuc;
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(stride + 0, src);
- vsrcDuc = vec_ld(stride + 16, src);
- vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
- if (reallyBadAlign)
- vsrc3uc = vsrcDuc;
- else
- vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
-
- CHROMA_MC8_ALTIVEC_CORE(v32ss, noop)
- }
+ for (i = 0 ; i < h ; i++) {
+ GET_VSRC(vsrc2uc, vsrc3uc, stride, 16, vsrcperm0, vsrcperm1, src);
+ CHROMA_MC8_ALTIVEC_CORE(v32ss, noop);
}
} else {
const vec_s16 vE = vec_add(vB, vC);
if (ABCD[2]) { // x == 0 B == 0
- if (!loadSecond) {// -> !reallyBadAlign
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(stride + 0, src);
- vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
- CHROMA_MC8_ALTIVEC_CORE_SIMPLE
-
- vsrc0uc = vsrc1uc;
- }
- } else {
- vec_u8 vsrcDuc;
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(stride + 0, src);
- vsrcDuc = vec_ld(stride + 15, src);
- vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
- CHROMA_MC8_ALTIVEC_CORE_SIMPLE
-
- vsrc0uc = vsrc1uc;
- }
+ for (i = 0 ; i < h ; i++) {
+ GET_VSRC1(vsrc1uc, stride, 15, vsrcperm0, src);
+ CHROMA_MC8_ALTIVEC_CORE_SIMPLE;
+ vsrc0uc = vsrc1uc;
}
} else { // y == 0 C == 0
- if (!loadSecond) {// -> !reallyBadAlign
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(0, src);
- vsrc0uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
- vsrc1uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
-
- CHROMA_MC8_ALTIVEC_CORE_SIMPLE
- }
- } else {
- vec_u8 vsrcDuc;
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(0, src);
- vsrcDuc = vec_ld(15, src);
- vsrc0uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
- if (reallyBadAlign)
- vsrc1uc = vsrcDuc;
- else
- vsrc1uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
-
- CHROMA_MC8_ALTIVEC_CORE_SIMPLE
- }
+ for (i = 0 ; i < h ; i++) {
+ GET_VSRC(vsrc0uc, vsrc1uc, 0, 15, vsrcperm0, vsrcperm1, src);
+ CHROMA_MC8_ALTIVEC_CORE_SIMPLE;
}
}
}
@@ -209,23 +191,27 @@ static void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, i
(( x) * ( y))};
register int i;
vec_u8 fperm;
- const vec_s32 vABCD = vec_ld(0, ABCD);
- const vec_s16 vA = vec_splat((vec_s16)vABCD, 1);
- const vec_s16 vB = vec_splat((vec_s16)vABCD, 3);
- const vec_s16 vC = vec_splat((vec_s16)vABCD, 5);
- const vec_s16 vD = vec_splat((vec_s16)vABCD, 7);
LOAD_ZERO;
+ const vec_s32 vABCD = vec_ld(0, ABCD);
+ const vec_s16 vA = VEC_SPLAT16(vABCD, 1);
+ const vec_s16 vB = VEC_SPLAT16(vABCD, 3);
+ const vec_s16 vC = VEC_SPLAT16(vABCD, 5);
+ const vec_s16 vD = VEC_SPLAT16(vABCD, 7);
const vec_s16 v28ss = vec_sub(vec_sl(vec_splat_s16(1),vec_splat_u16(5)),vec_splat_s16(4));
const vec_u16 v6us = vec_splat_u16(6);
- register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
- register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
- vec_u8 vsrcAuc, av_uninit(vsrcBuc), vsrcperm0, vsrcperm1;
+ vec_u8 vsrcperm0, vsrcperm1;
vec_u8 vsrc0uc, vsrc1uc;
vec_s16 vsrc0ssH, vsrc1ssH;
- vec_u8 vsrcCuc, vsrc2uc, vsrc3uc;
+ vec_u8 vsrc2uc, vsrc3uc;
vec_s16 vsrc2ssH, vsrc3ssH, psum;
vec_u8 vdst, ppsum, vfdst, fsum;
+#if HAVE_BIGENDIAN
+ register int loadSecond = (((unsigned long)src) % 16) <= 7 ? 0 : 1;
+ register int reallyBadAlign = (((unsigned long)src) % 16) == 15 ? 1 : 0;
+ vsrcperm0 = vec_lvsl(0, src);
+ vsrcperm1 = vec_lvsl(1, src);
+#endif
if (((unsigned long)dst) % 16 == 0) {
fperm = (vec_u8){0x10, 0x11, 0x12, 0x13,
@@ -239,47 +225,14 @@ static void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, i
0x1C, 0x1D, 0x1E, 0x1F};
}
- vsrcAuc = vec_ld(0, src);
-
- if (loadSecond)
- vsrcBuc = vec_ld(16, src);
- vsrcperm0 = vec_lvsl(0, src);
- vsrcperm1 = vec_lvsl(1, src);
-
- vsrc0uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm0);
- if (reallyBadAlign)
- vsrc1uc = vsrcBuc;
- else
- vsrc1uc = vec_perm(vsrcAuc, vsrcBuc, vsrcperm1);
-
- vsrc0ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc0uc);
- vsrc1ssH = (vec_s16)vec_mergeh(zero_u8v, (vec_u8)vsrc1uc);
-
- if (!loadSecond) {// -> !reallyBadAlign
- for (i = 0 ; i < h ; i++) {
-
-
- vsrcCuc = vec_ld(stride + 0, src);
+ GET_VSRC(vsrc0uc, vsrc1uc, 0, 16, vsrcperm0, vsrcperm1, src);
- vsrc2uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm0);
- vsrc3uc = vec_perm(vsrcCuc, vsrcCuc, vsrcperm1);
+ vsrc0ssH = (vec_s16)VEC_MERGEH(zero_u8v, (vec_u8)vsrc0uc);
+ vsrc1ssH = (vec_s16)VEC_MERGEH(zero_u8v, (vec_u8)vsrc1uc);
- CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
- }
- } else {
- vec_u8 vsrcDuc;
- for (i = 0 ; i < h ; i++) {
- vsrcCuc = vec_ld(stride + 0, src);
- vsrcDuc = vec_ld(stride + 16, src);
-
- vsrc2uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm0);
- if (reallyBadAlign)
- vsrc3uc = vsrcDuc;
- else
- vsrc3uc = vec_perm(vsrcCuc, vsrcDuc, vsrcperm1);
-
- CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28)
- }
+ for (i = 0 ; i < h ; i++) {
+ GET_VSRC(vsrc2uc, vsrc3uc, stride, 16, vsrcperm0, vsrcperm1, src);
+ CHROMA_MC8_ALTIVEC_CORE(vec_splat_s16(0), add28);
}
}
#endif
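
A note on the pattern consolidated above: big-endian AltiVec vec_ld() can
only load from 16-byte-aligned addresses, so an unaligned load is
synthesized from two aligned loads spliced by a vec_lvsl()-derived
permute; the old loadSecond/reallyBadAlign branches handled the corner
case where the source sits so close to a block boundary that the permuted
result is simply the second block. On little-endian targets the patch
relies on VSX instead, where vec_vsx_ld() accepts any alignment. A
minimal sketch of the load the GET_VSRC macros wrap (the helper name is
illustrative, not part of the patch; HAVE_BIGENDIAN comes from config.h):

    #include <altivec.h>
    #include <stdint.h>

    typedef vector unsigned char vec_u8;

    static vec_u8 load_unaligned_u8(const uint8_t *p)
    {
    #if HAVE_BIGENDIAN
        /* Two aligned loads bracketing p, spliced by a permute vector
         * derived from the low four address bits of p. */
        vec_u8 hi   = vec_ld(0, p);
        vec_u8 lo   = vec_ld(15, p);
        vec_u8 perm = vec_lvsl(0, p);
        return vec_perm(hi, lo, perm);
    #else
        return vec_vsx_ld(0, p);   /* VSX loads take any alignment */
    #endif
    }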
diff --git a/libavcodec/ppc/h264dsp.c b/libavcodec/ppc/h264dsp.c
index 31dc141d29..da118a49b6 100644
--- a/libavcodec/ppc/h264dsp.c
+++ b/libavcodec/ppc/h264dsp.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -62,10 +62,17 @@
b2 = vec_mergeh( a1, a3 ); \
b3 = vec_mergel( a1, a3 )
+#if HAVE_BIGENDIAN
+#define vdst_load(d) \
+ vdst_orig = vec_ld(0, dst); \
+ vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask);
+#else
+#define vdst_load(d) vdst = vec_vsx_ld(0, dst)
+#endif
+
#define VEC_LOAD_U8_ADD_S16_STORE_U8(va) \
- vdst_orig = vec_ld(0, dst); \
- vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask); \
- vdst_ss = (vec_s16) vec_mergeh(zero_u8v, vdst); \
+ vdst_load(); \
+ vdst_ss = (vec_s16) VEC_MERGEH(zero_u8v, vdst); \
va = vec_add(va, vdst_ss); \
va_u8 = vec_packsu(va, zero_s16v); \
va_u32 = vec_splat((vec_u32)va_u8, 0); \
@@ -165,26 +172,43 @@ static void h264_idct_add_altivec(uint8_t *dst, int16_t *block, int stride)
d7 = vec_sub(b0v, b7v); \
}
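
The VEC_MERGEH()/VEC_MERGEL() wrappers used throughout these hunks come
from util_altivec.h, which is outside this excerpt. They exist because
vec_mergeh()'s notion of the "high" half follows register element order,
which is reversed on little-endian, so zero-extending bytes to shorts
needs the opposite merge there. A plausible shape of the wrappers, shown
as an assumption since the header itself is not part of this diff:

    #if HAVE_BIGENDIAN
    #define VEC_MERGEH(a, b) vec_mergeh(a, b)
    #define VEC_MERGEL(a, b) vec_mergel(a, b)
    #else
    /* Element order is reversed on LE: high and low swap roles,
     * and the operands swap sides to keep the result lane order. */
    #define VEC_MERGEH(a, b) vec_mergel(b, a)
    #define VEC_MERGEL(a, b) vec_mergeh(b, a)
    #endif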
+#if HAVE_BIGENDIAN
+#define GET_2PERM(ldv, stv, d) \
+ ldv = vec_lvsl(0, d); \
+ stv = vec_lvsr(8, d);
+#define dstv_load(d) \
+ vec_u8 hv = vec_ld( 0, d ); \
+ vec_u8 lv = vec_ld( 7, d); \
+ vec_u8 dstv = vec_perm( hv, lv, (vec_u8)perm_ldv );
+#define dest_unaligned_store(d) \
+ vec_u8 edgehv; \
+ vec_u8 bodyv = vec_perm( idstsum8, idstsum8, perm_stv ); \
+ vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv ); \
+ lv = vec_sel( lv, bodyv, edgelv ); \
+ vec_st( lv, 7, d ); \
+ hv = vec_ld( 0, d ); \
+ edgehv = vec_perm( zero_u8v, sel, perm_stv ); \
+ hv = vec_sel( hv, bodyv, edgehv ); \
+ vec_st( hv, 0, d );
+#else
+
+#define GET_2PERM(ldv, stv, d) {}
+#define dstv_load(d) vec_u8 dstv = vec_vsx_ld(0, d)
+#define dest_unaligned_store(d)\
+ vec_u8 dst8 = vec_perm((vec_u8)idstsum8, dstv, vcprm(2,3,s2,s3));\
+ vec_vsx_st(dst8, 0, d)
+#endif /* HAVE_BIGENDIAN */
+
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
/* unaligned load */ \
- vec_u8 hv = vec_ld( 0, dest ); \
- vec_u8 lv = vec_ld( 7, dest ); \
- vec_u8 dstv = vec_perm( hv, lv, (vec_u8)perm_ldv ); \
+ dstv_load(dest); \
vec_s16 idct_sh6 = vec_sra(idctv, sixv); \
- vec_u16 dst16 = (vec_u16)vec_mergeh(zero_u8v, dstv); \
+ vec_u16 dst16 = (vec_u16)VEC_MERGEH(zero_u8v, dstv); \
vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16); \
vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum); \
- vec_u8 edgehv; \
/* unaligned store */ \
- vec_u8 bodyv = vec_perm( idstsum8, idstsum8, perm_stv );\
- vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv ); \
- lv = vec_sel( lv, bodyv, edgelv ); \
- vec_st( lv, 7, dest ); \
- hv = vec_ld( 0, dest ); \
- edgehv = vec_perm( zero_u8v, sel, perm_stv ); \
- hv = vec_sel( hv, bodyv, edgehv ); \
- vec_st( hv, 0, dest ); \
- }
+ dest_unaligned_store(dest);\
+}
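
The store side mirrors the load trick: big-endian AltiVec has no
unaligned store, so dest_unaligned_store() reads the aligned blocks
covering the destination, splices the payload in with vec_sel(), and
writes them back, reloading the first block after the second is stored
so the fully aligned case stays correct. On little-endian, vec_vsx_st()
does the whole job. A self-contained sketch of the 16-byte version of
this pattern (the helper name is illustrative):

    #include <altivec.h>
    #include <stdint.h>

    typedef vector unsigned char vec_u8;

    static void store_unaligned_u8(vec_u8 v, uint8_t *p)
    {
    #if HAVE_BIGENDIAN
        vec_u8 align = vec_lvsr(0, p);
        /* mask: 0x00 for the misalignment bytes, 0xFF afterwards */
        vec_u8 mask  = vec_perm(vec_splat_u8(0),
                                (vec_u8) vec_splat_s8(-1), align);
        vec_u8 body  = vec_perm(v, v, align);  /* rotate payload into place */
        vec_u8 lv    = vec_ld(15, p);          /* block holding p[15] */
        lv = vec_sel(body, lv, mask);          /* tail of v into its head */
        vec_st(lv, 15, p);
        vec_u8 hv = vec_ld(0, p);              /* reload: may be the same block */
        hv = vec_sel(hv, body, mask);          /* head of v into its tail */
        vec_st(hv, 0, p);
    #else
        vec_vsx_st(v, 0, p);                   /* any alignment is fine */
    #endif
    }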
static void h264_idct8_add_altivec(uint8_t *dst, int16_t *dct, int stride)
{
@@ -192,8 +216,8 @@ static void h264_idct8_add_altivec(uint8_t *dst, int16_t *dct, int stride)
vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;
- vec_u8 perm_ldv = vec_lvsl(0, dst);
- vec_u8 perm_stv = vec_lvsr(8, dst);
+ vec_u8 perm_ldv, perm_stv;
+ GET_2PERM(perm_ldv, perm_stv, dst);
const vec_u16 onev = vec_splat_u16(1);
const vec_u16 twov = vec_splat_u16(2);
@@ -236,20 +260,25 @@ static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, int16_t *bl
{
vec_s16 dc16;
vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
+ vec_s32 v_dc32;
LOAD_ZERO;
DECLARE_ALIGNED(16, int, dc);
int i;
dc = (block[0] + 32) >> 6;
block[0] = 0;
- dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1);
+ v_dc32 = vec_lde(0, &dc);
+ dc16 = VEC_SPLAT16((vec_s16)v_dc32, 1);
if (size == 4)
- dc16 = vec_sld(dc16, zero_s16v, 8);
+ dc16 = VEC_SLD16(dc16, zero_s16v, 8);
dcplus = vec_packsu(dc16, zero_s16v);
dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);
aligner = vec_lvsr(0, dst);
+#if !HAVE_BIGENDIAN
+ aligner = vec_perm(aligner, zero_u8v, vcswapc());
+#endif
dcplus = vec_perm(dcplus, dcplus, aligner);
dcminus = vec_perm(dcminus, dcminus, aligner);
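
The extra permute on the aligner is needed because vec_lvsr() produces
byte indices in big-endian numbering, so on little-endian the vector is
first rearranged through vcswapc() (a util_altivec.h permute constant
not shown in this diff) before it can steer dcplus/dcminus. For the DC
splat itself, an endian-neutral alternative on VSX-era compilers is
vec_splats(); the following is only an illustration of that option, not
the patch's code path:

    #include <altivec.h>
    #include <stdint.h>

    /* Hypothetical helper: splat the rounded DC straight from the
     * scalar, sidestepping vec_lde() + vec_splat() lane numbering. */
    static vector signed short splat_dc(int16_t *block)
    {
        int dc = (block[0] + 32) >> 6;
        block[0] = 0;
        return vec_splats((signed short) dc);
    }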
@@ -633,6 +662,9 @@ void weight_h264_W_altivec(uint8_t *block, int stride, int height,
temp[2] = offset;
vtemp = (vec_s16)vec_ld(0, temp);
+#if !HAVE_BIGENDIAN
+ vtemp = (vec_s16)vec_perm(vtemp, vtemp, vcswapi2s(0,1,2,3));
+#endif
vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
vweight = vec_splat(vtemp, 3);
voffset = vec_splat(vtemp, 5);
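
temp[] is filled with 32-bit scalars and pulled apart with vec_splat()
lane indices written for big-endian element order; on little-endian the
loaded vector is permuted through vcswapi2s() first so those indices
still select log2_denom, weight and offset. An endian-neutral sketch of
building the same constants (an illustrative alternative, not what the
patch does):

    #include <altivec.h>

    /* Hypothetical helper: one vec_splats() per scalar replaces the
     * pack-into-memory / vec_ld() / swap-on-LE / vec_splat() sequence. */
    static void make_weight_vectors(int log2_denom, int weight, int offset,
                                    vector unsigned short *vlog2_denom,
                                    vector signed short   *vweight,
                                    vector signed short   *voffset)
    {
        *vlog2_denom = vec_splats((unsigned short) log2_denom);
        *vweight     = vec_splats((signed short) weight);
        *voffset     = vec_splats((signed short) offset);
    }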
@@ -641,8 +673,8 @@ void weight_h264_W_altivec(uint8_t *block, int stride, int height,
for (y = 0; y < height; y++) {
vblock = vec_ld(0, block);
- v0 = (vec_s16)vec_mergeh(zero_u8v, vblock);
- v1 = (vec_s16)vec_mergel(zero_u8v, vblock);
+ v0 = (vec_s16)VEC_MERGEH(zero_u8v, vblock);
+ v1 = (vec_s16)VEC_MERGEL(zero_u8v, vblock);
if (w == 16 || aligned) {
v0 = vec_mladd(v0, vweight, zero_s16v);
@@ -679,6 +711,9 @@ void biweight_h264_W_altivec(uint8_t *dst, uint8_t *src, int stride, int height,
temp[3] = offset;
vtemp = (vec_s16)vec_ld(0, temp);
+#if !HAVE_BIGENDIAN
+ vtemp = (vec_s16)vec_perm(vtemp, vtemp, vcswapi2s(0,1,2,3));
+#endif
vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
vweights = vec_splat(vtemp, 3);
vweightd = vec_splat(vtemp, 5);
@@ -690,10 +725,10 @@ void biweight_h264_W_altivec(uint8_t *dst, uint8_t *src, int stride, int height,
vdst = vec_ld(0, dst);
vsrc = vec_ld(0, src);
- v0 = (vec_s16)vec_mergeh(zero_u8v, vdst);
- v1 = (vec_s16)vec_mergel(zero_u8v, vdst);
- v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc);
- v3 = (vec_s16)vec_mergel(zero_u8v, vsrc);
+ v0 = (vec_s16)VEC_MERGEH(zero_u8v, vdst);
+ v1 = (vec_s16)VEC_MERGEL(zero_u8v, vdst);
+ v2 = (vec_s16)VEC_MERGEH(zero_u8v, vsrc);
+ v3 = (vec_s16)VEC_MERGEL(zero_u8v, vsrc);
if (w == 8) {
if (src_aligned)
diff --git a/libavcodec/ppc/h264qpel.c b/libavcodec/ppc/h264qpel.c
index f840277c88..575f504d32 100644
--- a/libavcodec/ppc/h264qpel.c
+++ b/libavcodec/ppc/h264qpel.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -191,86 +191,79 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, cons
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
+#if HAVE_BIGENDIAN
+#define put_unaligned_store(s, dest) { \
+ tmp1 = vec_ld(0, dest); \
+ mask = vec_lvsl(0, dest); \
+ tmp2 = vec_ld(15, dest); \
+ edges = vec_perm(tmp2, tmp1, mask); \
+ align = vec_lvsr(0, dest); \
+ tmp2 = vec_perm(s, edges, align); \
+ tmp1 = vec_perm(edges, s, align); \
+ vec_st(tmp2, 15, dest); \
+ vec_st(tmp1, 0 , dest); \
+ }
+#else
+#define put_unaligned_store(s, dest) vec_vsx_st(s, 0, dest);
+#endif /* HAVE_BIGENDIAN */
+
static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
const uint8_t * src2, int dst_stride,
int src_stride1, int h)
{
int i;
- vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;
-
+ vec_u8 a, b, d, mask_;
+#if HAVE_BIGENDIAN
+ vec_u8 tmp1, tmp2, mask, edges, align;
mask_ = vec_lvsl(0, src2);
+#endif
for (i = 0; i < h; i++) {
-
- tmp1 = vec_ld(i * src_stride1, src1);
- mask = vec_lvsl(i * src_stride1, src1);
- tmp2 = vec_ld(i * src_stride1 + 15, src1);
-
- a = vec_perm(tmp1, tmp2, mask);
-
- tmp1 = vec_ld(i * 16, src2);
- tmp2 = vec_ld(i * 16 + 15, src2);
-
- b = vec_perm(tmp1, tmp2, mask_);
-
- tmp1 = vec_ld(0, dst);
- mask = vec_lvsl(0, dst);
- tmp2 = vec_ld(15, dst);
-
+ a = unaligned_load(i * src_stride1, src1);
+ b = load_with_perm_vec(i * 16, src2, mask_);
d = vec_avg(a, b);
-
- edges = vec_perm(tmp2, tmp1, mask);
-
- align = vec_lvsr(0, dst);
-
- tmp2 = vec_perm(d, edges, align);
- tmp1 = vec_perm(edges, d, align);
-
- vec_st(tmp2, 15, dst);
- vec_st(tmp1, 0 , dst);
-
+ put_unaligned_store(d, dst);
dst += dst_stride;
}
}
+#if HAVE_BIGENDIAN
+#define avg_unaligned_store(s, dest){ \
+ tmp1 = vec_ld(0, dest); \
+ mask = vec_lvsl(0, dest); \
+ tmp2 = vec_ld(15, dest); \
+ a = vec_avg(vec_perm(tmp1, tmp2, mask), s); \
+ edges = vec_perm(tmp2, tmp1, mask); \
+ align = vec_lvsr(0, dest); \
+ tmp2 = vec_perm(a, edges, align); \
+ tmp1 = vec_perm(edges, a, align); \
+ vec_st(tmp2, 15, dest); \
+ vec_st(tmp1, 0 , dest); \
+ }
+#else
+#define avg_unaligned_store(s, dest){ \
+ a = vec_avg(vec_vsx_ld(0, dest), s); \
+ vec_vsx_st(a, 0, dest); \
+ }
+#endif /* HAVE_BIGENDIAN */
+
static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
const uint8_t * src2, int dst_stride,
int src_stride1, int h)
{
int i;
- vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;
+ vec_u8 a, b, d, mask_;
+#if HAVE_BIGENDIAN
+ vec_u8 tmp1, tmp2, mask, edges, align;
mask_ = vec_lvsl(0, src2);
+#endif
for (i = 0; i < h; i++) {
-
- tmp1 = vec_ld(i * src_stride1, src1);
- mask = vec_lvsl(i * src_stride1, src1);
- tmp2 = vec_ld(i * src_stride1 + 15, src1);
-
- a = vec_perm(tmp1, tmp2, mask);
-
- tmp1 = vec_ld(i * 16, src2);
- tmp2 = vec_ld(i * 16 + 15, src2);
-
- b = vec_perm(tmp1, tmp2, mask_);
-
- tmp1 = vec_ld(0, dst);
- mask = vec_lvsl(0, dst);
- tmp2 = vec_ld(15, dst);
-
- d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));
-
- edges = vec_perm(tmp2, tmp1, mask);
-
- align = vec_lvsr(0, dst);
-
- tmp2 = vec_perm(d, edges, align);
- tmp1 = vec_perm(edges, d, align);
-
- vec_st(tmp2, 15, dst);
- vec_st(tmp1, 0 , dst);
-
+ a = unaligned_load(i * src_stride1, src1);
+ b = load_with_perm_vec(i * 16, src2, mask_);
+ d = vec_avg(a, b);
+ avg_unaligned_store(d, dst);
dst += dst_stride;
}
}
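
put_pixels16_l2_altivec() and avg_pixels16_l2_altivec() now lean on two
helpers from util_altivec.h that this excerpt does not show:
unaligned_load() derives its own permute from the address, while
load_with_perm_vec() reuses a caller-supplied one, which pays off for
src2 since its rows are 16 bytes apart and one vec_lvsl() serves the
whole loop. A plausible shape of those helpers, hedged because the
header is outside this diff:

    #if HAVE_BIGENDIAN
    #define unaligned_load(off, p) \
        vec_perm(vec_ld(off, p), vec_ld((off) + 15, p), vec_lvsl(off, p))
    #define load_with_perm_vec(off, p, perm) \
        vec_perm(vec_ld(off, p), vec_ld((off) + 15, p), perm)
    #else
    /* VSX: alignment is irrelevant and the permute argument is unused. */
    #define unaligned_load(off, p)           vec_vsx_ld(off, p)
    #define load_with_perm_vec(off, p, perm) vec_vsx_ld(off, p)
    #endif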
diff --git a/libavcodec/ppc/h264qpel_template.c b/libavcodec/ppc/h264qpel_template.c
index fe83146e63..9a345485ba 100644
--- a/libavcodec/ppc/h264qpel_template.c
+++ b/libavcodec/ppc/h264qpel_template.c
@@ -1,24 +1,31 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "config.h"
+#if HAVE_UNISTD_H
+#include <unistd.h>
+#endif
+
#include "libavutil/mem.h"
+#include "libavutil/ppc/types_altivec.h"
+#include "libavutil/ppc/util_altivec.h"
#ifdef DEBUG
#define ASSERT_ALIGNED(ptr) assert(((unsigned long)ptr&0x0000000F));
@@ -26,6 +33,76 @@
#define ASSERT_ALIGNED(ptr) ;
#endif
+#if HAVE_BIGENDIAN
+#define load_alignment(s, ali, pm2, pm1, pp0, pp1, pp2, pp3){\
+ vec_u8 srcR1 = vec_ld(-2, s);\
+ vec_u8 srcR2 = vec_ld(14, s);\
+ switch (ali) {\
+ default: {\
+ srcM2 = vec_perm(srcR1, srcR2, pm2);\
+ srcM1 = vec_perm(srcR1, srcR2, pm1);\
+ srcP0 = vec_perm(srcR1, srcR2, pp0);\
+ srcP1 = vec_perm(srcR1, srcR2, pp1);\
+ srcP2 = vec_perm(srcR1, srcR2, pp2);\
+ srcP3 = vec_perm(srcR1, srcR2, pp3);\
+ } break;\
+ case 11: {\
+ srcM2 = vec_perm(srcR1, srcR2, pm2);\
+ srcM1 = vec_perm(srcR1, srcR2, pm1);\
+ srcP0 = vec_perm(srcR1, srcR2, pp0);\
+ srcP1 = vec_perm(srcR1, srcR2, pp1);\
+ srcP2 = vec_perm(srcR1, srcR2, pp2);\
+ srcP3 = srcR2;\
+ } break;\
+ case 12: {\
+ vec_u8 srcR3 = vec_ld(30, s);\
+ srcM2 = vec_perm(srcR1, srcR2, pm2);\
+ srcM1 = vec_perm(srcR1, srcR2, pm1);\
+ srcP0 = vec_perm(srcR1, srcR2, pp0);\
+ srcP1 = vec_perm(srcR1, srcR2, pp1);\
+ srcP2 = srcR2;\
+ srcP3 = vec_perm(srcR2, srcR3, pp3);\
+ } break;\
+ case 13: {\
+ vec_u8 srcR3 = vec_ld(30, s);\
+ srcM2 = vec_perm(srcR1, srcR2, pm2);\
+ srcM1 = vec_perm(srcR1, srcR2, pm1);\
+ srcP0 = vec_perm(srcR1, srcR2, pp0);\
+ srcP1 = srcR2;\
+ srcP2 = vec_perm(srcR2, srcR3, pp2);\
+ srcP3 = vec_perm(srcR2, srcR3, pp3);\
+ } break;\
+ case 14: {\
+ vec_u8 srcR3 = vec_ld(30, s);\
+ srcM2 = vec_perm(srcR1, srcR2, pm2);\
+ srcM1 = vec_perm(srcR1, srcR2, pm1);\
+ srcP0 = srcR2;\
+ srcP1 = vec_perm(srcR2, srcR3, pp1);\
+ srcP2 = vec_perm(srcR2, srcR3, pp2);\
+ srcP3 = vec_perm(srcR2, srcR3, pp3);\
+ } break;\
+ case 15: {\
+ vec_u8 srcR3 = vec_ld(30, s);\
+ srcM2 = vec_perm(srcR1, srcR2, pm2);\
+ srcM1 = srcR2;\
+ srcP0 = vec_perm(srcR2, srcR3, pp0);\
+ srcP1 = vec_perm(srcR2, srcR3, pp1);\
+ srcP2 = vec_perm(srcR2, srcR3, pp2);\
+ srcP3 = vec_perm(srcR2, srcR3, pp3);\
+ } break;\
+ }\
+ }
+#else
+#define load_alignment(s, ali, pm2, pm1, pp0, pp1, pp2, pp3){\
+ srcM2 = vec_vsx_ld(-2, s);\
+ srcM1 = vec_vsx_ld(-1, s);\
+ srcP0 = vec_vsx_ld(0, s);\
+ srcP1 = vec_vsx_ld(1, s);\
+ srcP2 = vec_vsx_ld(2, s);\
+ srcP3 = vec_vsx_ld(3, s);\
+ }
+#endif /* HAVE_BIGENDIAN */
+
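load_alignment() folds three copies of this alignment switch into one
place. Cases 11-15 are the alignments where one tap's 16-byte window
coincides exactly with the second aligned block (that tap is srcR2
itself) and the taps after it spill into a third block, hence the extra
vec_ld(30, s); little-endian simply issues six VSX loads. All the
lowpass functions below evaluate the standard H.264 six-tap filter
(1, -5, 20, 20, -5, 1); a scalar reference of one horizontally filtered
pixel, for comparison with the vector arithmetic:

    #include <stdint.h>

    static uint8_t clip_uint8(int v)   /* local stand-in for av_clip_uint8() */
    {
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t) v;
    }

    /* One output of the half-pel six-tap filter; src points at P0. */
    static uint8_t lowpass6(const uint8_t *src)
    {
        int v = 20 * (src[0]  + src[1])    /* P0 + P1 */
              -  5 * (src[-1] + src[2])    /* M1 + P2 */
              +      (src[-2] + src[3])    /* M2 + P3 */
              + 16;                        /* rounding, then >> 5 */
        return clip_uint8(v >> 5);
    }
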
/* this code assumes stride % 16 == 0 */
#ifdef PREFIX_h264_qpel16_h_lowpass_altivec
static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t *dst,
@@ -35,12 +112,7 @@ static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t *dst,
register int i;
LOAD_ZERO;
- const vec_u8 permM2 = vec_lvsl(-2, src);
- const vec_u8 permM1 = vec_lvsl(-1, src);
- const vec_u8 permP0 = vec_lvsl(+0, src);
- const vec_u8 permP1 = vec_lvsl(+1, src);
- const vec_u8 permP2 = vec_lvsl(+2, src);
- const vec_u8 permP3 = vec_lvsl(+3, src);
+ vec_u8 permM2, permM1, permP0, permP1, permP2, permP3;
const vec_s16 v5ss = vec_splat_s16(5);
const vec_u16 v5us = vec_splat_u16(5);
const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
@@ -59,79 +131,32 @@ static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t *dst,
vec_u8 sum, fsum;
+#if HAVE_BIGENDIAN
+ permM2 = vec_lvsl(-2, src);
+ permM1 = vec_lvsl(-1, src);
+ permP0 = vec_lvsl(+0, src);
+ permP1 = vec_lvsl(+1, src);
+ permP2 = vec_lvsl(+2, src);
+ permP3 = vec_lvsl(+3, src);
+#endif /* HAVE_BIGENDIAN */
+
for (i = 0 ; i < 16 ; i ++) {
- vec_u8 srcR1 = vec_ld(-2, src);
- vec_u8 srcR2 = vec_ld(14, src);
-
- switch (align) {
- default: {
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = vec_perm(srcR1, srcR2, permP2);
- srcP3 = vec_perm(srcR1, srcR2, permP3);
- } break;
- case 11: {
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = vec_perm(srcR1, srcR2, permP2);
- srcP3 = srcR2;
- } break;
- case 12: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = srcR2;
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 13: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = srcR2;
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 14: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = srcR2;
- srcP1 = vec_perm(srcR2, srcR3, permP1);
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 15: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = srcR2;
- srcP0 = vec_perm(srcR2, srcR3, permP0);
- srcP1 = vec_perm(srcR2, srcR3, permP1);
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- }
-
- srcP0A = (vec_s16) vec_mergeh(zero_u8v, srcP0);
- srcP0B = (vec_s16) vec_mergel(zero_u8v, srcP0);
- srcP1A = (vec_s16) vec_mergeh(zero_u8v, srcP1);
- srcP1B = (vec_s16) vec_mergel(zero_u8v, srcP1);
-
- srcP2A = (vec_s16) vec_mergeh(zero_u8v, srcP2);
- srcP2B = (vec_s16) vec_mergel(zero_u8v, srcP2);
- srcP3A = (vec_s16) vec_mergeh(zero_u8v, srcP3);
- srcP3B = (vec_s16) vec_mergel(zero_u8v, srcP3);
-
- srcM1A = (vec_s16) vec_mergeh(zero_u8v, srcM1);
- srcM1B = (vec_s16) vec_mergel(zero_u8v, srcM1);
- srcM2A = (vec_s16) vec_mergeh(zero_u8v, srcM2);
- srcM2B = (vec_s16) vec_mergel(zero_u8v, srcM2);
+ load_alignment(src, align, permM2, permM1, permP0, permP1, permP2, permP3);
+
+ srcP0A = (vec_s16) VEC_MERGEH(zero_u8v, srcP0);
+ srcP0B = (vec_s16) VEC_MERGEL(zero_u8v, srcP0);
+ srcP1A = (vec_s16) VEC_MERGEH(zero_u8v, srcP1);
+ srcP1B = (vec_s16) VEC_MERGEL(zero_u8v, srcP1);
+
+ srcP2A = (vec_s16) VEC_MERGEH(zero_u8v, srcP2);
+ srcP2B = (vec_s16) VEC_MERGEL(zero_u8v, srcP2);
+ srcP3A = (vec_s16) VEC_MERGEH(zero_u8v, srcP3);
+ srcP3B = (vec_s16) VEC_MERGEL(zero_u8v, srcP3);
+
+ srcM1A = (vec_s16) VEC_MERGEH(zero_u8v, srcM1);
+ srcM1B = (vec_s16) VEC_MERGEL(zero_u8v, srcM1);
+ srcM2A = (vec_s16) VEC_MERGEH(zero_u8v, srcM2);
+ srcM2B = (vec_s16) VEC_MERGEL(zero_u8v, srcM2);
sum1A = vec_adds(srcP0A, srcP1A);
sum1B = vec_adds(srcP0B, srcP1B);
@@ -178,7 +203,10 @@ static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t *dst,
register int i;
LOAD_ZERO;
- const vec_u8 perm = vec_lvsl(0, src);
+ vec_u8 perm;
+#if HAVE_BIGENDIAN
+ perm = vec_lvsl(0, src);
+#endif
const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
const vec_u16 v5us = vec_splat_u16(5);
const vec_s16 v5ss = vec_splat_s16(5);
@@ -186,52 +214,41 @@ static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t *dst,
const uint8_t *srcbis = src - (srcStride * 2);
- const vec_u8 srcM2a = vec_ld(0, srcbis);
- const vec_u8 srcM2b = vec_ld(16, srcbis);
- const vec_u8 srcM2 = vec_perm(srcM2a, srcM2b, perm);
- //srcbis += srcStride;
- const vec_u8 srcM1a = vec_ld(0, srcbis += srcStride);
- const vec_u8 srcM1b = vec_ld(16, srcbis);
- const vec_u8 srcM1 = vec_perm(srcM1a, srcM1b, perm);
- //srcbis += srcStride;
- const vec_u8 srcP0a = vec_ld(0, srcbis += srcStride);
- const vec_u8 srcP0b = vec_ld(16, srcbis);
- const vec_u8 srcP0 = vec_perm(srcP0a, srcP0b, perm);
- //srcbis += srcStride;
- const vec_u8 srcP1a = vec_ld(0, srcbis += srcStride);
- const vec_u8 srcP1b = vec_ld(16, srcbis);
- const vec_u8 srcP1 = vec_perm(srcP1a, srcP1b, perm);
- //srcbis += srcStride;
- const vec_u8 srcP2a = vec_ld(0, srcbis += srcStride);
- const vec_u8 srcP2b = vec_ld(16, srcbis);
- const vec_u8 srcP2 = vec_perm(srcP2a, srcP2b, perm);
- //srcbis += srcStride;
-
- vec_s16 srcM2ssA = (vec_s16) vec_mergeh(zero_u8v, srcM2);
- vec_s16 srcM2ssB = (vec_s16) vec_mergel(zero_u8v, srcM2);
- vec_s16 srcM1ssA = (vec_s16) vec_mergeh(zero_u8v, srcM1);
- vec_s16 srcM1ssB = (vec_s16) vec_mergel(zero_u8v, srcM1);
- vec_s16 srcP0ssA = (vec_s16) vec_mergeh(zero_u8v, srcP0);
- vec_s16 srcP0ssB = (vec_s16) vec_mergel(zero_u8v, srcP0);
- vec_s16 srcP1ssA = (vec_s16) vec_mergeh(zero_u8v, srcP1);
- vec_s16 srcP1ssB = (vec_s16) vec_mergel(zero_u8v, srcP1);
- vec_s16 srcP2ssA = (vec_s16) vec_mergeh(zero_u8v, srcP2);
- vec_s16 srcP2ssB = (vec_s16) vec_mergel(zero_u8v, srcP2);
+ const vec_u8 srcM2 = load_with_perm_vec(0, srcbis, perm);
+ srcbis += srcStride;
+ const vec_u8 srcM1 = load_with_perm_vec(0, srcbis, perm);
+ srcbis += srcStride;
+ const vec_u8 srcP0 = load_with_perm_vec(0, srcbis, perm);
+ srcbis += srcStride;
+ const vec_u8 srcP1 = load_with_perm_vec(0, srcbis, perm);
+ srcbis += srcStride;
+ const vec_u8 srcP2 = load_with_perm_vec(0, srcbis, perm);
+ srcbis += srcStride;
+
+ vec_s16 srcM2ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcM2);
+ vec_s16 srcM2ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcM2);
+ vec_s16 srcM1ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcM1);
+ vec_s16 srcM1ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcM1);
+ vec_s16 srcP0ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcP0);
+ vec_s16 srcP0ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcP0);
+ vec_s16 srcP1ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcP1);
+ vec_s16 srcP1ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcP1);
+ vec_s16 srcP2ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcP2);
+ vec_s16 srcP2ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcP2);
vec_s16 pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
psumA, psumB, sumA, sumB,
srcP3ssA, srcP3ssB,
sum1A, sum1B, sum2A, sum2B, sum3A, sum3B;
- vec_u8 sum, fsum, srcP3a, srcP3b, srcP3;
+ vec_u8 sum, fsum, srcP3;
for (i = 0 ; i < 16 ; i++) {
- srcP3a = vec_ld(0, srcbis += srcStride);
- srcP3b = vec_ld(16, srcbis);
- srcP3 = vec_perm(srcP3a, srcP3b, perm);
- srcP3ssA = (vec_s16) vec_mergeh(zero_u8v, srcP3);
- srcP3ssB = (vec_s16) vec_mergel(zero_u8v, srcP3);
- //srcbis += srcStride;
+ srcP3 = load_with_perm_vec(0, srcbis, perm);
+ srcbis += srcStride;
+
+ srcP3ssA = (vec_s16) VEC_MERGEH(zero_u8v, srcP3);
+ srcP3ssB = (vec_s16) VEC_MERGEL(zero_u8v, srcP3);
sum1A = vec_adds(srcP0ssA, srcP1ssA);
sum1B = vec_adds(srcP0ssB, srcP1ssB);
@@ -288,12 +305,7 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t *dst, int16_t *tmp,
{
register int i;
LOAD_ZERO;
- const vec_u8 permM2 = vec_lvsl(-2, src);
- const vec_u8 permM1 = vec_lvsl(-1, src);
- const vec_u8 permP0 = vec_lvsl(+0, src);
- const vec_u8 permP1 = vec_lvsl(+1, src);
- const vec_u8 permP2 = vec_lvsl(+2, src);
- const vec_u8 permP3 = vec_lvsl(+3, src);
+ vec_u8 permM2, permM1, permP0, permP1, permP2, permP3;
const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
const vec_u32 v10ui = vec_splat_u32(10);
const vec_s16 v5ss = vec_splat_s16(5);
@@ -325,81 +337,35 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t *dst, int16_t *tmp,
vec_u8 fsum, sumv, sum;
vec_s16 ssume, ssumo;
+#if HAVE_BIGENDIAN
+ permM2 = vec_lvsl(-2, src);
+ permM1 = vec_lvsl(-1, src);
+ permP0 = vec_lvsl(+0, src);
+ permP1 = vec_lvsl(+1, src);
+ permP2 = vec_lvsl(+2, src);
+ permP3 = vec_lvsl(+3, src);
+#endif /* HAVE_BIGENDIAN */
+
src -= (2 * srcStride);
for (i = 0 ; i < 21 ; i ++) {
vec_u8 srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
- vec_u8 srcR1 = vec_ld(-2, src);
- vec_u8 srcR2 = vec_ld(14, src);
-
- switch (align) {
- default: {
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = vec_perm(srcR1, srcR2, permP2);
- srcP3 = vec_perm(srcR1, srcR2, permP3);
- } break;
- case 11: {
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = vec_perm(srcR1, srcR2, permP2);
- srcP3 = srcR2;
- } break;
- case 12: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = srcR2;
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 13: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = srcR2;
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 14: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = srcR2;
- srcP1 = vec_perm(srcR2, srcR3, permP1);
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 15: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = srcR2;
- srcP0 = vec_perm(srcR2, srcR3, permP0);
- srcP1 = vec_perm(srcR2, srcR3, permP1);
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- }
-
- srcP0A = (vec_s16) vec_mergeh(zero_u8v, srcP0);
- srcP0B = (vec_s16) vec_mergel(zero_u8v, srcP0);
- srcP1A = (vec_s16) vec_mergeh(zero_u8v, srcP1);
- srcP1B = (vec_s16) vec_mergel(zero_u8v, srcP1);
-
- srcP2A = (vec_s16) vec_mergeh(zero_u8v, srcP2);
- srcP2B = (vec_s16) vec_mergel(zero_u8v, srcP2);
- srcP3A = (vec_s16) vec_mergeh(zero_u8v, srcP3);
- srcP3B = (vec_s16) vec_mergel(zero_u8v, srcP3);
-
- srcM1A = (vec_s16) vec_mergeh(zero_u8v, srcM1);
- srcM1B = (vec_s16) vec_mergel(zero_u8v, srcM1);
- srcM2A = (vec_s16) vec_mergeh(zero_u8v, srcM2);
- srcM2B = (vec_s16) vec_mergel(zero_u8v, srcM2);
+
+ load_alignment(src, align, permM2, permM1, permP0, permP1, permP2, permP3);
+
+ srcP0A = (vec_s16) VEC_MERGEH(zero_u8v, srcP0);
+ srcP0B = (vec_s16) VEC_MERGEL(zero_u8v, srcP0);
+ srcP1A = (vec_s16) VEC_MERGEH(zero_u8v, srcP1);
+ srcP1B = (vec_s16) VEC_MERGEL(zero_u8v, srcP1);
+
+ srcP2A = (vec_s16) VEC_MERGEH(zero_u8v, srcP2);
+ srcP2B = (vec_s16) VEC_MERGEL(zero_u8v, srcP2);
+ srcP3A = (vec_s16) VEC_MERGEH(zero_u8v, srcP3);
+ srcP3B = (vec_s16) VEC_MERGEL(zero_u8v, srcP3);
+
+ srcM1A = (vec_s16) VEC_MERGEH(zero_u8v, srcM1);
+ srcM1B = (vec_s16) VEC_MERGEL(zero_u8v, srcM1);
+ srcM2A = (vec_s16) VEC_MERGEH(zero_u8v, srcM2);
+ srcM2B = (vec_s16) VEC_MERGEL(zero_u8v, srcM2);
sum1A = vec_adds(srcP0A, srcP1A);
sum1B = vec_adds(srcP0B, srcP1B);
@@ -448,8 +414,8 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t *dst, int16_t *tmp,
const vec_s16 sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
const vec_s16 sum2A = vec_adds(tmpM1ssA, tmpP2ssA);
const vec_s16 sum2B = vec_adds(tmpM1ssB, tmpP2ssB);
- const vec_s16 sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
- const vec_s16 sum3B = vec_adds(tmpM2ssB, tmpP3ssB);
+ vec_s16 sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
+ vec_s16 sum3B = vec_adds(tmpM2ssB, tmpP3ssB);
tmpbis += tmpStride;
@@ -474,10 +440,14 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t *dst, int16_t *tmp,
pp2Be = vec_mule(sum2B, v5ss);
pp2Bo = vec_mulo(sum2B, v5ss);
- pp3Ae = vec_sra((vec_s32)sum3A, v16ui);
pp3Ao = vec_mulo(sum3A, v1ss);
- pp3Be = vec_sra((vec_s32)sum3B, v16ui);
pp3Bo = vec_mulo(sum3B, v1ss);
+#if !HAVE_BIGENDIAN
+ sum3A = (vec_s16)vec_perm(sum3A, sum3A, vcswapi2s(0,1,2,3));
+ sum3B = (vec_s16)vec_perm(sum3B, sum3B, vcswapi2s(0,1,2,3));
+#endif
+ pp3Ae = vec_sra((vec_s32)sum3A, v16ui);
+ pp3Be = vec_sra((vec_s32)sum3B, v16ui);
pp1cAe = vec_add(pp1Ae, v512si);
pp1cAo = vec_add(pp1Ao, v512si);
diff --git a/libavcodec/ppc/hpeldsp_altivec.c b/libavcodec/ppc/hpeldsp_altivec.c
index fd6ae73915..87a1f05b6a 100644
--- a/libavcodec/ppc/hpeldsp_altivec.c
+++ b/libavcodec/ppc/hpeldsp_altivec.c
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -38,12 +38,11 @@
/* next one assumes that ((line_size % 16) == 0) */
void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
- register vector unsigned char pixelsv1, pixelsv2;
- register vector unsigned char pixelsv1B, pixelsv2B;
- register vector unsigned char pixelsv1C, pixelsv2C;
- register vector unsigned char pixelsv1D, pixelsv2D;
+ register vector unsigned char pixelsv1;
+ register vector unsigned char pixelsv1B;
+ register vector unsigned char pixelsv1C;
+ register vector unsigned char pixelsv1D;
- register vector unsigned char perm = vec_lvsl(0, pixels);
int i;
register ptrdiff_t line_size_2 = line_size << 1;
register ptrdiff_t line_size_3 = line_size + line_size_2;
@@ -55,22 +54,14 @@ void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t li
// -funroll-loops w/ this is bad - 74 cycles again.
// all this is on a 7450, tuning for the 7450
for (i = 0; i < h; i += 4) {
- pixelsv1 = vec_ld( 0, pixels);
- pixelsv2 = vec_ld(15, pixels);
- pixelsv1B = vec_ld(line_size, pixels);
- pixelsv2B = vec_ld(15 + line_size, pixels);
- pixelsv1C = vec_ld(line_size_2, pixels);
- pixelsv2C = vec_ld(15 + line_size_2, pixels);
- pixelsv1D = vec_ld(line_size_3, pixels);
- pixelsv2D = vec_ld(15 + line_size_3, pixels);
- vec_st(vec_perm(pixelsv1, pixelsv2, perm),
- 0, (unsigned char*)block);
- vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
- line_size, (unsigned char*)block);
- vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
- line_size_2, (unsigned char*)block);
- vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
- line_size_3, (unsigned char*)block);
+ pixelsv1 = unaligned_load( 0, pixels);
+ pixelsv1B = unaligned_load(line_size, pixels);
+ pixelsv1C = unaligned_load(line_size_2, pixels);
+ pixelsv1D = unaligned_load(line_size_3, pixels);
+ VEC_ST(pixelsv1, 0, (unsigned char*)block);
+ VEC_ST(pixelsv1B, line_size, (unsigned char*)block);
+ VEC_ST(pixelsv1C, line_size_2, (unsigned char*)block);
+ VEC_ST(pixelsv1D, line_size_3, (unsigned char*)block);
pixels+=line_size_4;
block +=line_size_4;
}
@@ -80,15 +71,12 @@ void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t li
#define op_avg(a,b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
void ff_avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
- register vector unsigned char pixelsv1, pixelsv2, pixelsv, blockv;
- register vector unsigned char perm = vec_lvsl(0, pixels);
- int i;
+ register vector unsigned char pixelsv, blockv;
+ int i;
for (i = 0; i < h; i++) {
- pixelsv1 = vec_ld( 0, pixels);
- pixelsv2 = vec_ld(16,pixels);
blockv = vec_ld(0, block);
- pixelsv = vec_perm(pixelsv1, pixelsv2, perm);
+ pixelsv = VEC_LD( 0, pixels);
blockv = vec_avg(blockv,pixelsv);
vec_st(blockv, 0, (unsigned char*)block);
pixels+=line_size;
@@ -108,9 +96,7 @@ static void avg_pixels8_altivec(uint8_t * block, const uint8_t * pixels, ptrdiff
int rightside = ((unsigned long)block & 0x0000000F);
blockv = vec_ld(0, block);
- pixelsv1 = vec_ld( 0, pixels);
- pixelsv2 = vec_ld(16, pixels);
- pixelsv = vec_perm(pixelsv1, pixelsv2, vec_lvsl(0, pixels));
+ pixelsv = VEC_LD( 0, pixels);
if (rightside) {
pixelsv = vec_perm(blockv, pixelsv, vcprm(0,1,s0,s1));
@@ -132,21 +118,16 @@ static void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdi
{
register int i;
register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
- register vector unsigned char blockv, temp1, temp2;
+ register vector unsigned char blockv;
register vector unsigned short pixelssum1, pixelssum2, temp3;
register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);
- temp1 = vec_ld(0, pixels);
- temp2 = vec_ld(16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
- if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
- }
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = VEC_LD(0, pixels);
+ pixelsv2 = VEC_LD(1, pixels);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
+
pixelssum1 = vec_add((vector unsigned short)pixelsv1,
(vector unsigned short)pixelsv2);
pixelssum1 = vec_add(pixelssum1, vctwo);
@@ -155,17 +136,10 @@ static void put_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdi
int rightside = ((unsigned long)block & 0x0000000F);
blockv = vec_ld(0, block);
- temp1 = vec_ld(line_size, pixels);
- temp2 = vec_ld(line_size + 16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
- if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
- }
-
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = unaligned_load(line_size, pixels);
+ pixelsv2 = unaligned_load(line_size+1, pixels);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum2 = vec_add((vector unsigned short)pixelsv1,
(vector unsigned short)pixelsv2);
temp3 = vec_add(pixelssum1, pixelssum2);
@@ -191,22 +165,16 @@ static void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels
{
register int i;
register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
- register vector unsigned char blockv, temp1, temp2;
+ register vector unsigned char blockv;
register vector unsigned short pixelssum1, pixelssum2, temp3;
register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);
- temp1 = vec_ld(0, pixels);
- temp2 = vec_ld(16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
- if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
- }
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = VEC_LD(0, pixels);
+ pixelsv2 = VEC_LD(1, pixels);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum1 = vec_add((vector unsigned short)pixelsv1,
(vector unsigned short)pixelsv2);
pixelssum1 = vec_add(pixelssum1, vcone);
@@ -215,17 +183,10 @@ static void put_no_rnd_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels
int rightside = ((unsigned long)block & 0x0000000F);
blockv = vec_ld(0, block);
- temp1 = vec_ld(line_size, pixels);
- temp2 = vec_ld(line_size + 16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
- if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
- }
-
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = unaligned_load(line_size, pixels);
+ pixelsv2 = unaligned_load(line_size+1, pixels);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum2 = vec_add((vector unsigned short)pixelsv1,
(vector unsigned short)pixelsv2);
temp3 = vec_add(pixelssum1, pixelssum2);
@@ -251,24 +212,18 @@ static void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, pt
{
register int i;
register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
- register vector unsigned char blockv, temp1, temp2;
+ register vector unsigned char blockv;
register vector unsigned short temp3, temp4,
pixelssum1, pixelssum2, pixelssum3, pixelssum4;
register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);
- temp1 = vec_ld(0, pixels);
- temp2 = vec_ld(16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
- if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
- }
- pixelsv3 = vec_mergel(vczero, pixelsv1);
- pixelsv4 = vec_mergel(vczero, pixelsv2);
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = VEC_LD(0, pixels);
+ pixelsv2 = VEC_LD(1, pixels);
+ pixelsv3 = VEC_MERGEL(vczero, pixelsv1);
+ pixelsv4 = VEC_MERGEL(vczero, pixelsv2);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum3 = vec_add((vector unsigned short)pixelsv3,
(vector unsigned short)pixelsv4);
pixelssum3 = vec_add(pixelssum3, vctwo);
@@ -279,20 +234,13 @@ static void put_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pixels, pt
for (i = 0; i < h ; i++) {
blockv = vec_ld(0, block);
- temp1 = vec_ld(line_size, pixels);
- temp2 = vec_ld(line_size + 16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
- if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
- }
-
- pixelsv3 = vec_mergel(vczero, pixelsv1);
- pixelsv4 = vec_mergel(vczero, pixelsv2);
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = unaligned_load(line_size, pixels);
+ pixelsv2 = unaligned_load(line_size+1, pixels);
+ pixelsv3 = VEC_MERGEL(vczero, pixelsv1);
+ pixelsv4 = VEC_MERGEL(vczero, pixelsv2);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum4 = vec_add((vector unsigned short)pixelsv3,
(vector unsigned short)pixelsv4);
pixelssum2 = vec_add((vector unsigned short)pixelsv1,
@@ -319,25 +267,19 @@ static void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pix
{
register int i;
register vector unsigned char pixelsv1, pixelsv2, pixelsv3, pixelsv4;
- register vector unsigned char blockv, temp1, temp2;
+ register vector unsigned char blockv;
register vector unsigned short temp3, temp4,
pixelssum1, pixelssum2, pixelssum3, pixelssum4;
register const vector unsigned char vczero = (const vector unsigned char)vec_splat_u8(0);
register const vector unsigned short vcone = (const vector unsigned short)vec_splat_u16(1);
register const vector unsigned short vctwo = (const vector unsigned short)vec_splat_u16(2);
- temp1 = vec_ld(0, pixels);
- temp2 = vec_ld(16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
- if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
- }
- pixelsv3 = vec_mergel(vczero, pixelsv1);
- pixelsv4 = vec_mergel(vczero, pixelsv2);
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = VEC_LD(0, pixels);
+ pixelsv2 = VEC_LD(1, pixels);
+ pixelsv3 = VEC_MERGEL(vczero, pixelsv1);
+ pixelsv4 = VEC_MERGEL(vczero, pixelsv2);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum3 = vec_add((vector unsigned short)pixelsv3,
(vector unsigned short)pixelsv4);
pixelssum3 = vec_add(pixelssum3, vcone);
@@ -346,22 +288,13 @@ static void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pix
pixelssum1 = vec_add(pixelssum1, vcone);
for (i = 0; i < h ; i++) {
- blockv = vec_ld(0, block);
-
- temp1 = vec_ld(line_size, pixels);
- temp2 = vec_ld(line_size + 16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
- if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
- }
-
- pixelsv3 = vec_mergel(vczero, pixelsv1);
- pixelsv4 = vec_mergel(vczero, pixelsv2);
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = unaligned_load(line_size, pixels);
+ pixelsv2 = unaligned_load(line_size+1, pixels);
+ pixelsv3 = VEC_MERGEL(vczero, pixelsv1);
+ pixelsv4 = VEC_MERGEL(vczero, pixelsv2);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum4 = vec_add((vector unsigned short)pixelsv3,
(vector unsigned short)pixelsv4);
pixelssum2 = vec_add((vector unsigned short)pixelsv1,
@@ -376,7 +309,7 @@ static void put_no_rnd_pixels16_xy2_altivec(uint8_t * block, const uint8_t * pix
blockv = vec_packsu(temp3, temp4);
- vec_st(blockv, 0, block);
+ VEC_ST(blockv, 0, block);
block += line_size;
pixels += line_size;
@@ -388,7 +321,7 @@ static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdi
{
register int i;
register vector unsigned char pixelsv1, pixelsv2, pixelsavg;
- register vector unsigned char blockv, temp1, temp2, blocktemp;
+ register vector unsigned char blockv, blocktemp;
register vector unsigned short pixelssum1, pixelssum2, temp3;
register const vector unsigned char vczero = (const vector unsigned char)
@@ -396,16 +329,10 @@ static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdi
register const vector unsigned short vctwo = (const vector unsigned short)
vec_splat_u16(2);
- temp1 = vec_ld(0, pixels);
- temp2 = vec_ld(16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(0, pixels));
- if ((((unsigned long)pixels) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(1, pixels));
- }
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = VEC_LD(0, pixels);
+ pixelsv2 = VEC_LD(1, pixels);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum1 = vec_add((vector unsigned short)pixelsv1,
(vector unsigned short)pixelsv2);
pixelssum1 = vec_add(pixelssum1, vctwo);
@@ -414,17 +341,11 @@ static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdi
int rightside = ((unsigned long)block & 0x0000000F);
blockv = vec_ld(0, block);
- temp1 = vec_ld(line_size, pixels);
- temp2 = vec_ld(line_size + 16, pixels);
- pixelsv1 = vec_perm(temp1, temp2, vec_lvsl(line_size, pixels));
- if (((((unsigned long)pixels) + line_size) & 0x0000000F) == 0x0000000F) {
- pixelsv2 = temp2;
- } else {
- pixelsv2 = vec_perm(temp1, temp2, vec_lvsl(line_size + 1, pixels));
- }
+ pixelsv1 = unaligned_load(line_size, pixels);
+ pixelsv2 = unaligned_load(line_size+1, pixels);
- pixelsv1 = vec_mergeh(vczero, pixelsv1);
- pixelsv2 = vec_mergeh(vczero, pixelsv2);
+ pixelsv1 = VEC_MERGEH(vczero, pixelsv1);
+ pixelsv2 = VEC_MERGEH(vczero, pixelsv2);
pixelssum2 = vec_add((vector unsigned short)pixelsv1,
(vector unsigned short)pixelsv2);
temp3 = vec_add(pixelssum1, pixelssum2);
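
Every xy2 kernel rewritten above computes the same two-dimensional
half-pel interpolation; only the rounding bias differs (2 for the
rounding variants via vctwo, 1 for the no_rnd variants via vcone). A
scalar reference of the 8-pixel case, with the bias as a parameter:

    #include <stdint.h>
    #include <stddef.h>

    static void put_pixels8_xy2_c(uint8_t *block, const uint8_t *pixels,
                                  ptrdiff_t line_size, int h, int bias)
    {
        for (int i = 0; i < h; i++) {
            for (int x = 0; x < 8; x++)
                block[x] = (pixels[x] + pixels[x + 1] +
                            pixels[x + line_size] +
                            pixels[x + line_size + 1] + bias) >> 2;
            pixels += line_size;
            block  += line_size;
        }
    }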
diff --git a/libavcodec/ppc/hpeldsp_altivec.h b/libavcodec/ppc/hpeldsp_altivec.h
index 98dd80ea1c..590809f539 100644
--- a/libavcodec/ppc/hpeldsp_altivec.h
+++ b/libavcodec/ppc/hpeldsp_altivec.h
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/huffyuvdsp_altivec.c b/libavcodec/ppc/huffyuvdsp_altivec.c
index 7c34a67ea4..6701524e4a 100644
--- a/libavcodec/ppc/huffyuvdsp_altivec.c
+++ b/libavcodec/ppc/huffyuvdsp_altivec.c
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -33,7 +33,7 @@
#include "libavcodec/huffyuvdsp.h"
#if HAVE_ALTIVEC
-static void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w)
+static void add_bytes_altivec(uint8_t *dst, uint8_t *src, intptr_t w)
{
register int i;
register vector unsigned char vdst, vsrc;
diff --git a/libavcodec/ppc/idctdsp.c b/libavcodec/ppc/idctdsp.c
index 17f7dbbc7f..5ef514b51b 100644
--- a/libavcodec/ppc/idctdsp.c
+++ b/libavcodec/ppc/idctdsp.c
@@ -1,28 +1,28 @@
/*
* Copyright (c) 2001 Michel Lespinasse
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* NOTE: This code is based on GPL code from the libmpeg2 project. The
* author, Michel Lespinasse, has given explicit permission to release
- * under LGPL as part of Libav.
+ * under LGPL as part of FFmpeg.
*
- * Libav integration by Dieter Shirley
+ * FFmpeg integration by Dieter Shirley
*
* This file is a direct copy of the AltiVec IDCT module from the libmpeg2
* project. I've deleted all of the libmpeg2-specific code, renamed the
@@ -153,6 +153,22 @@ static const vec_s16 constants[5] = {
{ 19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722 }
};
+static void idct_altivec(int16_t *blk)
+{
+ vec_s16 *block = (vec_s16 *) blk;
+
+ IDCT;
+
+ block[0] = vx0;
+ block[1] = vx1;
+ block[2] = vx2;
+ block[3] = vx3;
+ block[4] = vx4;
+ block[5] = vx5;
+ block[6] = vx6;
+ block[7] = vx7;
+}
+
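The new idct_altivec() is the bare in-place transform; idct_put() and
idct_add() below are the same IDCT followed by a clipped store or a
clipped accumulate. A scalar illustration of that relationship
(hypothetical driver code, not the vector implementation):

    #include <stdint.h>

    static void idct_put_ref(uint8_t *dest, int stride, int16_t *block,
                             void (*idct)(int16_t *))
    {
        idct(block);                  /* in place, like idct_altivec() */
        for (int i = 0; i < 8; i++)
            for (int j = 0; j < 8; j++) {
                int v = block[i * 8 + j];
                dest[i * stride + j] = v < 0 ? 0 : v > 255 ? 255 : (uint8_t) v;
            }
    }
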
static void idct_put_altivec(uint8_t *dest, int stride, int16_t *blk)
{
vec_s16 *block = (vec_s16 *) blk;
@@ -234,9 +250,10 @@ av_cold void ff_idctdsp_init_ppc(IDCTDSPContext *c, AVCodecContext *avctx,
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
- if (!high_bit_depth) {
- if ((avctx->idct_algo == FF_IDCT_AUTO) ||
+ if (!high_bit_depth && avctx->lowres == 0) {
+ if ((avctx->idct_algo == FF_IDCT_AUTO && !(avctx->flags & CODEC_FLAG_BITEXACT)) ||
(avctx->idct_algo == FF_IDCT_ALTIVEC)) {
+ c->idct = idct_altivec;
c->idct_add = idct_add_altivec;
c->idct_put = idct_put_altivec;
c->perm_type = FF_IDCT_PERM_TRANSPOSE;
diff --git a/libavcodec/ppc/apedsp_altivec.c b/libavcodec/ppc/lossless_audiodsp_altivec.c
index d8bf4bd167..bdec25223d 100644
--- a/libavcodec/ppc/apedsp_altivec.c
+++ b/libavcodec/ppc/lossless_audiodsp_altivec.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Luca Barbato <lu_zero@gentoo.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,7 +27,21 @@
#include "libavutil/cpu.h"
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/types_altivec.h"
-#include "libavcodec/apedsp.h"
+#include "libavcodec/lossless_audiodsp.h"
+
+#if HAVE_BIGENDIAN
+#define GET_T(tt0,tt1,src,a,b){ \
+ a = vec_ld(16, src); \
+ tt0 = vec_perm(b, a, align); \
+ b = vec_ld(32, src); \
+ tt1 = vec_perm(a, b, align); \
+ }
+#else
+#define GET_T(tt0,tt1,src,a,b){ \
+ tt0 = vec_vsx_ld(0, src); \
+ tt1 = vec_vsx_ld(16, src); \
+ }
+#endif
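
The function below computes a fused dot product and multiply-accumulate:
the return value is the scalar product of v1 and v2 taken before v1 is
updated in place with v1[i] += mul * v3[i], with order a multiple of 16.
A scalar reference of that contract (mirroring the generic C version):

    #include <stdint.h>

    static int32_t scalarproduct_and_madd_int16_c(int16_t *v1,
                                                  const int16_t *v2,
                                                  const int16_t *v3,
                                                  int order, int mul)
    {
        int32_t res = 0;
        while (order--) {
            res   += *v1 * *v2++;
            *v1++ += mul * *v3++;
        }
        return res;
    }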
#if HAVE_ALTIVEC
static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1,
@@ -38,26 +52,23 @@ static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1,
LOAD_ZERO;
vec_s16 *pv1 = (vec_s16 *) v1;
register vec_s16 muls = { mul, mul, mul, mul, mul, mul, mul, mul };
- register vec_s16 t0, t1, i0, i1, i4;
- register vec_s16 i2 = vec_ld(0, v2), i3 = vec_ld(0, v3);
+ register vec_s16 t0, t1, i0, i1, i4, i2, i3;
register vec_s32 res = zero_s32v;
+#if HAVE_BIGENDIAN
register vec_u8 align = vec_lvsl(0, v2);
+ i2 = vec_ld(0, v2);
+ i3 = vec_ld(0, v3);
+#endif
int32_t ires;
order >>= 4;
do {
- i1 = vec_ld(16, v2);
- t0 = vec_perm(i2, i1, align);
- i2 = vec_ld(32, v2);
- t1 = vec_perm(i1, i2, align);
+ GET_T(t0,t1,v2,i1,i2);
i0 = pv1[0];
i1 = pv1[1];
res = vec_msum(t0, i0, res);
res = vec_msum(t1, i1, res);
- i4 = vec_ld(16, v3);
- t0 = vec_perm(i3, i4, align);
- i3 = vec_ld(32, v3);
- t1 = vec_perm(i4, i3, align);
+ GET_T(t0,t1,v3,i4,i3);
pv1[0] = vec_mladd(t0, muls, i0);
pv1[1] = vec_mladd(t1, muls, i1);
pv1 += 2;
@@ -71,7 +82,7 @@ static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1,
}
#endif /* HAVE_ALTIVEC */
-av_cold void ff_apedsp_init_ppc(APEDSPContext *c)
+av_cold void ff_llauddsp_init_ppc(LLAudDSPContext *c)
{
#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
diff --git a/libavcodec/ppc/mathops.h b/libavcodec/ppc/mathops.h
index 34ddb11800..dbd714fcd4 100644
--- a/libavcodec/ppc/mathops.h
+++ b/libavcodec/ppc/mathops.h
@@ -3,20 +3,20 @@
* Copyright (c) 2001, 2002 Fabrice Bellard
* Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/me_cmp.c b/libavcodec/ppc/me_cmp.c
index fd67cf34ee..38a7ba1476 100644
--- a/libavcodec/ppc/me_cmp.c
+++ b/libavcodec/ppc/me_cmp.c
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -35,26 +35,43 @@
#include "libavcodec/me_cmp.h"
#if HAVE_ALTIVEC
+
+#if HAVE_BIGENDIAN
+#define GET_PERM(per1, per2, pix) {\
+ per1 = vec_lvsl(0, pix);\
+ per2 = vec_add(per1, vec_splat_u8(1));\
+}
+#define LOAD_PIX(v, iv, pix, per1, per2) {\
+ vector unsigned char pix2l = vec_ld(0, pix);\
+ vector unsigned char pix2r = vec_ld(16, pix);\
+ v = vec_perm(pix2l, pix2r, per1);\
+ iv = vec_perm(pix2l, pix2r, per2);\
+}
+#else
+#define GET_PERM(per1, per2, pix) {}
+#define LOAD_PIX(v, iv, pix, per1, per2) {\
+ v = vec_vsx_ld(0, pix);\
+ iv = vec_vsx_ld(1, pix);\
+}
+#endif
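
GET_PERM()/LOAD_PIX() wrap the usual two-load-plus-permute scheme for
pix2 and its one-byte-shifted copy; on little-endian both vectors come
straight from vec_vsx_ld(). What sad16_x2 measures is the SAD of pix1
against the rounded horizontal half-pel average of pix2 (vec_avg rounds
up, i.e. (a + b + 1) >> 1). A scalar reference:

    #include <stdint.h>
    #include <stdlib.h>
    #include <stddef.h>

    static int sad16_x2_c(const uint8_t *pix1, const uint8_t *pix2,
                          ptrdiff_t stride, int h)
    {
        int s = 0;
        for (int i = 0; i < h; i++) {
            for (int x = 0; x < 16; x++)
                s += abs(pix1[x] - ((pix2[x] + pix2[x + 1] + 1) >> 1));
            pix1 += stride;
            pix2 += stride;
        }
        return s;
    }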
static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h)
{
- int i, s = 0;
+ int i;
+ int __attribute__((aligned(16))) s = 0;
const vector unsigned char zero =
(const vector unsigned char) vec_splat_u8(0);
- vector unsigned char perm1 = vec_lvsl(0, pix2);
- vector unsigned char perm2 = vec_add(perm1, vec_splat_u8(1));
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
+ vector unsigned char perm1, perm2, pix2v, pix2iv;
+ GET_PERM(perm1, perm2, pix2);
for (i = 0; i < h; i++) {
/* Read unaligned pixels into our vectors. The vectors are as follows:
* pix1v: pix1[0] - pix1[15]
* pix2v: pix2[0] - pix2[15] pix2iv: pix2[1] - pix2[16] */
vector unsigned char pix1v = vec_ld(0, pix1);
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(16, pix2);
- vector unsigned char pix2v = vec_perm(pix2l, pix2r, perm1);
- vector unsigned char pix2iv = vec_perm(pix2l, pix2r, perm2);
+ LOAD_PIX(pix2v, pix2iv, pix2, perm1, perm2);
/* Calculate the average vector. */
vector unsigned char avgv = vec_avg(pix2v, pix2iv);
@@ -80,13 +97,14 @@ static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h)
{
- int i, s = 0;
+ int i;
+ int __attribute__((aligned(16))) s = 0;
const vector unsigned char zero =
(const vector unsigned char) vec_splat_u8(0);
- vector unsigned char perm = vec_lvsl(0, pix2);
vector unsigned char pix1v, pix3v, avgv, t5;
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
+
uint8_t *pix3 = pix2 + stride;
/* Due to the fact that pix3 = pix2 + stride, the pix3 of one
@@ -96,19 +114,14 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
* Read unaligned pixels into our vectors. The vectors are as follows:
* pix2v: pix2[0] - pix2[15]
* Split the pixel vectors into shorts. */
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(15, pix2);
- vector unsigned char pix2v = vec_perm(pix2l, pix2r, perm);
+ vector unsigned char pix2v = VEC_LD(0, pix2);
for (i = 0; i < h; i++) {
/* Read unaligned pixels into our vectors. The vectors are as follows:
* pix1v: pix1[0] - pix1[15]
* pix3v: pix3[0] - pix3[15] */
pix1v = vec_ld(0, pix1);
-
- pix2l = vec_ld(0, pix3);
- pix2r = vec_ld(15, pix3);
- pix3v = vec_perm(pix2l, pix2r, perm);
+ pix3v = VEC_LD(0, pix3);
/* Calculate the average vector. */
avgv = vec_avg(pix2v, pix3v);
@@ -134,20 +147,21 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h)
{
- int i, s = 0;
+ int i;
+ int __attribute__((aligned(16))) s = 0;
uint8_t *pix3 = pix2 + stride;
const vector unsigned char zero =
(const vector unsigned char) vec_splat_u8(0);
const vector unsigned short two =
(const vector unsigned short) vec_splat_u16(2);
vector unsigned char avgv, t5;
- vector unsigned char perm1 = vec_lvsl(0, pix2);
- vector unsigned char perm2 = vec_add(perm1, vec_splat_u8(1));
vector unsigned char pix1v, pix3v, pix3iv;
vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
vector unsigned short avghv, avglv;
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
+ vector unsigned char perm1, perm2, pix2v, pix2iv;
+ GET_PERM(perm1, perm2, pix2);
/* Due to the fact that pix3 = pix2 + stride, the pix3 of one
* iteration becomes pix2 in the next iteration. We can use this
@@ -156,19 +170,16 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
* Read unaligned pixels into our vectors. The vectors are as follows:
* pix2v: pix2[0] - pix2[15] pix2iv: pix2[1] - pix2[16]
* Split the pixel vectors into shorts. */
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(16, pix2);
- vector unsigned char pix2v = vec_perm(pix2l, pix2r, perm1);
- vector unsigned char pix2iv = vec_perm(pix2l, pix2r, perm2);
-
+ LOAD_PIX(pix2v, pix2iv, pix2, perm1, perm2);
vector unsigned short pix2hv =
- (vector unsigned short) vec_mergeh(zero, pix2v);
+ (vector unsigned short) VEC_MERGEH(zero, pix2v);
vector unsigned short pix2lv =
- (vector unsigned short) vec_mergel(zero, pix2v);
+ (vector unsigned short) VEC_MERGEL(zero, pix2v);
vector unsigned short pix2ihv =
- (vector unsigned short) vec_mergeh(zero, pix2iv);
+ (vector unsigned short) VEC_MERGEH(zero, pix2iv);
vector unsigned short pix2ilv =
- (vector unsigned short) vec_mergel(zero, pix2iv);
+ (vector unsigned short) VEC_MERGEL(zero, pix2iv);
+
vector unsigned short t1 = vec_add(pix2hv, pix2ihv);
vector unsigned short t2 = vec_add(pix2lv, pix2ilv);
vector unsigned short t3, t4;
@@ -178,11 +189,7 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
* pix1v: pix1[0] - pix1[15]
* pix3v: pix3[0] - pix3[15] pix3iv: pix3[1] - pix3[16] */
pix1v = vec_ld(0, pix1);
-
- pix2l = vec_ld(0, pix3);
- pix2r = vec_ld(16, pix3);
- pix3v = vec_perm(pix2l, pix2r, perm1);
- pix3iv = vec_perm(pix2l, pix2r, perm2);
+ LOAD_PIX(pix3v, pix3iv, pix3, perm1, perm2);
/* Note that AltiVec does have vec_avg, but this works on vector pairs
* and rounds up. We could do avg(avg(a, b), avg(c, d)), but the
@@ -191,10 +198,10 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
* vectors of shorts and do the averaging by hand. */
/* Split the pixel vectors into shorts. */
- pix3hv = (vector unsigned short) vec_mergeh(zero, pix3v);
- pix3lv = (vector unsigned short) vec_mergel(zero, pix3v);
- pix3ihv = (vector unsigned short) vec_mergeh(zero, pix3iv);
- pix3ilv = (vector unsigned short) vec_mergel(zero, pix3iv);
+ pix3hv = (vector unsigned short) VEC_MERGEH(zero, pix3v);
+ pix3lv = (vector unsigned short) VEC_MERGEL(zero, pix3v);
+ pix3ihv = (vector unsigned short) VEC_MERGEH(zero, pix3iv);
+ pix3ilv = (vector unsigned short) VEC_MERGEL(zero, pix3iv);
/* Do the averaging on them. */
t3 = vec_add(pix3hv, pix3ihv);
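The "averaging by hand" noted in the comment above matters for correctness, not just speed: vec_avg(a, b) computes (a + b + 1) >> 1, so nesting two of them can round up twice and overshoot the true four-sample average (a + b + c + d + 2) >> 2 by one. A scalar sketch of the difference (function names are illustrative, not from the patch):

    static inline int avg4_exact(int a, int b, int c, int d)
    {
        return (a + b + c + d + 2) >> 2;   /* what the widened-short path computes */
    }

    static inline int avg4_nested(int a, int b, int c, int d)
    {
        int ab = (a + b + 1) >> 1, cd = (c + d + 1) >> 1;
        return (ab + cd + 1) >> 1;         /* e.g. 1,0,0,0 yields 1 instead of 0 */
    }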
@@ -229,19 +236,17 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h)
{
- int i, s;
+ int i;
+ int __attribute__((aligned(16))) s;
const vector unsigned int zero =
(const vector unsigned int) vec_splat_u32(0);
- vector unsigned char perm = vec_lvsl(0, pix2);
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
for (i = 0; i < h; i++) {
/* Read potentially unaligned pixels into t1 and t2. */
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(15, pix2);
- vector unsigned char t1 = vec_ld(0, pix1);
- vector unsigned char t2 = vec_perm(pix2l, pix2r, perm);
+ vector unsigned char t1 = vec_ld(0, pix1);
+ vector unsigned char t2 = VEC_LD(0, pix2);
/* Calculate a sum of abs differences vector. */
vector unsigned char t3 = vec_max(t1, t2);
@@ -266,14 +271,13 @@ static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h)
{
- int i, s;
+ int i;
+ int __attribute__((aligned(16))) s;
const vector unsigned int zero =
(const vector unsigned int) vec_splat_u32(0);
const vector unsigned char permclear =
(vector unsigned char)
{ 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0 };
- vector unsigned char perm1 = vec_lvsl(0, pix1);
- vector unsigned char perm2 = vec_lvsl(0, pix2);
vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
vector signed int sumdiffs;
@@ -281,14 +285,10 @@ static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
/* Read potentially unaligned pixels into t1 and t2.
* Since we're reading 16 pixels, and actually only want 8,
* mask out the last 8 pixels. The 0s don't change the sum. */
- vector unsigned char pix1l = vec_ld(0, pix1);
- vector unsigned char pix1r = vec_ld(7, pix1);
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(7, pix2);
- vector unsigned char t1 = vec_and(vec_perm(pix1l, pix1r, perm1),
- permclear);
- vector unsigned char t2 = vec_and(vec_perm(pix2l, pix2r, perm2),
- permclear);
+ vector unsigned char pix1l = VEC_LD(0, pix1);
+ vector unsigned char pix2l = VEC_LD(0, pix2);
+ vector unsigned char t1 = vec_and(pix1l, permclear);
+ vector unsigned char t2 = vec_and(pix2l, permclear);
/* Calculate a sum of abs differences vector. */
vector unsigned char t3 = vec_max(t1, t2);
@@ -315,14 +315,13 @@ static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h)
{
- int i, s;
+ int i;
+ int __attribute__((aligned(16))) s;
const vector unsigned int zero =
(const vector unsigned int) vec_splat_u32(0);
const vector unsigned char permclear =
(vector unsigned char)
{ 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0 };
- vector unsigned char perm1 = vec_lvsl(0, pix1);
- vector unsigned char perm2 = vec_lvsl(0, pix2);
vector unsigned int sum = (vector unsigned int) vec_splat_u32(0);
vector signed int sumsqr;
@@ -330,14 +329,8 @@ static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
/* Read potentially unaligned pixels into t1 and t2.
* Since we're reading 16 pixels, and actually only want 8,
* mask out the last 8 pixels. The 0s don't change the sum. */
- vector unsigned char pix1l = vec_ld(0, pix1);
- vector unsigned char pix1r = vec_ld(7, pix1);
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(7, pix2);
- vector unsigned char t1 = vec_and(vec_perm(pix1l, pix1r, perm1),
- permclear);
- vector unsigned char t2 = vec_and(vec_perm(pix2l, pix2r, perm2),
- permclear);
+ vector unsigned char t1 = vec_and(VEC_LD(0, pix1), permclear);
+ vector unsigned char t2 = vec_and(VEC_LD(0, pix2), permclear);
/* Since we want to use unsigned chars, we can take advantage
* of the fact that abs(a - b) ^ 2 = (a - b) ^ 2. */
@@ -367,19 +360,17 @@ static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
ptrdiff_t stride, int h)
{
- int i, s;
+ int i;
+ int __attribute__((aligned(16))) s;
const vector unsigned int zero =
(const vector unsigned int) vec_splat_u32(0);
- vector unsigned char perm = vec_lvsl(0, pix2);
vector unsigned int sum = (vector unsigned int) vec_splat_u32(0);
vector signed int sumsqr;
for (i = 0; i < h; i++) {
/* Read potentially unaligned pixels into t1 and t2. */
- vector unsigned char pix2l = vec_ld(0, pix2);
- vector unsigned char pix2r = vec_ld(15, pix2);
vector unsigned char t1 = vec_ld(0, pix1);
- vector unsigned char t2 = vec_perm(pix2l, pix2r, perm);
+ vector unsigned char t2 = VEC_LD(0, pix2);
/* Since we want to use unsigned chars, we can take advantage
* of the fact that abs(a - b) ^ 2 = (a - b) ^ 2. */
@@ -399,15 +390,15 @@ static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
/* Sum up the four partial sums, and put the result into s. */
sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
sumsqr = vec_splat(sumsqr, 3);
- vec_ste(sumsqr, 0, &s);
+ vec_ste(sumsqr, 0, &s);
return s;
}
static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
uint8_t *src, ptrdiff_t stride, int h)
{
- int sum;
+ int __attribute__((aligned(16))) sum;
register const vector unsigned char vzero =
(const vector unsigned char) vec_splat_u8(0);
register vector signed short temp0, temp1, temp2, temp3, temp4,
@@ -432,24 +423,19 @@ static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
{ 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07 };
+
#define ONEITERBUTTERFLY(i, res) \
{ \
- register vector unsigned char src1 = vec_ld(stride * i, src); \
- register vector unsigned char src2 = vec_ld(stride * i + 15, src); \
- register vector unsigned char srcO = \
- vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
- register vector unsigned char dst1 = vec_ld(stride * i, dst); \
- register vector unsigned char dst2 = vec_ld(stride * i + 15, dst); \
- register vector unsigned char dstO = \
- vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
+ register vector unsigned char srcO = unaligned_load(stride * i, src); \
+ register vector unsigned char dstO = unaligned_load(stride * i, dst);\
\
/* Promote the unsigned chars to signed shorts. */ \
/* We're in the 8x8 function, we only care for the first 8. */ \
register vector signed short srcV = \
- (vector signed short) vec_mergeh((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEH((vector signed char) vzero, \
(vector signed char) srcO); \
register vector signed short dstV = \
- (vector signed short) vec_mergeh((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEH((vector signed char) vzero, \
(vector signed char) dstO); \
\
/* subtractions inside the first butterfly */ \
@@ -461,6 +447,7 @@ static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
register vector signed short op3 = vec_perm(but2, but2, perm3); \
res = vec_mladd(but2, vprod3, op3); \
}
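ONEITERBUTTERFLY packs each butterfly stage into a single vec_mladd: multiplying by a vector of alternating +1/-1 and adding a swapped-pair permute of the same input yields the sum in one lane and the difference in its neighbour (later stages do the same with wider swap distances). A scalar model of the first such stage, with illustrative names not taken from the patch:

    static void butterfly_pairs(const short in[8], short out[8])
    {
        for (int i = 0; i < 8; i += 2) {
            out[i]     = in[i] + in[i + 1];  /* lane with +1: in[i]*1  + in[i^1] */
            out[i + 1] = in[i] - in[i + 1];  /* lane with -1: in[i+1]*-1 + in[i^1] */
        }
    }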
+
ONEITERBUTTERFLY(0, temp0);
ONEITERBUTTERFLY(1, temp1);
ONEITERBUTTERFLY(2, temp2);
@@ -510,6 +497,7 @@ static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
vsum = vec_sum4s(vec_abs(line7C), vsum);
vsum = vec_sums(vsum, (vector signed int) vzero);
vsum = vec_splat(vsum, 3);
+
vec_ste(vsum, 0, &sum);
}
return sum;
@@ -536,7 +524,7 @@ static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
uint8_t *src, ptrdiff_t stride, int h)
{
- int sum;
+ int __attribute__((aligned(16))) sum;
register vector signed short
temp0 __asm__ ("v0"),
temp1 __asm__ ("v1"),
@@ -584,31 +572,23 @@ static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
#define ONEITERBUTTERFLY(i, res1, res2) \
{ \
- register vector unsigned char src1 __asm__ ("v22") = \
- vec_ld(stride * i, src); \
- register vector unsigned char src2 __asm__ ("v23") = \
- vec_ld(stride * i + 16, src); \
register vector unsigned char srcO __asm__ ("v22") = \
- vec_perm(src1, src2, vec_lvsl(stride * i, src)); \
- register vector unsigned char dst1 __asm__ ("v24") = \
- vec_ld(stride * i, dst); \
- register vector unsigned char dst2 __asm__ ("v25") = \
- vec_ld(stride * i + 16, dst); \
+ unaligned_load(stride * i, src); \
register vector unsigned char dstO __asm__ ("v23") = \
- vec_perm(dst1, dst2, vec_lvsl(stride * i, dst)); \
+ unaligned_load(stride * i, dst);\
\
/* Promote the unsigned chars to signed shorts. */ \
register vector signed short srcV __asm__ ("v24") = \
- (vector signed short) vec_mergeh((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEH((vector signed char) vzero, \
(vector signed char) srcO); \
register vector signed short dstV __asm__ ("v25") = \
- (vector signed short) vec_mergeh((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEH((vector signed char) vzero, \
(vector signed char) dstO); \
register vector signed short srcW __asm__ ("v26") = \
- (vector signed short) vec_mergel((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEL((vector signed char) vzero, \
(vector signed char) srcO); \
register vector signed short dstW __asm__ ("v27") = \
- (vector signed short) vec_mergel((vector signed char) vzero, \
+ (vector signed short) VEC_MERGEL((vector signed char) vzero, \
(vector signed char) dstO); \
\
/* subtractions inside the first butterfly */ \
@@ -639,6 +619,7 @@ static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
res1 = vec_mladd(but2, vprod3, op3); \
res2 = vec_mladd(but2S, vprod3, op3S); \
}
+
ONEITERBUTTERFLY(0, temp0, temp0S);
ONEITERBUTTERFLY(1, temp1, temp1S);
ONEITERBUTTERFLY(2, temp2, temp2S);
@@ -725,6 +706,7 @@ static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
vsum = vec_sum4s(vec_abs(line7CS), vsum);
vsum = vec_sums(vsum, (vector signed int) vzero);
vsum = vec_splat(vsum, 3);
+
vec_ste(vsum, 0, &sum);
}
return sum;
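Every function in this file ends with the same three-step reduction, which survives the endian split unchanged: vec_sums folds the four partial 32-bit sums into element 3, vec_splat broadcasts that element to all lanes, and vec_ste stores a single element to the scalar, which the patch conservatively over-aligns to 16 bytes. A standalone sketch of the idiom (helper name is illustrative):

    static inline int reduce_sums(vector unsigned int acc)
    {
        const vector signed int zero = vec_splat_s32(0);
        int __attribute__((aligned(16))) s;
        vector signed int v = vec_sums((vector signed int) acc, zero);
        v = vec_splat(v, 3);    /* total now in every lane */
        vec_ste(v, 0, &s);      /* store one 32-bit element */
        return s;
    }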
diff --git a/libavcodec/ppc/mpegaudiodsp_altivec.c b/libavcodec/ppc/mpegaudiodsp_altivec.c
index c37f8ec975..ddfe5dcbb7 100644
--- a/libavcodec/ppc/mpegaudiodsp_altivec.c
+++ b/libavcodec/ppc/mpegaudiodsp_altivec.c
@@ -2,20 +2,20 @@
* Altivec optimized MP3 decoding functions
* Copyright (c) 2010 Vitor Sessak
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/mpegvideo_altivec.c b/libavcodec/ppc/mpegvideo_altivec.c
index 9ae849f173..ce53ae4b61 100644
--- a/libavcodec/ppc/mpegvideo_altivec.c
+++ b/libavcodec/ppc/mpegvideo_altivec.c
@@ -4,20 +4,20 @@
* dct_unquantize_h263_altivec:
* Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/mpegvideodsp.c b/libavcodec/ppc/mpegvideodsp.c
index 2bdf909e4a..7696954335 100644
--- a/libavcodec/ppc/mpegvideodsp.c
+++ b/libavcodec/ppc/mpegvideodsp.c
@@ -3,20 +3,20 @@
*
* Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -66,7 +66,7 @@ static void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */,
vec_lvsl(0, src));
if (src_really_odd != 0x0000000F)
- /* If src & 0xF == 0xF, then (src + 1) is properly aligned
+ /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
* on the second vector. */
srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
else
@@ -88,7 +88,7 @@ static void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */,
srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));
if (src_really_odd != 0x0000000F)
- /* If src & 0xF == 0xF, then (src + 1) is properly aligned
+ /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
* on the second vector. */
srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
else
diff --git a/libavcodec/ppc/mpegvideoencdsp.c b/libavcodec/ppc/mpegvideoencdsp.c
index b5348e6abd..e91ba5d25f 100644
--- a/libavcodec/ppc/mpegvideoencdsp.c
+++ b/libavcodec/ppc/mpegvideoencdsp.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -31,6 +31,34 @@
#if HAVE_ALTIVEC
+#if HAVE_VSX
+static int pix_norm1_altivec(uint8_t *pix, int line_size)
+{
+ int i, s = 0;
+ const vector unsigned int zero =
+ (const vector unsigned int) vec_splat_u32(0);
+ vector unsigned int sv = (vector unsigned int) vec_splat_u32(0);
+ vector signed int sum;
+
+ for (i = 0; i < 16; i++) {
+ /* Read the potentially unaligned pixels. */
+ //vector unsigned char pixl = vec_ld(0, pix);
+ //vector unsigned char pixr = vec_ld(15, pix);
+ //vector unsigned char pixv = vec_perm(pixl, pixr, perm);
+ vector unsigned char pixv = vec_vsx_ld(0, pix);
+
+ /* Square the values, and add them to our sum. */
+ sv = vec_msum(pixv, pixv, sv);
+
+ pix += line_size;
+ }
+ /* Sum up the four partial sums, and put the result into s. */
+ sum = vec_sums((vector signed int) sv, (vector signed int) zero);
+ sum = vec_splat(sum, 3);
+ vec_ste(sum, 0, &s); /* single-element store; vec_vsx_st would write 16 bytes past s */
+ return s;
+}
+#else
static int pix_norm1_altivec(uint8_t *pix, int line_size)
{
int i, s = 0;
@@ -58,7 +86,37 @@ static int pix_norm1_altivec(uint8_t *pix, int line_size)
return s;
}
+#endif /* HAVE_VSX */
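The VSX pix_norm1 leans on vec_msum(pixv, pixv, sv), which squares each unsigned byte and accumulates groups of four products into the 32-bit lanes of sv, so no explicit widening step is needed. A scalar model of that one step (illustrative only):

    static void msum_u8_squares(const unsigned char px[16], unsigned int acc[4])
    {
        for (int lane = 0; lane < 4; lane++)
            for (int j = 0; j < 4; j++)
                acc[lane] += (unsigned int) px[4 * lane + j] * px[4 * lane + j];
    }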
+
+#if HAVE_VSX
+static int pix_sum_altivec(uint8_t *pix, int line_size)
+{
+ int i, s;
+ const vector unsigned int zero =
+ (const vector unsigned int) vec_splat_u32(0);
+ vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
+ vector signed int sumdiffs;
+
+ for (i = 0; i < 16; i++) {
+ /* Read the potentially unaligned 16 pixels into t1. */
+ //vector unsigned char pixl = vec_ld(0, pix);
+ //vector unsigned char pixr = vec_ld(15, pix);
+ //vector unsigned char t1 = vec_perm(pixl, pixr, perm);
+ vector unsigned char t1 = vec_vsx_ld(0, pix);
+
+ /* Add each 4 pixel group together and put 4 results into sad. */
+ sad = vec_sum4s(t1, sad);
+
+ pix += line_size;
+ }
+ /* Sum up the four partial sums, and put the result into s. */
+ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
+ sumdiffs = vec_splat(sumdiffs, 3);
+ vec_ste(sumdiffs, 0, &s); /* single-element store, as in the AltiVec path */
+ return s;
+}
+#else
static int pix_sum_altivec(uint8_t *pix, int line_size)
{
int i, s;
@@ -88,6 +146,8 @@ static int pix_sum_altivec(uint8_t *pix, int line_size)
return s;
}
+#endif /* HAVE_VSX */
+
#endif /* HAVE_ALTIVEC */
av_cold void ff_mpegvideoencdsp_init_ppc(MpegvideoEncDSPContext *c,
diff --git a/libavcodec/ppc/pixblockdsp.c b/libavcodec/ppc/pixblockdsp.c
index 698d655fc6..9bbdf96d12 100644
--- a/libavcodec/ppc/pixblockdsp.c
+++ b/libavcodec/ppc/pixblockdsp.c
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -35,8 +35,36 @@
#if HAVE_ALTIVEC
+#if HAVE_VSX
static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels,
- int line_size)
+ ptrdiff_t line_size)
+{
+ int i;
+ vector unsigned char perm =
+ (vector unsigned char) {0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13, \
+ 0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17};
+ const vector unsigned char zero =
+ (const vector unsigned char) vec_splat_u8(0);
+
+ for (i = 0; i < 8; i++) {
+ /* Read potentially unaligned pixels.
+ * We're reading 16 pixels, and actually only want 8,
+ * but we simply ignore the extras. */
+ vector unsigned char bytes = vec_vsx_ld(0, pixels);
+
+ // Convert the bytes into shorts.
+ //vector signed short shorts = (vector signed short) vec_perm(zero, bytes, perm);
+ vector signed short shorts = (vector signed short) vec_perm(bytes, zero, perm);
+
+ // Save the data to the block, we assume the block is 16-byte aligned.
+ vec_vsx_st(shorts, i * 16, (vector signed short *) block);
+
+ pixels += line_size;
+ }
+}
+#else
+static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels,
+ ptrdiff_t line_size)
{
int i;
vector unsigned char perm = vec_lvsl(0, pixels);
@@ -62,6 +90,71 @@ static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels,
}
}
+#endif /* HAVE_VSX */
+
+#if HAVE_VSX
+static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
+ const uint8_t *s2, int stride)
+{
+ int i;
+ const vector unsigned char zero =
+ (const vector unsigned char) vec_splat_u8(0);
+ vector signed short shorts1, shorts2;
+
+ for (i = 0; i < 4; i++) {
+ /* Read potentially unaligned pixels.
+ * We're reading 16 pixels, and actually only want 8,
+ * but we simply ignore the extras. */
+ vector unsigned char bytes = vec_vsx_ld(0, s1);
+
+ // Convert the bytes into shorts.
+ shorts1 = (vector signed short) vec_mergeh(bytes, zero);
+
+ // Do the same for the second block of pixels.
+ bytes = vec_vsx_ld(0, s2);
+
+ // Convert the bytes into shorts.
+ shorts2 = (vector signed short) vec_mergeh(bytes, zero);
+
+ // Do the subtraction.
+ shorts1 = vec_sub(shorts1, shorts2);
+
+ // Save the data to the block, we assume the block is 16-byte aligned.
+ vec_vsx_st(shorts1, 0, (vector signed short *) block);
+
+ s1 += stride;
+ s2 += stride;
+ block += 8;
+
+ /* The code below is a copy of the code above...
+ * This is a manual unroll. */
+
+ /* Read potentially unaligned pixels.
+ * We're reading 16 pixels, and actually only want 8,
+ * but we simply ignore the extras. */
+ bytes = vec_vsx_ld(0, s1);
+
+ // Convert the bytes into shorts.
+ shorts1 = (vector signed short) vec_mergeh(bytes, zero);
+
+ // Do the same for the second block of pixels.
+ bytes = vec_vsx_ld(0, s2);
+
+ // Convert the bytes into shorts.
+ shorts2 = (vector signed short) vec_mergeh(bytes, zero);
+
+ // Do the subtraction.
+ shorts1 = vec_sub(shorts1, shorts2);
+
+ // Save the data to the block, we assume the block is 16-byte aligned.
+ vec_vsx_st(shorts1, 0, (vector signed short *) block);
+
+ s1 += stride;
+ s2 += stride;
+ block += 8;
+ }
+}
+#else
static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
const uint8_t *s2, int stride)
{
@@ -134,6 +227,8 @@ static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
}
}
+#endif /* HAVE_VSX */
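Both VSX routines above widen unsigned bytes to signed shorts without a dedicated unpack: get_pixels interleaves pixel bytes with zero bytes through a hand-written perm table, and diff_pixels gets the same effect from vec_mergeh(bytes, zero), whose operand order is flipped relative to the big-endian variant so the zero still lands in the high byte of each little-endian short. Either way the net effect is a plain zero-extension, sketched here in scalar form (illustrative only):

    static void widen_u8_to_s16(const unsigned char in[8], short out[8])
    {
        /* Pairing each pixel byte with a zero byte, in the byte order of
         * the target endianness, is exactly zero-extension to 16 bits. */
        for (int i = 0; i < 8; i++)
            out[i] = (short) in[i];
    }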
+
#endif /* HAVE_ALTIVEC */
av_cold void ff_pixblockdsp_init_ppc(PixblockDSPContext *c,
diff --git a/libavcodec/ppc/svq1enc_altivec.c b/libavcodec/ppc/svq1enc_altivec.c
index 564f12986b..4e25e253f6 100644
--- a/libavcodec/ppc/svq1enc_altivec.c
+++ b/libavcodec/ppc/svq1enc_altivec.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Luca Barbato <lu_zero@gentoo.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/vc1dsp_altivec.c b/libavcodec/ppc/vc1dsp_altivec.c
index 90c3d27e82..35bb280842 100644
--- a/libavcodec/ppc/vc1dsp_altivec.c
+++ b/libavcodec/ppc/vc1dsp_altivec.c
@@ -2,20 +2,20 @@
* VC-1 and WMV3 decoder - DSP functions AltiVec-optimized
* Copyright (c) 2006 Konstantin Shishkov
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -304,16 +304,23 @@ static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, int16_t *block)
src2 = vec_pack(s2, sA);
src3 = vec_pack(s3, sB);
+#if HAVE_BIGENDIAN
p0 = vec_lvsl (0, dest);
p1 = vec_lvsl (stride, dest);
p = vec_splat_u8 (-1);
perm0 = vec_mergeh (p, p0);
perm1 = vec_mergeh (p, p1);
+#define GET_TMP2(dst, p) \
+ tmp = vec_ld (0, dst); \
+ tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), p);
+#else
+#define GET_TMP2(dst,p) \
+ tmp = vec_vsx_ld (0, dst); \
+ tmp2 = (vector signed short)vec_mergeh (tmp, vec_splat_u8(0));
+#endif
#define ADD(dest,src,perm) \
- /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \
- tmp = vec_ld (0, dest); \
- tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), perm); \
+ GET_TMP2(dest, perm); \
tmp3 = vec_adds (tmp2, src); \
tmp = vec_packsu (tmp3, tmp3); \
vec_ste ((vector unsigned int)tmp, 0, (unsigned int *)dest); \
diff --git a/libavcodec/ppc/videodsp_ppc.c b/libavcodec/ppc/videodsp_ppc.c
index b9e003b487..915702252e 100644
--- a/libavcodec/ppc/videodsp_ppc.c
+++ b/libavcodec/ppc/videodsp_ppc.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2003-2004 Romain Dolbeau
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/vorbisdsp_altivec.c b/libavcodec/ppc/vorbisdsp_altivec.c
index 43f4d0325b..d7557c815b 100644
--- a/libavcodec/ppc/vorbisdsp_altivec.c
+++ b/libavcodec/ppc/vorbisdsp_altivec.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/vp3dsp_altivec.c b/libavcodec/ppc/vp3dsp_altivec.c
index bce49e3170..4a367b655e 100644
--- a/libavcodec/ppc/vp3dsp_altivec.c
+++ b/libavcodec/ppc/vp3dsp_altivec.c
@@ -1,20 +1,20 @@
/*
* Copyright (C) 2009 David Conrad
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -32,8 +32,13 @@
static const vec_s16 constants =
{0, 64277, 60547, 54491, 46341, 36410, 25080, 12785};
+#if HAVE_BIGENDIAN
static const vec_u8 interleave_high =
{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29};
+#else
+static const vec_u8 interleave_high =
+ {2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31};
+#endif
#define IDCT_START \
vec_s16 A, B, C, D, Ad, Bd, Cd, Dd, E, F, G, H;\
@@ -156,9 +161,18 @@ static void vp3_idct_add_altivec(uint8_t *dst, int stride, int16_t block[64])
TRANSPOSE8(b0, b1, b2, b3, b4, b5, b6, b7);
IDCT_1D(ADD8, SHIFT4)
-#define ADD(a)\
+#if HAVE_BIGENDIAN
+#define GET_VDST16\
vdst = vec_ld(0, dst);\
- vdst_16 = (vec_s16)vec_perm(vdst, zero_u8v, vdst_mask);\
+ vdst_16 = (vec_s16)vec_perm(vdst, zero_u8v, vdst_mask);
+#else
+#define GET_VDST16\
+ vdst = vec_vsx_ld(0, dst);\
+ vdst_16 = (vec_s16)vec_mergeh(vdst, zero_u8v);
+#endif
+
+#define ADD(a)\
+ GET_VDST16;\
vdst_16 = vec_adds(a, vdst_16);\
t = vec_packsu(vdst_16, vdst_16);\
vec_ste((vec_u32)t, 0, (unsigned int *)dst);\
diff --git a/libavcodec/ppc/vp8dsp_altivec.c b/libavcodec/ppc/vp8dsp_altivec.c
index e010dee4d2..23e4ace7da 100644
--- a/libavcodec/ppc/vp8dsp_altivec.c
+++ b/libavcodec/ppc/vp8dsp_altivec.c
@@ -3,20 +3,20 @@
*
* Copyright (C) 2010 David Conrad
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -59,17 +59,30 @@ static const vec_s8 h_subpel_filters_outer[3] =
vec_s8 filter_outerh = h_subpel_filters_outer[(i)>>1]; \
vec_s8 filter_outerl = vec_sld(filter_outerh, filter_outerh, 2)
+#if HAVE_BIGENDIAN
+#define GET_PIXHL(offset) \
+ a = vec_ld((offset)-is6tap-1, src); \
+ b = vec_ld((offset)-is6tap-1+15, src); \
+ pixh = vec_perm(a, b, permh##offset); \
+ pixl = vec_perm(a, b, perml##offset)
+
+#define GET_OUTER(offset) outer = vec_perm(a, b, perm_6tap##offset)
+#else
+#define GET_PIXHL(offset) \
+ a = vec_vsx_ld((offset)-is6tap-1, src); \
+ pixh = vec_perm(a, a, perm_inner); \
+ pixl = vec_perm(a, a, vec_add(perm_inner, vec_splat_u8(4)))
+
+#define GET_OUTER(offset) outer = vec_perm(a, a, perm_outer)
+#endif
+
#define FILTER_H(dstv, off) \
- a = vec_ld((off)-is6tap-1, src); \
- b = vec_ld((off)-is6tap-1+15, src); \
-\
- pixh = vec_perm(a, b, permh##off); \
- pixl = vec_perm(a, b, perml##off); \
+ GET_PIXHL(off); \
filth = vec_msum(filter_inner, pixh, c64); \
filtl = vec_msum(filter_inner, pixl, c64); \
\
if (is6tap) { \
- outer = vec_perm(a, b, perm_6tap##off); \
+ GET_OUTER(off); \
filth = vec_msum(filter_outerh, outer, filth); \
filtl = vec_msum(filter_outerl, outer, filtl); \
} \
@@ -84,9 +97,12 @@ void put_vp8_epel_h_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
int h, int mx, int w, int is6tap)
{
LOAD_H_SUBPEL_FILTER(mx-1);
- vec_u8 align_vec0, align_vec8, permh0, permh8, filt;
+#if HAVE_BIGENDIAN
+ vec_u8 align_vec0, align_vec8, permh0, permh8;
vec_u8 perm_6tap0, perm_6tap8, perml0, perml8;
- vec_u8 a, b, pixh, pixl, outer;
+ vec_u8 b;
+#endif
+ vec_u8 filt, a, pixh, pixl, outer;
vec_s16 f16h, f16l;
vec_s32 filth, filtl;
@@ -97,6 +113,7 @@ void put_vp8_epel_h_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
vec_s32 c64 = vec_sl(vec_splat_s32(1), vec_splat_u32(6));
vec_u16 c7 = vec_splat_u16(7);
+#if HAVE_BIGENDIAN
align_vec0 = vec_lvsl( -is6tap-1, src);
align_vec8 = vec_lvsl(8-is6tap-1, src);
@@ -107,6 +124,7 @@ void put_vp8_epel_h_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
perml8 = vec_perm(align_vec8, align_vec8, perm_inner);
perm_6tap0 = vec_perm(align_vec0, align_vec0, perm_outer);
perm_6tap8 = vec_perm(align_vec8, align_vec8, perm_outer);
+#endif
while (h --> 0) {
FILTER_H(f16h, 0);
@@ -164,6 +182,12 @@ static const vec_u8 v_subpel_filters[7] =
dstv = vec_adds(dstv, c64); \
dstv = vec_sra(dstv, c7)
+#if HAVE_BIGENDIAN
+#define LOAD_HL(off, s, perm) load_with_perm_vec(off, s, perm)
+#else
+#define LOAD_HL(off, s, perm) vec_mergeh(vec_vsx_ld(off, s), vec_vsx_ld(off+8, s))
+#endif
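The little-endian LOAD_HL reproduces the layout the big-endian path builds with its merged permute vector: vec_mergeh of the loads at off and off+8 puts pixels 0-7 in the even byte positions and pixels 8-15 in the odd ones, which is what the vec_mule/vec_mulo filtering below expects. A scalar model (illustrative, not from the patch):

    static void load_hl_model(const unsigned char *s, unsigned char out[16])
    {
        for (int i = 0; i < 8; i++) {
            out[2 * i]     = s[i];      /* pixels 0-7  -> even slots */
            out[2 * i + 1] = s[i + 8];  /* pixels 8-15 -> odd slots  */
        }
    }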
+
static av_always_inline
void put_vp8_epel_v_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
uint8_t *src, ptrdiff_t src_stride,
@@ -175,6 +199,7 @@ void put_vp8_epel_v_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
vec_s16 c64 = vec_sl(vec_splat_s16(1), vec_splat_u16(6));
vec_u16 c7 = vec_splat_u16(7);
+#if HAVE_BIGENDIAN
// we want pixels 0-7 to be in the even positions and 8-15 in the odd,
// so combine this permute with the alignment permute vector
align_vech = vec_lvsl(0, src);
@@ -183,22 +208,23 @@ void put_vp8_epel_v_altivec_core(uint8_t *dst, ptrdiff_t dst_stride,
perm_vec = vec_mergeh(align_vech, align_vecl);
else
perm_vec = vec_mergeh(align_vech, align_vech);
+#endif
if (is6tap)
- s0 = load_with_perm_vec(-2*src_stride, src, perm_vec);
- s1 = load_with_perm_vec(-1*src_stride, src, perm_vec);
- s2 = load_with_perm_vec( 0*src_stride, src, perm_vec);
- s3 = load_with_perm_vec( 1*src_stride, src, perm_vec);
+ s0 = LOAD_HL(-2*src_stride, src, perm_vec);
+ s1 = LOAD_HL(-1*src_stride, src, perm_vec);
+ s2 = LOAD_HL( 0*src_stride, src, perm_vec);
+ s3 = LOAD_HL( 1*src_stride, src, perm_vec);
if (is6tap)
- s4 = load_with_perm_vec( 2*src_stride, src, perm_vec);
+ s4 = LOAD_HL( 2*src_stride, src, perm_vec);
src += (2+is6tap)*src_stride;
while (h --> 0) {
if (is6tap)
- s5 = load_with_perm_vec(0, src, perm_vec);
+ s5 = LOAD_HL(0, src, perm_vec);
else
- s4 = load_with_perm_vec(0, src, perm_vec);
+ s4 = LOAD_HL(0, src, perm_vec);
FILTER_V(f16h, vec_mule);
@@ -272,39 +298,25 @@ EPEL_HV(4, 4,4)
static void put_vp8_pixels16_altivec(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my)
{
- register vector unsigned char pixelsv1, pixelsv2;
- register vector unsigned char pixelsv1B, pixelsv2B;
- register vector unsigned char pixelsv1C, pixelsv2C;
- register vector unsigned char pixelsv1D, pixelsv2D;
-
- register vector unsigned char perm = vec_lvsl(0, src);
+ register vector unsigned char perm;
int i;
register ptrdiff_t dstride2 = dstride << 1, sstride2 = sstride << 1;
register ptrdiff_t dstride3 = dstride2 + dstride, sstride3 = sstride + sstride2;
register ptrdiff_t dstride4 = dstride << 2, sstride4 = sstride << 2;
+#if HAVE_BIGENDIAN
+ perm = vec_lvsl(0, src);
+#endif
// hand-unrolling the loop by 4 gains about 15%
// mininum execution time goes from 74 to 60 cycles
// it's faster than -funroll-loops, but using
// -funroll-loops w/ this is bad - 74 cycles again.
// all this is on a 7450, tuning for the 7450
for (i = 0; i < h; i += 4) {
- pixelsv1 = vec_ld( 0, src);
- pixelsv2 = vec_ld(15, src);
- pixelsv1B = vec_ld(sstride, src);
- pixelsv2B = vec_ld(15 + sstride, src);
- pixelsv1C = vec_ld(sstride2, src);
- pixelsv2C = vec_ld(15 + sstride2, src);
- pixelsv1D = vec_ld(sstride3, src);
- pixelsv2D = vec_ld(15 + sstride3, src);
- vec_st(vec_perm(pixelsv1, pixelsv2, perm),
- 0, (unsigned char*)dst);
- vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
- dstride, (unsigned char*)dst);
- vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
- dstride2, (unsigned char*)dst);
- vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
- dstride3, (unsigned char*)dst);
+ vec_st(load_with_perm_vec(0, src, perm), 0, dst);
+ vec_st(load_with_perm_vec(sstride, src, perm), dstride, dst);
+ vec_st(load_with_perm_vec(sstride2, src, perm), dstride2, dst);
+ vec_st(load_with_perm_vec(sstride3, src, perm), dstride3, dst);
src += sstride4;
dst += dstride4;
}