Diffstat (limited to 'libavcodec/ppc')
-rw-r--r--  libavcodec/ppc/Makefile                        7
-rw-r--r--  libavcodec/ppc/asm.S                           8
-rw-r--r--  libavcodec/ppc/audiodsp.c                      8
-rw-r--r--  libavcodec/ppc/blockdsp.c                      8
-rw-r--r--  libavcodec/ppc/dct-test.c                      8
-rw-r--r--  libavcodec/ppc/fdct.h                          8
-rw-r--r--  libavcodec/ppc/fdctdsp.c                      10
-rw-r--r--  libavcodec/ppc/fft_altivec.S                   8
-rw-r--r--  libavcodec/ppc/fft_init.c                     20
-rw-r--r--  libavcodec/ppc/fft_vsx.c                     227
-rw-r--r--  libavcodec/ppc/fft_vsx.h                     830
-rw-r--r--  libavcodec/ppc/fmtconvert_altivec.c            8
-rw-r--r--  libavcodec/ppc/h264chroma_init.c               8
-rw-r--r--  libavcodec/ppc/h264chroma_template.c           8
-rw-r--r--  libavcodec/ppc/h264dsp.c                       8
-rw-r--r--  libavcodec/ppc/h264qpel.c                      8
-rw-r--r--  libavcodec/ppc/h264qpel_template.c             8
-rw-r--r--  libavcodec/ppc/hpeldsp_altivec.c              42
-rw-r--r--  libavcodec/ppc/hpeldsp_altivec.h               8
-rw-r--r--  libavcodec/ppc/huffyuvdsp_altivec.c           10
-rw-r--r--  libavcodec/ppc/idctdsp.c                      33
-rw-r--r--  libavcodec/ppc/lossless_audiodsp_altivec.c (renamed from libavcodec/ppc/apedsp_altivec.c)  12
-rw-r--r--  libavcodec/ppc/mathops.h                       8
-rw-r--r--  libavcodec/ppc/me_cmp.c                      302
-rw-r--r--  libavcodec/ppc/mpegaudiodsp_altivec.c          8
-rw-r--r--  libavcodec/ppc/mpegvideo_altivec.c             8
-rw-r--r--  libavcodec/ppc/mpegvideodsp.c                 12
-rw-r--r--  libavcodec/ppc/mpegvideoencdsp.c              68
-rw-r--r--  libavcodec/ppc/pixblockdsp.c                 105
-rw-r--r--  libavcodec/ppc/svq1enc_altivec.c               8
-rw-r--r--  libavcodec/ppc/vc1dsp_altivec.c                8
-rw-r--r--  libavcodec/ppc/videodsp_ppc.c                  8
-rw-r--r--  libavcodec/ppc/vorbisdsp_altivec.c             8
-rw-r--r--  libavcodec/ppc/vp3dsp_altivec.c                8
-rw-r--r--  libavcodec/ppc/vp8dsp_altivec.c                8
35 files changed, 1712 insertions(+), 142 deletions(-)
diff --git a/libavcodec/ppc/Makefile b/libavcodec/ppc/Makefile
index c6ff0f1132..6f7eeb7286 100644
--- a/libavcodec/ppc/Makefile
+++ b/libavcodec/ppc/Makefile
@@ -4,9 +4,10 @@ OBJS += ppc/fmtconvert_altivec.o \
OBJS-$(CONFIG_AUDIODSP) += ppc/audiodsp.o
OBJS-$(CONFIG_BLOCKDSP) += ppc/blockdsp.o
OBJS-$(CONFIG_FFT) += ppc/fft_init.o \
- ppc/fft_altivec.o
+ ppc/fft_altivec.o \
+ ppc/fft_vsx.o
OBJS-$(CONFIG_H264CHROMA) += ppc/h264chroma_init.o
-OBJS-$(CONFIG_H264DSP) += ppc/h264dsp.o
+OBJS-$(CONFIG_H264DSP) += ppc/h264dsp.o ppc/hpeldsp_altivec.o
OBJS-$(CONFIG_H264QPEL) += ppc/h264qpel.o
OBJS-$(CONFIG_HPELDSP) += ppc/hpeldsp_altivec.o
OBJS-$(CONFIG_HUFFYUVDSP) += ppc/huffyuvdsp_altivec.o
@@ -22,7 +23,7 @@ OBJS-$(CONFIG_VIDEODSP) += ppc/videodsp_ppc.o
OBJS-$(CONFIG_VP3DSP) += ppc/vp3dsp_altivec.o
# decoders/encoders
-OBJS-$(CONFIG_APE_DECODER) += ppc/apedsp_altivec.o
+OBJS-$(CONFIG_LLAUDDSP) += ppc/lossless_audiodsp_altivec.o
OBJS-$(CONFIG_SVQ1_ENCODER) += ppc/svq1enc_altivec.o
OBJS-$(CONFIG_VC1_DECODER) += ppc/vc1dsp_altivec.o
OBJS-$(CONFIG_VORBIS_DECODER) += ppc/vorbisdsp_altivec.o
diff --git a/libavcodec/ppc/asm.S b/libavcodec/ppc/asm.S
index 141dee9b78..a3edeed202 100644
--- a/libavcodec/ppc/asm.S
+++ b/libavcodec/ppc/asm.S
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2009 Loren Merritt
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/audiodsp.c b/libavcodec/ppc/audiodsp.c
index 36506ce902..c88c3d9167 100644
--- a/libavcodec/ppc/audiodsp.c
+++ b/libavcodec/ppc/audiodsp.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Luca Barbato <lu_zero@gentoo.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/blockdsp.c b/libavcodec/ppc/blockdsp.c
index 679bc0454f..0059b3b448 100644
--- a/libavcodec/ppc/blockdsp.c
+++ b/libavcodec/ppc/blockdsp.c
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/dct-test.c b/libavcodec/ppc/dct-test.c
index 37fd8bbd23..2328516ca4 100644
--- a/libavcodec/ppc/dct-test.c
+++ b/libavcodec/ppc/dct-test.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/fdct.h b/libavcodec/ppc/fdct.h
index 74710354ef..437f815258 100644
--- a/libavcodec/ppc/fdct.h
+++ b/libavcodec/ppc/fdct.h
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/fdctdsp.c b/libavcodec/ppc/fdctdsp.c
index 51417a5828..f2efc5ddd0 100644
--- a/libavcodec/ppc/fdctdsp.c
+++ b/libavcodec/ppc/fdctdsp.c
@@ -1,20 +1,20 @@
/*
* Copyright (C) 2003 James Klicman <james@klicman.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -59,7 +59,7 @@
#define WA (SQRT_2 * (-C3 - C5))
#define WB (SQRT_2 * (C5 - C3))
-static vector float fdctconsts[3] = {
+static const vector float fdctconsts[3] = {
{ W0, W1, W2, W3 },
{ W4, W5, W6, W7 },
{ W8, W9, WA, WB }
diff --git a/libavcodec/ppc/fft_altivec.S b/libavcodec/ppc/fft_altivec.S
index c92b30b897..aab669ea45 100644
--- a/libavcodec/ppc/fft_altivec.S
+++ b/libavcodec/ppc/fft_altivec.S
@@ -5,20 +5,20 @@
* This algorithm (though not any of the implementation details) is
* based on libdjbfft by D. J. Bernstein.
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/fft_init.c b/libavcodec/ppc/fft_init.c
index 8fcc033b53..675fa33a95 100644
--- a/libavcodec/ppc/fft_init.c
+++ b/libavcodec/ppc/fft_init.c
@@ -3,20 +3,20 @@
* AltiVec-enabled
* Copyright (c) 2009 Loren Merritt
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -36,8 +36,12 @@
* It also assumes all FFTComplex are 8-byte-aligned pairs of floats.
*/
+#if HAVE_VSX
+#include "fft_vsx.h"
+#else
void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z);
void ff_fft_calc_interleave_altivec(FFTContext *s, FFTComplex *z);
+#endif
#if HAVE_GNU_AS && HAVE_ALTIVEC
static void imdct_half_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
@@ -94,7 +98,11 @@ static void imdct_half_altivec(FFTContext *s, FFTSample *output, const FFTSample
k--;
} while(k >= 0);
+#if HAVE_VSX
+ ff_fft_calc_vsx(s, (FFTComplex*)output);
+#else
ff_fft_calc_altivec(s, (FFTComplex*)output);
+#endif
/* post rotation + reordering */
j = -n32;
@@ -147,7 +155,11 @@ av_cold void ff_fft_init_ppc(FFTContext *s)
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
+#if HAVE_VSX
+ s->fft_calc = ff_fft_calc_interleave_vsx;
+#else
s->fft_calc = ff_fft_calc_interleave_altivec;
+#endif
if (s->mdct_bits >= 5) {
s->imdct_calc = imdct_calc_altivec;
s->imdct_half = imdct_half_altivec;
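
A note on dispatch: the VSX path is chosen at build time. With HAVE_VSX set, the AltiVec entry points are not even declared, so a VSX-configured binary never falls back at run time. A run-time selection would look roughly like the sketch below (hypothetical, not what this patch does; it assumes both entry points are declared and that an AV_CPU_FLAG_VSX flag exists in this tree):

    /* Hypothetical run-time selection; this patch instead decides with
     * #if HAVE_VSX at compile time. */
    av_cold void ff_fft_init_ppc(FFTContext *s)
    {
        int flags = av_get_cpu_flags();

        if (!PPC_ALTIVEC(flags))
            return;

        s->fft_calc = ff_fft_calc_interleave_altivec;
    #if HAVE_VSX
        if (flags & AV_CPU_FLAG_VSX)   /* assumed CPU flag */
            s->fft_calc = ff_fft_calc_interleave_vsx;
    #endif
    }
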
diff --git a/libavcodec/ppc/fft_vsx.c b/libavcodec/ppc/fft_vsx.c
new file mode 100644
index 0000000000..e92975f74e
--- /dev/null
+++ b/libavcodec/ppc/fft_vsx.c
@@ -0,0 +1,227 @@
+/*
+ * FFT transform, optimized with VSX built-in functions
+ * Copyright (c) 2014 Rong Yan
+ *
+ * This algorithm (though not any of the implementation details) is
+ * based on libdjbfft by D. J. Bernstein.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#include "config.h"
+#include "libavutil/cpu.h"
+#include "libavutil/ppc/types_altivec.h"
+#include "libavutil/ppc/util_altivec.h"
+#include "libavcodec/fft.h"
+#include "libavcodec/fft-internal.h"
+#include "fft_vsx.h"
+
+#if HAVE_VSX
+
+static void fft32_vsx_interleave(FFTComplex *z)
+{
+ fft16_vsx_interleave(z);
+ fft8_vsx_interleave(z+16);
+ fft8_vsx_interleave(z+24);
+ pass_vsx_interleave(z,ff_cos_32,4);
+}
+
+static void fft64_vsx_interleave(FFTComplex *z)
+{
+ fft32_vsx_interleave(z);
+ fft16_vsx_interleave(z+32);
+ fft16_vsx_interleave(z+48);
+ pass_vsx_interleave(z,ff_cos_64, 8);
+}
+static void fft128_vsx_interleave(FFTComplex *z)
+{
+ fft64_vsx_interleave(z);
+ fft32_vsx_interleave(z+64);
+ fft32_vsx_interleave(z+96);
+ pass_vsx_interleave(z,ff_cos_128,16);
+}
+static void fft256_vsx_interleave(FFTComplex *z)
+{
+ fft128_vsx_interleave(z);
+ fft64_vsx_interleave(z+128);
+ fft64_vsx_interleave(z+192);
+ pass_vsx_interleave(z,ff_cos_256,32);
+}
+static void fft512_vsx_interleave(FFTComplex *z)
+{
+ fft256_vsx_interleave(z);
+ fft128_vsx_interleave(z+256);
+ fft128_vsx_interleave(z+384);
+ pass_vsx_interleave(z,ff_cos_512,64);
+}
+static void fft1024_vsx_interleave(FFTComplex *z)
+{
+ fft512_vsx_interleave(z);
+ fft256_vsx_interleave(z+512);
+ fft256_vsx_interleave(z+768);
+ pass_vsx_interleave(z,ff_cos_1024,128);
+
+}
+static void fft2048_vsx_interleave(FFTComplex *z)
+{
+ fft1024_vsx_interleave(z);
+ fft512_vsx_interleave(z+1024);
+ fft512_vsx_interleave(z+1536);
+ pass_vsx_interleave(z,ff_cos_2048,256);
+}
+static void fft4096_vsx_interleave(FFTComplex *z)
+{
+ fft2048_vsx_interleave(z);
+ fft1024_vsx_interleave(z+2048);
+ fft1024_vsx_interleave(z+3072);
+ pass_vsx_interleave(z,ff_cos_4096, 512);
+}
+static void fft8192_vsx_interleave(FFTComplex *z)
+{
+ fft4096_vsx_interleave(z);
+ fft2048_vsx_interleave(z+4096);
+ fft2048_vsx_interleave(z+6144);
+ pass_vsx_interleave(z,ff_cos_8192,1024);
+}
+static void fft16384_vsx_interleave(FFTComplex *z)
+{
+ fft8192_vsx_interleave(z);
+ fft4096_vsx_interleave(z+8192);
+ fft4096_vsx_interleave(z+12288);
+ pass_vsx_interleave(z,ff_cos_16384,2048);
+}
+static void fft32768_vsx_interleave(FFTComplex *z)
+{
+ fft16384_vsx_interleave(z);
+ fft8192_vsx_interleave(z+16384);
+ fft8192_vsx_interleave(z+24576);
+ pass_vsx_interleave(z,ff_cos_32768,4096);
+}
+static void fft65536_vsx_interleave(FFTComplex *z)
+{
+ fft32768_vsx_interleave(z);
+ fft16384_vsx_interleave(z+32768);
+ fft16384_vsx_interleave(z+49152);
+ pass_vsx_interleave(z,ff_cos_65536,8192);
+}
+
+static void fft32_vsx(FFTComplex *z)
+{
+ fft16_vsx(z);
+ fft8_vsx(z+16);
+ fft8_vsx(z+24);
+ pass_vsx(z,ff_cos_32,4);
+}
+
+static void fft64_vsx(FFTComplex *z)
+{
+ fft32_vsx(z);
+ fft16_vsx(z+32);
+ fft16_vsx(z+48);
+ pass_vsx(z,ff_cos_64, 8);
+}
+static void fft128_vsx(FFTComplex *z)
+{
+ fft64_vsx(z);
+ fft32_vsx(z+64);
+ fft32_vsx(z+96);
+ pass_vsx(z,ff_cos_128,16);
+}
+static void fft256_vsx(FFTComplex *z)
+{
+ fft128_vsx(z);
+ fft64_vsx(z+128);
+ fft64_vsx(z+192);
+ pass_vsx(z,ff_cos_256,32);
+}
+static void fft512_vsx(FFTComplex *z)
+{
+ fft256_vsx(z);
+ fft128_vsx(z+256);
+ fft128_vsx(z+384);
+ pass_vsx(z,ff_cos_512,64);
+}
+static void fft1024_vsx(FFTComplex *z)
+{
+ fft512_vsx(z);
+ fft256_vsx(z+512);
+ fft256_vsx(z+768);
+ pass_vsx(z,ff_cos_1024,128);
+
+}
+static void fft2048_vsx(FFTComplex *z)
+{
+ fft1024_vsx(z);
+ fft512_vsx(z+1024);
+ fft512_vsx(z+1536);
+ pass_vsx(z,ff_cos_2048,256);
+}
+static void fft4096_vsx(FFTComplex *z)
+{
+ fft2048_vsx(z);
+ fft1024_vsx(z+2048);
+ fft1024_vsx(z+3072);
+ pass_vsx(z,ff_cos_4096, 512);
+}
+static void fft8192_vsx(FFTComplex *z)
+{
+ fft4096_vsx(z);
+ fft2048_vsx(z+4096);
+ fft2048_vsx(z+6144);
+ pass_vsx(z,ff_cos_8192,1024);
+}
+static void fft16384_vsx(FFTComplex *z)
+{
+ fft8192_vsx(z);
+ fft4096_vsx(z+8192);
+ fft4096_vsx(z+12288);
+ pass_vsx(z,ff_cos_16384,2048);
+}
+static void fft32768_vsx(FFTComplex *z)
+{
+ fft16384_vsx(z);
+ fft8192_vsx(z+16384);
+ fft8192_vsx(z+24576);
+ pass_vsx(z,ff_cos_32768,4096);
+}
+static void fft65536_vsx(FFTComplex *z)
+{
+ fft32768_vsx(z);
+ fft16384_vsx(z+32768);
+ fft16384_vsx(z+49152);
+ pass_vsx(z,ff_cos_65536,8192);
+}
+
+static void (* const fft_dispatch_vsx[])(FFTComplex*) = {
+ fft4_vsx, fft8_vsx, fft16_vsx, fft32_vsx, fft64_vsx, fft128_vsx, fft256_vsx, fft512_vsx, fft1024_vsx,
+ fft2048_vsx, fft4096_vsx, fft8192_vsx, fft16384_vsx, fft32768_vsx, fft65536_vsx,
+};
+static void (* const fft_dispatch_vsx_interleave[])(FFTComplex*) = {
+ fft4_vsx_interleave, fft8_vsx_interleave, fft16_vsx_interleave, fft32_vsx_interleave, fft64_vsx_interleave,
+ fft128_vsx_interleave, fft256_vsx_interleave, fft512_vsx_interleave, fft1024_vsx_interleave,
+ fft2048_vsx_interleave, fft4096_vsx_interleave, fft8192_vsx_interleave, fft16384_vsx_interleave, fft32768_vsx_interleave, fft65536_vsx_interleave,
+};
+void ff_fft_calc_interleave_vsx(FFTContext *s, FFTComplex *z)
+{
+ fft_dispatch_vsx_interleave[s->nbits-2](z);
+}
+void ff_fft_calc_vsx(FFTContext *s, FFTComplex *z)
+{
+ fft_dispatch_vsx[s->nbits-2](z);
+}
+#endif /* HAVE_VSX */
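
Each fftN_vsx wrapper above instantiates the same split-radix step: an N-point transform is built from one N/2-point transform on the first half and two N/4-point transforms on the quarters, then combined by a pass over the ff_cos_N twiddle table with N/8 groups. The dispatch tables are indexed by s->nbits - 2, so entry 0 is the 4-point kernel and entry 14 the 65536-point one. The recurrence, as an illustrative sketch (fft_base and cos_table are hypothetical stand-ins for the hand-written fft4/8/16 kernels and the ff_cos_N tables):

    /* Illustrative split-radix recursion; the real code unrolls one
     * function per size so the dispatch stays a flat pointer table. */
    static void fft_split_radix(FFTComplex *z, int n)
    {
        if (n <= 16) {
            fft_base(z, n);                    /* hypothetical fft4/8/16 kernel */
            return;
        }
        fft_split_radix(z,             n / 2); /* first half    */
        fft_split_radix(z + n / 2,     n / 4); /* third quarter */
        fft_split_radix(z + 3 * n / 4, n / 4); /* last quarter  */
        pass_vsx(z, cos_table(n), n / 8);      /* combine: N/8 twiddle groups */
    }
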
diff --git a/libavcodec/ppc/fft_vsx.h b/libavcodec/ppc/fft_vsx.h
new file mode 100644
index 0000000000..a85475d160
--- /dev/null
+++ b/libavcodec/ppc/fft_vsx.h
@@ -0,0 +1,830 @@
+#ifndef AVCODEC_PPC_FFT_VSX_H
+#define AVCODEC_PPC_FFT_VSX_H
+/*
+ * FFT transform, optimized with VSX built-in functions
+ * Copyright (c) 2014 Rong Yan, Copyright (c) 2009 Loren Merritt
+ *
+ * This algorithm (though not any of the implementation details) is
+ * based on libdjbfft by D. J. Bernstein, and fft_altivec.S.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+
+#include "config.h"
+#include "libavutil/cpu.h"
+#include "libavutil/ppc/types_altivec.h"
+#include "libavutil/ppc/util_altivec.h"
+#include "libavcodec/fft.h"
+#include "libavcodec/fft-internal.h"
+
+#if HAVE_VSX
+
+void ff_fft_calc_interleave_vsx(FFTContext *s, FFTComplex *z);
+void ff_fft_calc_vsx(FFTContext *s, FFTComplex *z);
+
+
+#define byte_2complex (2*sizeof(FFTComplex))
+#define byte_4complex (4*sizeof(FFTComplex))
+#define byte_6complex (6*sizeof(FFTComplex))
+#define byte_8complex (8*sizeof(FFTComplex))
+#define byte_10complex (10*sizeof(FFTComplex))
+#define byte_12complex (12*sizeof(FFTComplex))
+#define byte_14complex (14*sizeof(FFTComplex))
+
+inline static void pass_vsx_interleave(FFTComplex *z, const FFTSample *wre, unsigned int n)
+{
+ int o1 = n<<1;
+ int o2 = n<<2;
+ int o3 = o1+o2;
+ int i1, i2, i3;
+ FFTSample* out = (FFTSample*)z;
+ const FFTSample *wim = wre+o1;
+ vec_f vz0, vzo1, vzo2, vzo3;
+ vec_f x0, x1, x2, x3;
+ vec_f x4, x5, x6, x7;
+ vec_f x8, x9, x10, x11;
+ vec_f x12, x13, x14, x15;
+ vec_f x16, x17, x18, x19;
+ vec_f x20, x21, x22, x23;
+ vec_f vz0plus1, vzo1plus1, vzo2plus1, vzo3plus1;
+ vec_f y0, y1, y2, y3;
+ vec_f y4, y5, y8, y9;
+ vec_f y10, y13, y14, y15;
+ vec_f y16, y17, y18, y19;
+ vec_f y20, y21, y22, y23;
+ vec_f wr1, wi1, wr0, wi0;
+ vec_f wr2, wi2, wr3, wi3;
+ vec_f xmulwi0, xmulwi1, ymulwi2, ymulwi3;
+
+ n = n-2;
+ i1 = o1*sizeof(FFTComplex);
+ i2 = o2*sizeof(FFTComplex);
+ i3 = o3*sizeof(FFTComplex);
+ vzo2 = vec_ld(i2, &(out[0])); // zo2.r zo2.i z(o2+1).r z(o2+1).i
+ vzo2plus1 = vec_ld(i2+16, &(out[0]));
+ vzo3 = vec_ld(i3, &(out[0])); // zo3.r zo3.i z(o3+1).r z(o3+1).i
+ vzo3plus1 = vec_ld(i3+16, &(out[0]));
+ vz0 = vec_ld(0, &(out[0])); // z0.r z0.i z1.r z1.i
+ vz0plus1 = vec_ld(16, &(out[0]));
+ vzo1 = vec_ld(i1, &(out[0])); // zo1.r zo1.i z(o1+1).r z(o1+1).i
+ vzo1plus1 = vec_ld(i1+16, &(out[0]));
+
+ x0 = vec_add(vzo2, vzo3);
+ x1 = vec_sub(vzo2, vzo3);
+ y0 = vec_add(vzo2plus1, vzo3plus1);
+ y1 = vec_sub(vzo2plus1, vzo3plus1);
+
+ wr1 = vec_splats(wre[1]);
+ wi1 = vec_splats(wim[-1]);
+ wi2 = vec_splats(wim[-2]);
+ wi3 = vec_splats(wim[-3]);
+ wr2 = vec_splats(wre[2]);
+ wr3 = vec_splats(wre[3]);
+
+ x2 = vec_perm(x0, x1, vcprm(2,s2,3,s3));
+ x3 = vec_perm(x0, x1, vcprm(s3,3,s2,2));
+
+ y4 = vec_perm(y0, y1, vcprm(s1,1,s0,0));
+ y5 = vec_perm(y0, y1, vcprm(s3,3,s2,2));
+ y2 = vec_perm(y0, y1, vcprm(0,s0,1,s1));
+ y3 = vec_perm(y0, y1, vcprm(2,s2,3,s3));
+
+ ymulwi2 = vec_mul(y4, wi2);
+ ymulwi3 = vec_mul(y5, wi3);
+ x4 = vec_mul(x2, wr1);
+ x5 = vec_mul(x3, wi1);
+ y8 = vec_madd(y2, wr2, ymulwi2);
+ y9 = vec_msub(y2, wr2, ymulwi2);
+ x6 = vec_add(x4, x5);
+ x7 = vec_sub(x4, x5);
+ y13 = vec_madd(y3, wr3, ymulwi3);
+ y14 = vec_msub(y3, wr3, ymulwi3);
+
+ x8 = vec_perm(x6, x7, vcprm(0,1,s2,s3));
+ y10 = vec_perm(y8, y9, vcprm(0,1,s2,s3));
+ y15 = vec_perm(y13, y14, vcprm(0,1,s2,s3));
+
+ x9 = vec_perm(x0, x8, vcprm(0,1,s0,s2));
+ x10 = vec_perm(x1, x8, vcprm(1,0,s3,s1));
+
+ y16 = vec_perm(y10, y15, vcprm(0,2,s0,s2));
+ y17 = vec_perm(y10, y15, vcprm(3,1,s3,s1));
+
+ x11 = vec_add(vz0, x9);
+ x12 = vec_sub(vz0, x9);
+ x13 = vec_add(vzo1, x10);
+ x14 = vec_sub(vzo1, x10);
+
+ y18 = vec_add(vz0plus1, y16);
+ y19 = vec_sub(vz0plus1, y16);
+ y20 = vec_add(vzo1plus1, y17);
+ y21 = vec_sub(vzo1plus1, y17);
+
+ x15 = vec_perm(x13, x14, vcprm(0,s1,2,s3));
+ x16 = vec_perm(x13, x14, vcprm(s0,1,s2,3));
+ y22 = vec_perm(y20, y21, vcprm(0,s1,2,s3));
+ y23 = vec_perm(y20, y21, vcprm(s0,1,s2,3));
+
+
+ vec_st(x11, 0, &(out[0]));
+ vec_st(y18, 16, &(out[0]));
+ vec_st(x15, i1, &(out[0]));
+ vec_st(y22, i1+16, &(out[0]));
+ vec_st(x12, i2, &(out[0]));
+ vec_st(y19, i2+16, &(out[0]));
+ vec_st(x16, i3, &(out[0]));
+ vec_st(y23, i3+16, &(out[0]));
+
+ do {
+ out += 8;
+ wre += 4;
+ wim -= 4;
+ wr0 = vec_splats(wre[0]);
+ wr1 = vec_splats(wre[1]);
+ wi0 = vec_splats(wim[0]);
+ wi1 = vec_splats(wim[-1]);
+
+ wr2 = vec_splats(wre[2]);
+ wr3 = vec_splats(wre[3]);
+ wi2 = vec_splats(wim[-2]);
+ wi3 = vec_splats(wim[-3]);
+
+ vzo2 = vec_ld(i2, &(out[0])); // zo2.r zo2.i z(o2+1).r z(o2+1).i
+ vzo2plus1 = vec_ld(i2+16, &(out[0]));
+ vzo3 = vec_ld(i3, &(out[0])); // zo3.r zo3.i z(o3+1).r z(o3+1).i
+ vzo3plus1 = vec_ld(i3+16, &(out[0]));
+ vz0 = vec_ld(0, &(out[0])); // z0.r z0.i z1.r z1.i
+ vz0plus1 = vec_ld(16, &(out[0]));
+ vzo1 = vec_ld(i1, &(out[0])); // zo1.r zo1.i z(o1+1).r z(o1+1).i
+ vzo1plus1 = vec_ld(i1+16, &(out[0]));
+
+ x0 = vec_add(vzo2, vzo3);
+ x1 = vec_sub(vzo2, vzo3);
+
+ y0 = vec_add(vzo2plus1, vzo3plus1);
+ y1 = vec_sub(vzo2plus1, vzo3plus1);
+
+ x4 = vec_perm(x0, x1, vcprm(s1,1,s0,0));
+ x5 = vec_perm(x0, x1, vcprm(s3,3,s2,2));
+ x2 = vec_perm(x0, x1, vcprm(0,s0,1,s1));
+ x3 = vec_perm(x0, x1, vcprm(2,s2,3,s3));
+
+ y2 = vec_perm(y0, y1, vcprm(0,s0,1,s1));
+ y3 = vec_perm(y0, y1, vcprm(2,s2,3,s3));
+ xmulwi0 = vec_mul(x4, wi0);
+ xmulwi1 = vec_mul(x5, wi1);
+
+ y4 = vec_perm(y0, y1, vcprm(s1,1,s0,0));
+ y5 = vec_perm(y0, y1, vcprm(s3,3,s2,2));
+
+ x8 = vec_madd(x2, wr0, xmulwi0);
+ x9 = vec_msub(x2, wr0, xmulwi0);
+ ymulwi2 = vec_mul(y4, wi2);
+ ymulwi3 = vec_mul(y5, wi3);
+
+ x13 = vec_madd(x3, wr1, xmulwi1);
+ x14 = vec_msub(x3, wr1, xmulwi1);
+
+ y8 = vec_madd(y2, wr2, ymulwi2);
+ y9 = vec_msub(y2, wr2, ymulwi2);
+ y13 = vec_madd(y3, wr3, ymulwi3);
+ y14 = vec_msub(y3, wr3, ymulwi3);
+
+ x10 = vec_perm(x8, x9, vcprm(0,1,s2,s3));
+ x15 = vec_perm(x13, x14, vcprm(0,1,s2,s3));
+
+ y10 = vec_perm(y8, y9, vcprm(0,1,s2,s3));
+ y15 = vec_perm(y13, y14, vcprm(0,1,s2,s3));
+
+ x16 = vec_perm(x10, x15, vcprm(0,2,s0,s2));
+ x17 = vec_perm(x10, x15, vcprm(3,1,s3,s1));
+
+ y16 = vec_perm(y10, y15, vcprm(0,2,s0,s2));
+ y17 = vec_perm(y10, y15, vcprm(3,1,s3,s1));
+
+ x18 = vec_add(vz0, x16);
+ x19 = vec_sub(vz0, x16);
+ x20 = vec_add(vzo1, x17);
+ x21 = vec_sub(vzo1, x17);
+
+ y18 = vec_add(vz0plus1, y16);
+ y19 = vec_sub(vz0plus1, y16);
+ y20 = vec_add(vzo1plus1, y17);
+ y21 = vec_sub(vzo1plus1, y17);
+
+ x22 = vec_perm(x20, x21, vcprm(0,s1,2,s3));
+ x23 = vec_perm(x20, x21, vcprm(s0,1,s2,3));
+
+ y22 = vec_perm(y20, y21, vcprm(0,s1,2,s3));
+ y23 = vec_perm(y20, y21, vcprm(s0,1,s2,3));
+
+ vec_st(x18, 0, &(out[0]));
+ vec_st(y18, 16, &(out[0]));
+ vec_st(x22, i1, &(out[0]));
+ vec_st(y22, i1+16, &(out[0]));
+ vec_st(x19, i2, &(out[0]));
+ vec_st(y19, i2+16, &(out[0]));
+ vec_st(x23, i3, &(out[0]));
+ vec_st(y23, i3+16, &(out[0]));
+ } while (n-=2);
+}
+
+inline static void fft2_vsx_interleave(FFTComplex *z)
+{
+ FFTSample r1, i1;
+
+ r1 = z[0].re - z[1].re;
+ z[0].re += z[1].re;
+ z[1].re = r1;
+
+ i1 = z[0].im - z[1].im;
+ z[0].im += z[1].im;
+ z[1].im = i1;
+}
+
+inline static void fft4_vsx_interleave(FFTComplex *z)
+{
+ vec_f a, b, c, d;
+ float* out= (float*)z;
+ a = vec_ld(0, &(out[0]));
+ b = vec_ld(byte_2complex, &(out[0]));
+
+ c = vec_perm(a, b, vcprm(0,1,s2,s1));
+ d = vec_perm(a, b, vcprm(2,3,s0,s3));
+ a = vec_add(c, d);
+ b = vec_sub(c, d);
+
+ c = vec_perm(a, b, vcprm(0,1,s0,s1));
+ d = vec_perm(a, b, vcprm(2,3,s3,s2));
+
+ a = vec_add(c, d);
+ b = vec_sub(c, d);
+ vec_st(a, 0, &(out[0]));
+ vec_st(b, byte_2complex, &(out[0]));
+}
+
+inline static void fft8_vsx_interleave(FFTComplex *z)
+{
+ vec_f vz0, vz1, vz2, vz3;
+ vec_f x0, x1, x2, x3;
+ vec_f x4, x5, x6, x7;
+ vec_f x8, x9, x10, x11;
+ vec_f x12, x13, x14, x15;
+ vec_f x16, x17, x18, x19;
+ vec_f x20, x21, x22, x23;
+ vec_f x24, x25, x26, x27;
+ vec_f x28, x29, x30, x31;
+ vec_f x32, x33, x34;
+
+ float* out= (float*)z;
+ vec_f vc1 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
+
+ vz0 = vec_ld(0, &(out[0]));
+ vz1 = vec_ld(byte_2complex, &(out[0]));
+ vz2 = vec_ld(byte_4complex, &(out[0]));
+ vz3 = vec_ld(byte_6complex, &(out[0]));
+
+ x0 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+ x1 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+ x2 = vec_perm(vz2, vz3, vcprm(2,1,s0,s1));
+ x3 = vec_perm(vz2, vz3, vcprm(0,3,s2,s3));
+
+ x4 = vec_add(x0, x1);
+ x5 = vec_sub(x0, x1);
+ x6 = vec_add(x2, x3);
+ x7 = vec_sub(x2, x3);
+
+ x8 = vec_perm(x4, x5, vcprm(0,1,s0,s1));
+ x9 = vec_perm(x4, x5, vcprm(2,3,s3,s2));
+ x10 = vec_perm(x6, x7, vcprm(2,1,s2,s1));
+ x11 = vec_perm(x6, x7, vcprm(0,3,s0,s3));
+
+ x12 = vec_add(x8, x9);
+ x13 = vec_sub(x8, x9);
+ x14 = vec_add(x10, x11);
+ x15 = vec_sub(x10, x11);
+ x16 = vec_perm(x12, x13, vcprm(0,s0,1,s1));
+ x17 = vec_perm(x14, x15, vcprm(0,s0,1,s1));
+ x18 = vec_perm(x16, x17, vcprm(s0,s3,s2,s1));
+ x19 = vec_add(x16, x18); // z0.r z2.r z0.i z2.i
+ x20 = vec_sub(x16, x18); // z4.r z6.r z4.i z6.i
+
+ x21 = vec_perm(x12, x13, vcprm(2,s2,3,s3));
+ x22 = vec_perm(x14, x15, vcprm(2,3,s2,s3));
+ x23 = vec_perm(x14, x15, vcprm(3,2,s3,s2));
+ x24 = vec_add(x22, x23);
+ x25 = vec_sub(x22, x23);
+ x26 = vec_mul( vec_perm(x24, x25, vcprm(2,s2,0,s0)), vc1);
+
+ x27 = vec_add(x21, x26); // z1.r z7.r z1.i z3.i
+ x28 = vec_sub(x21, x26); //z5.r z3.r z5.i z7.i
+
+ x29 = vec_perm(x19, x27, vcprm(0,2,s0,s2)); // z0.r z0.i z1.r z1.i
+ x30 = vec_perm(x19, x27, vcprm(1,3,s1,s3)); // z2.r z2.i z7.r z3.i
+ x31 = vec_perm(x20, x28, vcprm(0,2,s0,s2)); // z4.r z4.i z5.r z5.i
+ x32 = vec_perm(x20, x28, vcprm(1,3,s1,s3)); // z6.r z6.i z3.r z7.i
+ x33 = vec_perm(x30, x32, vcprm(0,1,s2,3)); // z2.r z2.i z3.r z3.i
+ x34 = vec_perm(x30, x32, vcprm(s0,s1,2,s3)); // z6.r z6.i z7.r z7.i
+
+ vec_st(x29, 0, &(out[0]));
+ vec_st(x33, byte_2complex, &(out[0]));
+ vec_st(x31, byte_4complex, &(out[0]));
+ vec_st(x34, byte_6complex, &(out[0]));
+}
+
+inline static void fft16_vsx_interleave(FFTComplex *z)
+{
+ float* out= (float*)z;
+ vec_f vc0 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
+ vec_f vc1 = {ff_cos_16[1], ff_cos_16[1], ff_cos_16[1], ff_cos_16[1]};
+ vec_f vc2 = {ff_cos_16[3], ff_cos_16[3], ff_cos_16[3], ff_cos_16[3]};
+ vec_f vz0, vz1, vz2, vz3;
+ vec_f vz4, vz5, vz6, vz7;
+ vec_f x0, x1, x2, x3;
+ vec_f x4, x5, x6, x7;
+ vec_f x8, x9, x10, x11;
+ vec_f x12, x13, x14, x15;
+ vec_f x16, x17, x18, x19;
+ vec_f x20, x21, x22, x23;
+ vec_f x24, x25, x26, x27;
+ vec_f x28, x29, x30, x31;
+ vec_f x32, x33, x34, x35;
+ vec_f x36, x37, x38, x39;
+ vec_f x40, x41, x42, x43;
+ vec_f x44, x45, x46, x47;
+ vec_f x48, x49, x50, x51;
+ vec_f x52, x53, x54, x55;
+ vec_f x56, x57, x58, x59;
+ vec_f x60, x61, x62, x63;
+ vec_f x64, x65, x66, x67;
+ vec_f x68, x69, x70, x71;
+ vec_f x72, x73, x74, x75;
+ vec_f x76, x77, x78, x79;
+ vec_f x80, x81, x82, x83;
+ vec_f x84, x85, x86;
+
+ vz0 = vec_ld(0, &(out[0]));
+ vz1 = vec_ld(byte_2complex, &(out[0]));
+ vz2 = vec_ld(byte_4complex, &(out[0]));
+ vz3 = vec_ld(byte_6complex, &(out[0]));
+ vz4 = vec_ld(byte_8complex, &(out[0]));
+ vz5 = vec_ld(byte_10complex, &(out[0]));
+ vz6 = vec_ld(byte_12complex, &(out[0]));
+ vz7 = vec_ld(byte_14complex, &(out[0]));
+
+ x0 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+ x1 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+ x2 = vec_perm(vz2, vz3, vcprm(0,1,s0,s1));
+ x3 = vec_perm(vz2, vz3, vcprm(2,3,s2,s3));
+
+ x4 = vec_perm(vz4, vz5, vcprm(0,1,s2,s1));
+ x5 = vec_perm(vz4, vz5, vcprm(2,3,s0,s3));
+ x6 = vec_perm(vz6, vz7, vcprm(0,1,s2,s1));
+ x7 = vec_perm(vz6, vz7, vcprm(2,3,s0,s3));
+
+ x8 = vec_add(x0, x1);
+ x9 = vec_sub(x0, x1);
+ x10 = vec_add(x2, x3);
+ x11 = vec_sub(x2, x3);
+
+ x12 = vec_add(x4, x5);
+ x13 = vec_sub(x4, x5);
+ x14 = vec_add(x6, x7);
+ x15 = vec_sub(x6, x7);
+
+ x16 = vec_perm(x8, x9, vcprm(0,1,s0,s1));
+ x17 = vec_perm(x8, x9, vcprm(2,3,s3,s2));
+ x18 = vec_perm(x10, x11, vcprm(2,1,s1,s2));
+ x19 = vec_perm(x10, x11, vcprm(0,3,s0,s3));
+ x20 = vec_perm(x12, x14, vcprm(0,1,s0, s1));
+ x21 = vec_perm(x12, x14, vcprm(2,3,s2,s3));
+ x22 = vec_perm(x13, x15, vcprm(0,1,s0,s1));
+ x23 = vec_perm(x13, x15, vcprm(3,2,s3,s2));
+
+ x24 = vec_add(x16, x17);
+ x25 = vec_sub(x16, x17);
+ x26 = vec_add(x18, x19);
+ x27 = vec_sub(x18, x19);
+ x28 = vec_add(x20, x21);
+ x29 = vec_sub(x20, x21);
+ x30 = vec_add(x22, x23);
+ x31 = vec_sub(x22, x23);
+
+ x32 = vec_add(x24, x26);
+ x33 = vec_sub(x24, x26);
+ x34 = vec_perm(x32, x33, vcprm(0,1,s0,s1));
+
+ x35 = vec_perm(x28, x29, vcprm(2,1,s1,s2));
+ x36 = vec_perm(x28, x29, vcprm(0,3,s0,s3));
+ x37 = vec_add(x35, x36);
+ x38 = vec_sub(x35, x36);
+ x39 = vec_perm(x37, x38, vcprm(0,1,s1,s0));
+
+ x40 = vec_perm(x27, x38, vcprm(3,2,s2,s3));
+ x41 = vec_perm(x26, x37, vcprm(2,3,s3,s2));
+ x42 = vec_add(x40, x41);
+ x43 = vec_sub(x40, x41);
+ x44 = vec_mul(x42, vc0);
+ x45 = vec_mul(x43, vc0);
+
+ x46 = vec_add(x34, x39); // z0.r z0.i z4.r z4.i
+ x47 = vec_sub(x34, x39); // z8.r z8.i z12.r z12.i
+
+ x48 = vec_perm(x30, x31, vcprm(2,1,s1,s2));
+ x49 = vec_perm(x30, x31, vcprm(0,3,s3,s0));
+ x50 = vec_add(x48, x49);
+ x51 = vec_sub(x48, x49);
+ x52 = vec_mul(x50, vc1);
+ x53 = vec_mul(x50, vc2);
+ x54 = vec_mul(x51, vc1);
+ x55 = vec_mul(x51, vc2);
+
+ x56 = vec_perm(x24, x25, vcprm(2,3,s2,s3));
+ x57 = vec_perm(x44, x45, vcprm(0,1,s1,s0));
+ x58 = vec_add(x56, x57);
+ x59 = vec_sub(x56, x57);
+
+ x60 = vec_perm(x54, x55, vcprm(1,0,3,2));
+ x61 = vec_perm(x54, x55, vcprm(s1,s0,s3,s2));
+ x62 = vec_add(x52, x61);
+ x63 = vec_sub(x52, x61);
+ x64 = vec_add(x60, x53);
+ x65 = vec_sub(x60, x53);
+ x66 = vec_perm(x62, x64, vcprm(0,1,s3,s2));
+ x67 = vec_perm(x63, x65, vcprm(s0,s1,3,2));
+
+ x68 = vec_add(x58, x66); // z1.r z1.i z3.r z3.i
+ x69 = vec_sub(x58, x66); // z9.r z9.i z11.r z11.i
+ x70 = vec_add(x59, x67); // z5.r z5.i z15.r z15.i
+ x71 = vec_sub(x59, x67); // z13.r z13.i z7.r z7.i
+
+ x72 = vec_perm(x25, x27, vcprm(s1,s0,s2,s3));
+ x73 = vec_add(x25, x72);
+ x74 = vec_sub(x25, x72);
+ x75 = vec_perm(x73, x74, vcprm(0,1,s0,s1));
+ x76 = vec_perm(x44, x45, vcprm(3,2,s2,s3));
+ x77 = vec_add(x75, x76); // z2.r z2.i z6.r z6.i
+ x78 = vec_sub(x75, x76); // z10.r z10.i z14.r z14.i
+
+ x79 = vec_perm(x46, x68, vcprm(0,1,s0,s1)); // z0.r z0.i z1.r z1.i
+ x80 = vec_perm(x77, x68, vcprm(0,1,s2,s3)); // z2.r z2.i z3.r z3.i
+ x81 = vec_perm(x46, x70, vcprm(2,3,s0,s1)); // z4.r z4.i z5.r z5.i
+ x82 = vec_perm(x71, x77, vcprm(s2,s3,2,3)); // z6.r z6.i z7.r z7.i
+ vec_st(x79, 0, &(out[0]));
+ vec_st(x80, byte_2complex, &(out[0]));
+ vec_st(x81, byte_4complex, &(out[0]));
+ vec_st(x82, byte_6complex, &(out[0]));
+ x83 = vec_perm(x47, x69, vcprm(0,1,s0,s1)); // z8.r z8.i z9.r z9.i
+ x84 = vec_perm(x78, x69, vcprm(0,1,s2,s3)); // z10.r z10.i z11.r z11.i
+ x85 = vec_perm(x47, x71, vcprm(2,3,s0,s1)); // z12.r z12.i z13.r z13.i
+ x86 = vec_perm(x70, x78, vcprm(s2,s3,2,3)); // z14.r z14.i z15.r z15.i
+ vec_st(x83, byte_8complex, &(out[0]));
+ vec_st(x84, byte_10complex, &(out[0]));
+ vec_st(x85, byte_12complex, &(out[0]));
+ vec_st(x86, byte_14complex, &(out[0]));
+}
+
+inline static void fft4_vsx(FFTComplex *z)
+{
+ vec_f a, b, c, d;
+ float* out= (float*)z;
+ a = vec_ld(0, &(out[0]));
+ b = vec_ld(byte_2complex, &(out[0]));
+
+ c = vec_perm(a, b, vcprm(0,1,s2,s1));
+ d = vec_perm(a, b, vcprm(2,3,s0,s3));
+ a = vec_add(c, d);
+ b = vec_sub(c, d);
+
+ c = vec_perm(a, b, vcprm(0,s0,1,s1));
+ d = vec_perm(a, b, vcprm(2,s3,3,s2));
+
+ a = vec_add(c, d);
+ b = vec_sub(c, d);
+
+ c = vec_perm(a, b, vcprm(0,1,s0,s1));
+ d = vec_perm(a, b, vcprm(2,3,s2,s3));
+
+ vec_st(c, 0, &(out[0]));
+ vec_st(d, byte_2complex, &(out[0]));
+ return;
+}
+
+inline static void fft8_vsx(FFTComplex *z)
+{
+ vec_f vz0, vz1, vz2, vz3;
+ vec_f vz4, vz5, vz6, vz7, vz8;
+
+ float* out= (float*)z;
+ vec_f vc0 = {0.0, 0.0, 0.0, 0.0};
+ vec_f vc1 = {-sqrthalf, sqrthalf, sqrthalf, -sqrthalf};
+ vec_f vc2 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
+
+ vz0 = vec_ld(0, &(out[0]));
+ vz1 = vec_ld(byte_2complex, &(out[0]));
+ vz2 = vec_ld(byte_4complex, &(out[0]));
+ vz3 = vec_ld(byte_6complex, &(out[0]));
+
+ vz6 = vec_perm(vz2, vz3, vcprm(0,s0,1,s1));
+ vz7 = vec_perm(vz2, vz3, vcprm(2,s2,3,s3));
+ vz4 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+
+ vz2 = vec_add(vz6, vz7);
+ vz3 = vec_sub(vz6, vz7);
+ vz8 = vec_perm(vz3, vz3, vcprm(2,3,0,1));
+
+ vz0 = vec_add(vz4, vz5);
+ vz1 = vec_sub(vz4, vz5);
+
+ vz3 = vec_madd(vz3, vc1, vc0);
+ vz3 = vec_madd(vz8, vc2, vz3);
+
+ vz4 = vec_perm(vz0, vz1, vcprm(0,s0,1,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,s3,3,s2));
+ vz6 = vec_perm(vz2, vz3, vcprm(1,2,s3,s0));
+ vz7 = vec_perm(vz2, vz3, vcprm(0,3,s2,s1));
+
+ vz0 = vec_add(vz4, vz5);
+ vz1 = vec_sub(vz4, vz5);
+ vz2 = vec_add(vz6, vz7);
+ vz3 = vec_sub(vz6, vz7);
+
+ vz4 = vec_perm(vz0, vz1, vcprm(0,1,s0,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,3,s2,s3));
+ vz6 = vec_perm(vz2, vz3, vcprm(0,2,s1,s3));
+ vz7 = vec_perm(vz2, vz3, vcprm(1,3,s0,s2));
+
+
+ vz2 = vec_sub(vz4, vz6);
+ vz3 = vec_sub(vz5, vz7);
+
+ vz0 = vec_add(vz4, vz6);
+ vz1 = vec_add(vz5, vz7);
+
+ vec_st(vz0, 0, &(out[0]));
+ vec_st(vz1, byte_2complex, &(out[0]));
+ vec_st(vz2, byte_4complex, &(out[0]));
+ vec_st(vz3, byte_6complex, &(out[0]));
+ return;
+}
+
+inline static void fft16_vsx(FFTComplex *z)
+{
+ float* out= (float*)z;
+ vec_f vc0 = {0.0, 0.0, 0.0, 0.0};
+ vec_f vc1 = {-sqrthalf, sqrthalf, sqrthalf, -sqrthalf};
+ vec_f vc2 = {sqrthalf, sqrthalf, sqrthalf, sqrthalf};
+ vec_f vc3 = {1.0, 0.92387953, sqrthalf, 0.38268343};
+ vec_f vc4 = {0.0, 0.38268343, sqrthalf, 0.92387953};
+ vec_f vc5 = {-0.0, -0.38268343, -sqrthalf, -0.92387953};
+
+ vec_f vz0, vz1, vz2, vz3;
+ vec_f vz4, vz5, vz6, vz7;
+ vec_f vz8, vz9, vz10, vz11;
+ vec_f vz12, vz13;
+
+ vz0 = vec_ld(byte_8complex, &(out[0]));
+ vz1 = vec_ld(byte_10complex, &(out[0]));
+ vz2 = vec_ld(byte_12complex, &(out[0]));
+ vz3 = vec_ld(byte_14complex, &(out[0]));
+
+ vz4 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+ vz6 = vec_perm(vz2, vz3, vcprm(0,1,s2,s1));
+ vz7 = vec_perm(vz2, vz3, vcprm(2,3,s0,s3));
+
+ vz0 = vec_add(vz4, vz5);
+ vz1 = vec_sub(vz4, vz5);
+ vz2 = vec_add(vz6, vz7);
+ vz3 = vec_sub(vz6, vz7);
+
+ vz4 = vec_perm(vz0, vz1, vcprm(0,s0,1,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,s3,3,s2));
+ vz6 = vec_perm(vz2, vz3, vcprm(0,s0,1,s1));
+ vz7 = vec_perm(vz2, vz3, vcprm(2,s3,3,s2));
+
+ vz0 = vec_add(vz4, vz5);
+ vz1 = vec_sub(vz4, vz5);
+ vz2 = vec_add(vz6, vz7);
+ vz3 = vec_sub(vz6, vz7);
+
+ vz4 = vec_perm(vz0, vz1, vcprm(0,1,s0,s1));
+ vz5 = vec_perm(vz0, vz1, vcprm(2,3,s2,s3));
+
+ vz6 = vec_perm(vz2, vz3, vcprm(0,1,s0,s1));
+ vz7 = vec_perm(vz2, vz3, vcprm(2,3,s2,s3));
+
+ vz0 = vec_ld(0, &(out[0]));
+ vz1 = vec_ld(byte_2complex, &(out[0]));
+ vz2 = vec_ld(byte_4complex, &(out[0]));
+ vz3 = vec_ld(byte_6complex, &(out[0]));
+ vz10 = vec_perm(vz2, vz3, vcprm(0,s0,1,s1));
+ vz11 = vec_perm(vz2, vz3, vcprm(2,s2,3,s3));
+ vz8 = vec_perm(vz0, vz1, vcprm(0,1,s2,s1));
+ vz9 = vec_perm(vz0, vz1, vcprm(2,3,s0,s3));
+
+ vz2 = vec_add(vz10, vz11);
+ vz3 = vec_sub(vz10, vz11);
+ vz12 = vec_perm(vz3, vz3, vcprm(2,3,0,1));
+ vz0 = vec_add(vz8, vz9);
+ vz1 = vec_sub(vz8, vz9);
+
+ vz3 = vec_madd(vz3, vc1, vc0);
+ vz3 = vec_madd(vz12, vc2, vz3);
+ vz8 = vec_perm(vz0, vz1, vcprm(0,s0,1,s1));
+ vz9 = vec_perm(vz0, vz1, vcprm(2,s3,3,s2));
+ vz10 = vec_perm(vz2, vz3, vcprm(1,2,s3,s0));
+ vz11 = vec_perm(vz2, vz3, vcprm(0,3,s2,s1));
+
+ vz0 = vec_add(vz8, vz9);
+ vz1 = vec_sub(vz8, vz9);
+ vz2 = vec_add(vz10, vz11);
+ vz3 = vec_sub(vz10, vz11);
+
+ vz8 = vec_perm(vz0, vz1, vcprm(0,1,s0,s1));
+ vz9 = vec_perm(vz0, vz1, vcprm(2,3,s2,s3));
+ vz10 = vec_perm(vz2, vz3, vcprm(0,2,s1,s3));
+ vz11 = vec_perm(vz2, vz3, vcprm(1,3,s0,s2));
+
+ vz2 = vec_sub(vz8, vz10);
+ vz3 = vec_sub(vz9, vz11);
+ vz0 = vec_add(vz8, vz10);
+ vz1 = vec_add(vz9, vz11);
+
+ vz8 = vec_madd(vz4, vc3, vc0);
+ vz9 = vec_madd(vz5, vc3, vc0);
+ vz10 = vec_madd(vz6, vc3, vc0);
+ vz11 = vec_madd(vz7, vc3, vc0);
+
+ vz8 = vec_madd(vz5, vc4, vz8);
+ vz9 = vec_madd(vz4, vc5, vz9);
+ vz10 = vec_madd(vz7, vc5, vz10);
+ vz11 = vec_madd(vz6, vc4, vz11);
+
+ vz12 = vec_sub(vz10, vz8);
+ vz10 = vec_add(vz10, vz8);
+
+ vz13 = vec_sub(vz9, vz11);
+ vz11 = vec_add(vz9, vz11);
+
+ vz4 = vec_sub(vz0, vz10);
+ vz0 = vec_add(vz0, vz10);
+
+ vz7 = vec_sub(vz3, vz12);
+ vz3 = vec_add(vz3, vz12);
+
+ vz5 = vec_sub(vz1, vz11);
+ vz1 = vec_add(vz1, vz11);
+
+ vz6 = vec_sub(vz2, vz13);
+ vz2 = vec_add(vz2, vz13);
+
+ vec_st(vz0, 0, &(out[0]));
+ vec_st(vz1, byte_2complex, &(out[0]));
+ vec_st(vz2, byte_4complex, &(out[0]));
+ vec_st(vz3, byte_6complex, &(out[0]));
+ vec_st(vz4, byte_8complex, &(out[0]));
+ vec_st(vz5, byte_10complex, &(out[0]));
+ vec_st(vz6, byte_12complex, &(out[0]));
+ vec_st(vz7, byte_14complex, &(out[0]));
+ return;
+
+}
+inline static void pass_vsx(FFTComplex * z, const FFTSample * wre, unsigned int n)
+{
+ int o1 = n<<1;
+ int o2 = n<<2;
+ int o3 = o1+o2;
+ int i1, i2, i3;
+ FFTSample* out = (FFTSample*)z;
+ const FFTSample *wim = wre+o1;
+ vec_f v0, v1, v2, v3;
+ vec_f v4, v5, v6, v7;
+ vec_f v8, v9, v10, v11;
+ vec_f v12, v13;
+
+ n = n-2;
+ i1 = o1*sizeof(FFTComplex);
+ i2 = o2*sizeof(FFTComplex);
+ i3 = o3*sizeof(FFTComplex);
+
+ v8 = vec_ld(0, &(wre[0]));
+ v10 = vec_ld(0, &(wim[0]));
+ v9 = vec_ld(0, &(wim[-4]));
+ v9 = vec_perm(v9, v10, vcprm(s0,3,2,1));
+
+ v4 = vec_ld(i2, &(out[0]));
+ v5 = vec_ld(i2+16, &(out[0]));
+ v6 = vec_ld(i3, &(out[0]));
+ v7 = vec_ld(i3+16, &(out[0]));
+ v10 = vec_mul(v4, v8); // r2*wre
+ v11 = vec_mul(v5, v8); // i2*wre
+ v12 = vec_mul(v6, v8); // r3*wre
+ v13 = vec_mul(v7, v8); // i3*wre
+
+ v0 = vec_ld(0, &(out[0])); // r0
+ v3 = vec_ld(i1+16, &(out[0])); // i1
+ v10 = vec_madd(v5, v9, v10); // r2*wim
+ v11 = vec_nmsub(v4, v9, v11); // i2*wim
+ v12 = vec_nmsub(v7, v9, v12); // r3*wim
+ v13 = vec_madd(v6, v9, v13); // i3*wim
+
+ v1 = vec_ld(16, &(out[0])); // i0
+ v2 = vec_ld(i1, &(out[0])); // r1
+ v8 = vec_sub(v12, v10);
+ v12 = vec_add(v12, v10);
+ v9 = vec_sub(v11, v13);
+ v13 = vec_add(v11, v13);
+ v4 = vec_sub(v0, v12);
+ v0 = vec_add(v0, v12);
+ v7 = vec_sub(v3, v8);
+ v3 = vec_add(v3, v8);
+
+ vec_st(v0, 0, &(out[0])); // r0
+ vec_st(v3, i1+16, &(out[0])); // i1
+ vec_st(v4, i2, &(out[0])); // r2
+ vec_st(v7, i3+16, &(out[0]));// i3
+
+ v5 = vec_sub(v1, v13);
+ v1 = vec_add(v1, v13);
+ v6 = vec_sub(v2, v9);
+ v2 = vec_add(v2, v9);
+
+ vec_st(v1, 16, &(out[0])); // i0
+ vec_st(v2, i1, &(out[0])); // r1
+ vec_st(v5, i2+16, &(out[0])); // i2
+ vec_st(v6, i3, &(out[0])); // r3
+
+ do {
+ out += 8;
+ wre += 4;
+ wim -= 4;
+
+ v8 = vec_ld(0, &(wre[0]));
+ v10 = vec_ld(0, &(wim[0]));
+ v9 = vec_ld(0, &(wim[-4]));
+ v9 = vec_perm(v9, v10, vcprm(s0,3,2,1));
+
+ v4 = vec_ld(i2, &(out[0])); // r2
+ v5 = vec_ld(i2+16, &(out[0])); // i2
+ v6 = vec_ld(i3, &(out[0])); // r3
+ v7 = vec_ld(i3+16, &(out[0]));// i3
+ v10 = vec_mul(v4, v8); // r2*wre
+ v11 = vec_mul(v5, v8); // i2*wre
+ v12 = vec_mul(v6, v8); // r3*wre
+ v13 = vec_mul(v7, v8); // i3*wre
+
+ v0 = vec_ld(0, &(out[0])); // r0
+ v3 = vec_ld(i1+16, &(out[0])); // i1
+ v10 = vec_madd(v5, v9, v10); // r2*wim
+ v11 = vec_nmsub(v4, v9, v11); // i2*wim
+ v12 = vec_nmsub(v7, v9, v12); // r3*wim
+ v13 = vec_madd(v6, v9, v13); // i3*wim
+
+ v1 = vec_ld(16, &(out[0])); // i0
+ v2 = vec_ld(i1, &(out[0])); // r1
+ v8 = vec_sub(v12, v10);
+ v12 = vec_add(v12, v10);
+ v9 = vec_sub(v11, v13);
+ v13 = vec_add(v11, v13);
+ v4 = vec_sub(v0, v12);
+ v0 = vec_add(v0, v12);
+ v7 = vec_sub(v3, v8);
+ v3 = vec_add(v3, v8);
+
+ vec_st(v0, 0, &(out[0])); // r0
+ vec_st(v3, i1+16, &(out[0])); // i1
+ vec_st(v4, i2, &(out[0])); // r2
+ vec_st(v7, i3+16, &(out[0])); // i3
+
+ v5 = vec_sub(v1, v13);
+ v1 = vec_add(v1, v13);
+ v6 = vec_sub(v2, v9);
+ v2 = vec_add(v2, v9);
+
+ vec_st(v1, 16, &(out[0])); // i0
+ vec_st(v2, i1, &(out[0])); // r1
+ vec_st(v5, i2+16, &(out[0])); // i2
+ vec_st(v6, i3, &(out[0])); // r3
+ } while (n-=2);
+}
+
+#endif
+
+#endif /* AVCODEC_PPC_FFT_VSX_H */
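
For orientation: per twiddle pair (wre, wim), the permute-heavy vector code in pass_vsx/pass_vsx_interleave computes the same butterfly as the scalar pass in FFmpeg's generic fft_template.c. A scalar sketch of one group (reference semantics only; the VSX version processes two complex values per register and juggles the re/im interleaving with vec_perm):

    /* Scalar reference for one twiddle group of pass_vsx (sketch).
     * o1/o2/o3 are the same offsets as in the vector code. */
    static void pass_one_group(FFTComplex *z, float wre, float wim,
                               int o1, int o2, int o3)
    {
        float t1, t2, t3, t4, t5, t6;

        /* twiddle multiplies: z[o2]*conj(w), z[o3]*w */
        t1 = z[o2].re * wre + z[o2].im * wim;
        t2 = z[o2].im * wre - z[o2].re * wim;
        t5 = z[o3].re * wre - z[o3].im * wim;
        t6 = z[o3].re * wim + z[o3].im * wre;

        /* butterflies */
        t3 = t5 - t1;  t5 += t1;
        t4 = t2 - t6;  t6 += t2;

        z[o2].re = z[0].re  - t5;  z[0].re  += t5;
        z[o2].im = z[0].im  - t6;  z[0].im  += t6;
        z[o3].re = z[o1].re - t4;  z[o1].re += t4;
        z[o3].im = z[o1].im - t3;  z[o1].im += t3;
    }
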
diff --git a/libavcodec/ppc/fmtconvert_altivec.c b/libavcodec/ppc/fmtconvert_altivec.c
index 14aaf6bc9a..10a7169d1b 100644
--- a/libavcodec/ppc/fmtconvert_altivec.c
+++ b/libavcodec/ppc/fmtconvert_altivec.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/h264chroma_init.c b/libavcodec/ppc/h264chroma_init.c
index 6d656d46ba..876efeca09 100644
--- a/libavcodec/ppc/h264chroma_init.c
+++ b/libavcodec/ppc/h264chroma_init.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/h264chroma_template.c b/libavcodec/ppc/h264chroma_template.c
index 293fef5c90..7436e118c5 100644
--- a/libavcodec/ppc/h264chroma_template.c
+++ b/libavcodec/ppc/h264chroma_template.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/h264dsp.c b/libavcodec/ppc/h264dsp.c
index 31dc141d29..7fc7e0bc7f 100644
--- a/libavcodec/ppc/h264dsp.c
+++ b/libavcodec/ppc/h264dsp.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/h264qpel.c b/libavcodec/ppc/h264qpel.c
index f840277c88..4a01f17926 100644
--- a/libavcodec/ppc/h264qpel.c
+++ b/libavcodec/ppc/h264qpel.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/h264qpel_template.c b/libavcodec/ppc/h264qpel_template.c
index fe83146e63..360e9e35b9 100644
--- a/libavcodec/ppc/h264qpel_template.c
+++ b/libavcodec/ppc/h264qpel_template.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/hpeldsp_altivec.c b/libavcodec/ppc/hpeldsp_altivec.c
index fd6ae73915..79c2af8ac3 100644
--- a/libavcodec/ppc/hpeldsp_altivec.c
+++ b/libavcodec/ppc/hpeldsp_altivec.c
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -36,6 +36,38 @@
#if HAVE_ALTIVEC
/* next one assumes that ((line_size % 16) == 0) */
+#if HAVE_VSX
+void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
+{
+ register vector unsigned char pixelsv1;
+ register vector unsigned char pixelsv1B;
+ register vector unsigned char pixelsv1C;
+ register vector unsigned char pixelsv1D;
+
+ int i;
+ register ptrdiff_t line_size_2 = line_size << 1;
+ register ptrdiff_t line_size_3 = line_size + line_size_2;
+ register ptrdiff_t line_size_4 = line_size << 2;
+
+// hand-unrolling the loop by 4 gains about 15%
+// minimum execution time goes from 74 to 60 cycles
+// it's faster than -funroll-loops, but using
+// -funroll-loops w/ this is bad - 74 cycles again.
+// all this is on a 7450, tuning for the 7450
+ for (i = 0; i < h; i += 4) {
+ pixelsv1 = vec_vsx_ld( 0, pixels);
+ pixelsv1B = vec_vsx_ld(line_size, pixels);
+ pixelsv1C = vec_vsx_ld(line_size_2, pixels);
+ pixelsv1D = vec_vsx_ld(line_size_3, pixels);
+ vec_vsx_st(pixelsv1, 0, (unsigned char*)block);
+ vec_vsx_st(pixelsv1B, line_size, (unsigned char*)block);
+ vec_vsx_st(pixelsv1C, line_size_2, (unsigned char*)block);
+ vec_vsx_st(pixelsv1D, line_size_3, (unsigned char*)block);
+ pixels += line_size_4;
+ block += line_size_4;
+ }
+}
+#else
void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
register vector unsigned char pixelsv1, pixelsv2;
@@ -76,6 +108,8 @@ void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t li
}
}
+#endif /* HAVE_VSX */
+
/* next one assumes that ((line_size % 16) == 0) */
#define op_avg(a,b) a = ( ((a)|(b)) - ((((a)^(b))&0xFEFEFEFEUL)>>1) )
void ff_avg_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
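
The VSX variant relies on vec_vsx_ld/vec_vsx_st, which, unlike vec_ld/vec_st, accept unaligned addresses, so the vec_lvsl/vec_perm realignment of the plain AltiVec version is unnecessary. Functionally the routine is a plain 16-byte-per-row copy; a scalar reference sketch:

    #include <string.h>
    #include <stdint.h>
    #include <stddef.h>

    /* Scalar reference for put_pixels16: one 16-pixel row per stride;
     * the vector code above copies four rows per loop iteration. */
    static void put_pixels16_c(uint8_t *block, const uint8_t *pixels,
                               ptrdiff_t line_size, int h)
    {
        int i;
        for (i = 0; i < h; i++) {
            memcpy(block, pixels, 16);
            pixels += line_size;
            block  += line_size;
        }
    }
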
diff --git a/libavcodec/ppc/hpeldsp_altivec.h b/libavcodec/ppc/hpeldsp_altivec.h
index 98dd80ea1c..590809f539 100644
--- a/libavcodec/ppc/hpeldsp_altivec.h
+++ b/libavcodec/ppc/hpeldsp_altivec.h
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/huffyuvdsp_altivec.c b/libavcodec/ppc/huffyuvdsp_altivec.c
index 7c34a67ea4..6701524e4a 100644
--- a/libavcodec/ppc/huffyuvdsp_altivec.c
+++ b/libavcodec/ppc/huffyuvdsp_altivec.c
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -33,7 +33,7 @@
#include "libavcodec/huffyuvdsp.h"
#if HAVE_ALTIVEC
-static void add_bytes_altivec(uint8_t *dst, uint8_t *src, int w)
+static void add_bytes_altivec(uint8_t *dst, uint8_t *src, intptr_t w)
{
register int i;
register vector unsigned char vdst, vsrc;
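
For reference, the scalar operation that add_bytes_altivec vectorizes is a
plain byte-wise accumulation; a C sketch of the equivalent (reconstructed
from the function's role in huffyuvdsp, not copied from this patch):

    static void add_bytes_c(uint8_t *dst, uint8_t *src, intptr_t w)
    {
        intptr_t i;
        for (i = 0; i < w; i++)
            dst[i] += src[i]; /* 8-bit wraparound add, like vec_add on bytes */
    }

The signature change from int to intptr_t only widens the length argument;
the behaviour is unchanged.
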
diff --git a/libavcodec/ppc/idctdsp.c b/libavcodec/ppc/idctdsp.c
index 17f7dbbc7f..5ef514b51b 100644
--- a/libavcodec/ppc/idctdsp.c
+++ b/libavcodec/ppc/idctdsp.c
@@ -1,28 +1,28 @@
/*
* Copyright (c) 2001 Michel Lespinasse
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* NOTE: This code is based on GPL code from the libmpeg2 project. The
* author, Michel Lespinasse, has given explicit permission to release
- * under LGPL as part of Libav.
+ * under LGPL as part of FFmpeg.
*
- * Libav integration by Dieter Shirley
+ * FFmpeg integration by Dieter Shirley
*
* This file is a direct copy of the AltiVec IDCT module from the libmpeg2
* project. I've deleted all of the libmpeg2-specific code, renamed the
@@ -153,6 +153,22 @@ static const vec_s16 constants[5] = {
{ 19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722 }
};
+static void idct_altivec(int16_t *blk)
+{
+ vec_s16 *block = (vec_s16 *) blk;
+
+ IDCT;
+
+ block[0] = vx0;
+ block[1] = vx1;
+ block[2] = vx2;
+ block[3] = vx3;
+ block[4] = vx4;
+ block[5] = vx5;
+ block[6] = vx6;
+ block[7] = vx7;
+}
+
static void idct_put_altivec(uint8_t *dest, int stride, int16_t *blk)
{
vec_s16 *block = (vec_s16 *) blk;
@@ -234,9 +250,10 @@ av_cold void ff_idctdsp_init_ppc(IDCTDSPContext *c, AVCodecContext *avctx,
if (!PPC_ALTIVEC(av_get_cpu_flags()))
return;
- if (!high_bit_depth) {
- if ((avctx->idct_algo == FF_IDCT_AUTO) ||
+ if (!high_bit_depth && avctx->lowres == 0) {
+ if ((avctx->idct_algo == FF_IDCT_AUTO && !(avctx->flags & CODEC_FLAG_BITEXACT)) ||
(avctx->idct_algo == FF_IDCT_ALTIVEC)) {
+ c->idct = idct_altivec;
c->idct_add = idct_add_altivec;
c->idct_put = idct_put_altivec;
c->perm_type = FF_IDCT_PERM_TRANSPOSE;
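
The new idct_altivec entry runs the transform in place, while the existing
idct_put/idct_add callbacks also write the result out. Ignoring the
TRANSPOSE permutation details, the conventional relationship between the
three (a rough scalar sketch, not code from this patch; av_clip_uint8 is
from libavutil) is:

    /* idct_put: transform, then clamp to 0..255 and overwrite dest;
     * idct_add would add into dest instead of overwriting. */
    static void idct_put_sketch(void (*idct)(int16_t *), uint8_t *dest,
                                int stride, int16_t *block)
    {
        int i, j;
        idct(block); /* e.g. the kernel the new c->idct now exposes */
        for (i = 0; i < 8; i++)
            for (j = 0; j < 8; j++)
                dest[i * stride + j] = av_clip_uint8(block[i * 8 + j]);
    }

Note also that the init code now skips the AltiVec IDCT when bit-exact
output is requested (it is not bit-identical to the C reference) and when
lowres decoding is active.
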
diff --git a/libavcodec/ppc/apedsp_altivec.c b/libavcodec/ppc/lossless_audiodsp_altivec.c
index d8bf4bd167..1ebb0f4aa3 100644
--- a/libavcodec/ppc/apedsp_altivec.c
+++ b/libavcodec/ppc/lossless_audiodsp_altivec.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Luca Barbato <lu_zero@gentoo.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -27,7 +27,7 @@
#include "libavutil/cpu.h"
#include "libavutil/ppc/cpu.h"
#include "libavutil/ppc/types_altivec.h"
-#include "libavcodec/apedsp.h"
+#include "libavcodec/lossless_audiodsp.h"
#if HAVE_ALTIVEC
static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1,
@@ -71,7 +71,7 @@ static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1,
}
#endif /* HAVE_ALTIVEC */
-av_cold void ff_apedsp_init_ppc(APEDSPContext *c)
+av_cold void ff_llauddsp_init_ppc(LLAudDSPContext *c)
{
#if HAVE_ALTIVEC
if (!PPC_ALTIVEC(av_get_cpu_flags()))
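
For context, the renamed DSP hook computes a dot product while
simultaneously updating its first operand. A plain-C reference sketch
(reconstructed from the generic lossless_audiodsp behaviour; treat the
details as an assumption rather than a quote of the C fallback):

    static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, const int16_t *v2,
                                                  const int16_t *v3, int order,
                                                  int mul)
    {
        int32_t res = 0;
        int i;
        for (i = 0; i < order; i++) {
            res   += v1[i] * v2[i]; /* accumulate the scalar product    */
            v1[i] += mul * v3[i];   /* multiply-accumulate back into v1 */
        }
        return res;
    }
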
diff --git a/libavcodec/ppc/mathops.h b/libavcodec/ppc/mathops.h
index 34ddb11800..dbd714fcd4 100644
--- a/libavcodec/ppc/mathops.h
+++ b/libavcodec/ppc/mathops.h
@@ -3,20 +3,20 @@
* Copyright (c) 2001, 2002 Fabrice Bellard
* Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/me_cmp.c b/libavcodec/ppc/me_cmp.c
index 88c7feaa7e..8ff8193d6d 100644
--- a/libavcodec/ppc/me_cmp.c
+++ b/libavcodec/ppc/me_cmp.c
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -35,6 +35,44 @@
#include "libavcodec/me_cmp.h"
#if HAVE_ALTIVEC
+#if HAVE_VSX
+static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+ int line_size, int h)
+{
+ int i, s = 0;
+ const vector unsigned char zero =
+ (const vector unsigned char) vec_splat_u8(0);
+ vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
+ vector signed int sumdiffs;
+
+ for (i = 0; i < h; i++) {
+ /* Read unaligned pixels into our vectors. The vectors are as follows:
+ * pix1v: pix1[0] - pix1[15]
+ * pix2v: pix2[0] - pix2[15] pix2iv: pix2[1] - pix2[16] */
+ vector unsigned char pix1v = vec_vsx_ld(0, pix1);
+ vector unsigned char pix2v = vec_vsx_ld(0, pix2);
+ vector unsigned char pix2iv = vec_vsx_ld(1, pix2);
+
+ /* Calculate the average vector. */
+ vector unsigned char avgv = vec_avg(pix2v, pix2iv);
+
+ /* Calculate a sum of abs differences vector. */
+ vector unsigned char t5 = vec_sub(vec_max(pix1v, avgv),
+ vec_min(pix1v, avgv));
+
+ /* Add each 4 pixel group together and put 4 results into sad. */
+ sad = vec_sum4s(t5, sad);
+
+ pix1 += line_size;
+ pix2 += line_size;
+ }
+ /* Sum up the four partial sums, and put the result into s. */
+ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
+ sumdiffs = vec_splat(sumdiffs, 3);
+    vec_ste(sumdiffs, 0, &s);
+ return s;
+}
+#else
static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
int line_size, int h)
{
@@ -76,7 +114,58 @@ static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
return s;
}
+#endif /* HAVE_VSX */
+#if HAVE_VSX
+static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+ int line_size, int h)
+{
+ int i, s = 0;
+ const vector unsigned char zero =
+ (const vector unsigned char) vec_splat_u8(0);
+ vector unsigned char pix1v, pix3v, avgv, t5;
+ vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
+ vector signed int sumdiffs;
+ uint8_t *pix3 = pix2 + line_size;
+
+    /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
+     * iteration becomes pix2 in the next iteration. We can use this
+     * fact to avoid a potentially expensive unaligned read each
+     * time around the loop.
+     * Read unaligned pixels into our vector:
+     * pix2v: pix2[0] - pix2[15] */
+ vector unsigned char pix2v = vec_vsx_ld(0, pix2);
+
+ for (i = 0; i < h; i++) {
+ /* Read unaligned pixels into our vectors. The vectors are as follows:
+ * pix1v: pix1[0] - pix1[15]
+ * pix3v: pix3[0] - pix3[15] */
+ pix1v = vec_vsx_ld(0, pix1);
+ pix3v = vec_vsx_ld(0, pix3);
+
+ /* Calculate the average vector. */
+ avgv = vec_avg(pix2v, pix3v);
+
+ /* Calculate a sum of abs differences vector. */
+ t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));
+
+ /* Add each 4 pixel group together and put 4 results into sad. */
+ sad = vec_sum4s(t5, sad);
+
+ pix1 += line_size;
+ pix2v = pix3v;
+ pix3 += line_size;
+ }
+
+ /* Sum up the four partial sums, and put the result into s. */
+ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
+ sumdiffs = vec_splat(sumdiffs, 3);
+    vec_ste(sumdiffs, 0, &s);
+ return s;
+}
+#else
static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
int line_size, int h)
{
@@ -130,7 +219,96 @@ static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
vec_ste(sumdiffs, 0, &s);
return s;
}
+#endif /* HAVE_VSX */
+#if HAVE_VSX
+static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+ int line_size, int h)
+{
+ int i, s = 0;
+ uint8_t *pix3 = pix2 + line_size;
+ const vector unsigned char zero =
+ (const vector unsigned char) vec_splat_u8(0);
+ const vector unsigned short two =
+ (const vector unsigned short) vec_splat_u16(2);
+ vector unsigned char avgv, t5;
+ vector unsigned char pix1v, pix3v, pix3iv;
+ vector unsigned short pix3lv, pix3hv, pix3ilv, pix3ihv;
+ vector unsigned short avghv, avglv;
+ vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
+ vector signed int sumdiffs;
+
+ /* Due to the fact that pix3 = pix2 + line_size, the pix3 of one
+ * iteration becomes pix2 in the next iteration. We can use this
+ * fact to avoid a potentially expensive unaligned read, as well
+ * as some splitting, and vector addition each time around the loop.
+ * Read unaligned pixels into our vectors. The vectors are as follows:
+ * pix2v: pix2[0] - pix2[15] pix2iv: pix2[1] - pix2[16]
+ * Split the pixel vectors into shorts. */
+ vector unsigned char pix2v = vec_vsx_ld(0, pix2);
+ vector unsigned char pix2iv = vec_vsx_ld(1, pix2);
+ vector unsigned short pix2hv =
+ (vector unsigned short) vec_mergeh(pix2v, zero);
+ vector unsigned short pix2lv =
+ (vector unsigned short) vec_mergel(pix2v, zero);
+ vector unsigned short pix2ihv =
+ (vector unsigned short) vec_mergeh(pix2iv, zero);
+ vector unsigned short pix2ilv =
+ (vector unsigned short) vec_mergel(pix2iv, zero);
+ vector unsigned short t1 = vec_add(pix2hv, pix2ihv);
+ vector unsigned short t2 = vec_add(pix2lv, pix2ilv);
+
+ vector unsigned short t3, t4;
+
+ for (i = 0; i < h; i++) {
+ /* Read unaligned pixels into our vectors. The vectors are as follows:
+ * pix1v: pix1[0] - pix1[15]
+ * pix3v: pix3[0] - pix3[15] pix3iv: pix3[1] - pix3[16] */
+ pix1v = vec_vsx_ld(0, pix1);
+ pix3v = vec_vsx_ld(0, pix3);
+ pix3iv = vec_vsx_ld(1, pix3);
+
+ /* Note that AltiVec does have vec_avg, but this works on vector pairs
+ * and rounds up. We could do avg(avg(a, b), avg(c, d)), but the
+ * rounding would mean that, for example, avg(3, 0, 0, 1) = 2, when
+ * it should be 1. Instead, we have to split the pixel vectors into
+ * vectors of shorts and do the averaging by hand. */
+
+ /* Split the pixel vectors into shorts. */
+ pix3hv = (vector unsigned short) vec_mergeh(pix3v, zero);
+ pix3lv = (vector unsigned short) vec_mergel(pix3v, zero);
+ pix3ihv = (vector unsigned short) vec_mergeh(pix3iv, zero);
+ pix3ilv = (vector unsigned short) vec_mergel(pix3iv, zero);
+
+ /* Do the averaging on them. */
+ t3 = vec_add(pix3hv, pix3ihv);
+ t4 = vec_add(pix3lv, pix3ilv);
+
+ avghv = vec_sr(vec_add(vec_add(t1, t3), two), two);
+ avglv = vec_sr(vec_add(vec_add(t2, t4), two), two);
+
+ /* Pack the shorts back into a result. */
+ avgv = vec_pack(avghv, avglv);
+
+ /* Calculate a sum of abs differences vector. */
+ t5 = vec_sub(vec_max(pix1v, avgv), vec_min(pix1v, avgv));
+
+ /* Add each 4 pixel group together and put 4 results into sad. */
+ sad = vec_sum4s(t5, sad);
+
+ pix1 += line_size;
+ pix3 += line_size;
+ /* Transfer the calculated values for pix3 into pix2. */
+ t1 = t3;
+ t2 = t4;
+ }
+ /* Sum up the four partial sums, and put the result into s. */
+ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
+ sumdiffs = vec_splat(sumdiffs, 3);
+    vec_ste(sumdiffs, 0, &s);
+ return s;
+}
+#else
static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
int line_size, int h)
{
@@ -225,7 +403,42 @@ static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
return s;
}
+#endif /* HAVE_VSX */
+#if HAVE_VSX
+static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+ int line_size, int h)
+{
+ int i, s;
+ const vector unsigned int zero =
+ (const vector unsigned int) vec_splat_u32(0);
+ vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
+ vector signed int sumdiffs;
+
+ for (i = 0; i < h; i++) {
+ /* Read potentially unaligned pixels into t1 and t2. */
+        vector unsigned char t1 = vec_vsx_ld(0, pix1);
+ vector unsigned char t2 = vec_vsx_ld(0, pix2);
+
+ /* Calculate a sum of abs differences vector. */
+ vector unsigned char t3 = vec_max(t1, t2);
+ vector unsigned char t4 = vec_min(t1, t2);
+ vector unsigned char t5 = vec_sub(t3, t4);
+
+ /* Add each 4 pixel group together and put 4 results into sad. */
+ sad = vec_sum4s(t5, sad);
+
+ pix1 += line_size;
+ pix2 += line_size;
+ }
+
+ /* Sum up the four partial sums, and put the result into s. */
+ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
+ sumdiffs = vec_splat(sumdiffs, 3);
+    vec_ste(sumdiffs, 0, &s);
+ return s;
+}
+#else
static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
int line_size, int h)
{
@@ -262,7 +475,49 @@ static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
return s;
}
+#endif /* HAVE_VSX */
+#if HAVE_VSX
+static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+ int line_size, int h)
+{
+ int i, s;
+ const vector unsigned int zero =
+ (const vector unsigned int) vec_splat_u32(0);
+ const vector unsigned char permclear =
+ (vector unsigned char)
+ { 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0 };
+ vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
+ vector signed int sumdiffs;
+
+ for (i = 0; i < h; i++) {
+ /* Read potentially unaligned pixels into t1 and t2.
+ * Since we're reading 16 pixels, and actually only want 8,
+ * mask out the last 8 pixels. The 0s don't change the sum. */
+ vector unsigned char pix1l = vec_vsx_ld(0, pix1);
+ vector unsigned char pix2l = vec_vsx_ld(0, pix2);
+ vector unsigned char t1 = vec_and(pix1l, permclear);
+ vector unsigned char t2 = vec_and(pix2l, permclear);
+
+ /* Calculate a sum of abs differences vector. */
+ vector unsigned char t3 = vec_max(t1, t2);
+ vector unsigned char t4 = vec_min(t1, t2);
+ vector unsigned char t5 = vec_sub(t3, t4);
+
+ /* Add each 4 pixel group together and put 4 results into sad. */
+ sad = vec_sum4s(t5, sad);
+
+ pix1 += line_size;
+ pix2 += line_size;
+ }
+
+ /* Sum up the four partial sums, and put the result into s. */
+ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
+ sumdiffs = vec_splat(sumdiffs, 3);
+    vec_ste(sumdiffs, 0, &s);
+ return s;
+}
+#else
static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
int line_size, int h)
{
@@ -309,6 +564,7 @@ static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
return s;
}
+#endif /* HAVE_VSX */
/* Sum of Squared Errors for an 8x8 block, AltiVec-enhanced.
* It's the sad8_altivec code above w/ squaring added. */
@@ -364,6 +620,43 @@ static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
/* Sum of Squared Errors for a 16x16 block, AltiVec-enhanced.
* It's the sad16_altivec code above w/ squaring added. */
+#if HAVE_VSX
+static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+ int line_size, int h)
+{
+ int i, s;
+ const vector unsigned int zero =
+ (const vector unsigned int) vec_splat_u32(0);
+ vector unsigned int sum = (vector unsigned int) vec_splat_u32(0);
+ vector signed int sumsqr;
+
+ for (i = 0; i < h; i++) {
+ /* Read potentially unaligned pixels into t1 and t2. */
+ vector unsigned char t1 = vec_vsx_ld(0, pix1);
+ vector unsigned char t2 = vec_vsx_ld(0, pix2);
+
+        /* Since we want to use unsigned chars, we can take advantage
+         * of the fact that abs(a - b) ^ 2 = (a - b) ^ 2. */
+
+ /* Calculate abs differences vector. */
+ vector unsigned char t3 = vec_max(t1, t2);
+ vector unsigned char t4 = vec_min(t1, t2);
+ vector unsigned char t5 = vec_sub(t3, t4);
+
+ /* Square the values and add them to our sum. */
+ sum = vec_msum(t5, t5, sum);
+
+ pix1 += line_size;
+ pix2 += line_size;
+ }
+
+ /* Sum up the four partial sums, and put the result into s. */
+ sumsqr = vec_sums((vector signed int) sum, (vector signed int) zero);
+ sumsqr = vec_splat(sumsqr, 3);
+    vec_ste(sumsqr, 0, &s);
+ return s;
+}
+#else
static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
int line_size, int h)
{
@@ -403,6 +696,7 @@ static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
return s;
}
+#endif /* HAVE_VSX */
static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
uint8_t *src, int stride, int h)
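
A note on the most subtle of the functions above: the short-splitting in
sad16_xy2_altivec exists purely to get correctly rounded four-pixel
averages, i.e. (a + b + c + d + 2) >> 2. A scalar illustration of the
failure mode the in-code comment describes (hypothetical helper, for
exposition only):

    static inline int avg2(int x, int y)
    {
        return (x + y + 1) >> 1; /* rounds up, like vec_avg */
    }

    /* For a = 3, b = 0, c = 0, d = 1:
     *   exact:  (3 + 0 + 0 + 1 + 2) >> 2     = 1
     *   nested: avg2(avg2(3, 0), avg2(0, 1)) = avg2(2, 1) = 2
     * The nested form rounds up twice, hence the widen-to-short,
     * add, and shift-right-by-two path taken in the code. */
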
diff --git a/libavcodec/ppc/mpegaudiodsp_altivec.c b/libavcodec/ppc/mpegaudiodsp_altivec.c
index c37f8ec975..ddfe5dcbb7 100644
--- a/libavcodec/ppc/mpegaudiodsp_altivec.c
+++ b/libavcodec/ppc/mpegaudiodsp_altivec.c
@@ -2,20 +2,20 @@
* Altivec optimized MP3 decoding functions
* Copyright (c) 2010 Vitor Sessak
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/mpegvideo_altivec.c b/libavcodec/ppc/mpegvideo_altivec.c
index 9ae849f173..ce53ae4b61 100644
--- a/libavcodec/ppc/mpegvideo_altivec.c
+++ b/libavcodec/ppc/mpegvideo_altivec.c
@@ -4,20 +4,20 @@
* dct_unquantize_h263_altivec:
* Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/mpegvideodsp.c b/libavcodec/ppc/mpegvideodsp.c
index 2bdf909e4a..7696954335 100644
--- a/libavcodec/ppc/mpegvideodsp.c
+++ b/libavcodec/ppc/mpegvideodsp.c
@@ -3,20 +3,20 @@
*
* Copyright (c) 2003 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -66,7 +66,7 @@ static void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */,
vec_lvsl(0, src));
if (src_really_odd != 0x0000000F)
- /* If src & 0xF == 0xF, then (src + 1) is properly aligned
+ /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
* on the second vector. */
srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
else
@@ -88,7 +88,7 @@ static void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */,
srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));
if (src_really_odd != 0x0000000F)
- /* If src & 0xF == 0xF, then (src + 1) is properly aligned
+ /* If (src & 0xF) == 0xF, then (src + 1) is properly aligned
* on the second vector. */
srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
else
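
The reworded comments above clarify the alignment trick in gmc1_altivec:
when src sits on the last byte of a 16-byte line, src + 1 starts a fresh
aligned line and the second vector needs no permute. A self-contained
sketch of the predicate (hypothetical helper name; the mask value matches
the src_really_odd test in the surrounding code):

    static int second_vector_is_aligned(const uint8_t *src)
    {
        /* src % 16 == 15 means src + 1 lands on a 16-byte boundary,
         * so the second vector can be loaded directly; otherwise it
         * straddles two aligned lines and must be assembled with
         * vec_perm(src_0, src_1, vec_lvsl(1, src)). */
        return ((unsigned long) src & 0x0000000F) == 0x0000000F;
    }
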
diff --git a/libavcodec/ppc/mpegvideoencdsp.c b/libavcodec/ppc/mpegvideoencdsp.c
index b5348e6abd..e91ba5d25f 100644
--- a/libavcodec/ppc/mpegvideoencdsp.c
+++ b/libavcodec/ppc/mpegvideoencdsp.c
@@ -1,18 +1,18 @@
/*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -31,6 +31,34 @@
#if HAVE_ALTIVEC
+#if HAVE_VSX
+static int pix_norm1_altivec(uint8_t *pix, int line_size)
+{
+ int i, s = 0;
+ const vector unsigned int zero =
+ (const vector unsigned int) vec_splat_u32(0);
+ vector unsigned int sv = (vector unsigned int) vec_splat_u32(0);
+ vector signed int sum;
+
+ for (i = 0; i < 16; i++) {
+ /* Read the potentially unaligned pixels. */
+ vector unsigned char pixv = vec_vsx_ld(0, pix);
+
+ /* Square the values, and add them to our sum. */
+ sv = vec_msum(pixv, pixv, sv);
+
+ pix += line_size;
+ }
+ /* Sum up the four partial sums, and put the result into s. */
+ sum = vec_sums((vector signed int) sv, (vector signed int) zero);
+ sum = vec_splat(sum, 3);
+    vec_ste(sum, 0, &s);
+ return s;
+}
+#else
static int pix_norm1_altivec(uint8_t *pix, int line_size)
{
int i, s = 0;
@@ -58,7 +86,37 @@ static int pix_norm1_altivec(uint8_t *pix, int line_size)
return s;
}
+#endif /* HAVE_VSX */
+
+#if HAVE_VSX
+static int pix_sum_altivec(uint8_t *pix, int line_size)
+{
+ int i, s;
+ const vector unsigned int zero =
+ (const vector unsigned int) vec_splat_u32(0);
+ vector unsigned int sad = (vector unsigned int) vec_splat_u32(0);
+ vector signed int sumdiffs;
+
+ for (i = 0; i < 16; i++) {
+ /* Read the potentially unaligned 16 pixels into t1. */
+ vector unsigned char t1 = vec_vsx_ld(0, pix);
+
+ /* Add each 4 pixel group together and put 4 results into sad. */
+ sad = vec_sum4s(t1, sad);
+
+ pix += line_size;
+ }
+ /* Sum up the four partial sums, and put the result into s. */
+ sumdiffs = vec_sums((vector signed int) sad, (vector signed int) zero);
+ sumdiffs = vec_splat(sumdiffs, 3);
+    vec_ste(sumdiffs, 0, &s);
+ return s;
+}
+#else
static int pix_sum_altivec(uint8_t *pix, int line_size)
{
int i, s;
@@ -88,6 +146,8 @@ static int pix_sum_altivec(uint8_t *pix, int line_size)
return s;
}
+#endif /* HAVE_VSX */
+
#endif /* HAVE_ALTIVEC */
av_cold void ff_mpegvideoencdsp_init_ppc(MpegvideoEncDSPContext *c,
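
For orientation, the two routines above compute, over a 16x16 block, a
plain pixel sum and a sum of squared pixels. Scalar sketches of the
equivalents (reconstructed for illustration, not taken from this patch):

    static int pix_sum_c(uint8_t *pix, int line_size)
    {
        int i, j, s = 0;
        for (i = 0; i < 16; i++) {
            for (j = 0; j < 16; j++)
                s += pix[j];          /* vec_sum4s + vec_sums, 16 bytes at a time */
            pix += line_size;
        }
        return s;
    }

    static int pix_norm1_c(uint8_t *pix, int line_size)
    {
        int i, j, s = 0;
        for (i = 0; i < 16; i++) {
            for (j = 0; j < 16; j++)
                s += pix[j] * pix[j]; /* vec_msum(pixv, pixv, sv) squares and sums */
            pix += line_size;
        }
        return s;
    }
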
diff --git a/libavcodec/ppc/pixblockdsp.c b/libavcodec/ppc/pixblockdsp.c
index 698d655fc6..9bbdf96d12 100644
--- a/libavcodec/ppc/pixblockdsp.c
+++ b/libavcodec/ppc/pixblockdsp.c
@@ -3,20 +3,20 @@
* Copyright (c) 2002 Dieter Shirley
* Copyright (c) 2003-2004 Romain Dolbeau <romain@dolbeau.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
@@ -35,8 +35,36 @@
#if HAVE_ALTIVEC
+#if HAVE_VSX
static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels,
- int line_size)
+ ptrdiff_t line_size)
+{
+ int i;
+    vector unsigned char perm =
+        (vector unsigned char) { 0x00, 0x10, 0x01, 0x11, 0x02, 0x12, 0x03, 0x13,
+                                 0x04, 0x14, 0x05, 0x15, 0x06, 0x16, 0x07, 0x17 };
+ const vector unsigned char zero =
+ (const vector unsigned char) vec_splat_u8(0);
+
+ for (i = 0; i < 8; i++) {
+ /* Read potentially unaligned pixels.
+ * We're reading 16 pixels, and actually only want 8,
+ * but we simply ignore the extras. */
+ vector unsigned char bytes = vec_vsx_ld(0, pixels);
+
+ // Convert the bytes into shorts.
+ vector signed short shorts = (vector signed short) vec_perm(bytes, zero, perm);
+
+        // Save the data to the block; we assume the block is 16-byte aligned.
+ vec_vsx_st(shorts, i * 16, (vector signed short *) block);
+
+ pixels += line_size;
+ }
+}
+#else
+static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels,
+ ptrdiff_t line_size)
{
int i;
vector unsigned char perm = vec_lvsl(0, pixels);
@@ -62,6 +90,71 @@ static void get_pixels_altivec(int16_t *restrict block, const uint8_t *pixels,
}
}
+#endif /* HAVE_VSX */
+
+#if HAVE_VSX
+static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
+ const uint8_t *s2, int stride)
+{
+ int i;
+ const vector unsigned char zero =
+ (const vector unsigned char) vec_splat_u8(0);
+ vector signed short shorts1, shorts2;
+
+ for (i = 0; i < 4; i++) {
+ /* Read potentially unaligned pixels.
+ * We're reading 16 pixels, and actually only want 8,
+ * but we simply ignore the extras. */
+ vector unsigned char bytes = vec_vsx_ld(0, s1);
+
+ // Convert the bytes into shorts.
+ shorts1 = (vector signed short) vec_mergeh(bytes, zero);
+
+ // Do the same for the second block of pixels.
+        bytes = vec_vsx_ld(0, s2);
+
+ // Convert the bytes into shorts.
+ shorts2 = (vector signed short) vec_mergeh(bytes, zero);
+
+ // Do the subtraction.
+ shorts1 = vec_sub(shorts1, shorts2);
+
+        // Save the data to the block; we assume the block is 16-byte aligned.
+ vec_vsx_st(shorts1, 0, (vector signed short *) block);
+
+ s1 += stride;
+ s2 += stride;
+ block += 8;
+
+ /* The code below is a copy of the code above...
+ * This is a manual unroll. */
+
+ /* Read potentially unaligned pixels.
+ * We're reading 16 pixels, and actually only want 8,
+ * but we simply ignore the extras. */
+ bytes = vec_vsx_ld(0, s1);
+
+ // Convert the bytes into shorts.
+ shorts1 = (vector signed short) vec_mergeh(bytes, zero);
+
+ // Do the same for the second block of pixels.
+ bytes = vec_vsx_ld(0, s2);
+
+ // Convert the bytes into shorts.
+ shorts2 = (vector signed short) vec_mergeh(bytes, zero);
+
+ // Do the subtraction.
+ shorts1 = vec_sub(shorts1, shorts2);
+
+        // Save the data to the block; we assume the block is 16-byte aligned.
+ vec_vsx_st(shorts1, 0, (vector signed short *) block);
+
+ s1 += stride;
+ s2 += stride;
+ block += 8;
+ }
+}
+#else
static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
const uint8_t *s2, int stride)
{
@@ -134,6 +227,8 @@ static void diff_pixels_altivec(int16_t *restrict block, const uint8_t *s1,
}
}
+#endif /* HAVE_VSX */
+
#endif /* HAVE_ALTIVEC */
av_cold void ff_pixblockdsp_init_ppc(PixblockDSPContext *c,
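
The two pixblockdsp hooks above are a widen-and-copy and a
widen-and-subtract over an 8x8 block; scalar sketches of the equivalents
(reconstructed for illustration, not taken from this patch):

    static void get_pixels_c(int16_t *restrict block, const uint8_t *pixels,
                             ptrdiff_t line_size)
    {
        int i, j;
        for (i = 0; i < 8; i++) {
            for (j = 0; j < 8; j++)
                block[i * 8 + j] = pixels[j];     /* zero-extend bytes to shorts */
            pixels += line_size;
        }
    }

    static void diff_pixels_c(int16_t *restrict block, const uint8_t *s1,
                              const uint8_t *s2, int stride)
    {
        int i, j;
        for (i = 0; i < 8; i++) {
            for (j = 0; j < 8; j++)
                block[i * 8 + j] = s1[j] - s2[j]; /* widen, then subtract */
            s1 += stride;
            s2 += stride;
        }
    }
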
diff --git a/libavcodec/ppc/svq1enc_altivec.c b/libavcodec/ppc/svq1enc_altivec.c
index 564f12986b..4e25e253f6 100644
--- a/libavcodec/ppc/svq1enc_altivec.c
+++ b/libavcodec/ppc/svq1enc_altivec.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2007 Luca Barbato <lu_zero@gentoo.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/vc1dsp_altivec.c b/libavcodec/ppc/vc1dsp_altivec.c
index 90c3d27e82..2128b56d05 100644
--- a/libavcodec/ppc/vc1dsp_altivec.c
+++ b/libavcodec/ppc/vc1dsp_altivec.c
@@ -2,20 +2,20 @@
* VC-1 and WMV3 decoder - DSP functions AltiVec-optimized
* Copyright (c) 2006 Konstantin Shishkov
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/videodsp_ppc.c b/libavcodec/ppc/videodsp_ppc.c
index b9e003b487..915702252e 100644
--- a/libavcodec/ppc/videodsp_ppc.c
+++ b/libavcodec/ppc/videodsp_ppc.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2003-2004 Romain Dolbeau
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/vorbisdsp_altivec.c b/libavcodec/ppc/vorbisdsp_altivec.c
index 43f4d0325b..d7557c815b 100644
--- a/libavcodec/ppc/vorbisdsp_altivec.c
+++ b/libavcodec/ppc/vorbisdsp_altivec.c
@@ -1,20 +1,20 @@
/*
* Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/vp3dsp_altivec.c b/libavcodec/ppc/vp3dsp_altivec.c
index bce49e3170..9d81b3f265 100644
--- a/libavcodec/ppc/vp3dsp_altivec.c
+++ b/libavcodec/ppc/vp3dsp_altivec.c
@@ -1,20 +1,20 @@
/*
* Copyright (C) 2009 David Conrad
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
diff --git a/libavcodec/ppc/vp8dsp_altivec.c b/libavcodec/ppc/vp8dsp_altivec.c
index e010dee4d2..91ff8cc8ff 100644
--- a/libavcodec/ppc/vp8dsp_altivec.c
+++ b/libavcodec/ppc/vp8dsp_altivec.c
@@ -3,20 +3,20 @@
*
* Copyright (C) 2010 David Conrad
*
- * This file is part of Libav.
+ * This file is part of FFmpeg.
*
- * Libav is free software; you can redistribute it and/or
+ * FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
- * Libav is distributed in the hope that it will be useful,
+ * FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
- * License along with Libav; if not, write to the Free Software
+ * License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/