author    Michael Niedermayer <michaelni@gmx.at>  2002-11-02 11:28:08 +0000
committer Michael Niedermayer <michaelni@gmx.at>  2002-11-02 11:28:08 +0000
commit    05c4072b45f3cde1185de6eccfe7febf91d9f8fd (patch)
tree      164979e7e556e44f5678d5599a40810bbf89fed8 /libavcodec/ppc
parent    26b35efb3a0d02a1ef6a8af804e6c59c1a190fa3 (diff)
Altivec Patch (Mark III) by (Dieter Shirley <dieters at schemasoft dot com>)
Originally committed as revision 1147 to svn://svn.ffmpeg.org/ffmpeg/trunk
Diffstat (limited to 'libavcodec/ppc')
-rw-r--r--  libavcodec/ppc/dsputil_altivec.c   | 126
-rw-r--r--  libavcodec/ppc/dsputil_altivec.h   |  21
-rw-r--r--  libavcodec/ppc/dsputil_ppc.c       |  30
-rw-r--r--  libavcodec/ppc/idct_altivec.c      | 216
-rw-r--r--  libavcodec/ppc/mpegvideo_altivec.c | 509
-rw-r--r--  libavcodec/ppc/mpegvideo_ppc.c     |  77
6 files changed, 972 insertions(+), 7 deletions(-)
diff --git a/libavcodec/ppc/dsputil_altivec.c b/libavcodec/ppc/dsputil_altivec.c
index 18d9d27a48..8a50ccb900 100644
--- a/libavcodec/ppc/dsputil_altivec.c
+++ b/libavcodec/ppc/dsputil_altivec.c
@@ -1,15 +1,29 @@
+/*
+ * Copyright (c) 2002 Brian Foley
+ * Copyright (c) 2002 Dieter Shirley
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
#include "../dsputil.h"
+#include "dsputil_altivec.h"
#if CONFIG_DARWIN
#include <sys/sysctl.h>
#endif
-int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
-int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
-int pix_sum_altivec(UINT8 * pix, int line_size);
-
-int has_altivec(void);
-
int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size)
{
int i, s;
@@ -127,6 +141,105 @@ int pix_sum_altivec(UINT8 * pix, int line_size)
return s;
}
+void get_pixels_altivec(DCTELEM *restrict block, const UINT8 *pixels, int line_size)
+{
+ int i;
+ vector unsigned char perm, bytes, *pixv;
+ vector unsigned char zero = (vector unsigned char) (0);
+ vector signed short shorts;
+
+ for(i=0;i<8;i++)
+ {
+ // Read potentially unaligned pixels.
+ // We're reading 16 pixels, and actually only want 8,
+ // but we simply ignore the extras.
+ perm = vec_lvsl(0, pixels);
+ pixv = (vector unsigned char *) pixels;
+ bytes = vec_perm(pixv[0], pixv[1], perm);
+
+ // convert the bytes into shorts
+ shorts = (vector signed short)vec_mergeh(zero, bytes);
+
+ // save the data to the block, we assume the block is 16-byte aligned
+ vec_st(shorts, i*16, (vector signed short*)block);
+
+ pixels += line_size;
+ }
+}
+
+void diff_pixels_altivec(DCTELEM *restrict block, const UINT8 *s1,
+ const UINT8 *s2, int stride)
+{
+ int i;
+ vector unsigned char perm, bytes, *pixv;
+ vector unsigned char zero = (vector unsigned char) (0);
+ vector signed short shorts1, shorts2;
+
+ for(i=0;i<4;i++)
+ {
+ // Read potentially unaligned pixels
+ // We're reading 16 pixels, and actually only want 8,
+ // but we simply ignore the extras.
+ perm = vec_lvsl(0, s1);
+ pixv = (vector unsigned char *) s1;
+ bytes = vec_perm(pixv[0], pixv[1], perm);
+
+ // convert the bytes into shorts
+ shorts1 = (vector signed short)vec_mergeh(zero, bytes);
+
+ // Do the same for the second block of pixels
+ perm = vec_lvsl(0, s2);
+ pixv = (vector unsigned char *) s2;
+ bytes = vec_perm(pixv[0], pixv[1], perm);
+
+ // convert the bytes into shorts
+ shorts2 = (vector signed short)vec_mergeh(zero, bytes);
+
+ // Do the subtraction
+ shorts1 = vec_sub(shorts1, shorts2);
+
+ // save the data to the block, we assume the block is 16-byte aligned
+ vec_st(shorts1, 0, (vector signed short*)block);
+
+ s1 += stride;
+ s2 += stride;
+ block += 8;
+
+
+ // The code below is a copy of the code above... This is a manual
+ // unroll.
+
+ // Read potentially unaligned pixels
+ // We're reading 16 pixels, and actually only want 8,
+ // but we simply ignore the extras.
+ perm = vec_lvsl(0, s1);
+ pixv = (vector unsigned char *) s1;
+ bytes = vec_perm(pixv[0], pixv[1], perm);
+
+ // convert the bytes into shorts
+ shorts1 = (vector signed short)vec_mergeh(zero, bytes);
+
+ // Do the same for the second block of pixels
+ perm = vec_lvsl(0, s2);
+ pixv = (vector unsigned char *) s2;
+ bytes = vec_perm(pixv[0], pixv[1], perm);
+
+ // convert the bytes into shorts
+ shorts2 = (vector signed short)vec_mergeh(zero, bytes);
+
+ // Do the subtraction
+ shorts1 = vec_sub(shorts1, shorts2);
+
+ // save the data to the block, we assume the block is 16-byte aligned
+ vec_st(shorts1, 0, (vector signed short*)block);
+
+ s1 += stride;
+ s2 += stride;
+ block += 8;
+ }
+}
+
+
int has_altivec(void)
{
#if CONFIG_DARWIN
@@ -141,3 +254,4 @@ int has_altivec(void)
#endif
return 0;
}
+
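The get_pixels/diff_pixels routines above rely on the classic AltiVec misaligned-load idiom: vec_ld silently truncates the address to a 16-byte boundary, so two aligned loads plus a vec_perm driven by vec_lvsl reconstruct the bytes starting at the real address. A minimal standalone sketch of the idiom (not part of the patch; assumes GCC-style AltiVec intrinsics on big-endian PPC):

/* Load 16 bytes starting at a possibly unaligned address.
 * vec_ld drops the low 4 address bits, so fetch the two aligned
 * quadwords that straddle src and shift the wanted bytes into
 * place with the permute vector produced by vec_lvsl. */
static inline vector unsigned char load_unaligned(const unsigned char *src)
{
    vector unsigned char perm = vec_lvsl(0, src);
    vector unsigned char hi   = vec_ld(0, src);
    vector unsigned char lo   = vec_ld(15, src);
    return vec_perm(hi, lo, perm);
}

The patch indexes pixv[0] and pixv[1] instead, which reads a full second quadword even when the address is already aligned; the vec_ld(15, src) form shown here touches only bytes that can contribute to the result.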
diff --git a/libavcodec/ppc/dsputil_altivec.h b/libavcodec/ppc/dsputil_altivec.h
index 42c373e76b..bdf8f5ffee 100644
--- a/libavcodec/ppc/dsputil_altivec.h
+++ b/libavcodec/ppc/dsputil_altivec.h
@@ -1,5 +1,26 @@
+/*
+ * Copyright (c) 2002 Brian Foley
+ * Copyright (c) 2002 Dieter Shirley
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
extern int pix_abs16x16_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
extern int pix_abs8x8_altivec(uint8_t *pix1, uint8_t *pix2, int line_size);
extern int pix_sum_altivec(UINT8 * pix, int line_size);
+extern void diff_pixels_altivec(DCTELEM* block, const UINT8* s1, const UINT8* s2, int stride);
+extern void get_pixels_altivec(DCTELEM* block, const UINT8 * pixels, int line_size);
extern int has_altivec(void);
diff --git a/libavcodec/ppc/dsputil_ppc.c b/libavcodec/ppc/dsputil_ppc.c
index d9c8ba86e0..28c56ee94a 100644
--- a/libavcodec/ppc/dsputil_ppc.c
+++ b/libavcodec/ppc/dsputil_ppc.c
@@ -1,3 +1,22 @@
+/*
+ * Copyright (c) 2002 Brian Foley
+ * Copyright (c) 2002 Dieter Shirley
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
#include "../dsputil.h"
#ifdef HAVE_ALTIVEC
@@ -6,14 +25,23 @@
void dsputil_init_ppc(void)
{
+ // Common optimisations, whether AltiVec is available or not
+
+ // ... pending ...
+
#if HAVE_ALTIVEC
if (has_altivec()) {
+ // Altivec specific optimisations
pix_abs16x16 = pix_abs16x16_altivec;
pix_abs8x8 = pix_abs8x8_altivec;
pix_sum = pix_sum_altivec;
+ diff_pixels = diff_pixels_altivec;
+ get_pixels = get_pixels_altivec;
} else
#endif
{
- /* Non-AltiVec PPC optimisations here */
+ // Non-AltiVec PPC optimisations
+
+ // ... pending ...
}
}
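The dispatch above keys everything off has_altivec(), whose Darwin implementation (its body falls outside the hunks shown earlier) asks the kernel whether a vector unit is present. A hedged sketch of that style of probe, assuming the sysctl CTL_HW/HW_VECTORUNIT selectors:

#include <sys/sysctl.h>

/* Ask the Darwin kernel whether the CPU has a vector (AltiVec) unit.
 * Returns non-zero when sysctl reports HW_VECTORUNIT as present. */
static int probe_altivec(void)
{
    int sels[2] = { CTL_HW, HW_VECTORUNIT };
    int has_vu = 0;
    size_t len = sizeof(has_vu);
    if (sysctl(sels, 2, &has_vu, &len, NULL, 0) == 0)
        return has_vu != 0;
    return 0;   /* no answer from the kernel: assume no AltiVec */
}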
diff --git a/libavcodec/ppc/idct_altivec.c b/libavcodec/ppc/idct_altivec.c
new file mode 100644
index 0000000000..8036d403fa
--- /dev/null
+++ b/libavcodec/ppc/idct_altivec.c
@@ -0,0 +1,216 @@
+/*
+ * Copyright (c) 2001 Michel Lespinasse
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+
+/*
+ * NOTE: This code is based on GPL code from the libmpeg2 project. The
+ * author, Michel Lespinasse, has given explicit permission to release
+ * under LGPL as part of ffmpeg.
+ *
+ */
+
+/*
+ * FFMpeg integration by Dieter Shirley
+ *
+ * This file is a direct copy of the altivec idct module from the libmpeg2
+ * project. I've deleted all of the libmpeg2 specific code, renamed the functions and
+ * re-ordered the function parameters. The only change to the IDCT function
+ * itself was to factor out the partial transposition, and to perform a full
+ * transpose at the end of the function.
+ */
+
+
+#include <stdlib.h> /* malloc(), free() */
+#include <string.h>
+#include "../dsputil.h"
+
+#define vector_s16_t vector signed short
+#define vector_u16_t vector unsigned short
+#define vector_s8_t vector signed char
+#define vector_u8_t vector unsigned char
+#define vector_s32_t vector signed int
+#define vector_u32_t vector unsigned int
+
+#define IDCT_HALF \
+ /* 1st stage */ \
+ t1 = vec_mradds (a1, vx7, vx1 ); \
+ t8 = vec_mradds (a1, vx1, vec_subs (zero, vx7)); \
+ t7 = vec_mradds (a2, vx5, vx3); \
+ t3 = vec_mradds (ma2, vx3, vx5); \
+ \
+ /* 2nd stage */ \
+ t5 = vec_adds (vx0, vx4); \
+ t0 = vec_subs (vx0, vx4); \
+ t2 = vec_mradds (a0, vx6, vx2); \
+ t4 = vec_mradds (a0, vx2, vec_subs (zero, vx6)); \
+ t6 = vec_adds (t8, t3); \
+ t3 = vec_subs (t8, t3); \
+ t8 = vec_subs (t1, t7); \
+ t1 = vec_adds (t1, t7); \
+ \
+ /* 3rd stage */ \
+ t7 = vec_adds (t5, t2); \
+ t2 = vec_subs (t5, t2); \
+ t5 = vec_adds (t0, t4); \
+ t0 = vec_subs (t0, t4); \
+ t4 = vec_subs (t8, t3); \
+ t3 = vec_adds (t8, t3); \
+ \
+ /* 4th stage */ \
+ vy0 = vec_adds (t7, t1); \
+ vy7 = vec_subs (t7, t1); \
+ vy1 = vec_mradds (c4, t3, t5); \
+ vy6 = vec_mradds (mc4, t3, t5); \
+ vy2 = vec_mradds (c4, t4, t0); \
+ vy5 = vec_mradds (mc4, t4, t0); \
+ vy3 = vec_adds (t2, t6); \
+ vy4 = vec_subs (t2, t6);
+
+
+#define IDCT \
+ vector_s16_t vx0, vx1, vx2, vx3, vx4, vx5, vx6, vx7; \
+ vector_s16_t vy0, vy1, vy2, vy3, vy4, vy5, vy6, vy7; \
+ vector_s16_t a0, a1, a2, ma2, c4, mc4, zero, bias; \
+ vector_s16_t t0, t1, t2, t3, t4, t5, t6, t7, t8; \
+ vector_u16_t shift; \
+ \
+ c4 = vec_splat (constants[0], 0); \
+ a0 = vec_splat (constants[0], 1); \
+ a1 = vec_splat (constants[0], 2); \
+ a2 = vec_splat (constants[0], 3); \
+ mc4 = vec_splat (constants[0], 4); \
+ ma2 = vec_splat (constants[0], 5); \
+ bias = (vector_s16_t)vec_splat ((vector_s32_t)constants[0], 3); \
+ \
+ zero = vec_splat_s16 (0); \
+ shift = vec_splat_u16 (4); \
+ \
+ vx0 = vec_mradds (vec_sl (block[0], shift), constants[1], zero); \
+ vx1 = vec_mradds (vec_sl (block[1], shift), constants[2], zero); \
+ vx2 = vec_mradds (vec_sl (block[2], shift), constants[3], zero); \
+ vx3 = vec_mradds (vec_sl (block[3], shift), constants[4], zero); \
+ vx4 = vec_mradds (vec_sl (block[4], shift), constants[1], zero); \
+ vx5 = vec_mradds (vec_sl (block[5], shift), constants[4], zero); \
+ vx6 = vec_mradds (vec_sl (block[6], shift), constants[3], zero); \
+ vx7 = vec_mradds (vec_sl (block[7], shift), constants[2], zero); \
+ \
+ IDCT_HALF \
+ \
+ vx0 = vec_mergeh (vy0, vy4); \
+ vx1 = vec_mergel (vy0, vy4); \
+ vx2 = vec_mergeh (vy1, vy5); \
+ vx3 = vec_mergel (vy1, vy5); \
+ vx4 = vec_mergeh (vy2, vy6); \
+ vx5 = vec_mergel (vy2, vy6); \
+ vx6 = vec_mergeh (vy3, vy7); \
+ vx7 = vec_mergel (vy3, vy7); \
+ \
+ vy0 = vec_mergeh (vx0, vx4); \
+ vy1 = vec_mergel (vx0, vx4); \
+ vy2 = vec_mergeh (vx1, vx5); \
+ vy3 = vec_mergel (vx1, vx5); \
+ vy4 = vec_mergeh (vx2, vx6); \
+ vy5 = vec_mergel (vx2, vx6); \
+ vy6 = vec_mergeh (vx3, vx7); \
+ vy7 = vec_mergel (vx3, vx7); \
+ \
+ vx0 = vec_adds (vec_mergeh (vy0, vy4), bias); \
+ vx1 = vec_mergel (vy0, vy4); \
+ vx2 = vec_mergeh (vy1, vy5); \
+ vx3 = vec_mergel (vy1, vy5); \
+ vx4 = vec_mergeh (vy2, vy6); \
+ vx5 = vec_mergel (vy2, vy6); \
+ vx6 = vec_mergeh (vy3, vy7); \
+ vx7 = vec_mergel (vy3, vy7); \
+ \
+ IDCT_HALF \
+ \
+ shift = vec_splat_u16 (6); \
+ vx0 = vec_sra (vy0, shift); \
+ vx1 = vec_sra (vy1, shift); \
+ vx2 = vec_sra (vy2, shift); \
+ vx3 = vec_sra (vy3, shift); \
+ vx4 = vec_sra (vy4, shift); \
+ vx5 = vec_sra (vy5, shift); \
+ vx6 = vec_sra (vy6, shift); \
+ vx7 = vec_sra (vy7, shift);
+
+static const vector_s16_t constants[5] = {
+ (vector_s16_t)(23170, 13573, 6518, 21895, -23170, -21895, 32, 31),
+ (vector_s16_t)(16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725),
+ (vector_s16_t)(22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521),
+ (vector_s16_t)(21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692),
+ (vector_s16_t)(19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722)
+};
+
+void idct_put_altivec(uint8_t* dest, int stride, vector_s16_t* block)
+{
+ vector_u8_t tmp;
+
+ IDCT
+
+#define COPY(dest,src) \
+ tmp = vec_packsu (src, src); \
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+
+ COPY (dest, vx0) dest += stride;
+ COPY (dest, vx1) dest += stride;
+ COPY (dest, vx2) dest += stride;
+ COPY (dest, vx3) dest += stride;
+ COPY (dest, vx4) dest += stride;
+ COPY (dest, vx5) dest += stride;
+ COPY (dest, vx6) dest += stride;
+ COPY (dest, vx7)
+}
+
+void idct_add_altivec(uint8_t* dest, int stride, vector_s16_t* block)
+{
+ vector_u8_t tmp;
+ vector_s16_t tmp2, tmp3;
+ vector_u8_t perm0;
+ vector_u8_t perm1;
+ vector_u8_t p0, p1, p;
+
+ IDCT
+
+ p0 = vec_lvsl (0, dest);
+ p1 = vec_lvsl (stride, dest);
+ p = vec_splat_u8 (-1);
+ perm0 = vec_mergeh (p, p0);
+ perm1 = vec_mergeh (p, p1);
+
+#define ADD(dest,src,perm) \
+ /* *(uint64_t *)&tmp = *(uint64_t *)dest; */ \
+ tmp = vec_ld (0, dest); \
+ tmp2 = (vector_s16_t)vec_perm (tmp, (vector_u8_t)zero, perm); \
+ tmp3 = vec_adds (tmp2, src); \
+ tmp = vec_packsu (tmp3, tmp3); \
+ vec_ste ((vector_u32_t)tmp, 0, (unsigned int *)dest); \
+ vec_ste ((vector_u32_t)tmp, 4, (unsigned int *)dest);
+
+ ADD (dest, vx0, perm0) dest += stride;
+ ADD (dest, vx1, perm1) dest += stride;
+ ADD (dest, vx2, perm0) dest += stride;
+ ADD (dest, vx3, perm1) dest += stride;
+ ADD (dest, vx4, perm0) dest += stride;
+ ADD (dest, vx5, perm1) dest += stride;
+ ADD (dest, vx6, perm0) dest += stride;
+ ADD (dest, vx7, perm1)
+}
+
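The multiply steps in IDCT_HALF all go through vec_mradds, which folds the multiply, the rounding shift, and the accumulate of a fixed-point butterfly into one instruction. A scalar model of what a single lane computes, under my reading of the intrinsic (an illustration, not code from the patch):

#include <stdint.h>

/* One lane of vec_mradds: ((a * b + 0x4000) >> 15) + c, with the
 * final addition saturated to the signed 16-bit range. */
static int16_t mradds_lane(int16_t a, int16_t b, int16_t c)
{
    int32_t t = (((int32_t)a * b) + 0x4000) >> 15;
    t += c;
    if (t >  32767) t =  32767;
    if (t < -32768) t = -32768;
    return (int16_t)t;
}

With the constants table holding cosine terms scaled to Q15, this mirrors the DESCALE(MULTIPLY(...)) pattern of a scalar reference IDCT, eight lanes at a time.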
diff --git a/libavcodec/ppc/mpegvideo_altivec.c b/libavcodec/ppc/mpegvideo_altivec.c
new file mode 100644
index 0000000000..bcbc1e6baf
--- /dev/null
+++ b/libavcodec/ppc/mpegvideo_altivec.c
@@ -0,0 +1,509 @@
+/*
+ * Copyright (c) 2002 Dieter Shirley
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include "../dsputil.h"
+#include "../mpegvideo.h"
+
+
+// Used when initializing constant vectors
+#define FOUR_INSTANCES(x) x,x,x,x
+
+// Swaps two variables (used for altivec registers)
+#define SWAP(a,b) \
+do { \
+ __typeof__(a) swap_temp=a; \
+ a=b; \
+ b=swap_temp; \
+} while (0)
+
+// transposes a matrix consisting of four vectors with four elements each
+#define TRANSPOSE4(a,b,c,d) \
+do { \
+ __typeof__(a) _trans_ach = vec_mergeh(a, c); \
+ __typeof__(a) _trans_acl = vec_mergel(a, c); \
+ __typeof__(a) _trans_bdh = vec_mergeh(b, d); \
+ __typeof__(a) _trans_bdl = vec_mergel(b, d); \
+ \
+ a = vec_mergeh(_trans_ach, _trans_bdh); \
+ b = vec_mergel(_trans_ach, _trans_bdh); \
+ c = vec_mergeh(_trans_acl, _trans_bdl); \
+ d = vec_mergel(_trans_acl, _trans_bdl); \
+} while (0)
+
+#define TRANSPOSE8(a,b,c,d,e,f,g,h) \
+do { \
+ __typeof__(a) _A1, _B1, _C1, _D1, _E1, _F1, _G1, _H1; \
+ __typeof__(a) _A2, _B2, _C2, _D2, _E2, _F2, _G2, _H2; \
+ \
+ _A1 = vec_mergeh (a, e); \
+ _B1 = vec_mergel (a, e); \
+ _C1 = vec_mergeh (b, f); \
+ _D1 = vec_mergel (b, f); \
+ _E1 = vec_mergeh (c, g); \
+ _F1 = vec_mergel (c, g); \
+ _G1 = vec_mergeh (d, h); \
+ _H1 = vec_mergel (d, h); \
+ \
+ _A2 = vec_mergeh (_A1, _E1); \
+ _B2 = vec_mergel (_A1, _E1); \
+ _C2 = vec_mergeh (_B1, _F1); \
+ _D2 = vec_mergel (_B1, _F1); \
+ _E2 = vec_mergeh (_C1, _G1); \
+ _F2 = vec_mergel (_C1, _G1); \
+ _G2 = vec_mergeh (_D1, _H1); \
+ _H2 = vec_mergel (_D1, _H1); \
+ \
+ a = vec_mergeh (_A2, _E2); \
+ b = vec_mergel (_A2, _E2); \
+ c = vec_mergeh (_B2, _F2); \
+ d = vec_mergel (_B2, _F2); \
+ e = vec_mergeh (_C2, _G2); \
+ f = vec_mergel (_C2, _G2); \
+ g = vec_mergeh (_D2, _H2); \
+ h = vec_mergel (_D2, _H2); \
+} while (0)
+
+
+// Loads a four-byte value (int or float) from the target address
+// into every element in the target vector. Only works if the
+// target address is four-byte aligned (which it always should be).
+#define LOAD4(vec, address) \
+{ \
+ __typeof__(vec)* _load_addr = (__typeof__(vec)*)(address); \
+ vector unsigned char _perm_vec = vec_lvsl(0,(address)); \
+ vec = vec_ld(0, _load_addr); \
+ vec = vec_perm(vec, vec, _perm_vec); \
+ vec = vec_splat(vec, 0); \
+}
+
+int dct_quantize_altivec(MpegEncContext* s,
+ DCTELEM* data, int n,
+ int qscale, int* overflow)
+{
+ int lastNonZero;
+ vector float row0, row1, row2, row3, row4, row5, row6, row7;
+ vector float alt0, alt1, alt2, alt3, alt4, alt5, alt6, alt7;
+ const vector float zero = {FOUR_INSTANCES(0.0f)};
+
+ // Load the data into the row/alt vectors
+ {
+ vector signed short data0, data1, data2, data3, data4, data5, data6, data7;
+
+ data0 = vec_ld(0, data);
+ data1 = vec_ld(16, data);
+ data2 = vec_ld(32, data);
+ data3 = vec_ld(48, data);
+ data4 = vec_ld(64, data);
+ data5 = vec_ld(80, data);
+ data6 = vec_ld(96, data);
+ data7 = vec_ld(112, data);
+
+ // Transpose the data before we start
+ TRANSPOSE8(data0, data1, data2, data3, data4, data5, data6, data7);
+
+ // load the data into floating point vectors. We load
+ // the high half of each row into the main row vectors
+ // and the low half into the alt vectors.
+ row0 = vec_ctf(vec_unpackh(data0), 0);
+ alt0 = vec_ctf(vec_unpackl(data0), 0);
+ row1 = vec_ctf(vec_unpackh(data1), 0);
+ alt1 = vec_ctf(vec_unpackl(data1), 0);
+ row2 = vec_ctf(vec_unpackh(data2), 0);
+ alt2 = vec_ctf(vec_unpackl(data2), 0);
+ row3 = vec_ctf(vec_unpackh(data3), 0);
+ alt3 = vec_ctf(vec_unpackl(data3), 0);
+ row4 = vec_ctf(vec_unpackh(data4), 0);
+ alt4 = vec_ctf(vec_unpackl(data4), 0);
+ row5 = vec_ctf(vec_unpackh(data5), 0);
+ alt5 = vec_ctf(vec_unpackl(data5), 0);
+ row6 = vec_ctf(vec_unpackh(data6), 0);
+ alt6 = vec_ctf(vec_unpackl(data6), 0);
+ row7 = vec_ctf(vec_unpackh(data7), 0);
+ alt7 = vec_ctf(vec_unpackl(data7), 0);
+ }
+
+ // The following block could exist as a separate AltiVec DCT
+ // function. However, if we put it inline, the DCT data can remain
+ // in the vector local variables, as floats, which we'll use during the
+ // quantize step...
+ {
+ const vector float vec_0_298631336 = {FOUR_INSTANCES(0.298631336f)};
+ const vector float vec_0_390180644 = {FOUR_INSTANCES(-0.390180644f)};
+ const vector float vec_0_541196100 = {FOUR_INSTANCES(0.541196100f)};
+ const vector float vec_0_765366865 = {FOUR_INSTANCES(0.765366865f)};
+ const vector float vec_0_899976223 = {FOUR_INSTANCES(-0.899976223f)};
+ const vector float vec_1_175875602 = {FOUR_INSTANCES(1.175875602f)};
+ const vector float vec_1_501321110 = {FOUR_INSTANCES(1.501321110f)};
+ const vector float vec_1_847759065 = {FOUR_INSTANCES(-1.847759065f)};
+ const vector float vec_1_961570560 = {FOUR_INSTANCES(-1.961570560f)};
+ const vector float vec_2_053119869 = {FOUR_INSTANCES(2.053119869f)};
+ const vector float vec_2_562915447 = {FOUR_INSTANCES(-2.562915447f)};
+ const vector float vec_3_072711026 = {FOUR_INSTANCES(3.072711026f)};
+
+
+ int whichPass, whichHalf;
+
+ for(whichPass = 1; whichPass<=2; whichPass++)
+ {
+ for(whichHalf = 1; whichHalf<=2; whichHalf++)
+ {
+ vector float tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
+ vector float tmp10, tmp11, tmp12, tmp13;
+ vector float z1, z2, z3, z4, z5;
+
+ tmp0 = vec_add(row0, row7); // tmp0 = dataptr[0] + dataptr[7];
+ tmp7 = vec_sub(row0, row7); // tmp7 = dataptr[0] - dataptr[7];
+ tmp3 = vec_add(row3, row4); // tmp3 = dataptr[3] + dataptr[4];
+ tmp4 = vec_sub(row3, row4); // tmp4 = dataptr[3] - dataptr[4];
+ tmp1 = vec_add(row1, row6); // tmp1 = dataptr[1] + dataptr[6];
+ tmp6 = vec_sub(row1, row6); // tmp6 = dataptr[1] - dataptr[6];
+ tmp2 = vec_add(row2, row5); // tmp2 = dataptr[2] + dataptr[5];
+ tmp5 = vec_sub(row2, row5); // tmp5 = dataptr[2] - dataptr[5];
+
+ tmp10 = vec_add(tmp0, tmp3); // tmp10 = tmp0 + tmp3;
+ tmp13 = vec_sub(tmp0, tmp3); // tmp13 = tmp0 - tmp3;
+ tmp11 = vec_add(tmp1, tmp2); // tmp11 = tmp1 + tmp2;
+ tmp12 = vec_sub(tmp1, tmp2); // tmp12 = tmp1 - tmp2;
+
+
+ // dataptr[0] = (DCTELEM) ((tmp10 + tmp11) << PASS1_BITS);
+ row0 = vec_add(tmp10, tmp11);
+
+ // dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS);
+ row4 = vec_sub(tmp10, tmp11);
+
+
+ // z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100);
+ z1 = vec_madd(vec_add(tmp12, tmp13), vec_0_541196100, (vector float)zero);
+
+ // dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865),
+ // CONST_BITS-PASS1_BITS);
+ row2 = vec_madd(tmp13, vec_0_765366865, z1);
+
+ // dataptr[6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065),
+ // CONST_BITS-PASS1_BITS);
+ row6 = vec_madd(tmp12, vec_1_847759065, z1);
+
+ z1 = vec_add(tmp4, tmp7); // z1 = tmp4 + tmp7;
+ z2 = vec_add(tmp5, tmp6); // z2 = tmp5 + tmp6;
+ z3 = vec_add(tmp4, tmp6); // z3 = tmp4 + tmp6;
+ z4 = vec_add(tmp5, tmp7); // z4 = tmp5 + tmp7;
+
+ // z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */
+ z5 = vec_madd(vec_add(z3, z4), vec_1_175875602, (vector float)zero);
+
+ // z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */
+ z3 = vec_madd(z3, vec_1_961570560, z5);
+
+ // z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */
+ z4 = vec_madd(z4, vec_0_390180644, z5);
+
+ // The following adds are rolled into the multiplies above
+ // z3 = vec_add(z3, z5); // z3 += z5;
+ // z4 = vec_add(z4, z5); // z4 += z5;
+
+ // z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */
+ // Wow! It's actually more efficient to roll this multiply
+ // into the adds below, even though the multiply gets done twice!
+ // z2 = vec_madd(z2, vec_2_562915447, (vector float)zero);
+
+ // z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */
+ // Same with this one...
+ // z1 = vec_madd(z1, vec_0_899976223, (vector float)zero);
+
+ // tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */
+ // dataptr[7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS);
+ row7 = vec_madd(tmp4, vec_0_298631336, vec_madd(z1, vec_0_899976223, z3));
+
+ // tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */
+ // dataptr[5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS);
+ row5 = vec_madd(tmp5, vec_2_053119869, vec_madd(z2, vec_2_562915447, z4));
+
+ // tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */
+ // dataptr[3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS);
+ row3 = vec_madd(tmp6, vec_3_072711026, vec_madd(z2, vec_2_562915447, z3));
+
+ // tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */
+ // dataptr[1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS);
+ row1 = vec_madd(z1, vec_0_899976223, vec_madd(tmp7, vec_1_501321110, z4));
+
+ // Swap the row values with the alts. If this is the first half,
+ // this sets up the low values to be acted on in the second half.
+ // If this is the second half, it puts the high values back in
+ // the row values where they are expected to be when we're done.
+ SWAP(row0, alt0);
+ SWAP(row1, alt1);
+ SWAP(row2, alt2);
+ SWAP(row3, alt3);
+ SWAP(row4, alt4);
+ SWAP(row5, alt5);
+ SWAP(row6, alt6);
+ SWAP(row7, alt7);
+ }
+
+ if (whichPass == 1)
+ {
+ // transpose the data for the second pass
+
+ // First, block transpose the upper right with lower left.
+ SWAP(row4, alt0);
+ SWAP(row5, alt1);
+ SWAP(row6, alt2);
+ SWAP(row7, alt3);
+
+ // Now, transpose each block of four
+ TRANSPOSE4(row0, row1, row2, row3);
+ TRANSPOSE4(row4, row5, row6, row7);
+ TRANSPOSE4(alt0, alt1, alt2, alt3);
+ TRANSPOSE4(alt4, alt5, alt6, alt7);
+ }
+ }
+ }
+
+ // used after the quantise step
+ int oldBaseValue = 0;
+
+ // perform the quantise step, using the floating point data
+ // still in the row/alt registers
+ {
+ const int* biasAddr;
+ const vector signed int* qmat;
+ vector float bias, negBias;
+
+ if (s->mb_intra)
+ {
+ vector signed int baseVector;
+
+ // We must cache element 0 in the intra case
+ // (it needs special handling).
+ baseVector = vec_cts(vec_splat(row0, 0), 0);
+ vec_ste(baseVector, 0, &oldBaseValue);
+
+ qmat = (vector signed int*)s->q_intra_matrix[qscale];
+ biasAddr = &(s->intra_quant_bias);
+ }
+ else
+ {
+ qmat = (vector signed int*)s->q_inter_matrix[qscale];
+ biasAddr = &(s->inter_quant_bias);
+ }
+
+ // Load the bias vector (We add 0.5 to the bias so that we're
+ // rounding when we convert to int, instead of flooring.)
+ {
+ vector signed int biasInt;
+ const vector float negOneFloat = (vector float)(FOUR_INSTANCES(-1.0f));
+ LOAD4(biasInt, biasAddr);
+ bias = vec_ctf(biasInt, QUANT_BIAS_SHIFT);
+ negBias = vec_madd(bias, negOneFloat, zero);
+ }
+
+ {
+ vector float q0, q1, q2, q3, q4, q5, q6, q7;
+
+ q0 = vec_ctf(qmat[0], QMAT_SHIFT);
+ q1 = vec_ctf(qmat[2], QMAT_SHIFT);
+ q2 = vec_ctf(qmat[4], QMAT_SHIFT);
+ q3 = vec_ctf(qmat[6], QMAT_SHIFT);
+ q4 = vec_ctf(qmat[8], QMAT_SHIFT);
+ q5 = vec_ctf(qmat[10], QMAT_SHIFT);
+ q6 = vec_ctf(qmat[12], QMAT_SHIFT);
+ q7 = vec_ctf(qmat[14], QMAT_SHIFT);
+
+ row0 = vec_sel(vec_madd(row0, q0, negBias), vec_madd(row0, q0, bias),
+ vec_cmpgt(row0, zero));
+ row1 = vec_sel(vec_madd(row1, q1, negBias), vec_madd(row1, q1, bias),
+ vec_cmpgt(row1, zero));
+ row2 = vec_sel(vec_madd(row2, q2, negBias), vec_madd(row2, q2, bias),
+ vec_cmpgt(row2, zero));
+ row3 = vec_sel(vec_madd(row3, q3, negBias), vec_madd(row3, q3, bias),
+ vec_cmpgt(row3, zero));
+ row4 = vec_sel(vec_madd(row4, q4, negBias), vec_madd(row4, q4, bias),
+ vec_cmpgt(row4, zero));
+ row5 = vec_sel(vec_madd(row5, q5, negBias), vec_madd(row5, q5, bias),
+ vec_cmpgt(row5, zero));
+ row6 = vec_sel(vec_madd(row6, q6, negBias), vec_madd(row6, q6, bias),
+ vec_cmpgt(row6, zero));
+ row7 = vec_sel(vec_madd(row7, q7, negBias), vec_madd(row7, q7, bias),
+ vec_cmpgt(row7, zero));
+
+ q0 = vec_ctf(qmat[1], QMAT_SHIFT);
+ q1 = vec_ctf(qmat[3], QMAT_SHIFT);
+ q2 = vec_ctf(qmat[5], QMAT_SHIFT);
+ q3 = vec_ctf(qmat[7], QMAT_SHIFT);
+ q4 = vec_ctf(qmat[9], QMAT_SHIFT);
+ q5 = vec_ctf(qmat[11], QMAT_SHIFT);
+ q6 = vec_ctf(qmat[13], QMAT_SHIFT);
+ q7 = vec_ctf(qmat[15], QMAT_SHIFT);
+
+ alt0 = vec_sel(vec_madd(alt0, q0, negBias), vec_madd(alt0, q0, bias),
+ vec_cmpgt(alt0, zero));
+ alt1 = vec_sel(vec_madd(alt1, q1, negBias), vec_madd(alt1, q1, bias),
+ vec_cmpgt(alt1, zero));
+ alt2 = vec_sel(vec_madd(alt2, q2, negBias), vec_madd(alt2, q2, bias),
+ vec_cmpgt(alt2, zero));
+ alt3 = vec_sel(vec_madd(alt3, q3, negBias), vec_madd(alt3, q3, bias),
+ vec_cmpgt(alt3, zero));
+ alt4 = vec_sel(vec_madd(alt4, q4, negBias), vec_madd(alt4, q4, bias),
+ vec_cmpgt(alt4, zero));
+ alt5 = vec_sel(vec_madd(alt5, q5, negBias), vec_madd(alt5, q5, bias),
+ vec_cmpgt(alt5, zero));
+ alt6 = vec_sel(vec_madd(alt6, q6, negBias), vec_madd(alt6, q6, bias),
+ vec_cmpgt(alt6, zero));
+ alt7 = vec_sel(vec_madd(alt7, q7, negBias), vec_madd(alt7, q7, bias),
+ vec_cmpgt(alt7, zero));
+ }
+
+
+ }
+
+ // Store the data back into the original block
+ {
+ vector signed short data0, data1, data2, data3, data4, data5, data6, data7;
+
+ data0 = vec_pack(vec_cts(row0, 0), vec_cts(alt0, 0));
+ data1 = vec_pack(vec_cts(row1, 0), vec_cts(alt1, 0));
+ data2 = vec_pack(vec_cts(row2, 0), vec_cts(alt2, 0));
+ data3 = vec_pack(vec_cts(row3, 0), vec_cts(alt3, 0));
+ data4 = vec_pack(vec_cts(row4, 0), vec_cts(alt4, 0));
+ data5 = vec_pack(vec_cts(row5, 0), vec_cts(alt5, 0));
+ data6 = vec_pack(vec_cts(row6, 0), vec_cts(alt6, 0));
+ data7 = vec_pack(vec_cts(row7, 0), vec_cts(alt7, 0));
+
+ {
+ // Clamp for overflow
+ vector signed int max_q_int, min_q_int;
+ vector signed short max_q, min_q;
+
+ LOAD4(max_q_int, &(s->max_qcoeff));
+ LOAD4(min_q_int, &(s->min_qcoeff));
+
+ max_q = vec_pack(max_q_int, max_q_int);
+ min_q = vec_pack(min_q_int, min_q_int);
+
+ data0 = vec_max(vec_min(data0, max_q), min_q);
+ data1 = vec_max(vec_min(data1, max_q), min_q);
+ data2 = vec_max(vec_min(data2, max_q), min_q);
+ data3 = vec_max(vec_min(data3, max_q), min_q);
+ data4 = vec_max(vec_min(data4, max_q), min_q);
+ data5 = vec_max(vec_min(data5, max_q), min_q);
+ data6 = vec_max(vec_min(data6, max_q), min_q);
+ data7 = vec_max(vec_min(data7, max_q), min_q);
+ }
+
+ vector bool char zero_01, zero_23, zero_45, zero_67;
+ vector signed char scanIndices_01, scanIndices_23, scanIndices_45, scanIndices_67;
+ vector signed char negOne = vec_splat_s8(-1);
+ vector signed char* scanPtr =
+ (vector signed char*)(s->intra_scantable.inverse);
+
+ // Determine the largest non-zero index.
+ zero_01 = vec_pack(vec_cmpeq(data0, (vector short)zero),
+ vec_cmpeq(data1, (vector short)zero));
+ zero_23 = vec_pack(vec_cmpeq(data2, (vector short)zero),
+ vec_cmpeq(data3, (vector short)zero));
+ zero_45 = vec_pack(vec_cmpeq(data4, (vector short)zero),
+ vec_cmpeq(data5, (vector short)zero));
+ zero_67 = vec_pack(vec_cmpeq(data6, (vector short)zero),
+ vec_cmpeq(data7, (vector short)zero));
+
+ // 64 biggest values
+ scanIndices_01 = vec_sel(scanPtr[0], negOne, zero_01);
+ scanIndices_23 = vec_sel(scanPtr[1], negOne, zero_23);
+ scanIndices_45 = vec_sel(scanPtr[2], negOne, zero_45);
+ scanIndices_67 = vec_sel(scanPtr[3], negOne, zero_67);
+
+ // 32 largest values
+ scanIndices_01 = vec_max(scanIndices_01, scanIndices_23);
+ scanIndices_45 = vec_max(scanIndices_45, scanIndices_67);
+
+ // 16 largest values
+ scanIndices_01 = vec_max(scanIndices_01, scanIndices_45);
+
+ // 8 largest values
+ scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
+ vec_mergel(scanIndices_01, negOne));
+
+ // 4 largest values
+ scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
+ vec_mergel(scanIndices_01, negOne));
+
+ // 2 largest values
+ scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
+ vec_mergel(scanIndices_01, negOne));
+
+ // largest value
+ scanIndices_01 = vec_max(vec_mergeh(scanIndices_01, negOne),
+ vec_mergel(scanIndices_01, negOne));
+
+ scanIndices_01 = vec_splat(scanIndices_01, 0);
+
+ signed char lastNonZeroChar;
+
+ vec_ste(scanIndices_01, 0, &lastNonZeroChar);
+
+ lastNonZero = lastNonZeroChar;
+
+ // While the data is still in vectors we check for the transpose IDCT permute
+ // and handle it using the vector unit if we can. This is the permute used
+ // by the altivec idct, so it is common when using the altivec dct.
+
+ if ((lastNonZero > 0) && (s->idct_permutation_type == FF_TRANSPOSE_IDCT_PERM))
+ {
+ TRANSPOSE8(data0, data1, data2, data3, data4, data5, data6, data7);
+ }
+
+ vec_st(data0, 0, data);
+ vec_st(data1, 16, data);
+ vec_st(data2, 32, data);
+ vec_st(data3, 48, data);
+ vec_st(data4, 64, data);
+ vec_st(data5, 80, data);
+ vec_st(data6, 96, data);
+ vec_st(data7, 112, data);
+ }
+
+ // special handling of block[0]
+ if (s->mb_intra)
+ {
+ if (!s->h263_aic)
+ {
+ if (n < 4)
+ oldBaseValue /= s->y_dc_scale;
+ else
+ oldBaseValue /= s->c_dc_scale;
+ }
+
+ // Divide by 8, rounding the result
+ data[0] = (oldBaseValue + 4) >> 3;
+ }
+
+ // We handled the transpose permutation above and we don't
+ // need to permute the "no" permutation case.
+ if ((lastNonZero > 0) &&
+ (s->idct_permutation_type != FF_TRANSPOSE_IDCT_PERM) &&
+ (s->idct_permutation_type != FF_NO_IDCT_PERM))
+ {
+ ff_block_permute(data, s->idct_permutation,
+ s->intra_scantable.scantable, lastNonZero);
+ }
+
+ return lastNonZero;
+}
+
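The quantise step stays branch-free by computing both the +bias and -bias results and letting vec_sel pick per lane from the vec_cmpgt mask. One coefficient of the same logic in scalar form (a sketch; the function name is hypothetical):

/* Scalar model of one lane of the quantise select above:
 * positive coefficients get +bias, the rest get -bias, before
 * the later truncating convert to int. */
static float quantize_lane(float coeff, float q, float bias)
{
    return coeff > 0.0f ? coeff * q + bias
                        : coeff * q - bias;
}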
diff --git a/libavcodec/ppc/mpegvideo_ppc.c b/libavcodec/ppc/mpegvideo_ppc.c
new file mode 100644
index 0000000000..1f5e5a4ef1
--- /dev/null
+++ b/libavcodec/ppc/mpegvideo_ppc.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2002 Dieter Shirley
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <time.h>
+#include "../../config.h"
+#include "../dsputil.h"
+#include "../mpegvideo.h"
+
+#ifdef HAVE_ALTIVEC
+#include "dsputil_altivec.h"
+#endif
+
+extern int dct_quantize_altivec(MpegEncContext *s,
+ DCTELEM *block, int n,
+ int qscale, int *overflow);
+
+extern void idct_put_altivec(UINT8 *dest, int line_size, INT16 *block);
+extern void idct_add_altivec(UINT8 *dest, int line_size, INT16 *block);
+
+
+void MPV_common_init_ppc(MpegEncContext *s)
+{
+#if HAVE_ALTIVEC
+ if (has_altivec())
+ {
+ if ((s->avctx->idct_algo == FF_IDCT_AUTO) ||
+ (s->avctx->idct_algo == FF_IDCT_ALTIVEC))
+ {
+ s->idct_put = idct_put_altivec;
+ s->idct_add = idct_add_altivec;
+ s->idct_permutation_type = FF_TRANSPOSE_IDCT_PERM;
+ }
+
+ // Test to make sure that the alignments required by the DCT are met.
+ if ((((long)(s->q_intra_matrix) & 0x0f) != 0) ||
+ (((long)(s->q_inter_matrix) & 0x0f) != 0))
+ {
+ fprintf(stderr, "Internal Error: q-matrix blocks must be 16-byte aligned "
+ "to use Altivec DCT. Reverting to non-altivec version.\n");
+ return;
+ }
+
+ if (((long)(s->intra_scantable.inverse) & 0x0f) != 0)
+ {
+ fprintf(stderr, "Internal Error: scan table blocks must be 16-byte aligned "
+ "to use Altivec DCT. Reverting to non-altivec version.\n");
+ return;
+ }
+
+
+ if ((s->avctx->dct_algo == FF_DCT_AUTO) ||
+ (s->avctx->dct_algo == FF_DCT_ALTIVEC))
+ {
+ s->dct_quantize = dct_quantize_altivec;
+ }
+ } else
+#endif
+ {
+ /* Non-AltiVec PPC optimisations here */
+ }
+}
+
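The alignment guards in MPV_common_init_ppc() test the low four address bits before trusting vec_ld/vec_st with the q-matrices and the scan table. The same check as a small helper, shown only as a sketch:

/* True when ptr sits on a 16-byte boundary, the alignment that
 * vec_ld/vec_st require of the q-matrix and scan-table blocks. */
static int is_vec_aligned(const void *ptr)
{
    return ((long)ptr & 0x0f) == 0;
}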