author    Diego Biurrun <diego@biurrun.de>    2008-07-24 10:53:32 +0000
committer Diego Biurrun <diego@biurrun.de>    2008-07-24 10:53:32 +0000
commit    80a61f08d2d6059b0aaffa4c8b8120fb0ab0ca75 (patch)
tree      4567552fff1cba203b754fc790485d702414a687 /libavcodec/ppc
parent    ec072669f7398f40d8001c8cb0868880569c6cd9 (diff)
Remove AltiVec vector declaration compiler compatibility macros.
The original problem was that FSF and Apple gcc used a different syntax for vector declarations, i.e. {} vs. (). Nowadays Apple gcc supports the standard {} syntax, and compiler versions that accept it are available on all relevant Mac OS X releases, so the extra compatibility is no longer worth cluttering the code with macros.

Originally committed as revision 14366 to svn://svn.ffmpeg.org/ffmpeg/trunk
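For reference, a minimal sketch of the syntax split the macro bridged and the plain literal that replaces it. The exact historical AVV() definition and the OLD_APPLE_GCC_SYNTAX guard shown here are illustrative assumptions, not taken from this patch:

#include <altivec.h>

/* Hypothetical compatibility macro along the lines of the removed AVV():
 * older Apple gcc wanted parentheses around vector literals, FSF gcc braces. */
#ifdef OLD_APPLE_GCC_SYNTAX              /* hypothetical guard, for illustration */
#define AVV(x...) (x)                    /* Apple: (vector signed short)( 1,-1, ... ) */
#else
#define AVV(x...) {x}                    /* FSF:   (vector signed short){ 1,-1, ... } */
#endif

/* With current compilers the standard brace form is simply written directly: */
static const vector signed short vprod1 = { 1, -1, 1, -1, 1, -1, 1, -1 };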
Diffstat (limited to 'libavcodec/ppc')
-rw-r--r--  libavcodec/ppc/dsputil_altivec.c        | 40
-rw-r--r--  libavcodec/ppc/fdct_altivec.c           |  6
-rw-r--r--  libavcodec/ppc/h264_altivec.c           | 10
-rw-r--r--  libavcodec/ppc/h264_template_altivec.c  | 12
-rw-r--r--  libavcodec/ppc/idct_altivec.c           | 10
-rw-r--r--  libavcodec/ppc/mpegvideo_altivec.c      |  2
-rw-r--r--  libavcodec/ppc/util_altivec.h           |  4
7 files changed, 42 insertions, 42 deletions
diff --git a/libavcodec/ppc/dsputil_altivec.c b/libavcodec/ppc/dsputil_altivec.c
index bb0fad4e48..6ff219e3b0 100644
--- a/libavcodec/ppc/dsputil_altivec.c
+++ b/libavcodec/ppc/dsputil_altivec.c
@@ -277,7 +277,7 @@ int sad8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
sad = (vector unsigned int)vec_splat_u32(0);
- permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);
+ permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};
for (i = 0; i < h; i++) {
/* Read potentially unaligned pixels into t1 and t2
@@ -358,7 +358,7 @@ int sse8_altivec(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h)
sum = (vector unsigned int)vec_splat_u32(0);
- permclear = (vector unsigned char)AVV(255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0);
+ permclear = (vector unsigned char){255,255,255,255,255,255,255,255,0,0,0,0,0,0,0,0};
for (i = 0; i < h; i++) {
@@ -990,20 +990,20 @@ POWERPC_PERF_DECLARE(altivec_hadamard8_diff8x8_num, 1);
POWERPC_PERF_START_COUNT(altivec_hadamard8_diff8x8_num, 1);
{
register const vector signed short vprod1 =(const vector signed short)
- AVV( 1,-1, 1,-1, 1,-1, 1,-1);
+ { 1,-1, 1,-1, 1,-1, 1,-1 };
register const vector signed short vprod2 =(const vector signed short)
- AVV( 1, 1,-1,-1, 1, 1,-1,-1);
+ { 1, 1,-1,-1, 1, 1,-1,-1 };
register const vector signed short vprod3 =(const vector signed short)
- AVV( 1, 1, 1, 1,-1,-1,-1,-1);
+ { 1, 1, 1, 1,-1,-1,-1,-1 };
register const vector unsigned char perm1 = (const vector unsigned char)
- AVV(0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
- 0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D);
+ {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
+ 0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
register const vector unsigned char perm2 = (const vector unsigned char)
- AVV(0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
- 0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B);
+ {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
register const vector unsigned char perm3 = (const vector unsigned char)
- AVV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
+ {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};
#define ONEITERBUTTERFLY(i, res) \
{ \
@@ -1130,23 +1130,23 @@ static int hadamard8_diff16x8_altivec(/*MpegEncContext*/ void *s, uint8_t *dst,
(const vector unsigned char)vec_splat_u8(0);
{
register const vector signed short vprod1 REG_v(v16)=
- (const vector signed short)AVV( 1,-1, 1,-1, 1,-1, 1,-1);
+ (const vector signed short){ 1,-1, 1,-1, 1,-1, 1,-1 };
register const vector signed short vprod2 REG_v(v17)=
- (const vector signed short)AVV( 1, 1,-1,-1, 1, 1,-1,-1);
+ (const vector signed short){ 1, 1,-1,-1, 1, 1,-1,-1 };
register const vector signed short vprod3 REG_v(v18)=
- (const vector signed short)AVV( 1, 1, 1, 1,-1,-1,-1,-1);
+ (const vector signed short){ 1, 1, 1, 1,-1,-1,-1,-1 };
register const vector unsigned char perm1 REG_v(v19)=
(const vector unsigned char)
- AVV(0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
- 0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D);
+ {0x02, 0x03, 0x00, 0x01, 0x06, 0x07, 0x04, 0x05,
+ 0x0A, 0x0B, 0x08, 0x09, 0x0E, 0x0F, 0x0C, 0x0D};
register const vector unsigned char perm2 REG_v(v20)=
(const vector unsigned char)
- AVV(0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
- 0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B);
+ {0x04, 0x05, 0x06, 0x07, 0x00, 0x01, 0x02, 0x03,
+ 0x0C, 0x0D, 0x0E, 0x0F, 0x08, 0x09, 0x0A, 0x0B};
register const vector unsigned char perm3 REG_v(v21)=
(const vector unsigned char)
- AVV(0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
- 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07);
+ {0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F,
+ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07};
#define ONEITERBUTTERFLY(i, res1, res2) \
{ \
diff --git a/libavcodec/ppc/fdct_altivec.c b/libavcodec/ppc/fdct_altivec.c
index ed2903f163..cb7577486b 100644
--- a/libavcodec/ppc/fdct_altivec.c
+++ b/libavcodec/ppc/fdct_altivec.c
@@ -58,9 +58,9 @@
static vector float fdctconsts[3] = {
- AVV( W0, W1, W2, W3 ),
- AVV( W4, W5, W6, W7 ),
- AVV( W8, W9, WA, WB )
+ { W0, W1, W2, W3 },
+ { W4, W5, W6, W7 },
+ { W8, W9, WA, WB }
};
#define LD_W0 vec_splat(cnsts0, 0)
diff --git a/libavcodec/ppc/h264_altivec.c b/libavcodec/ppc/h264_altivec.c
index 04dad2e33d..87bd26871c 100644
--- a/libavcodec/ppc/h264_altivec.c
+++ b/libavcodec/ppc/h264_altivec.c
@@ -208,15 +208,15 @@ void put_no_rnd_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride
vec_u8_t vdst, ppsum, fsum;
if (((unsigned long)dst) % 16 == 0) {
- fperm = (vec_u8_t)AVV(0x10, 0x11, 0x12, 0x13,
+ fperm = (vec_u8_t){0x10, 0x11, 0x12, 0x13,
0x14, 0x15, 0x16, 0x17,
0x08, 0x09, 0x0A, 0x0B,
- 0x0C, 0x0D, 0x0E, 0x0F);
+ 0x0C, 0x0D, 0x0E, 0x0F};
} else {
- fperm = (vec_u8_t)AVV(0x00, 0x01, 0x02, 0x03,
+ fperm = (vec_u8_t){0x00, 0x01, 0x02, 0x03,
0x04, 0x05, 0x06, 0x07,
0x18, 0x19, 0x1A, 0x1B,
- 0x1C, 0x1D, 0x1E, 0x1F);
+ 0x1C, 0x1D, 0x1E, 0x1F};
}
vsrcAuc = vec_ld(0, src);
@@ -563,7 +563,7 @@ void ff_h264_idct8_add_altivec( uint8_t *dst, DCTELEM *dct, int stride ) {
const vec_u16_t twov = vec_splat_u16(2);
const vec_u16_t sixv = vec_splat_u16(6);
- const vec_u8_t sel = (vec_u8_t) AVV(0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1);
+ const vec_u8_t sel = (vec_u8_t) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
LOAD_ZERO;
dct[0] += 32; // rounding for the >>6 at the end
diff --git a/libavcodec/ppc/h264_template_altivec.c b/libavcodec/ppc/h264_template_altivec.c
index b4a5ddcdc5..d0f2fc0da7 100644
--- a/libavcodec/ppc/h264_template_altivec.c
+++ b/libavcodec/ppc/h264_template_altivec.c
@@ -102,15 +102,15 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
POWERPC_PERF_START_COUNT(PREFIX_h264_chroma_mc8_num, 1);
if (((unsigned long)dst) % 16 == 0) {
- fperm = (vec_u8_t)AVV(0x10, 0x11, 0x12, 0x13,
+ fperm = (vec_u8_t){0x10, 0x11, 0x12, 0x13,
0x14, 0x15, 0x16, 0x17,
0x08, 0x09, 0x0A, 0x0B,
- 0x0C, 0x0D, 0x0E, 0x0F);
+ 0x0C, 0x0D, 0x0E, 0x0F};
} else {
- fperm = (vec_u8_t)AVV(0x00, 0x01, 0x02, 0x03,
+ fperm = (vec_u8_t){0x00, 0x01, 0x02, 0x03,
0x04, 0x05, 0x06, 0x07,
0x18, 0x19, 0x1A, 0x1B,
- 0x1C, 0x1D, 0x1E, 0x1F);
+ 0x1C, 0x1D, 0x1E, 0x1F};
}
vsrcAuc = vec_ld(0, src);
@@ -485,8 +485,8 @@ static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp,
pp1A, pp1B, pp2A, pp2B, psumA, psumB;
const vec_u8_t mperm = (const vec_u8_t)
- AVV(0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
- 0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F);
+ {0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
+ 0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F};
int16_t *tmpbis = tmp;
vec_s16_t tmpM1ssA, tmpM1ssB, tmpM2ssA, tmpM2ssB,
diff --git a/libavcodec/ppc/idct_altivec.c b/libavcodec/ppc/idct_altivec.c
index 5d596b1acd..7acef487a6 100644
--- a/libavcodec/ppc/idct_altivec.c
+++ b/libavcodec/ppc/idct_altivec.c
@@ -157,11 +157,11 @@
static const_vector_s16_t constants[5] = {
- AVV(23170, 13573, 6518, 21895, -23170, -21895, 32, 31),
- AVV(16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725),
- AVV(22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521),
- AVV(21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692),
- AVV(19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722)
+ {23170, 13573, 6518, 21895, -23170, -21895, 32, 31},
+ {16384, 22725, 21407, 19266, 16384, 19266, 21407, 22725},
+ {22725, 31521, 29692, 26722, 22725, 26722, 29692, 31521},
+ {21407, 29692, 27969, 25172, 21407, 25172, 27969, 29692},
+ {19266, 26722, 25172, 22654, 19266, 22654, 25172, 26722}
};
void idct_put_altivec(uint8_t* dest, int stride, vector_s16_t* block)
diff --git a/libavcodec/ppc/mpegvideo_altivec.c b/libavcodec/ppc/mpegvideo_altivec.c
index f2e4fae092..4c9ae2da36 100644
--- a/libavcodec/ppc/mpegvideo_altivec.c
+++ b/libavcodec/ppc/mpegvideo_altivec.c
@@ -66,7 +66,7 @@ do { \
}
-#define FOUROF(a) AVV(a,a,a,a)
+#define FOUROF(a) {a,a,a,a}
int dct_quantize_altivec(MpegEncContext* s,
DCTELEM* data, int n,
diff --git a/libavcodec/ppc/util_altivec.h b/libavcodec/ppc/util_altivec.h
index 6a8afb1b22..74fc2ab1ce 100644
--- a/libavcodec/ppc/util_altivec.h
+++ b/libavcodec/ppc/util_altivec.h
@@ -43,8 +43,8 @@
#define WORD_s2 0x18,0x19,0x1a,0x1b
#define WORD_s3 0x1c,0x1d,0x1e,0x1f
-#define vcprm(a,b,c,d) (const vector unsigned char)AVV(WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d)
-#define vcii(a,b,c,d) (const vector float)AVV(FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d)
+#define vcprm(a,b,c,d) (const vector unsigned char){WORD_ ## a, WORD_ ## b, WORD_ ## c, WORD_ ## d}
+#define vcii(a,b,c,d) (const vector float){FLOAT_ ## a, FLOAT_ ## b, FLOAT_ ## c, FLOAT_ ## d}
// vcprmle is used to keep the same index as in the SSE version.
// it's the same as vcprm, with the index inversed
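As a usage note: an illustrative sketch, not part of the patch, of how a brace-literal permute constant of the kind vcprm() now expands to is fed to vec_perm. The WORD_0/WORD_s0 byte patterns used below are assumed by analogy with the WORD_s2/WORD_s3 macros visible above:

#include <altivec.h>

/* Interleave the first two 32-bit words of a and b:
 * result = { a[0], b[0], a[1], b[1] } */
static inline vector float interleave_words(vector float a, vector float b)
{
    const vector unsigned char perm = (const vector unsigned char)
        {0x00,0x01,0x02,0x03, 0x10,0x11,0x12,0x13,    /* word 0 of a, word 0 of b */
         0x04,0x05,0x06,0x07, 0x14,0x15,0x16,0x17};   /* word 1 of a, word 1 of b */
    return vec_perm(a, b, perm);
}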