author    Måns Rullgård <mans@mansr.com>  2010-01-22 03:25:11 +0000
committer Måns Rullgård <mans@mansr.com>  2010-01-22 03:25:11 +0000
commit    c67278098def4438fc587744f5df1c147bc95dc3 (patch)
tree      032a9f82fd504566b8e7361d6ea6e80cbda18c0c /libavcodec/ppc
parent    27ce1be89ba765d4129a638f2dd673e1f6e17682 (diff)
Move array specifiers outside DECLARE_ALIGNED() invocations
Originally committed as revision 21377 to svn://svn.ffmpeg.org/ffmpeg/trunk
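
For illustration, a minimal sketch of what this change does at the macro level. The DECLARE_ALIGNED definition below is an assumption made for the sketch (the real, per-compiler definitions live elsewhere in the tree and vary); the point is purely mechanical: passing a bare identifier keeps the array declarator out of the macro argument, so each compiler-specific variant of the macro only ever has to decorate a plain variable name.

/* assumed GCC-style definition, for illustration only */
#define DECLARE_ALIGNED(n, t, v) t __attribute__((aligned(n))) v

/* old form: the array declarator travels inside the macro argument */
DECLARE_ALIGNED(16, int16_t, tmp[8]);
/* expands to: int16_t __attribute__((aligned(16))) tmp[8]; */

/* new form: the macro sees only the identifier; the declarator follows */
DECLARE_ALIGNED(16, int16_t, tmp)[8];
/* expands to: int16_t __attribute__((aligned(16))) tmp[8]; (same result
 * under this assumed definition) */

With this assumed definition both spellings expand identically; the new one simply guarantees that the third argument is always a simple name, whatever alignment syntax a given DECLARE_ALIGNED variant wraps around it.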
Diffstat (limited to 'libavcodec/ppc')
-rw-r--r--  libavcodec/ppc/float_altivec.c           2
-rw-r--r--  libavcodec/ppc/gmc_altivec.c             2
-rw-r--r--  libavcodec/ppc/h264_altivec.c           58
-rw-r--r--  libavcodec/ppc/h264_template_altivec.c   4
4 files changed, 33 insertions, 33 deletions
diff --git a/libavcodec/ppc/float_altivec.c b/libavcodec/ppc/float_altivec.c
index 1c7326392b..d1f9f1ade3 100644
--- a/libavcodec/ppc/float_altivec.c
+++ b/libavcodec/ppc/float_altivec.c
@@ -226,7 +226,7 @@ float_to_int16_interleave_altivec(int16_t *dst, const float **src,
dst+=8;
}
} else {
- DECLARE_ALIGNED(16, int16_t, tmp[len]);
+ DECLARE_ALIGNED(16, int16_t, tmp)[len];
int c, j;
for (c = 0; c < channels; c++) {
float_to_int16_altivec(tmp, src[c], len);
diff --git a/libavcodec/ppc/gmc_altivec.c b/libavcodec/ppc/gmc_altivec.c
index 70c0cf9eb8..3b582c67c0 100644
--- a/libavcodec/ppc/gmc_altivec.c
+++ b/libavcodec/ppc/gmc_altivec.c
@@ -34,7 +34,7 @@ void gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int str
{
POWERPC_PERF_DECLARE(altivec_gmc1_num, GMC1_PERF_COND);
const DECLARE_ALIGNED_16(unsigned short, rounder_a) = rounder;
- const DECLARE_ALIGNED_16(unsigned short, ABCD[8]) =
+ const DECLARE_ALIGNED_16(unsigned short, ABCD)[8] =
{
(16-x16)*(16-y16), /* A */
( x16)*(16-y16), /* B */
diff --git a/libavcodec/ppc/h264_altivec.c b/libavcodec/ppc/h264_altivec.c
index 29a9c7315a..0af9cf7b78 100644
--- a/libavcodec/ppc/h264_altivec.c
+++ b/libavcodec/ppc/h264_altivec.c
@@ -79,7 +79,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uin
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
- DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, half)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
@@ -89,13 +89,13 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, half)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, half)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
@@ -105,79 +105,79 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, half[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, half)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfH)[SIZE*SIZE];\
+ DECLARE_ALIGNED_16(uint8_t, halfV)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfH)[SIZE*SIZE];\
+ DECLARE_ALIGNED_16(uint8_t, halfV)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfH)[SIZE*SIZE];\
+ DECLARE_ALIGNED_16(uint8_t, halfV)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
+ DECLARE_ALIGNED_16(uint8_t, halfH)[SIZE*SIZE];\
+ DECLARE_ALIGNED_16(uint8_t, halfV)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+ DECLARE_ALIGNED_16(int16_t, tmp)[SIZE*(SIZE+8)];\
OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+ DECLARE_ALIGNED_16(uint8_t, halfH)[SIZE*SIZE];\
+ DECLARE_ALIGNED_16(uint8_t, halfHV)[SIZE*SIZE];\
+ DECLARE_ALIGNED_16(int16_t, tmp)[SIZE*(SIZE+8)];\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, halfH[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+ DECLARE_ALIGNED_16(uint8_t, halfH)[SIZE*SIZE];\
+ DECLARE_ALIGNED_16(uint8_t, halfHV)[SIZE*SIZE];\
+ DECLARE_ALIGNED_16(int16_t, tmp)[SIZE*(SIZE+8)];\
put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+ DECLARE_ALIGNED_16(uint8_t, halfV)[SIZE*SIZE];\
+ DECLARE_ALIGNED_16(uint8_t, halfHV)[SIZE*SIZE];\
+ DECLARE_ALIGNED_16(int16_t, tmp)[SIZE*(SIZE+8)];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED_16(uint8_t, halfV[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(uint8_t, halfHV[SIZE*SIZE]);\
- DECLARE_ALIGNED_16(int16_t, tmp[SIZE*(SIZE+8)]);\
+ DECLARE_ALIGNED_16(uint8_t, halfV)[SIZE*SIZE];\
+ DECLARE_ALIGNED_16(uint8_t, halfHV)[SIZE*SIZE];\
+ DECLARE_ALIGNED_16(int16_t, tmp)[SIZE*(SIZE+8)];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
@@ -590,7 +590,7 @@ static void ff_h264_idct_add8_altivec(uint8_t **dest, const int *block_offset, D
static inline void write16x4(uint8_t *dst, int dst_stride,
register vec_u8 r0, register vec_u8 r1,
register vec_u8 r2, register vec_u8 r3) {
- DECLARE_ALIGNED_16(unsigned char, result[64]);
+ DECLARE_ALIGNED_16(unsigned char, result)[64];
uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
int int_dst_stride = dst_stride/4;
@@ -770,7 +770,7 @@ static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
}
#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) { \
- DECLARE_ALIGNED_16(unsigned char, temp[16]); \
+ DECLARE_ALIGNED_16(unsigned char, temp)[16]; \
register vec_u8 alphavec; \
register vec_u8 betavec; \
register vec_u8 mask; \
@@ -850,7 +850,7 @@ void weight_h264_WxH_altivec(uint8_t *block, int stride, int log2_denom, int wei
vec_u8 vblock;
vec_s16 vtemp, vweight, voffset, v0, v1;
vec_u16 vlog2_denom;
- DECLARE_ALIGNED_16(int32_t, temp[4]);
+ DECLARE_ALIGNED_16(int32_t, temp)[4];
LOAD_ZERO;
offset <<= log2_denom;
@@ -896,7 +896,7 @@ void biweight_h264_WxH_altivec(uint8_t *dst, uint8_t *src, int stride, int log2_
vec_u8 vsrc, vdst;
vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
vec_u16 vlog2_denom;
- DECLARE_ALIGNED_16(int32_t, temp[4]);
+ DECLARE_ALIGNED_16(int32_t, temp)[4];
LOAD_ZERO;
offset = ((offset + 1) | 1) << log2_denom;
diff --git a/libavcodec/ppc/h264_template_altivec.c b/libavcodec/ppc/h264_template_altivec.c
index 954fd16bd9..3ed751135e 100644
--- a/libavcodec/ppc/h264_template_altivec.c
+++ b/libavcodec/ppc/h264_template_altivec.c
@@ -78,7 +78,7 @@
void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
int stride, int h, int x, int y) {
POWERPC_PERF_DECLARE(PREFIX_h264_chroma_mc8_num, 1);
- DECLARE_ALIGNED_16(signed int, ABCD[4]) =
+ DECLARE_ALIGNED_16(signed int, ABCD)[4] =
{((8 - x) * (8 - y)),
(( x) * (8 - y)),
((8 - x) * ( y)),
@@ -208,7 +208,7 @@ void PREFIX_h264_chroma_mc8_altivec(uint8_t * dst, uint8_t * src,
/* this code assume that stride % 16 == 0 */
void PREFIX_no_rnd_vc1_chroma_mc8_altivec(uint8_t * dst, uint8_t * src, int stride, int h, int x, int y) {
- DECLARE_ALIGNED_16(signed int, ABCD[4]) =
+ DECLARE_ALIGNED_16(signed int, ABCD)[4] =
{((8 - x) * (8 - y)),
(( x) * (8 - y)),
((8 - x) * ( y)),