author    Måns Rullgård <mans@mansr.com>  2010-01-22 03:25:11 +0000
committer Måns Rullgård <mans@mansr.com>  2010-01-22 03:25:11 +0000
commit    c67278098def4438fc587744f5df1c147bc95dc3 (patch)
tree      032a9f82fd504566b8e7361d6ea6e80cbda18c0c /libavcodec/x86
parent    27ce1be89ba765d4129a638f2dd673e1f6e17682 (diff)
Move array specifiers outside DECLARE_ALIGNED() invocations
Originally committed as revision 21377 to svn://svn.ffmpeg.org/ffmpeg/trunk
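
For context, a minimal sketch of what this change means at the macro level.
The definitions below are illustrative stand-ins for the real ones in
libavutil (the exact per-compiler bodies differ); only the placement of the
array specifier is the point.

    /* Illustrative macro, modeled on typical DECLARE_ALIGNED definitions. */
    #ifdef __GNUC__
    #    define DECLARE_ALIGNED(n, t, v) t __attribute__((aligned(n))) v
    #else
    #    define DECLARE_ALIGNED(n, t, v) __declspec(align(n)) t v
    #endif

    /* Old style: the array specifier rides inside the name argument. */
    DECLARE_ALIGNED(8, int16_t, b2[64]);
    /* expands to: int16_t __attribute__((aligned(8))) b2[64]; */

    /* New style (this commit): the name argument stays a plain identifier
     * and the array specifier follows the closing parenthesis. */
    DECLARE_ALIGNED(8, int16_t, b2)[64];
    /* expands to: int16_t __attribute__((aligned(8))) b2[64]; */

Both forms expand to the same declaration with the macro sketched above;
keeping the name argument a bare identifier leaves a macro body free to place
alignment keywords or other qualifiers around the name without colliding with
array brackets, which is the rewrite the hunks below apply file by file.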
Diffstat (limited to 'libavcodec/x86')
-rw-r--r--  libavcodec/x86/cavsdsp_mmx.c              2
-rw-r--r--  libavcodec/x86/dsputil_mmx.c             14
-rw-r--r--  libavcodec/x86/dsputilenc_mmx.c           4
-rw-r--r--  libavcodec/x86/fft_3dn2.c                 2
-rw-r--r--  libavcodec/x86/fft_sse.c                  2
-rw-r--r--  libavcodec/x86/h264dsp_mmx.c             34
-rw-r--r--  libavcodec/x86/idct_mmx_xvid.c            8
-rw-r--r--  libavcodec/x86/idct_sse2_xvid.c          20
-rw-r--r--  libavcodec/x86/motion_est_mmx.c           2
-rw-r--r--  libavcodec/x86/mpegvideo_mmx_template.c   2
-rw-r--r--  libavcodec/x86/rv40dsp_mmx.c              2
-rw-r--r--  libavcodec/x86/simple_idct_mmx.c          4
-rw-r--r--  libavcodec/x86/snowdsp_mmx.c              2
-rw-r--r--  libavcodec/x86/vc1dsp_mmx.c               2
-rw-r--r--  libavcodec/x86/vp3dsp_sse2.c              2
15 files changed, 51 insertions, 51 deletions
diff --git a/libavcodec/x86/cavsdsp_mmx.c b/libavcodec/x86/cavsdsp_mmx.c
index 4dadc86213..3edfdb9822 100644
--- a/libavcodec/x86/cavsdsp_mmx.c
+++ b/libavcodec/x86/cavsdsp_mmx.c
@@ -113,7 +113,7 @@ static inline void cavs_idct8_1d(int16_t *block, uint64_t bias)
static void cavs_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
int i;
- DECLARE_ALIGNED_8(int16_t, b2[64]);
+ DECLARE_ALIGNED_8(int16_t, b2)[64];
for(i=0; i<2; i++){
DECLARE_ALIGNED_8(uint64_t, tmp);
diff --git a/libavcodec/x86/dsputil_mmx.c b/libavcodec/x86/dsputil_mmx.c
index abfebfbff5..0283fb41e5 100644
--- a/libavcodec/x86/dsputil_mmx.c
+++ b/libavcodec/x86/dsputil_mmx.c
@@ -42,7 +42,7 @@ int mm_flags; /* multimedia extension flags */
DECLARE_ALIGNED_8 (const uint64_t, ff_bone) = 0x0101010101010101ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_wtwo) = 0x0002000200020002ULL;
-DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000[2]) =
+DECLARE_ALIGNED_16(const uint64_t, ff_pdw_80000000)[2] =
{0x8000000080000000ULL, 0x8000000080000000ULL};
DECLARE_ALIGNED_8 (const uint64_t, ff_pw_3 ) = 0x0003000300030003ULL;
@@ -69,8 +69,8 @@ DECLARE_ALIGNED_8 (const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
DECLARE_ALIGNED_8 (const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
-DECLARE_ALIGNED_16(const double, ff_pd_1[2]) = { 1.0, 1.0 };
-DECLARE_ALIGNED_16(const double, ff_pd_2[2]) = { 2.0, 2.0 };
+DECLARE_ALIGNED_16(const double, ff_pd_1)[2] = { 1.0, 1.0 };
+DECLARE_ALIGNED_16(const double, ff_pd_2)[2] = { 2.0, 2.0 };
#define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
#define MOVQ_ZERO(regd) __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)
@@ -277,7 +277,7 @@ void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size
:"memory");
}
-DECLARE_ASM_CONST(8, uint8_t, ff_vector128[8]) =
+DECLARE_ASM_CONST(8, uint8_t, ff_vector128)[8] =
{ 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };
#define put_signed_pixels_clamped_mmx_half(off) \
@@ -754,7 +754,7 @@ static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int
static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
const int strength= ff_h263_loop_filter_strength[qscale];
- DECLARE_ALIGNED(8, uint64_t, temp[4]);
+ DECLARE_ALIGNED(8, uint64_t, temp)[4];
uint8_t *btemp= (uint8_t*)temp;
src -= 2;
@@ -2026,7 +2026,7 @@ static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_c
} else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
MIX5(IF1,IF0);
} else {
- DECLARE_ALIGNED_16(float, matrix_simd[in_ch][2][4]);
+ DECLARE_ALIGNED_16(float, matrix_simd)[in_ch][2][4];
j = 2*in_ch*sizeof(float);
__asm__ volatile(
"1: \n"
@@ -2413,7 +2413,7 @@ static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int al
#define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
/* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2*/\
static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
- DECLARE_ALIGNED_16(int16_t, tmp[len]);\
+ DECLARE_ALIGNED_16(int16_t, tmp)[len];\
int i,j,c;\
for(c=0; c<channels; c++){\
float_to_int16_##cpu(tmp, src[c], len);\
diff --git a/libavcodec/x86/dsputilenc_mmx.c b/libavcodec/x86/dsputilenc_mmx.c
index 3ee2c38d00..010ba5d5ed 100644
--- a/libavcodec/x86/dsputilenc_mmx.c
+++ b/libavcodec/x86/dsputilenc_mmx.c
@@ -1063,7 +1063,7 @@ static void sub_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *src1, c
#define HADAMARD8_DIFF_MMX(cpu) \
static int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){\
- DECLARE_ALIGNED_8(uint64_t, temp[13]);\
+ DECLARE_ALIGNED_8(uint64_t, temp)[13];\
int sum;\
\
assert(h==8);\
@@ -1146,7 +1146,7 @@ WRAPPER8_16_SQ(hadamard8_diff_##cpu, hadamard8_diff16_##cpu)
#define HADAMARD8_DIFF_SSE2(cpu) \
static int hadamard8_diff_##cpu(void *s, uint8_t *src1, uint8_t *src2, int stride, int h){\
- DECLARE_ALIGNED_16(uint64_t, temp[4]);\
+ DECLARE_ALIGNED_16(uint64_t, temp)[4];\
int sum;\
\
assert(h==8);\
diff --git a/libavcodec/x86/fft_3dn2.c b/libavcodec/x86/fft_3dn2.c
index d5557de638..dd84f5bad4 100644
--- a/libavcodec/x86/fft_3dn2.c
+++ b/libavcodec/x86/fft_3dn2.c
@@ -23,7 +23,7 @@
#include "libavcodec/dsputil.h"
#include "fft.h"
-DECLARE_ALIGNED_8(static const int, m1m1[2]) = { 1<<31, 1<<31 };
+DECLARE_ALIGNED_8(static const int, m1m1)[2] = { 1<<31, 1<<31 };
#ifdef EMULATE_3DNOWEXT
#define PSWAPD(s,d)\
diff --git a/libavcodec/x86/fft_sse.c b/libavcodec/x86/fft_sse.c
index a76074645a..a4cce69db1 100644
--- a/libavcodec/x86/fft_sse.c
+++ b/libavcodec/x86/fft_sse.c
@@ -23,7 +23,7 @@
#include "libavcodec/dsputil.h"
#include "fft.h"
-DECLARE_ALIGNED(16, static const int, m1m1m1m1[4]) =
+DECLARE_ALIGNED(16, static const int, m1m1m1m1)[4] =
{ 1 << 31, 1 << 31, 1 << 31, 1 << 31 };
void ff_fft_dispatch_sse(FFTComplex *z, int nbits);
diff --git a/libavcodec/x86/h264dsp_mmx.c b/libavcodec/x86/h264dsp_mmx.c
index 30d54978f6..90f5392f09 100644
--- a/libavcodec/x86/h264dsp_mmx.c
+++ b/libavcodec/x86/h264dsp_mmx.c
@@ -157,7 +157,7 @@ static inline void h264_idct8_1d(int16_t *block)
static void ff_h264_idct8_add_mmx(uint8_t *dst, int16_t *block, int stride)
{
int i;
- DECLARE_ALIGNED_8(int16_t, b2[64]);
+ DECLARE_ALIGNED_8(int16_t, b2)[64];
block[0] += 32;
@@ -628,7 +628,7 @@ static void ff_h264_idct_add8_sse2(uint8_t **dest, const int *block_offset, DCTE
static inline void h264_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha1, int beta1, int8_t *tc0)
{
- DECLARE_ALIGNED_8(uint64_t, tmp0[2]);
+ DECLARE_ALIGNED_8(uint64_t, tmp0)[2];
__asm__ volatile(
"movq (%2,%4), %%mm0 \n\t" //p1
@@ -690,7 +690,7 @@ static void h264_h_loop_filter_luma_mmx2(uint8_t *pix, int stride, int alpha, in
{
//FIXME: could cut some load/stores by merging transpose with filter
// also, it only needs to transpose 6x8
- DECLARE_ALIGNED_8(uint8_t, trans[8*8]);
+ DECLARE_ALIGNED_8(uint8_t, trans)[8*8];
int i;
for(i=0; i<2; i++, pix+=8*stride, tc0+=2) {
if((tc0[0] & tc0[1]) < 0)
@@ -734,7 +734,7 @@ static void h264_v_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha,
static void h264_h_loop_filter_chroma_mmx2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0)
{
//FIXME: could cut some load/stores by merging transpose with filter
- DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
+ DECLARE_ALIGNED_8(uint8_t, trans)[8*4];
transpose4x4(trans, pix-2, 8, stride);
transpose4x4(trans+4, pix-2+4*stride, 8, stride);
h264_loop_filter_chroma_mmx2(trans+2*8, 8, alpha-1, beta-1, tc0);
@@ -784,7 +784,7 @@ static void h264_v_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int a
static void h264_h_loop_filter_chroma_intra_mmx2(uint8_t *pix, int stride, int alpha, int beta)
{
//FIXME: could cut some load/stores by merging transpose with filter
- DECLARE_ALIGNED_8(uint8_t, trans[8*4]);
+ DECLARE_ALIGNED_8(uint8_t, trans)[8*4];
transpose4x4(trans, pix-2, 8, stride);
transpose4x4(trans+4, pix-2+4*stride, 8, stride);
h264_loop_filter_chroma_intra_mmx2(trans+2*8, 8, alpha-1, beta-1);
@@ -1974,7 +1974,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## MMX(uint8_t *dst, uint8_t *
#define H264_MC_V(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src, temp, stride, stride, SIZE);\
}\
@@ -1984,43 +1984,43 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## MMX(uint8_t *dst, uint8_t *
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
OPNAME ## pixels ## SIZE ## _l2_ ## MMX(dst, src+stride, temp, stride, stride, SIZE);\
}\
#define H264_MC_HV(OPNAME, SIZE, MMX, ALIGN) \
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src, SIZE, stride);\
OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*SIZE]);\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*SIZE];\
put_h264_qpel ## SIZE ## _v_lowpass_ ## MMX(temp, src+1, SIZE, stride);\
OPNAME ## h264_qpel ## SIZE ## _h_lowpass_l2_ ## MMX(dst, src+stride, temp, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED(ALIGN, uint16_t, temp[SIZE*(SIZE<8?12:24)]);\
+ DECLARE_ALIGNED(ALIGN, uint16_t, temp)[SIZE*(SIZE<8?12:24)];\
OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## MMX(dst, temp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
uint8_t * const halfHV= temp;\
int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
assert(((int)temp & 7) == 0);\
@@ -2029,7 +2029,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## MMX(uint8_t *dst, uint8_t *
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
uint8_t * const halfHV= temp;\
int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
assert(((int)temp & 7) == 0);\
@@ -2038,7 +2038,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
uint8_t * const halfHV= temp;\
int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
assert(((int)temp & 7) == 0);\
@@ -2047,7 +2047,7 @@ static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## MMX(uint8_t *dst, uint8_t *
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
- DECLARE_ALIGNED(ALIGN, uint8_t, temp[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE]);\
+ DECLARE_ALIGNED(ALIGN, uint8_t, temp)[SIZE*(SIZE<8?12:24)*2 + SIZE*SIZE];\
uint8_t * const halfHV= temp;\
int16_t * const halfV= (int16_t*)(temp + SIZE*SIZE);\
assert(((int)temp & 7) == 0);\
@@ -2110,7 +2110,7 @@ H264_MC_816(H264_MC_HV, ssse3)
#endif
/* rnd interleaved with rnd div 8, use p+1 to access rnd div 8 */
-DECLARE_ALIGNED_8(static const uint64_t, h264_rnd_reg[4]) = {
+DECLARE_ALIGNED_8(static const uint64_t, h264_rnd_reg)[4] = {
0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL
};
diff --git a/libavcodec/x86/idct_mmx_xvid.c b/libavcodec/x86/idct_mmx_xvid.c
index d4fdd7a54a..f955bd8221 100644
--- a/libavcodec/x86/idct_mmx_xvid.c
+++ b/libavcodec/x86/idct_mmx_xvid.c
@@ -64,13 +64,13 @@
//-----------------------------------------------------------------------------
-DECLARE_ALIGNED(8, static const int16_t, tg_1_16[4*4]) = {
+DECLARE_ALIGNED(8, static const int16_t, tg_1_16)[4*4] = {
13036,13036,13036,13036, // tg * (2<<16) + 0.5
27146,27146,27146,27146, // tg * (2<<16) + 0.5
-21746,-21746,-21746,-21746, // tg * (2<<16) + 0.5
23170,23170,23170,23170}; // cos * (2<<15) + 0.5
-DECLARE_ALIGNED(8, static const int32_t, rounder_0[2*8]) = {
+DECLARE_ALIGNED(8, static const int32_t, rounder_0)[2*8] = {
65536,65536,
3597,3597,
2260,2260,
@@ -140,7 +140,7 @@ DECLARE_ALIGNED(8, static const int32_t, rounder_0[2*8]) = {
//-----------------------------------------------------------------------------
// Table for rows 0,4 - constants are multiplied by cos_4_16
-DECLARE_ALIGNED(8, static const int16_t, tab_i_04_mmx[32*4]) = {
+DECLARE_ALIGNED(8, static const int16_t, tab_i_04_mmx)[32*4] = {
16384,16384,16384,-16384, // movq-> w06 w04 w02 w00
21407,8867,8867,-21407, // w07 w05 w03 w01
16384,-16384,16384,16384, // w14 w12 w10 w08
@@ -182,7 +182,7 @@ DECLARE_ALIGNED(8, static const int16_t, tab_i_04_mmx[32*4]) = {
//-----------------------------------------------------------------------------
// %3 for rows 0,4 - constants are multiplied by cos_4_16
-DECLARE_ALIGNED(8, static const int16_t, tab_i_04_xmm[32*4]) = {
+DECLARE_ALIGNED(8, static const int16_t, tab_i_04_xmm)[32*4] = {
16384,21407,16384,8867, // movq-> w05 w04 w01 w00
16384,8867,-16384,-21407, // w07 w06 w03 w02
16384,-8867,16384,-21407, // w13 w12 w09 w08
diff --git a/libavcodec/x86/idct_sse2_xvid.c b/libavcodec/x86/idct_sse2_xvid.c
index 81f617b117..a81c7d5173 100644
--- a/libavcodec/x86/idct_sse2_xvid.c
+++ b/libavcodec/x86/idct_sse2_xvid.c
@@ -52,41 +52,41 @@
#define ROW_SHIFT 11
#define COL_SHIFT 6
-DECLARE_ASM_CONST(16, int16_t, tan1[]) = {X8(13036)}; // tan( pi/16)
-DECLARE_ASM_CONST(16, int16_t, tan2[]) = {X8(27146)}; // tan(2pi/16) = sqrt(2)-1
-DECLARE_ASM_CONST(16, int16_t, tan3[]) = {X8(43790)}; // tan(3pi/16)-1
-DECLARE_ASM_CONST(16, int16_t, sqrt2[])= {X8(23170)}; // 0.5/sqrt(2)
-DECLARE_ASM_CONST(8, uint8_t, m127[]) = {X8(127)};
+DECLARE_ASM_CONST(16, int16_t, tan1)[] = {X8(13036)}; // tan( pi/16)
+DECLARE_ASM_CONST(16, int16_t, tan2)[] = {X8(27146)}; // tan(2pi/16) = sqrt(2)-1
+DECLARE_ASM_CONST(16, int16_t, tan3)[] = {X8(43790)}; // tan(3pi/16)-1
+DECLARE_ASM_CONST(16, int16_t, sqrt2)[]= {X8(23170)}; // 0.5/sqrt(2)
+DECLARE_ASM_CONST(8, uint8_t, m127)[] = {X8(127)};
-DECLARE_ASM_CONST(16, int16_t, iTab1[]) = {
+DECLARE_ASM_CONST(16, int16_t, iTab1)[] = {
0x4000, 0x539f, 0xc000, 0xac61, 0x4000, 0xdd5d, 0x4000, 0xdd5d,
0x4000, 0x22a3, 0x4000, 0x22a3, 0xc000, 0x539f, 0x4000, 0xac61,
0x3249, 0x11a8, 0x4b42, 0xee58, 0x11a8, 0x4b42, 0x11a8, 0xcdb7,
0x58c5, 0x4b42, 0xa73b, 0xcdb7, 0x3249, 0xa73b, 0x4b42, 0xa73b
};
-DECLARE_ASM_CONST(16, int16_t, iTab2[]) = {
+DECLARE_ASM_CONST(16, int16_t, iTab2)[] = {
0x58c5, 0x73fc, 0xa73b, 0x8c04, 0x58c5, 0xcff5, 0x58c5, 0xcff5,
0x58c5, 0x300b, 0x58c5, 0x300b, 0xa73b, 0x73fc, 0x58c5, 0x8c04,
0x45bf, 0x187e, 0x6862, 0xe782, 0x187e, 0x6862, 0x187e, 0xba41,
0x7b21, 0x6862, 0x84df, 0xba41, 0x45bf, 0x84df, 0x6862, 0x84df
};
-DECLARE_ASM_CONST(16, int16_t, iTab3[]) = {
+DECLARE_ASM_CONST(16, int16_t, iTab3)[] = {
0x539f, 0x6d41, 0xac61, 0x92bf, 0x539f, 0xd2bf, 0x539f, 0xd2bf,
0x539f, 0x2d41, 0x539f, 0x2d41, 0xac61, 0x6d41, 0x539f, 0x92bf,
0x41b3, 0x1712, 0x6254, 0xe8ee, 0x1712, 0x6254, 0x1712, 0xbe4d,
0x73fc, 0x6254, 0x8c04, 0xbe4d, 0x41b3, 0x8c04, 0x6254, 0x8c04
};
-DECLARE_ASM_CONST(16, int16_t, iTab4[]) = {
+DECLARE_ASM_CONST(16, int16_t, iTab4)[] = {
0x4b42, 0x6254, 0xb4be, 0x9dac, 0x4b42, 0xd746, 0x4b42, 0xd746,
0x4b42, 0x28ba, 0x4b42, 0x28ba, 0xb4be, 0x6254, 0x4b42, 0x9dac,
0x3b21, 0x14c3, 0x587e, 0xeb3d, 0x14c3, 0x587e, 0x14c3, 0xc4df,
0x6862, 0x587e, 0x979e, 0xc4df, 0x3b21, 0x979e, 0x587e, 0x979e
};
-DECLARE_ASM_CONST(16, int32_t, walkenIdctRounders[]) = {
+DECLARE_ASM_CONST(16, int32_t, walkenIdctRounders)[] = {
65536, 65536, 65536, 65536,
3597, 3597, 3597, 3597,
2260, 2260, 2260, 2260,
diff --git a/libavcodec/x86/motion_est_mmx.c b/libavcodec/x86/motion_est_mmx.c
index 079d604cb5..0272410dc5 100644
--- a/libavcodec/x86/motion_est_mmx.c
+++ b/libavcodec/x86/motion_est_mmx.c
@@ -26,7 +26,7 @@
#include "libavcodec/dsputil.h"
#include "dsputil_mmx.h"
-DECLARE_ASM_CONST(8, uint64_t, round_tab[3])={
+DECLARE_ASM_CONST(8, uint64_t, round_tab)[3]={
0x0000000000000000ULL,
0x0001000100010001ULL,
0x0002000200020002ULL,
diff --git a/libavcodec/x86/mpegvideo_mmx_template.c b/libavcodec/x86/mpegvideo_mmx_template.c
index dbc94d36dc..6c782b936e 100644
--- a/libavcodec/x86/mpegvideo_mmx_template.c
+++ b/libavcodec/x86/mpegvideo_mmx_template.c
@@ -98,7 +98,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
x86_reg last_non_zero_p1;
int level=0, q; //=0 is because gcc says uninitialized ...
const uint16_t *qmat, *bias;
- DECLARE_ALIGNED_16(int16_t, temp_block[64]);
+ DECLARE_ALIGNED_16(int16_t, temp_block)[64];
assert((7&(int)(&temp_block[0])) == 0); //did gcc align it correctly?
diff --git a/libavcodec/x86/rv40dsp_mmx.c b/libavcodec/x86/rv40dsp_mmx.c
index 47461c68aa..9a702084a4 100644
--- a/libavcodec/x86/rv40dsp_mmx.c
+++ b/libavcodec/x86/rv40dsp_mmx.c
@@ -24,7 +24,7 @@
#include "dsputil_mmx.h"
/* bias interleaved with bias div 8, use p+1 to access bias div 8 */
-DECLARE_ALIGNED_8(static const uint64_t, rv40_bias_reg[4][8]) = {
+DECLARE_ALIGNED_8(static const uint64_t, rv40_bias_reg)[4][8] = {
{ 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0010001000100010ULL, 0x0002000200020002ULL,
0x0020002000200020ULL, 0x0004000400040004ULL, 0x0010001000100010ULL, 0x0002000200020002ULL },
{ 0x0020002000200020ULL, 0x0004000400040004ULL, 0x001C001C001C001CULL, 0x0003000300030003ULL,
diff --git a/libavcodec/x86/simple_idct_mmx.c b/libavcodec/x86/simple_idct_mmx.c
index 5cc1e74692..5ea4c84ed9 100644
--- a/libavcodec/x86/simple_idct_mmx.c
+++ b/libavcodec/x86/simple_idct_mmx.c
@@ -52,7 +52,7 @@
DECLARE_ASM_CONST(8, uint64_t, wm1010)= 0xFFFF0000FFFF0000ULL;
DECLARE_ASM_CONST(8, uint64_t, d40000)= 0x0000000000040000ULL;
-DECLARE_ALIGNED(8, static const int16_t, coeffs[])= {
+DECLARE_ALIGNED(8, static const int16_t, coeffs)[]= {
1<<(ROW_SHIFT-1), 0, 1<<(ROW_SHIFT-1), 0,
// 1<<(COL_SHIFT-1), 0, 1<<(COL_SHIFT-1), 0,
// 0, 1<<(COL_SHIFT-1-16), 0, 1<<(COL_SHIFT-1-16),
@@ -211,7 +211,7 @@ row[7] = input[13];
static inline void idct(int16_t *block)
{
- DECLARE_ALIGNED(8, int64_t, align_tmp[16]);
+ DECLARE_ALIGNED(8, int64_t, align_tmp)[16];
int16_t * const temp= (int16_t*)align_tmp;
__asm__ volatile(
diff --git a/libavcodec/x86/snowdsp_mmx.c b/libavcodec/x86/snowdsp_mmx.c
index e7868eae9d..95ca2f4fef 100644
--- a/libavcodec/x86/snowdsp_mmx.c
+++ b/libavcodec/x86/snowdsp_mmx.c
@@ -25,7 +25,7 @@
void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width){
const int w2= (width+1)>>1;
- DECLARE_ALIGNED_16(IDWTELEM, temp[width>>1]);
+ DECLARE_ALIGNED_16(IDWTELEM, temp)[width>>1];
const int w_l= (width>>1);
const int w_r= w2 - 1;
int i;
diff --git a/libavcodec/x86/vc1dsp_mmx.c b/libavcodec/x86/vc1dsp_mmx.c
index 3071d02808..bf96f24702 100644
--- a/libavcodec/x86/vc1dsp_mmx.c
+++ b/libavcodec/x86/vc1dsp_mmx.c
@@ -442,7 +442,7 @@ static void OP ## vc1_mspel_mc(uint8_t *dst, const uint8_t *src, int stride,\
static const int shift_value[] = { 0, 5, 1, 5 };\
int shift = (shift_value[hmode]+shift_value[vmode])>>1;\
int r;\
- DECLARE_ALIGNED_16(int16_t, tmp[12*8]);\
+ DECLARE_ALIGNED_16(int16_t, tmp)[12*8];\
\
r = (1<<(shift-1)) + rnd-1;\
vc1_put_shift_ver_16bits[vmode](tmp, src-1, stride, r, shift);\
diff --git a/libavcodec/x86/vp3dsp_sse2.c b/libavcodec/x86/vp3dsp_sse2.c
index 6f90dc6ed1..dfe368b5ae 100644
--- a/libavcodec/x86/vp3dsp_sse2.c
+++ b/libavcodec/x86/vp3dsp_sse2.c
@@ -26,7 +26,7 @@
#include "libavcodec/dsputil.h"
#include "dsputil_mmx.h"
-DECLARE_ALIGNED_16(const uint16_t, ff_vp3_idct_data[7 * 8]) =
+DECLARE_ALIGNED_16(const uint16_t, ff_vp3_idct_data)[7 * 8] =
{
64277,64277,64277,64277,64277,64277,64277,64277,
60547,60547,60547,60547,60547,60547,60547,60547,