From 5d0ddd1a9fcdfbb6b24e75af4384e1d36a1d331e Mon Sep 17 00:00:00 2001
From: Loren Merritt
Date: Tue, 12 Aug 2008 00:26:58 +0000
Subject: split-radix FFT

c is 1.9x faster than previous c (on various x86 cpus), sse is 1.6x faster than previous sse.

Originally committed as revision 14698 to svn://svn.ffmpeg.org/ffmpeg/trunk
---
 libavcodec/fft.c | 371 ++++++++++++++++++++++++++++++++++++-------------------
 1 file changed, 245 insertions(+), 126 deletions(-)

diff --git a/libavcodec/fft.c b/libavcodec/fft.c
index 47e9e062f4..7b0d3b3b61 100644
--- a/libavcodec/fft.c
+++ b/libavcodec/fft.c
@@ -1,6 +1,8 @@
 /*
  * FFT/IFFT transforms
+ * Copyright (c) 2008 Loren Merritt
  * Copyright (c) 2002 Fabrice Bellard.
+ * Partly based on libdjbfft by D. J. Bernstein
  *
  * This file is part of FFmpeg.
  *
@@ -26,6 +28,36 @@
 #include "dsputil.h"
 
+/* cos(2*pi*x/n) for 0<=x<=n/4, followed by its reverse */
+DECLARE_ALIGNED_16(FFTSample, ff_cos_16[8]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_32[16]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_64[32]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_128[64]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_256[128]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_512[256]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_1024[512]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_2048[1024]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_4096[2048]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_8192[4096]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_16384[8192]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_32768[16384]);
+DECLARE_ALIGNED_16(FFTSample, ff_cos_65536[32768]);
+static FFTSample *ff_cos_tabs[] = {
+    ff_cos_16, ff_cos_32, ff_cos_64, ff_cos_128, ff_cos_256, ff_cos_512, ff_cos_1024,
+    ff_cos_2048, ff_cos_4096, ff_cos_8192, ff_cos_16384, ff_cos_32768, ff_cos_65536,
+};
+
+static int split_radix_permutation(int i, int n, int inverse)
+{
+    int m;
+    if(n <= 2) return i&1;
+    m = n >> 1;
+    if(!(i&m)) return split_radix_permutation(i, m, inverse)*2;
+    m >>= 1;
+    if(inverse == !(i&m)) return split_radix_permutation(i, m, inverse)*4 + 1;
+    else return split_radix_permutation(i, m, inverse)*4 - 1;
+}
+
 /**
  * The size of the FFT is 2^nbits. If inverse is TRUE, inverse FFT is
  * done
@@ -34,12 +66,15 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
 {
     int i, j, m, n;
     float alpha, c1, s1, s2;
-    int shuffle = 0;
+    int split_radix = 1;
     int av_unused has_vectors;
 
+    if (nbits < 2 || nbits > 16)
+        goto fail;
     s->nbits = nbits;
     n = 1 << nbits;
 
+    s->tmp_buf = NULL;
     s->exptab = av_malloc((n / 2) * sizeof(FFTComplex));
     if (!s->exptab)
         goto fail;
@@ -50,50 +85,62 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
 
     s2 = inverse ? 1.0 : -1.0;
 
-    for(i=0;i<(n/2);i++) {
-        alpha = 2 * M_PI * (float)i / (float)n;
-        c1 = cos(alpha);
-        s1 = sin(alpha) * s2;
-        s->exptab[i].re = c1;
-        s->exptab[i].im = s1;
-    }
+    s->fft_permute = ff_fft_permute_c;
     s->fft_calc = ff_fft_calc_c;
     s->imdct_calc = ff_imdct_calc;
     s->imdct_half = ff_imdct_half;
     s->exptab1 = NULL;
 
-#ifdef HAVE_MMX
+#if defined HAVE_MMX && defined HAVE_YASM
     has_vectors = mm_support();
-    shuffle = 1;
-    if (has_vectors & MM_3DNOWEXT) {
-        /* 3DNowEx for K7/K8 */
+    if (has_vectors & MM_SSE) {
+        /* SSE for P3/P4/K8 */
+        s->imdct_calc = ff_imdct_calc_sse;
+        s->imdct_half = ff_imdct_half_sse;
+        s->fft_permute = ff_fft_permute_sse;
+        s->fft_calc = ff_fft_calc_sse;
+    } else if (has_vectors & MM_3DNOWEXT) {
+        /* 3DNowEx for K7 */
         s->imdct_calc = ff_imdct_calc_3dn2;
         s->imdct_half = ff_imdct_half_3dn2;
         s->fft_calc = ff_fft_calc_3dn2;
     } else if (has_vectors & MM_3DNOW) {
         /* 3DNow! for K6-2/3 */
         s->fft_calc = ff_fft_calc_3dn;
-    } else if (has_vectors & MM_SSE) {
-        /* SSE for P3/P4 */
-        s->imdct_calc = ff_imdct_calc_sse;
-        s->imdct_half = ff_imdct_half_sse;
-        s->fft_calc = ff_fft_calc_sse;
-    } else {
-        shuffle = 0;
     }
 #elif defined HAVE_ALTIVEC && !defined ALTIVEC_USE_REFERENCE_C_CODE
     has_vectors = mm_support();
     if (has_vectors & MM_ALTIVEC) {
         s->fft_calc = ff_fft_calc_altivec;
-        shuffle = 1;
+        split_radix = 0;
     }
 #endif
 
-    /* compute constant table for HAVE_SSE version */
-    if (shuffle) {
+    if (split_radix) {
+        for(j=4; j<=nbits; j++) {
+            int m = 1<<j;
+            double freq = 2*M_PI/m;
+            FFTSample *tab = ff_cos_tabs[j-4];
+            for(i=0; i<=m/4; i++)
+                tab[i] = cos(i*freq);
+            for(i=1; i<m/4; i++)
+                tab[m/2-i] = tab[i];
+        }
+        for(i=0; i<n; i++)
+            s->revtab[-split_radix_permutation(i, n, s->inverse) & (n-1)] = i;
+        s->tmp_buf = av_malloc(n * sizeof(FFTComplex));
+    } else {
         int np, nblocks, np2, l;
         FFTComplex *q;
 
+        for(i=0; i<(n/2); i++) {
+            alpha = 2 * M_PI * (float)i / (float)n;
+            c1 = cos(alpha);
+            s1 = sin(alpha) * s2;
+            s->exptab[i].re = c1;
+            s->exptab[i].im = s1;
+        }
+
         np = 1 << nbits;
         nblocks = np >> 3;
         np2 = np >> 1;
@@ -116,7 +163,6 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
             nblocks = nblocks >> 1;
         } while (nblocks != 0);
         av_freep(&s->exptab);
-    }
 
     /* compute bit reverse table */
     for(i=0;i<n;i++) {
@@ -127,126 +173,35 @@ int ff_fft_init(FFTContext *s, int nbits, int inverse)
         }
         s->revtab[i]=m;
     }
+    }
+
     return 0;
 
  fail:
     av_freep(&s->revtab);
     av_freep(&s->exptab);
     av_freep(&s->exptab1);
+    av_freep(&s->tmp_buf);
     return -1;
 }
 
-/* butter fly op */
-#define BF(pre, pim, qre, qim, pre1, pim1, qre1, qim1) \
-{\
-    FFTSample ax, ay, bx, by;\
-    bx=pre1;\
-    by=pim1;\
-    ax=qre1;\
-    ay=qim1;\
-    pre = (bx + ax);\
-    pim = (by + ay);\
-    qre = (bx - ax);\
-    qim = (by - ay);\
-}
-
-#define MUL16(a,b) ((a) * (b))
-
-#define CMUL(pre, pim, are, aim, bre, bim) \
-{\
-    pre = (MUL16(are, bre) - MUL16(aim, bim));\
-    pim = (MUL16(are, bim) + MUL16(bre, aim));\
-}
-
-/**
- * Do a complex FFT with the parameters defined in ff_fft_init(). The
- * input data must be permuted before with s->revtab table. No
- * 1.0/sqrt(n) normalization is done.
- */
-void ff_fft_calc_c(FFTContext *s, FFTComplex *z)
-{
-    int ln = s->nbits;
-    int j, np, np2;
-    int nblocks, nloops;
-    register FFTComplex *p, *q;
-    FFTComplex *exptab = s->exptab;
-    int l;
-    FFTSample tmp_re, tmp_im;
-
-    np = 1 << ln;
-
-    /* pass 0 */
-
-    p=&z[0];
-    j=(np >> 1);
-    do {
-        BF(p[0].re, p[0].im, p[1].re, p[1].im,
-           p[0].re, p[0].im, p[1].re, p[1].im);
-        p+=2;
-    } while (--j != 0);
-
-    /* pass 1 */
-
-
-    p=&z[0];
-    j=np >> 2;
-    if (s->inverse) {
-        do {
-            BF(p[0].re, p[0].im, p[2].re, p[2].im,
-               p[0].re, p[0].im, p[2].re, p[2].im);
-            BF(p[1].re, p[1].im, p[3].re, p[3].im,
-               p[1].re, p[1].im, -p[3].im, p[3].re);
-            p+=4;
-        } while (--j != 0);
-    } else {
-        do {
-            BF(p[0].re, p[0].im, p[2].re, p[2].im,
-               p[0].re, p[0].im, p[2].re, p[2].im);
-            BF(p[1].re, p[1].im, p[3].re, p[3].im,
-               p[1].re, p[1].im, p[3].im, -p[3].re);
-            p+=4;
-        } while (--j != 0);
-    }
-    /* pass 2 .. ln-1 */
-
-    nblocks = np >> 3;
-    nloops = 1 << 2;
-    np2 = np >> 1;
-    do {
-        p = z;
-        q = z + nloops;
-        for (j = 0; j < nblocks; ++j) {
-            BF(p->re, p->im, q->re, q->im,
-               p->re, p->im, q->re, q->im);
-
-            p++;
-            q++;
-            for(l = nblocks; l < np2; l += nblocks) {
-                CMUL(tmp_re, tmp_im, exptab[l].re, exptab[l].im, q->re, q->im);
-                BF(p->re, p->im, q->re, q->im,
-                   p->re, p->im, tmp_re, tmp_im);
-                p++;
-                q++;
-            }
-
-            p += nloops;
-            q += nloops;
-        }
-        nblocks = nblocks >> 1;
-        nloops = nloops << 1;
-    } while (nblocks != 0);
-}
-
 /**
  * Do the permutation needed BEFORE calling ff_fft_calc()
  */
-void ff_fft_permute(FFTContext *s, FFTComplex *z)
+void ff_fft_permute_c(FFTContext *s, FFTComplex *z)
 {
     int j, k, np;
     FFTComplex tmp;
     const uint16_t *revtab = s->revtab;
+    np = 1 << s->nbits;
+
+    if (s->tmp_buf) {
+        /* TODO: handle split-radix permute in a more optimal way, probably in-place */
+        for(j=0;j<np;j++) s->tmp_buf[revtab[j]] = z[j];
+        memcpy(z, s->tmp_buf, np * sizeof(FFTComplex));
+        return;
+    }
 
     /* reverse */
-    np = 1 << s->nbits;
     for(j=0;j<np;j++) {
         k = revtab[j];
         if (k < j) {
@@ ... @@ void ff_fft_end(FFTContext *s)
     av_freep(&s->revtab);
     av_freep(&s->exptab);
     av_freep(&s->exptab1);
+    av_freep(&s->tmp_buf);
+}
+
+#define sqrthalf (float)M_SQRT1_2
+
+#define BF(x,y,a,b) {\
+    x = a - b;\
+    y = a + b;\
+}
+
+#define BUTTERFLIES(a0,a1,a2,a3) {\
+    BF(t3, t5, t5, t1);\
+    BF(a2.re, a0.re, a0.re, t5);\
+    BF(a3.im, a1.im, a1.im, t3);\
+    BF(t4, t6, t2, t6);\
+    BF(a3.re, a1.re, a1.re, t4);\
+    BF(a2.im, a0.im, a0.im, t6);\
+}
+
+// force loading all the inputs before storing any.
+// this is slightly slower for small data, but avoids store->load aliasing
+// for addresses separated by large powers of 2.
+#define BUTTERFLIES_BIG(a0,a1,a2,a3) {\
+    FFTSample r0=a0.re, i0=a0.im, r1=a1.re, i1=a1.im;\
+    BF(t3, t5, t5, t1);\
+    BF(a2.re, a0.re, r0, t5);\
+    BF(a3.im, a1.im, i1, t3);\
+    BF(t4, t6, t2, t6);\
+    BF(a3.re, a1.re, r1, t4);\
+    BF(a2.im, a0.im, i0, t6);\
+}
+
+#define TRANSFORM(a0,a1,a2,a3,wre,wim) {\
+    t1 = a2.re * wre + a2.im * wim;\
+    t2 = a2.im * wre - a2.re * wim;\
+    t5 = a3.re * wre - a3.im * wim;\
+    t6 = a3.im * wre + a3.re * wim;\
+    BUTTERFLIES(a0,a1,a2,a3)\
+}
+
+#define TRANSFORM_ZERO(a0,a1,a2,a3) {\
+    t1 = a2.re;\
+    t2 = a2.im;\
+    t5 = a3.re;\
+    t6 = a3.im;\
+    BUTTERFLIES(a0,a1,a2,a3)\
+}
+
+/* z[0...8n-1], w[1...2n-1] */
+#define PASS(name)\
+static void name(FFTComplex *z, const FFTSample *wre, unsigned int n)\
+{\
+    FFTSample t1, t2, t3, t4, t5, t6;\
+    int o1 = 2*n;\
+    int o2 = 4*n;\
+    int o3 = 6*n;\
+    const FFTSample *wim = wre+o1;\
+    n--;\
+\
+    TRANSFORM_ZERO(z[0],z[o1],z[o2],z[o3]);\
+    TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1]);\
+    do {\
+        z += 2;\
+        wre += 2;\
+        wim -= 2;\
+        TRANSFORM(z[0],z[o1],z[o2],z[o3],wre[0],wim[0]);\
+        TRANSFORM(z[1],z[o1+1],z[o2+1],z[o3+1],wre[1],wim[-1]);\
+    } while(--n);\
+}
+
+PASS(pass)
+#undef BUTTERFLIES
+#define BUTTERFLIES BUTTERFLIES_BIG
+PASS(pass_big)
+
+#define DECL_FFT(n,n2,n4)\
+static void fft##n(FFTComplex *z)\
+{\
+    fft##n2(z);\
+    fft##n4(z+n4*2);\
+    fft##n4(z+n4*3);\
+    pass(z,ff_cos_##n,n4/2);\
+}
+
+static void fft4(FFTComplex *z)
+{
+    FFTSample t1, t2, t3, t4, t5, t6, t7, t8;
+
+    BF(t3, t1, z[0].re, z[1].re);
+    BF(t8, t6, z[3].re, z[2].re);
+    BF(z[2].re, z[0].re, t1, t6);
+    BF(t4, t2, z[0].im, z[1].im);
+    BF(t7, t5, z[2].im, z[3].im);
+    BF(z[3].im, z[1].im, t4, t8);
+    BF(z[3].re, z[1].re, t3, t7);
+    BF(z[2].im, z[0].im, t2, t5);
+}
+
+static void fft8(FFTComplex *z)
+{
+    FFTSample t1, t2, t3, t4, t5, t6, t7, t8;
+
+    fft4(z);
+
+    BF(t1, z[5].re, z[4].re, -z[5].re);
+    BF(t2, z[5].im, z[4].im, -z[5].im);
+    BF(t3, z[7].re, z[6].re, -z[7].re);
+    BF(t4, z[7].im, z[6].im, -z[7].im);
+    BF(t8, t1, t3, t1);
+    BF(t7, t2, t2, t4);
+    BF(z[4].re, z[0].re, z[0].re, t1);
+    BF(z[4].im, z[0].im, z[0].im, t2);
+    BF(z[6].re, z[2].re, z[2].re, t7);
+    BF(z[6].im, z[2].im, z[2].im, t8);
+
+    TRANSFORM(z[1],z[3],z[5],z[7],sqrthalf,sqrthalf);
+}
+
+#ifndef CONFIG_SMALL
+static void fft16(FFTComplex *z)
+{
+    FFTSample t1, t2, t3, t4, t5, t6;
+
+    fft8(z);
+    fft4(z+8);
+    fft4(z+12);
+
+    TRANSFORM_ZERO(z[0],z[4],z[8],z[12]);
+    TRANSFORM(z[2],z[6],z[10],z[14],sqrthalf,sqrthalf);
+    TRANSFORM(z[1],z[5],z[9],z[13],ff_cos_16[1],ff_cos_16[3]);
+    TRANSFORM(z[3],z[7],z[11],z[15],ff_cos_16[3],ff_cos_16[1]);
+}
+#else
+DECL_FFT(16,8,4)
+#endif
+DECL_FFT(32,16,8)
+DECL_FFT(64,32,16)
+DECL_FFT(128,64,32)
+DECL_FFT(256,128,64)
+DECL_FFT(512,256,128)
+#ifndef CONFIG_SMALL
+#define pass pass_big
+#endif
+DECL_FFT(1024,512,256)
+DECL_FFT(2048,1024,512)
+DECL_FFT(4096,2048,1024)
+DECL_FFT(8192,4096,2048)
+DECL_FFT(16384,8192,4096)
+DECL_FFT(32768,16384,8192)
+DECL_FFT(65536,32768,16384)
+
+static void (*fft_dispatch[])(FFTComplex*) = {
+    fft4, fft8, fft16, fft32, fft64, fft128, fft256, fft512, fft1024,
+    fft2048, fft4096, fft8192, fft16384, fft32768, fft65536,
+};
+
+/**
+ * Do a complex FFT with the parameters defined in ff_fft_init(). The
+ * input data must be permuted before with s->revtab table. No
+ * 1.0/sqrt(n) normalization is done.
+ */
+void ff_fft_calc_c(FFTContext *s, FFTComplex *z)
+{
+    fft_dispatch[s->nbits-2](z);
 }
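
Editorial note, not part of the patch: the reordering table that ff_fft_init() builds for the split-radix path relies on split_radix_permutation() returning a possibly negative value (its "*4 - 1" branch); the init loop negates the result and masks it with n-1 to fold it back into [0, n-1] before using it as an index. The standalone sketch below rebuilds that table the same way for a 16-point forward transform and prints it. split_radix_permutation() is copied from the patch; everything else (the choice of N, main()) is illustrative only.

/* Sketch: reproduce the revtab construction from ff_fft_init() for N = 16. */
#include <stdio.h>

static int split_radix_permutation(int i, int n, int inverse)
{
    int m;
    if (n <= 2) return i & 1;
    m = n >> 1;
    if (!(i & m)) return split_radix_permutation(i, m, inverse) * 2;
    m >>= 1;
    if (inverse == !(i & m)) return split_radix_permutation(i, m, inverse) * 4 + 1;
    else                     return split_radix_permutation(i, m, inverse) * 4 - 1;
}

int main(void)
{
    enum { N = 16 };
    int revtab[N], i;

    /* Same construction as the patch: the permutation value may be negative,
     * so it is negated and masked with N-1 to wrap it into [0, N-1]. */
    for (i = 0; i < N; i++)
        revtab[-split_radix_permutation(i, N, 0) & (N - 1)] = i;

    for (i = 0; i < N; i++)
        printf("revtab[%2d] = %2d\n", i, revtab[i]);
    return 0;
}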
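Also not part of the commit: DECL_FFT()/pass() above are an iterative, in-place form of the split-radix recursion, in which an N-point transform is built from one N/2-point transform of the even samples and two N/4-point transforms of the x[4m+1] and x[4m+3] samples, recombined with twiddle factors. The recursive sketch below states that textbook decomposition directly and checks an 8-point transform against a naive DFT; it illustrates the decomposition, not the patch's exact butterfly or twiddle layout, and all names in it are local to the example.

/* Sketch: recursive split-radix DFT cross-checked against a naive DFT (N = 8). */
#include <complex.h>
#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* naive O(N^2) reference DFT */
static void dft(const double complex *x, double complex *X, int n)
{
    for (int k = 0; k < n; k++) {
        X[k] = 0;
        for (int m = 0; m < n; m++)
            X[k] += x[m] * cexp(-2 * M_PI * I * m * k / n);
    }
}

/* split-radix: one n/2 DFT on even samples, two n/4 DFTs on x[4m+1], x[4m+3] */
static void split_radix(const double complex *x, double complex *X, int n, int stride)
{
    if (n == 1) { X[0] = x[0]; return; }
    if (n == 2) { X[0] = x[0] + x[stride]; X[1] = x[0] - x[stride]; return; }

    double complex U[n/2], A[n/4], B[n/4];
    split_radix(x,            U, n/2, 2*stride);  /* even samples x[2m]   */
    split_radix(x +   stride, A, n/4, 4*stride);  /* odd samples  x[4m+1] */
    split_radix(x + 3*stride, B, n/4, 4*stride);  /* odd samples  x[4m+3] */

    for (int k = 0; k < n/4; k++) {
        double complex w1 = cexp(-2 * M_PI * I * k / n);      /* w^k  */
        double complex w3 = cexp(-2 * M_PI * I * 3 * k / n);  /* w^3k */
        double complex s  = w1*A[k] + w3*B[k];
        double complex d  = w1*A[k] - w3*B[k];
        X[k]         = U[k]       + s;
        X[k + n/2]   = U[k]       - s;
        X[k + n/4]   = U[k + n/4] - I*d;
        X[k + 3*n/4] = U[k + n/4] + I*d;
    }
}

int main(void)
{
    enum { N = 8 };
    double complex x[N], ref[N], out[N];
    for (int i = 0; i < N; i++)
        x[i] = i + 0.5 * I * i;            /* arbitrary test input */
    dft(x, ref, N);
    split_radix(x, out, N, 1);
    for (int i = 0; i < N; i++)
        printf("%d: %+.3f%+.3fi  vs  %+.3f%+.3fi\n", i,
               creal(out[i]), cimag(out[i]), creal(ref[i]), cimag(ref[i]));
    return 0;
}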