Diffstat (limited to 'libavcodec/arm')
-rw-r--r--  libavcodec/arm/Makefile              |   3
-rw-r--r--  libavcodec/arm/fft_fixed_init_arm.c  |  42
-rw-r--r--  libavcodec/arm/fft_fixed_neon.S      | 261
-rw-r--r--  libavcodec/arm/mdct_fixed_neon.S     | 195
4 files changed, 501 insertions(+), 0 deletions(-)
diff --git a/libavcodec/arm/Makefile b/libavcodec/arm/Makefile
index 3b77a5548d..a5abfdd128 100644
--- a/libavcodec/arm/Makefile
+++ b/libavcodec/arm/Makefile
@@ -16,6 +16,7 @@ OBJS-$(CONFIG_H264PRED)                += arm/h264pred_init_arm.o
 OBJS                                   += arm/dsputil_init_arm.o        \
                                           arm/dsputil_arm.o             \
                                           arm/fft_init_arm.o            \
+                                          arm/fft_fixed_init_arm.o      \
                                           arm/fmtconvert_init_arm.o     \
                                           arm/jrevdct_arm.o             \
                                           arm/mpegvideo_arm.o           \
@@ -41,8 +42,10 @@ OBJS-$(HAVE_IWMMXT)                    += arm/dsputil_iwmmxt.o          \
                                           arm/mpegvideo_iwmmxt.o        \
 
 NEON-OBJS-$(CONFIG_FFT)                += arm/fft_neon.o                \
+                                          arm/fft_fixed_neon.o          \
 
 NEON-OBJS-$(CONFIG_MDCT)               += arm/mdct_neon.o               \
+                                          arm/mdct_fixed_neon.o         \
 
 NEON-OBJS-$(CONFIG_RDFT)               += arm/rdft_neon.o               \
diff --git a/libavcodec/arm/fft_fixed_init_arm.c b/libavcodec/arm/fft_fixed_init_arm.c
new file mode 100644
index 0000000000..df71e7fe09
--- /dev/null
+++ b/libavcodec/arm/fft_fixed_init_arm.c
@@ -0,0 +1,42 @@
+/*
+ * Copyright (c) 2009 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define CONFIG_FFT_FLOAT 0
+#include "libavcodec/fft.h"
+
+void ff_fft_fixed_calc_neon(FFTContext *s, FFTComplex *z);
+void ff_mdct_fixed_calc_neon(FFTContext *s, FFTSample *o, const FFTSample *i);
+void ff_mdct_fixed_calcw_neon(FFTContext *s, FFTDouble *o, const FFTSample *i);
+
+av_cold void ff_fft_fixed_init_arm(FFTContext *s)
+{
+ if (HAVE_NEON) {
+ s->fft_permutation = FF_FFT_PERM_SWAP_LSBS;
+ s->fft_calc = ff_fft_fixed_calc_neon;
+
+#if CONFIG_MDCT
+ if (!s->inverse && s->mdct_bits >= 5) {
+ s->mdct_permutation = FF_MDCT_PERM_INTERLEAVE;
+ s->mdct_calc = ff_mdct_fixed_calc_neon;
+ s->mdct_calcw = ff_mdct_fixed_calcw_neon;
+ }
+#endif
+ }
+}
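
Note how this file gets its fixed-point types: defining CONFIG_FFT_FLOAT to 0 before including fft.h switches the FFT template to its integer variants. A simplified sketch of that type switch (the authoritative typedefs live in libavcodec/fft.h; the widths below are inferred from the 16-bit and 32-bit NEON loads/stores in this patch):

    #include <stdint.h>

    #if CONFIG_FFT_FLOAT
    typedef float   FFTSample;
    typedef float   FFTDouble;
    #else
    typedef int16_t FFTSample;   /* Q15 samples: vld1.16/vst1.16 below     */
    typedef int32_t FFTDouble;   /* wide products kept by mdct_fixed_calcw */
    #endif

This is also why ff_mdct_fixed_calcw_neon is declared with an FFTDouble output: it returns the post-rotation products at full 32-bit precision instead of narrowing them back to Q15.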
diff --git a/libavcodec/arm/fft_fixed_neon.S b/libavcodec/arm/fft_fixed_neon.S
new file mode 100644
index 0000000000..14884d3736
--- /dev/null
+++ b/libavcodec/arm/fft_fixed_neon.S
@@ -0,0 +1,261 @@
+/*
+ * Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "asm.S"
+
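+@ Note: every butterfly in this file uses halving adds/subs (vhadd.s16,
+@ vhsub.s16), so each pass scales by 1/2 and an n-point transform comes
+@ out scaled by 1/n.
+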
+.macro bflies d0, d1, r0, r1
+ vrev64.32 \r0, \d1 @ t5, t6, t1, t2
+ vhsub.s16 \r1, \d1, \r0 @ t1-t5, t2-t6, t5-t1, t6-t2
+ vhadd.s16 \r0, \d1, \r0 @ t1+t5, t2+t6, t5+t1, t6+t2
+ vext.16 \r1, \r1, \r1, #1 @ t2-t6, t5-t1, t6-t2, t1-t5
+ vtrn.32 \r0, \r1 @ t1+t5, t2+t6, t2-t6, t5-t1
+ @ t5, t6, t4, t3
+ vhsub.s16 \d1, \d0, \r0
+ vhadd.s16 \d0, \d0, \r0
+.endm
+
+.macro transform01 q0, q1, d3, c0, c1, r0, w0, w1
+ vrev32.16 \r0, \d3
+ vmull.s16 \w0, \d3, \c0
+ vmlal.s16 \w0, \r0, \c1
+ vshrn.s32 \d3, \w0, #15
+ bflies \q0, \q1, \w0, \w1
+.endm
+
+.macro transform2 d0, d1, d2, d3, q0, q1, c0, c1, c2, c3, \
+ r0, r1, w0, w1
+ vrev32.16 \r0, \d1
+ vrev32.16 \r1, \d3
+ vmull.s16 \w0, \d1, \c0
+ vmlal.s16 \w0, \r0, \c1
+ vmull.s16 \w1, \d3, \c2
+ vmlal.s16 \w1, \r1, \c3
+ vshrn.s32 \d1, \w0, #15
+ vshrn.s32 \d3, \w1, #15
+ bflies \q0, \q1, \w0, \w1
+.endm
+
+.macro fft4 d0, d1, r0, r1
+ vhsub.s16 \r0, \d0, \d1 @ t3, t4, t8, t7
+ vhsub.s16 \r1, \d1, \d0
+ vhadd.s16 \d0, \d0, \d1 @ t1, t2, t6, t5
+ vmov.i64 \d1, #0xffff<<32
+ vbit \r0, \r1, \d1
+ vrev64.16 \r1, \r0 @ t7, t8, t4, t3
+ vtrn.32 \r0, \r1 @ t3, t4, t7, t8
+ vtrn.32 \d0, \r0 @ t1, t2, t3, t4, t6, t5, t8, t7
+ vhsub.s16 \d1, \d0, \r0 @ r2, i2, r3, i1
+ vhadd.s16 \d0, \d0, \r0 @ r0, i0, r1, i3
+.endm
+
+.macro fft8 d0, d1, d2, d3, q0, q1, c0, c1, r0, r1, w0, w1
+ fft4 \d0, \d1, \r0, \r1
+ vtrn.32 \d0, \d1 @ z0, z2, z1, z3
+ vhadd.s16 \r0, \d2, \d3 @ t1, t2, t3, t4
+ vhsub.s16 \d3, \d2, \d3 @ z5, z7
+ vmov \d2, \r0
+ transform01 \q0, \q1, \d3, \c0, \c1, \r0, \w0, \w1
+.endm
+
+function fft4_neon
+ vld1.16 {d0-d1}, [r0,:128]
+ fft4 d0, d1, d2, d3
+ vst1.16 {d0-d1}, [r0,:128]
+ bx lr
+endfunc
+
+function fft8_neon
+ vld1.16 {d0-d3}, [r0,:128]
+ movrel r1, coefs
+ vld1.16 {d30}, [r1,:64]
+ vdup.16 d31, d30[0]
+ fft8 d0, d1, d2, d3, q0, q1, d31, d30, d20, d21, q8, q9
+ vtrn.32 d0, d1
+ vtrn.32 d2, d3
+ vst1.16 {d0-d3}, [r0,:128]
+ bx lr
+endfunc
+
+function fft16_neon
+ vld1.16 {d0-d3}, [r0,:128]!
+ vld1.16 {d4-d7}, [r0,:128]
+ movrel r1, coefs
+ sub r0, r0, #32
+ vld1.16 {d28-d31},[r1,:128]
+ vdup.16 d31, d28[0]
+ fft8 d0, d1, d2, d3, q0, q1, d31, d28, d20, d21, q8, q9
+ vswp d5, d6
+ fft4 q2, q3, q8, q9
+ vswp d5, d6
+ vtrn.32 q0, q1 @ z0, z4, z2, z6, z1, z5, z3, z7
+ vtrn.32 q2, q3 @ z8, z12,z10,z14,z9, z13,z11,z15
+ vswp d1, d2
+ vdup.16 d31, d28[0]
+ transform01 q0, q2, d5, d31, d28, d20, q8, q9
+ vdup.16 d26, d29[0]
+ vdup.16 d27, d30[0]
+ transform2 d2, d6, d3, d7, q1, q3, d26, d30, d27, d29, \
+ d20, d21, q8, q9
+ vtrn.32 q0, q1
+ vtrn.32 q2, q3
+ vst1.16 {d0-d3}, [r0,:128]!
+ vst1.16 {d4-d7}, [r0,:128]
+ bx lr
+endfunc
+
+function fft_pass_neon
+ push {r4,lr}
+ movrel lr, coefs + 24
+ vld1.16 {d30}, [lr,:64]
+ lsl r12, r2, #3
+ vmov d31, d30
+ add r3, r1, r2, lsl #2
+ mov lr, #-8
+ sub r3, r3, #2
+ mov r4, r0
+ vld1.16 {d27[]}, [r3,:16]
+ sub r3, r3, #6
+ vld1.16 {q0}, [r4,:128], r12
+ vld1.16 {q1}, [r4,:128], r12
+ vld1.16 {q2}, [r4,:128], r12
+ vld1.16 {q3}, [r4,:128], r12
+ vld1.16 {d28}, [r1,:64]!
+ vld1.16 {d29}, [r3,:64], lr
+ vswp d1, d2
+ vswp d5, d6
+ vtrn.32 d0, d1
+ vtrn.32 d4, d5
+ vdup.16 d25, d28[1]
+ vmul.s16 d27, d27, d31
+ transform01 q0, q2, d5, d25, d27, d20, q8, q9
+ b 2f
+1:
+ mov r4, r0
+ vdup.16 d26, d29[0]
+ vld1.16 {q0}, [r4,:128], r12
+ vld1.16 {q1}, [r4,:128], r12
+ vld1.16 {q2}, [r4,:128], r12
+ vld1.16 {q3}, [r4,:128], r12
+ vld1.16 {d28}, [r1,:64]!
+ vld1.16 {d29}, [r3,:64], lr
+ vswp d1, d2
+ vswp d5, d6
+ vtrn.32 d0, d1
+ vtrn.32 d4, d5
+ vdup.16 d24, d28[0]
+ vdup.16 d25, d28[1]
+ vdup.16 d27, d29[3]
+ vmul.s16 q13, q13, q15
+ transform2 d0, d4, d1, d5, q0, q2, d24, d26, d25, d27, \
+ d16, d17, q9, q10
+2:
+ vtrn.32 d2, d3
+ vtrn.32 d6, d7
+ vdup.16 d24, d28[2]
+ vdup.16 d26, d29[2]
+ vdup.16 d25, d28[3]
+ vdup.16 d27, d29[1]
+ vmul.s16 q13, q13, q15
+ transform2 d2, d6, d3, d7, q1, q3, d24, d26, d25, d27, \
+ d16, d17, q9, q10
+ vtrn.32 d0, d1
+ vtrn.32 d2, d3
+ vtrn.32 d4, d5
+ vtrn.32 d6, d7
+ vswp d1, d2
+ vswp d5, d6
+ mov r4, r0
+ vst1.16 {q0}, [r4,:128], r12
+ vst1.16 {q1}, [r4,:128], r12
+ vst1.16 {q2}, [r4,:128], r12
+ vst1.16 {q3}, [r4,:128], r12
+ add r0, r0, #16
+ subs r2, r2, #2
+ bgt 1b
+ pop {r4,pc}
+endfunc
+
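+@ Q15 twiddle factors: 23170 = round(sqrt(1/2) * 2^15),
+@ 30274 = round(cos(pi/8) * 2^15), 12540 = round(cos(3*pi/8) * 2^15).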
+#define F_SQRT1_2 23170
+#define F_COS_16_1 30274
+#define F_COS_16_3 12540
+
+const coefs, align=4
+ .short F_SQRT1_2, -F_SQRT1_2, -F_SQRT1_2, F_SQRT1_2
+ .short F_COS_16_1,-F_COS_16_1,-F_COS_16_1, F_COS_16_1
+ .short F_COS_16_3,-F_COS_16_3,-F_COS_16_3, F_COS_16_3
+ .short 1, -1, -1, 1
+endconst
+
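+@ Split-radix recursion: fftN runs one fftN/2 on the first half plus two
+@ fftN/4 transforms on the third and fourth quarters, then a final
+@ fft_pass_neon combines them using the ff_cos_N_fixed twiddle table.
+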
+.macro def_fft n, n2, n4
+function fft\n\()_neon
+ push {r4, lr}
+ mov r4, r0
+ bl fft\n2\()_neon
+ add r0, r4, #\n4*2*4
+ bl fft\n4\()_neon
+ add r0, r4, #\n4*3*4
+ bl fft\n4\()_neon
+ mov r0, r4
+ pop {r4, lr}
+ movrel r1, X(ff_cos_\n\()_fixed)
+ mov r2, #\n4/2
+ b fft_pass_neon
+endfunc
+.endm
+
+ def_fft 32, 16, 8
+ def_fft 64, 32, 16
+ def_fft 128, 64, 32
+ def_fft 256, 128, 64
+ def_fft 512, 256, 128
+ def_fft 1024, 512, 256
+ def_fft 2048, 1024, 512
+ def_fft 4096, 2048, 1024
+ def_fft 8192, 4096, 2048
+ def_fft 16384, 8192, 4096
+ def_fft 32768, 16384, 8192
+ def_fft 65536, 32768, 16384
+
+function ff_fft_fixed_calc_neon, export=1
+ ldr r2, [r0]
+ sub r2, r2, #2
+ movrel r3, fft_fixed_tab_neon
+ ldr r3, [r3, r2, lsl #2]
+ mov r0, r1
+ bx r3
+endfunc
+
+const fft_fixed_tab_neon
+ .word fft4_neon
+ .word fft8_neon
+ .word fft16_neon
+ .word fft32_neon
+ .word fft64_neon
+ .word fft128_neon
+ .word fft256_neon
+ .word fft512_neon
+ .word fft1024_neon
+ .word fft2048_neon
+ .word fft4096_neon
+ .word fft8192_neon
+ .word fft16384_neon
+ .word fft32768_neon
+ .word fft65536_neon
+endconst
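
Two conventions distinguish this from the float NEON FFT and are worth keeping in mind when reading the macros above: butterflies use halving adds/subs, so an n-point transform yields the DFT scaled by 1/n, and twiddle multiplies are Q15 with truncating narrowing (vshrn.s32 #15). A plain radix-2 scalar model of those two conventions (illustrative only: the code above is split-radix and expects input permuted with FF_FFT_PERM_SWAP_LSBS):

    #include <math.h>
    #include <stdint.h>

    typedef struct { int16_t re, im; } cplx16;

    /* In-place radix-2 DIT FFT in Q15 with per-stage halving: the result
       is DFT(z) / n, matching the vhadd/vhsub convention above. */
    static void fft_q15_ref(cplx16 *z, int nbits)
    {
        const double pi = 3.14159265358979323846;
        int n = 1 << nbits;

        /* bit-reversal permutation (the NEON code uses its own order) */
        for (int i = 1, j = 0; i < n; i++) {
            int bit = n >> 1;
            for (; j & bit; bit >>= 1)
                j ^= bit;
            j |= bit;
            if (i < j) {
                cplx16 t = z[i]; z[i] = z[j]; z[j] = t;
            }
        }

        for (int len = 2; len <= n; len <<= 1) {
            for (int i = 0; i < n; i += len) {
                for (int k = 0; k < len / 2; k++) {
                    double  a = -2.0 * pi * k / len;
                    int16_t c = (int16_t)lrint(cos(a) * 32767.0);
                    int16_t s = (int16_t)lrint(sin(a) * 32767.0);
                    cplx16  u = z[i + k], v = z[i + k + len / 2];

                    /* Q15 complex multiply, truncating like vshrn.s32 #15 */
                    int32_t tr = ((int32_t)v.re * c - (int32_t)v.im * s) >> 15;
                    int32_t ti = ((int32_t)v.im * c + (int32_t)v.re * s) >> 15;

                    /* halving add/sub, like vhadd.s16/vhsub.s16 */
                    z[i + k].re           = (int16_t)((u.re + tr) >> 1);
                    z[i + k].im           = (int16_t)((u.im + ti) >> 1);
                    z[i + k + len / 2].re = (int16_t)((u.re - tr) >> 1);
                    z[i + k + len / 2].im = (int16_t)((u.im - ti) >> 1);
                }
            }
        }
    }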
diff --git a/libavcodec/arm/mdct_fixed_neon.S b/libavcodec/arm/mdct_fixed_neon.S
new file mode 100644
index 0000000000..d219216a20
--- /dev/null
+++ b/libavcodec/arm/mdct_fixed_neon.S
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2011 Mans Rullgard <mans@mansr.com>
+ *
+ * This file is part of Libav.
+ *
+ * Libav is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * Libav is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with Libav; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "asm.S"
+
+ preserve8
+
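+@ prerot: fold the n real inputs into n/4 complex values, pre-rotate them
+@ by the interleaved cos/sin table (tcos), and scatter the results through
+@ revtab so the in-place FFT can follow directly. The folds use halving
+@ subtracts, continuing the 1/2-per-stage scaling of the FFT.
+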
+.macro prerot dst, rt
+ lsr r3, r6, #2 @ n4
+ add \rt, r4, r6, lsr #1 @ revtab + n4
+ add r9, r3, r3, lsl #1 @ n3
+ add r8, r7, r6 @ tcos + n4
+ add r3, r2, r6, lsr #1 @ in + n4
+ add r9, r2, r9, lsl #1 @ in + n3
+ sub r8, r8, #16
+ sub r10, r3, #16
+ sub r11, r9, #16
+ mov r12, #-16
+1:
+ vld2.16 {d0,d1}, [r9, :128]!
+ vld2.16 {d2,d3}, [r11,:128], r12
+ vld2.16 {d4,d5}, [r3, :128]!
+ vld2.16 {d6,d7}, [r10,:128], r12
+ vld2.16 {d16,d17},[r7, :128]! @ cos, sin
+ vld2.16 {d18,d19},[r8, :128], r12
+ vrev64.16 q1, q1
+ vrev64.16 q3, q3
+ vrev64.16 q9, q9
+ vneg.s16 d0, d0
+ vneg.s16 d2, d2
+ vneg.s16 d16, d16
+ vneg.s16 d18, d18
+ vhsub.s16 d0, d0, d3 @ re
+ vhsub.s16 d4, d7, d4 @ im
+ vhsub.s16 d6, d6, d5
+ vhsub.s16 d2, d2, d1
+ vmull.s16 q10, d0, d16
+ vmlsl.s16 q10, d4, d17
+ vmull.s16 q11, d0, d17
+ vmlal.s16 q11, d4, d16
+ vmull.s16 q12, d6, d18
+ vmlsl.s16 q12, d2, d19
+ vmull.s16 q13, d6, d19
+ vmlal.s16 q13, d2, d18
+ vshrn.s32 d0, q10, #15
+ vshrn.s32 d1, q11, #15
+ vshrn.s32 d2, q12, #15
+ vshrn.s32 d3, q13, #15
+ vzip.16 d0, d1
+ vzip.16 d2, d3
+ ldrh lr, [r4], #2
+ ldrh r2, [\rt, #-2]!
+ add lr, \dst, lr, lsl #2
+ add r2, \dst, r2, lsl #2
+ vst1.32 {d0[0]}, [lr,:32]
+ vst1.32 {d2[0]}, [r2,:32]
+ ldrh lr, [r4], #2
+ ldrh r2, [\rt, #-2]!
+ add lr, \dst, lr, lsl #2
+ add r2, \dst, r2, lsl #2
+ vst1.32 {d0[1]}, [lr,:32]
+ vst1.32 {d2[1]}, [r2,:32]
+ ldrh lr, [r4], #2
+ ldrh r2, [\rt, #-2]!
+ add lr, \dst, lr, lsl #2
+ add r2, \dst, r2, lsl #2
+ vst1.32 {d1[0]}, [lr,:32]
+ vst1.32 {d3[0]}, [r2,:32]
+ ldrh lr, [r4], #2
+ ldrh r2, [\rt, #-2]!
+ add lr, \dst, lr, lsl #2
+ add r2, \dst, r2, lsl #2
+ vst1.32 {d1[1]}, [lr,:32]
+ vst1.32 {d3[1]}, [r2,:32]
+ subs r6, r6, #32
+ bgt 1b
+.endm
+
+function ff_mdct_fixed_calc_neon, export=1
+ push {r1,r4-r11,lr}
+
+ ldr r4, [r0, #8] @ revtab
+ ldr r6, [r0, #16] @ mdct_size; n
+ ldr r7, [r0, #24] @ tcos
+
+ prerot r1, r5
+
+ mov r4, r0
+ bl X(ff_fft_fixed_calc_neon)
+
+ pop {r5}
+ mov r12, #-16
+ ldr r6, [r4, #16] @ mdct_size; n
+ ldr r7, [r4, #24] @ tcos
+ add r5, r5, r6, lsr #1
+ add r7, r7, r6, lsr #1
+ sub r1, r5, #16
+ sub r2, r7, #16
+1:
+ vld2.16 {d4,d5}, [r7,:128]!
+ vld2.16 {d6,d7}, [r2,:128], r12
+ vld2.16 {d0,d1}, [r5,:128]
+ vld2.16 {d2,d3}, [r1,:128]
+ vrev64.16 q3, q3
+ vrev64.16 q1, q1
+ vneg.s16 q3, q3
+ vneg.s16 q2, q2
+ vmull.s16 q11, d2, d6
+ vmlal.s16 q11, d3, d7
+ vmull.s16 q8, d0, d5
+ vmlsl.s16 q8, d1, d4
+ vmull.s16 q9, d0, d4
+ vmlal.s16 q9, d1, d5
+ vmull.s16 q10, d2, d7
+ vmlsl.s16 q10, d3, d6
+ vshrn.s32 d0, q11, #15
+ vshrn.s32 d1, q8, #15
+ vshrn.s32 d2, q9, #15
+ vshrn.s32 d3, q10, #15
+ vrev64.16 q0, q0
+ vst2.16 {d2,d3}, [r5,:128]!
+ vst2.16 {d0,d1}, [r1,:128], r12
+ subs r6, r6, #32
+ bgt 1b
+
+ pop {r4-r11,pc}
+endfunc
+
+function ff_mdct_fixed_calcw_neon, export=1
+ push {r1,r4-r11,lr}
+
+ ldrd r4, r5, [r0, #8] @ revtab, tmp_buf
+ ldr r6, [r0, #16] @ mdct_size; n
+ ldr r7, [r0, #24] @ tcos
+
+ prerot r5, r1
+
+ mov r4, r0
+ mov r1, r5
+ bl X(ff_fft_fixed_calc_neon)
+
+ pop {r7}
+ mov r12, #-16
+ ldr r6, [r4, #16] @ mdct_size; n
+ ldr r9, [r4, #24] @ tcos
+ add r5, r5, r6, lsr #1
+ add r7, r7, r6
+ add r9, r9, r6, lsr #1
+ sub r3, r5, #16
+ sub r1, r7, #16
+ sub r2, r9, #16
+1:
+ vld2.16 {d4,d5}, [r9,:128]!
+ vld2.16 {d6,d7}, [r2,:128], r12
+ vld2.16 {d0,d1}, [r5,:128]!
+ vld2.16 {d2,d3}, [r3,:128], r12
+ vrev64.16 q3, q3
+ vrev64.16 q1, q1
+ vneg.s16 q3, q3
+ vneg.s16 q2, q2
+ vmull.s16 q8, d2, d6
+ vmlal.s16 q8, d3, d7
+ vmull.s16 q9, d0, d5
+ vmlsl.s16 q9, d1, d4
+ vmull.s16 q10, d0, d4
+ vmlal.s16 q10, d1, d5
+ vmull.s16 q11, d2, d7
+ vmlsl.s16 q11, d3, d6
+ vrev64.32 q8, q8
+ vrev64.32 q9, q9
+ vst2.32 {q10,q11},[r7,:128]!
+ vst2.32 {d16,d18},[r1,:128], r12
+ vst2.32 {d17,d19},[r1,:128], r12
+ subs r6, r6, #32
+ bgt 1b
+
+ pop {r4-r11,pc}
+endfunc
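
The two MDCT entry points share prerot and the FFT; they differ only in the post-rotation. ff_mdct_fixed_calc_neon narrows each 32-bit product back to Q15 with vshrn.s32 #15, while ff_mdct_fixed_calcw_neon stores the full 32-bit products (vst2.32) into the FFTDouble output for callers that want the extra precision. A one-line scalar model of that narrowing step (helper name hypothetical):

    #include <stdint.h>

    /* vshrn.s32 #15: arithmetic shift right by 15, keep the low 16 bits.
       No saturation -- that would be vqshrn. */
    static inline int16_t narrow_q15(int32_t x)
    {
        return (int16_t)(x >> 15);
    }

In effect, calc() stores narrow_q15(prod) where calcw() stores prod itself.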