Diffstat (limited to 'libswresample/aarch64')
-rw-r--r--   libswresample/aarch64/Makefile                 7
-rw-r--r--   libswresample/aarch64/audio_convert_init.c    67
-rw-r--r--   libswresample/aarch64/audio_convert_neon.S   363
-rw-r--r--   libswresample/aarch64/neontest.c              29
-rw-r--r--   libswresample/aarch64/resample.S              77
-rw-r--r--   libswresample/aarch64/resample_init.c        120
6 files changed, 663 insertions, 0 deletions
diff --git a/libswresample/aarch64/Makefile b/libswresample/aarch64/Makefile
new file mode 100644
index 0000000000..5c34f8d949
--- /dev/null
+++ b/libswresample/aarch64/Makefile
@@ -0,0 +1,7 @@
+OBJS        += aarch64/audio_convert_init.o         \
+               aarch64/resample_init.o
+
+OBJS-$(CONFIG_NEON_CLOBBER_TEST) += aarch64/neontest.o
+
+NEON-OBJS   += aarch64/audio_convert_neon.o         \
+               aarch64/resample.o
diff --git a/libswresample/aarch64/audio_convert_init.c b/libswresample/aarch64/audio_convert_init.c
new file mode 100644
index 0000000000..60e24adb1c
--- /dev/null
+++ b/libswresample/aarch64/audio_convert_init.c
@@ -0,0 +1,67 @@
+/*
+ * This file is part of libswresample.
+ *
+ * libswresample is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * libswresample is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with libswresample; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdint.h>
+
+#include "config.h"
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
+#include "libavutil/aarch64/cpu.h"
+#include "libavutil/samplefmt.h"
+#include "libswresample/swresample_internal.h"
+#include "libswresample/audioconvert.h"
+
+void swri_oldapi_conv_flt_to_s16_neon(int16_t *dst, const float *src, int len);
+void swri_oldapi_conv_fltp_to_s16_2ch_neon(int16_t *dst, float *const *src, int len, int channels);
+void swri_oldapi_conv_fltp_to_s16_nch_neon(int16_t *dst, float *const *src, int len, int channels);
+
+static void conv_flt_to_s16_neon(uint8_t **dst, const uint8_t **src, int len){
+    swri_oldapi_conv_flt_to_s16_neon((int16_t*)*dst, (const float*)*src, len);
+}
+
+static void conv_fltp_to_s16_2ch_neon(uint8_t **dst, const uint8_t **src, int len){
+    swri_oldapi_conv_fltp_to_s16_2ch_neon((int16_t*)*dst, (float *const*)src, len, 2);
+}
+
+static void conv_fltp_to_s16_nch_neon(uint8_t **dst, const uint8_t **src, int len){
+    int channels;
+    for(channels=3; channels<SWR_CH_MAX && src[channels]; channels++)
+        ;
+    swri_oldapi_conv_fltp_to_s16_nch_neon((int16_t*)*dst, (float *const*)src, len, channels);
+}
+
+av_cold void swri_audio_convert_init_aarch64(struct AudioConvert *ac,
+                                             enum AVSampleFormat out_fmt,
+                                             enum AVSampleFormat in_fmt,
+                                             int channels)
+{
+    int cpu_flags = av_get_cpu_flags();
+
+    ac->simd_f= NULL;
+
+    if (have_neon(cpu_flags)) {
+        if(out_fmt == AV_SAMPLE_FMT_S16 && in_fmt == AV_SAMPLE_FMT_FLT || out_fmt == AV_SAMPLE_FMT_S16P && in_fmt == AV_SAMPLE_FMT_FLTP)
+            ac->simd_f = conv_flt_to_s16_neon;
+        if(out_fmt == AV_SAMPLE_FMT_S16 && in_fmt == AV_SAMPLE_FMT_FLTP && channels == 2)
+            ac->simd_f = conv_fltp_to_s16_2ch_neon;
+        if(out_fmt == AV_SAMPLE_FMT_S16 && in_fmt == AV_SAMPLE_FMT_FLTP && channels > 2)
+            ac->simd_f = conv_fltp_to_s16_nch_neon;
+        if(ac->simd_f)
+            ac->in_simd_align_mask = ac->out_simd_align_mask = 15;
+    }
+}
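
Note: up to rounding detail, the float-to-s16 conversions selected above are the NEON counterparts of a plain scalar loop like the following sketch (illustrative only; conv_flt_to_s16_ref is a made-up name and not part of this patch):

#include <math.h>
#include <stdint.h>

/* Scalar model of the packed float -> s16 path: scale to Q15, round, and
 * saturate, which is roughly what the fcvtzs #31 / sqrshrn #16 pair in the
 * NEON code computes (assuming the usual 32768 scaling). */
static void conv_flt_to_s16_ref(int16_t *dst, const float *src, int len)
{
    for (int i = 0; i < len; i++) {
        long v = lrintf(src[i] * (1 << 15));
        if (v < -32768) v = -32768;
        if (v >  32767) v =  32767;
        dst[i] = (int16_t)v;
    }
}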
diff --git a/libswresample/aarch64/audio_convert_neon.S b/libswresample/aarch64/audio_convert_neon.S
new file mode 100644
index 0000000000..74feff448a
--- /dev/null
+++ b/libswresample/aarch64/audio_convert_neon.S
@@ -0,0 +1,363 @@
+/*
+ * Copyright (c) 2008 Mans Rullgard <mans@mansr.com>
+ * Copyright (c) 2014 Janne Grunau <janne-libav@jannau.net>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+#include "libavutil/aarch64/asm.S"
+
+function swri_oldapi_conv_flt_to_s16_neon, export=1
+ subs x2, x2, #8
+ ld1 {v0.4s}, [x1], #16
+ fcvtzs v4.4s, v0.4s, #31
+ ld1 {v1.4s}, [x1], #16
+ fcvtzs v5.4s, v1.4s, #31
+ b.eq 3f
+ ands x12, x2, #~15
+ b.eq 2f
+1: subs x12, x12, #16
+ sqrshrn v4.4h, v4.4s, #16
+ ld1 {v2.4s}, [x1], #16
+ fcvtzs v6.4s, v2.4s, #31
+ sqrshrn2 v4.8h, v5.4s, #16
+ ld1 {v3.4s}, [x1], #16
+ fcvtzs v7.4s, v3.4s, #31
+ sqrshrn v6.4h, v6.4s, #16
+ st1 {v4.8h}, [x0], #16
+ sqrshrn2 v6.8h, v7.4s, #16
+ ld1 {v0.4s}, [x1], #16
+ fcvtzs v4.4s, v0.4s, #31
+ ld1 {v1.4s}, [x1], #16
+ fcvtzs v5.4s, v1.4s, #31
+ st1 {v6.8h}, [x0], #16
+ b.ne 1b
+ ands x2, x2, #15
+ b.eq 3f
+2: ld1 {v2.4s}, [x1], #16
+ sqrshrn v4.4h, v4.4s, #16
+ fcvtzs v6.4s, v2.4s, #31
+ ld1 {v3.4s}, [x1], #16
+ sqrshrn2 v4.8h, v5.4s, #16
+ fcvtzs v7.4s, v3.4s, #31
+ sqrshrn v6.4h, v6.4s, #16
+ st1 {v4.8h}, [x0], #16
+ sqrshrn2 v6.8h, v7.4s, #16
+ st1 {v6.8h}, [x0]
+ ret
+3: sqrshrn v4.4h, v4.4s, #16
+ sqrshrn2 v4.8h, v5.4s, #16
+ st1 {v4.8h}, [x0]
+ ret
+endfunc
+
+function swri_oldapi_conv_fltp_to_s16_2ch_neon, export=1
+ ldp x4, x5, [x1]
+ subs x2, x2, #8
+ ld1 {v0.4s}, [x4], #16
+ fcvtzs v4.4s, v0.4s, #31
+ ld1 {v1.4s}, [x4], #16
+ fcvtzs v5.4s, v1.4s, #31
+ ld1 {v2.4s}, [x5], #16
+ fcvtzs v6.4s, v2.4s, #31
+ ld1 {v3.4s}, [x5], #16
+ fcvtzs v7.4s, v3.4s, #31
+ b.eq 3f
+ ands x12, x2, #~15
+ b.eq 2f
+1: subs x12, x12, #16
+ ld1 {v16.4s}, [x4], #16
+ fcvtzs v20.4s, v16.4s, #31
+ sri v6.4s, v4.4s, #16
+ ld1 {v17.4s}, [x4], #16
+ fcvtzs v21.4s, v17.4s, #31
+ ld1 {v18.4s}, [x5], #16
+ fcvtzs v22.4s, v18.4s, #31
+ ld1 {v19.4s}, [x5], #16
+ sri v7.4s, v5.4s, #16
+ st1 {v6.4s}, [x0], #16
+ fcvtzs v23.4s, v19.4s, #31
+ st1 {v7.4s}, [x0], #16
+ sri v22.4s, v20.4s, #16
+ ld1 {v0.4s}, [x4], #16
+ sri v23.4s, v21.4s, #16
+ st1 {v22.4s}, [x0], #16
+ fcvtzs v4.4s, v0.4s, #31
+ ld1 {v1.4s}, [x4], #16
+ fcvtzs v5.4s, v1.4s, #31
+ ld1 {v2.4s}, [x5], #16
+ fcvtzs v6.4s, v2.4s, #31
+ ld1 {v3.4s}, [x5], #16
+ fcvtzs v7.4s, v3.4s, #31
+ st1 {v23.4s}, [x0], #16
+ b.ne 1b
+ ands x2, x2, #15
+ b.eq 3f
+2: sri v6.4s, v4.4s, #16
+ ld1 {v0.4s}, [x4], #16
+ fcvtzs v0.4s, v0.4s, #31
+ ld1 {v1.4s}, [x4], #16
+ fcvtzs v1.4s, v1.4s, #31
+ ld1 {v2.4s}, [x5], #16
+ fcvtzs v2.4s, v2.4s, #31
+ sri v7.4s, v5.4s, #16
+ ld1 {v3.4s}, [x5], #16
+ fcvtzs v3.4s, v3.4s, #31
+ sri v2.4s, v0.4s, #16
+ st1 {v6.4s,v7.4s}, [x0], #32
+ sri v3.4s, v1.4s, #16
+ st1 {v2.4s,v3.4s}, [x0], #32
+ ret
+3: sri v6.4s, v4.4s, #16
+ sri v7.4s, v5.4s, #16
+ st1 {v6.4s,v7.4s}, [x0]
+ ret
+endfunc
+
+function swri_oldapi_conv_fltp_to_s16_nch_neon, export=1
+ cmp w3, #2
+ b.eq X(swri_oldapi_conv_fltp_to_s16_2ch_neon)
+ b.gt 1f
+ ldr x1, [x1]
+ b X(swri_oldapi_conv_flt_to_s16_neon)
+1:
+ cmp w3, #4
+ lsl x12, x3, #1
+ b.lt 4f
+
+5: // 4 channels
+ ldp x4, x5, [x1], #16
+ ldp x6, x7, [x1], #16
+ mov w9, w2
+ mov x8, x0
+ ld1 {v4.4s}, [x4], #16
+ fcvtzs v4.4s, v4.4s, #31
+ ld1 {v5.4s}, [x5], #16
+ fcvtzs v5.4s, v5.4s, #31
+ ld1 {v6.4s}, [x6], #16
+ fcvtzs v6.4s, v6.4s, #31
+ ld1 {v7.4s}, [x7], #16
+ fcvtzs v7.4s, v7.4s, #31
+6:
+ subs w9, w9, #8
+ ld1 {v0.4s}, [x4], #16
+ fcvtzs v0.4s, v0.4s, #31
+ sri v5.4s, v4.4s, #16
+ ld1 {v1.4s}, [x5], #16
+ fcvtzs v1.4s, v1.4s, #31
+ sri v7.4s, v6.4s, #16
+ ld1 {v2.4s}, [x6], #16
+ fcvtzs v2.4s, v2.4s, #31
+ zip1 v16.4s, v5.4s, v7.4s
+ ld1 {v3.4s}, [x7], #16
+ fcvtzs v3.4s, v3.4s, #31
+ zip2 v17.4s, v5.4s, v7.4s
+ st1 {v16.d}[0], [x8], x12
+ sri v1.4s, v0.4s, #16
+ st1 {v16.d}[1], [x8], x12
+ sri v3.4s, v2.4s, #16
+ st1 {v17.d}[0], [x8], x12
+ zip1 v18.4s, v1.4s, v3.4s
+ st1 {v17.d}[1], [x8], x12
+ zip2 v19.4s, v1.4s, v3.4s
+ b.eq 7f
+ ld1 {v4.4s}, [x4], #16
+ fcvtzs v4.4s, v4.4s, #31
+ st1 {v18.d}[0], [x8], x12
+ ld1 {v5.4s}, [x5], #16
+ fcvtzs v5.4s, v5.4s, #31
+ st1 {v18.d}[1], [x8], x12
+ ld1 {v6.4s}, [x6], #16
+ fcvtzs v6.4s, v6.4s, #31
+ st1 {v19.d}[0], [x8], x12
+ ld1 {v7.4s}, [x7], #16
+ fcvtzs v7.4s, v7.4s, #31
+ st1 {v19.d}[1], [x8], x12
+ b 6b
+7:
+ st1 {v18.d}[0], [x8], x12
+ st1 {v18.d}[1], [x8], x12
+ st1 {v19.d}[0], [x8], x12
+ st1 {v19.d}[1], [x8], x12
+ subs w3, w3, #4
+ b.eq end
+ cmp w3, #4
+ add x0, x0, #8
+ b.ge 5b
+
+4: // 2 channels
+ cmp w3, #2
+ b.lt 4f
+ ldp x4, x5, [x1], #16
+ mov w9, w2
+ mov x8, x0
+ tst w9, #8
+ ld1 {v4.4s}, [x4], #16
+ fcvtzs v4.4s, v4.4s, #31
+ ld1 {v5.4s}, [x5], #16
+ fcvtzs v5.4s, v5.4s, #31
+ ld1 {v6.4s}, [x4], #16
+ fcvtzs v6.4s, v6.4s, #31
+ ld1 {v7.4s}, [x5], #16
+ fcvtzs v7.4s, v7.4s, #31
+ b.eq 6f
+ subs w9, w9, #8
+ b.eq 7f
+ sri v5.4s, v4.4s, #16
+ ld1 {v4.4s}, [x4], #16
+ fcvtzs v4.4s, v4.4s, #31
+ st1 {v5.s}[0], [x8], x12
+ sri v7.4s, v6.4s, #16
+ st1 {v5.s}[1], [x8], x12
+ ld1 {v6.4s}, [x4], #16
+ fcvtzs v6.4s, v6.4s, #31
+ st1 {v5.s}[2], [x8], x12
+ st1 {v5.s}[3], [x8], x12
+ st1 {v7.s}[0], [x8], x12
+ st1 {v7.s}[1], [x8], x12
+ ld1 {v5.4s}, [x5], #16
+ fcvtzs v5.4s, v5.4s, #31
+ st1 {v7.s}[2], [x8], x12
+ st1 {v7.s}[3], [x8], x12
+ ld1 {v7.4s}, [x5], #16
+ fcvtzs v7.4s, v7.4s, #31
+6:
+ subs w9, w9, #16
+ ld1 {v0.4s}, [x4], #16
+ sri v5.4s, v4.4s, #16
+ fcvtzs v0.4s, v0.4s, #31
+ ld1 {v1.4s}, [x5], #16
+ sri v7.4s, v6.4s, #16
+ st1 {v5.s}[0], [x8], x12
+ st1 {v5.s}[1], [x8], x12
+ fcvtzs v1.4s, v1.4s, #31
+ st1 {v5.s}[2], [x8], x12
+ st1 {v5.s}[3], [x8], x12
+ ld1 {v2.4s}, [x4], #16
+ st1 {v7.s}[0], [x8], x12
+ fcvtzs v2.4s, v2.4s, #31
+ st1 {v7.s}[1], [x8], x12
+ ld1 {v3.4s}, [x5], #16
+ st1 {v7.s}[2], [x8], x12
+ fcvtzs v3.4s, v3.4s, #31
+ st1 {v7.s}[3], [x8], x12
+ sri v1.4s, v0.4s, #16
+ sri v3.4s, v2.4s, #16
+ b.eq 6f
+ ld1 {v4.4s}, [x4], #16
+ st1 {v1.s}[0], [x8], x12
+ fcvtzs v4.4s, v4.4s, #31
+ st1 {v1.s}[1], [x8], x12
+ ld1 {v5.4s}, [x5], #16
+ st1 {v1.s}[2], [x8], x12
+ fcvtzs v5.4s, v5.4s, #31
+ st1 {v1.s}[3], [x8], x12
+ ld1 {v6.4s}, [x4], #16
+ st1 {v3.s}[0], [x8], x12
+ fcvtzs v6.4s, v6.4s, #31
+ st1 {v3.s}[1], [x8], x12
+ ld1 {v7.4s}, [x5], #16
+ st1 {v3.s}[2], [x8], x12
+ fcvtzs v7.4s, v7.4s, #31
+ st1 {v3.s}[3], [x8], x12
+ b.gt 6b
+6:
+ st1 {v1.s}[0], [x8], x12
+ st1 {v1.s}[1], [x8], x12
+ st1 {v1.s}[2], [x8], x12
+ st1 {v1.s}[3], [x8], x12
+ st1 {v3.s}[0], [x8], x12
+ st1 {v3.s}[1], [x8], x12
+ st1 {v3.s}[2], [x8], x12
+ st1 {v3.s}[3], [x8], x12
+ b 8f
+7:
+ sri v5.4s, v4.4s, #16
+ sri v7.4s, v6.4s, #16
+ st1 {v5.s}[0], [x8], x12
+ st1 {v5.s}[1], [x8], x12
+ st1 {v5.s}[2], [x8], x12
+ st1 {v5.s}[3], [x8], x12
+ st1 {v7.s}[0], [x8], x12
+ st1 {v7.s}[1], [x8], x12
+ st1 {v7.s}[2], [x8], x12
+ st1 {v7.s}[3], [x8], x12
+8:
+ subs w3, w3, #2
+ add x0, x0, #4
+ b.eq end
+
+4: // 1 channel
+ ldr x4, [x1]
+ tst w2, #8
+ mov w9, w2
+ mov x5, x0
+ ld1 {v0.4s}, [x4], #16
+ fcvtzs v0.4s, v0.4s, #31
+ ld1 {v1.4s}, [x4], #16
+ fcvtzs v1.4s, v1.4s, #31
+ b.ne 8f
+6:
+ subs w9, w9, #16
+ ld1 {v2.4s}, [x4], #16
+ fcvtzs v2.4s, v2.4s, #31
+ ld1 {v3.4s}, [x4], #16
+ fcvtzs v3.4s, v3.4s, #31
+ st1 {v0.h}[1], [x5], x12
+ st1 {v0.h}[3], [x5], x12
+ st1 {v0.h}[5], [x5], x12
+ st1 {v0.h}[7], [x5], x12
+ st1 {v1.h}[1], [x5], x12
+ st1 {v1.h}[3], [x5], x12
+ st1 {v1.h}[5], [x5], x12
+ st1 {v1.h}[7], [x5], x12
+ b.eq 7f
+ ld1 {v0.4s}, [x4], #16
+ fcvtzs v0.4s, v0.4s, #31
+ ld1 {v1.4s}, [x4], #16
+ fcvtzs v1.4s, v1.4s, #31
+7:
+ st1 {v2.h}[1], [x5], x12
+ st1 {v2.h}[3], [x5], x12
+ st1 {v2.h}[5], [x5], x12
+ st1 {v2.h}[7], [x5], x12
+ st1 {v3.h}[1], [x5], x12
+ st1 {v3.h}[3], [x5], x12
+ st1 {v3.h}[5], [x5], x12
+ st1 {v3.h}[7], [x5], x12
+ b.gt 6b
+ ret
+8:
+ subs w9, w9, #8
+ st1 {v0.h}[1], [x5], x12
+ st1 {v0.h}[3], [x5], x12
+ st1 {v0.h}[5], [x5], x12
+ st1 {v0.h}[7], [x5], x12
+ st1 {v1.h}[1], [x5], x12
+ st1 {v1.h}[3], [x5], x12
+ st1 {v1.h}[5], [x5], x12
+ st1 {v1.h}[7], [x5], x12
+ b.eq end
+ ld1 {v0.4s}, [x4], #16
+ fcvtzs v0.4s, v0.4s, #31
+ ld1 {v1.4s}, [x4], #16
+ fcvtzs v1.4s, v1.4s, #31
+ b 6b
+end:
+ ret
+endfunc
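
Note: the n-channel routine above writes interleaved output, so consecutive samples of one channel are channels*2 bytes apart; that is the x12 = w3 << 1 byte stride used by the st1 stores. A scalar sketch of the same mapping (hypothetical helper, not part of the patch):

#include <math.h>
#include <stdint.h>

/* Planar float in, interleaved s16 out: channel ch of sample i lands at
 * dst[i * channels + ch], i.e. a stride of 2*channels bytes per sample,
 * matching the x12 stride in the NEON stores (rounding details aside). */
static void conv_fltp_to_s16_nch_ref(int16_t *dst, float *const *src,
                                     int len, int channels)
{
    for (int ch = 0; ch < channels; ch++) {
        for (int i = 0; i < len; i++) {
            long v = lrintf(src[ch][i] * (1 << 15));
            if (v < -32768) v = -32768;
            if (v >  32767) v =  32767;
            dst[i * channels + ch] = (int16_t)v;
        }
    }
}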
diff --git a/libswresample/aarch64/neontest.c b/libswresample/aarch64/neontest.c
new file mode 100644
index 0000000000..85c71bf4c9
--- /dev/null
+++ b/libswresample/aarch64/neontest.c
@@ -0,0 +1,29 @@
+/*
+ * check NEON registers for clobbers
+ * Copyright (c) 2013 Martin Storsjo
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libswresample/swresample.h"
+#include "libavutil/aarch64/neontest.h"
+
+wrap(swr_convert(struct SwrContext *s, uint8_t **out, int out_count,
+                 const uint8_t **in , int in_count))
+{
+    testneonclobbers(swr_convert, s, out, out_count, in, in_count);
+}
diff --git a/libswresample/aarch64/resample.S b/libswresample/aarch64/resample.S
new file mode 100644
index 0000000000..bbad619a81
--- /dev/null
+++ b/libswresample/aarch64/resample.S
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2017 Matthieu Bouron <matthieu.bouron gmail.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/aarch64/asm.S"
+
+function ff_resample_common_apply_filter_x4_float_neon, export=1
+ movi v0.4S, #0 // accumulator
+1: ld1 {v1.4S}, [x1], #16 // src[0..3]
+ ld1 {v2.4S}, [x2], #16 // filter[0..3]
+ fmla v0.4S, v1.4S, v2.4S // accumulator += src[0..3] * filter[0..3]
+ subs w3, w3, #4 // filter_length -= 4
+ b.gt 1b // loop until filter_length
+ faddp v0.4S, v0.4S, v0.4S // pair adding of the 4x32-bit accumulated values
+ faddp v0.4S, v0.4S, v0.4S // pair adding of the 4x32-bit accumulated values
+ st1 {v0.S}[0], [x0], #4 // write accumulator
+ ret
+endfunc
+
+function ff_resample_common_apply_filter_x8_float_neon, export=1
+ movi v0.4S, #0 // accumulator
+1: ld1 {v1.4S}, [x1], #16 // src[0..3]
+ ld1 {v2.4S}, [x2], #16 // filter[0..3]
+ ld1 {v3.4S}, [x1], #16 // src[4..7]
+ ld1 {v4.4S}, [x2], #16 // filter[4..7]
+ fmla v0.4S, v1.4S, v2.4S // accumulator += src[0..3] * filter[0..3]
+ fmla v0.4S, v3.4S, v4.4S // accumulator += src[4..7] * filter[4..7]
+ subs w3, w3, #8 // filter_length -= 8
+ b.gt 1b // loop until filter_length
+ faddp v0.4S, v0.4S, v0.4S // pair adding of the 4x32-bit accumulated values
+ faddp v0.4S, v0.4S, v0.4S // pair adding of the 4x32-bit accumulated values
+ st1 {v0.S}[0], [x0], #4 // write accumulator
+ ret
+endfunc
+
+function ff_resample_common_apply_filter_x4_s16_neon, export=1
+ movi v0.4S, #0 // accumulator
+1: ld1 {v1.4H}, [x1], #8 // src[0..3]
+ ld1 {v2.4H}, [x2], #8 // filter[0..3]
+ smlal v0.4S, v1.4H, v2.4H // accumulator += src[0..3] * filter[0..3]
+ subs w3, w3, #4 // filter_length -= 4
+ b.gt 1b // loop until filter_length
+ addp v0.4S, v0.4S, v0.4S // pair adding of the 4x32-bit accumulated values
+ addp v0.4S, v0.4S, v0.4S // pair adding of the 4x32-bit accumulated values
+ st1 {v0.S}[0], [x0], #4 // write accumulator
+ ret
+endfunc
+
+function ff_resample_common_apply_filter_x8_s16_neon, export=1
+ movi v0.4S, #0 // accumulator
+1: ld1 {v1.8H}, [x1], #16 // src[0..7]
+ ld1 {v2.8H}, [x2], #16 // filter[0..7]
+ smlal v0.4S, v1.4H, v2.4H // accumulator += src[0..3] * filter[0..3]
+ smlal2 v0.4S, v1.8H, v2.8H // accumulator += src[4..7] * filter[4..7]
+ subs w3, w3, #8 // filter_length -= 8
+ b.gt 1b // loop until filter_length
+ addp v0.4S, v0.4S, v0.4S // pair adding of the 4x32-bit accumulated values
+ addp v0.4S, v0.4S, v0.4S // pair adding of the 4x32-bit accumulated values
+ st1 {v0.S}[0], [x0], #4 // write accumulator
+ ret
+endfunc
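
Note: each apply_filter helper above computes one dot product of src and filter over a filter length that is a multiple of 4 (or 8) and stores the result to *acc; in C terms it is roughly the following sketch (illustrative, not part of the patch; the float variant may differ only in summation order):

/* C model of ff_resample_common_apply_filter_x4_float_neon: the NEON
 * version does the same sum four taps at a time and then reduces the
 * vector accumulator with two faddp instructions. */
static void apply_filter_x4_float_ref(float *acc, const float *src,
                                      const float *filter, int length)
{
    float sum = 0.0f;
    for (int i = 0; i < length; i++)   /* length is a multiple of 4 here */
        sum += src[i] * filter[i];
    *acc = sum;
}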
diff --git a/libswresample/aarch64/resample_init.c b/libswresample/aarch64/resample_init.c
new file mode 100644
index 0000000000..d01ec18756
--- /dev/null
+++ b/libswresample/aarch64/resample_init.c
@@ -0,0 +1,120 @@
+/*
+ * Audio resampling
+ *
+ * Copyright (c) 2004-2012 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#include "libavutil/cpu.h"
+#include "libavutil/avassert.h"
+
+#include "libavutil/aarch64/cpu.h"
+#include "libswresample/resample.h"
+
+#define DECLARE_RESAMPLE_COMMON_TEMPLATE(TYPE, DELEM, FELEM, FELEM2, OUT) \
+ \
+void ff_resample_common_apply_filter_x4_##TYPE##_neon(FELEM2 *acc, const DELEM *src, \
+                                                      const FELEM *filter, int length); \
+ \
+void ff_resample_common_apply_filter_x8_##TYPE##_neon(FELEM2 *acc, const DELEM *src, \
+                                                      const FELEM *filter, int length); \
+ \
+static int ff_resample_common_##TYPE##_neon(ResampleContext *c, void *dest, const void *source, \
+                                            int n, int update_ctx) \
+{ \
+    DELEM *dst = dest; \
+    const DELEM *src = source; \
+    int dst_index; \
+    int index = c->index; \
+    int frac = c->frac; \
+    int sample_index = 0; \
+    int x4_aligned_filter_length = c->filter_length & ~3; \
+    int x8_aligned_filter_length = c->filter_length & ~7; \
+ \
+    while (index >= c->phase_count) { \
+        sample_index++; \
+        index -= c->phase_count; \
+    } \
+ \
+    for (dst_index = 0; dst_index < n; dst_index++) { \
+        FELEM *filter = ((FELEM *) c->filter_bank) + c->filter_alloc * index; \
+ \
+        FELEM2 val = 0; \
+        int i = 0; \
+        if (x8_aligned_filter_length >= 8) { \
+            ff_resample_common_apply_filter_x8_##TYPE##_neon(&val, &src[sample_index], \
+                                                             filter, x8_aligned_filter_length); \
+            i += x8_aligned_filter_length; \
+ \
+        } else if (x4_aligned_filter_length >= 4) { \
+            ff_resample_common_apply_filter_x4_##TYPE##_neon(&val, &src[sample_index], \
+                                                             filter, x4_aligned_filter_length); \
+            i += x4_aligned_filter_length; \
+        } \
+        for (; i < c->filter_length; i++) { \
+            val += src[sample_index + i] * (FELEM2)filter[i]; \
+        } \
+        OUT(dst[dst_index], val); \
+ \
+        frac += c->dst_incr_mod; \
+        index += c->dst_incr_div; \
+        if (frac >= c->src_incr) { \
+            frac -= c->src_incr; \
+            index++; \
+        } \
+ \
+        while (index >= c->phase_count) { \
+            sample_index++; \
+            index -= c->phase_count; \
+        } \
+    } \
+ \
+    if (update_ctx) { \
+        c->frac = frac; \
+        c->index = index; \
+    } \
+ \
+    return sample_index; \
+} \
+
+#define OUT(d, v) d = v
+DECLARE_RESAMPLE_COMMON_TEMPLATE(float, float, float, float, OUT)
+#undef OUT
+
+#define OUT(d, v) (v) = ((v) + (1<<(14)))>>15; (d) = av_clip_int16(v)
+DECLARE_RESAMPLE_COMMON_TEMPLATE(s16, int16_t, int16_t, int32_t, OUT)
+#undef OUT
+
+av_cold void swri_resample_dsp_aarch64_init(ResampleContext *c)
+{
+    int cpu_flags = av_get_cpu_flags();
+
+    if (!have_neon(cpu_flags))
+        return;
+
+    switch(c->format) {
+    case AV_SAMPLE_FMT_FLTP:
+        c->dsp.resample_common = ff_resample_common_float_neon;
+        break;
+    case AV_SAMPLE_FMT_S16P:
+        c->dsp.resample_common = ff_resample_common_s16_neon;
+        break;
+    }
+}
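
Note: the OUT() macro of the s16 template instantiation converts the 32-bit accumulator back to a 16-bit sample with a rounding shift and saturation, which (assuming Q15 filter coefficients) is equivalent to this small helper (hypothetical name, shown only to make the arithmetic concrete):

#include <stdint.h>

/* Same arithmetic as the s16 OUT() above: round to nearest by adding
 * 1 << 14 before the >> 15, then clip to the int16_t range like
 * av_clip_int16().  E.g. q15_round_clip(114688) == 4. */
static inline int16_t q15_round_clip(int32_t v)
{
    v = (v + (1 << 14)) >> 15;
    if (v < INT16_MIN) v = INT16_MIN;
    if (v > INT16_MAX) v = INT16_MAX;
    return (int16_t)v;
}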